Index: stable/9/sys/dev/cxgb/common/cxgb_ael1002.c
===================================================================
--- stable/9/sys/dev/cxgb/common/cxgb_ael1002.c	(revision 277343)
+++ stable/9/sys/dev/cxgb/common/cxgb_ael1002.c	(revision 277344)
@@ -1,2296 +1,2303 @@
/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <cxgb_include.h>

#undef msleep
#define msleep t3_os_sleep

enum {
	PMD_RSD     = 10,	/* PMA/PMD receive signal detect register */
	PCS_STAT1_X = 24,	/* 10GBASE-X PCS status 1 register */
	PCS_STAT1_R = 32,	/* 10GBASE-R PCS status 1 register */
	XS_LN_STAT  = 24	/* XS lane status register */
};

enum {
	AEL100X_TX_DISABLE  = 9,
	AEL100X_TX_CONFIG1  = 0xc002,
	AEL1002_PWR_DOWN_HI = 0xc011,
	AEL1002_PWR_DOWN_LO = 0xc012,
	AEL1002_XFI_EQL     = 0xc015,
	AEL1002_LB_EN       = 0xc017,
	AEL_OPT_SETTINGS    = 0xc017,
	AEL_I2C_CTRL        = 0xc30a,
	AEL_I2C_DATA        = 0xc30b,
	AEL_I2C_STAT        = 0xc30c,
	AEL2005_GPIO_CTRL   = 0xc214,
	AEL2005_GPIO_STAT   = 0xc215,

	AEL2020_GPIO_INTR   = 0xc103,
	AEL2020_GPIO_CTRL   = 0xc108,
	AEL2020_GPIO_STAT   = 0xc10c,
	AEL2020_GPIO_CFG    = 0xc110,

	AEL2020_GPIO_SDA    = 0,
	AEL2020_GPIO_MODDET = 1,
	AEL2020_GPIO_0      = 3,
	AEL2020_GPIO_1      = 2,
	AEL2020_GPIO_LSTAT  = AEL2020_GPIO_1,
};

enum { edc_none, edc_sr, edc_twinax };

/* PHY module I2C device address */
enum {
	MODULE_DEV_ADDR = 0xa0,
	SFF_DEV_ADDR = 0xa2,
};

/* PHY transceiver type */
enum {
	phy_transtype_unknown = 0,
	phy_transtype_sfp = 3,
	phy_transtype_xfp = 6,
};

#define AEL2005_MODDET_IRQ 4

struct reg_val {
	unsigned short mmd_addr;
	unsigned short reg_addr;
	unsigned short clear_bits;
	unsigned short set_bits;
};

static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms);

static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
{
	int err;

	for (err = 0; rv->mmd_addr && !err; rv++) {
		if (rv->clear_bits == 0xffff)
			err = mdio_write(phy, rv->mmd_addr, rv->reg_addr,
					 rv->set_bits);
		else
			err = t3_mdio_change_bits(phy, rv->mmd_addr,
						  rv->reg_addr, rv->clear_bits,
						  rv->set_bits);
	}
	return err;
}

static void ael100x_txon(struct cphy *phy)
{
	int tx_on_gpio = phy->addr == 0 ?
F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL; msleep(100); t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio); msleep(30); } /* * Read an 8-bit word from a device attached to the PHY's i2c bus. */ static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr) { int i, err; unsigned int stat, data; err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL_I2C_CTRL, (dev_addr << 8) | (1 << 8) | word_addr); if (err) return err; for (i = 0; i < 200; i++) { msleep(1); err = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL_I2C_STAT, &stat); if (err) return err; if ((stat & 3) == 1) { err = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL_I2C_DATA, &data); if (err) return err; return data >> 8; } } CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %x.%x timed out\n", phy->addr, dev_addr, word_addr); return -ETIMEDOUT; } /* * Write an 8-bit word to a device attached to the PHY's i2c bus. */ static int ael_i2c_wr(struct cphy *phy, int dev_addr, int word_addr, int data) { int i, err; unsigned int stat; err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL_I2C_DATA, data); if (err) return err; err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL_I2C_CTRL, (dev_addr << 8) | word_addr); if (err) return err; for (i = 0; i < 200; i++) { msleep(1); err = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL_I2C_STAT, &stat); if (err) return err; if ((stat & 3) == 1) return 0; } CH_WARN(phy->adapter, "PHY %u i2c Write of dev.addr %x.%x = %#x timed out\n", phy->addr, dev_addr, word_addr, data); return -ETIMEDOUT; } static int get_phytrans_type(struct cphy *phy) { int v; v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0); if (v < 0) return phy_transtype_unknown; return v; } static int ael_laser_down(struct cphy *phy, int enable) { int v, dev_addr; v = get_phytrans_type(phy); if (v < 0) return v; if (v == phy_transtype_sfp) { /* Check SFF Soft TX disable is supported */ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 93); if (v < 0) return v; v &= 0x40; if (!v) return v; dev_addr = SFF_DEV_ADDR; } else if (v == phy_transtype_xfp) dev_addr = MODULE_DEV_ADDR; else return v; v = ael_i2c_rd(phy, dev_addr, 110); if (v < 0) return v; if (enable) v |= 0x40; else v &= ~0x40; v = ael_i2c_wr(phy, dev_addr, 110, v); return v; } static int ael1002_power_down(struct cphy *phy, int enable) { int err; err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable); if (!err) err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR, BMCR_PDOWN, enable ? BMCR_PDOWN : 0); return err; } static int ael1002_get_module_type(struct cphy *phy, int delay_ms) { int v; if (delay_ms) msleep(delay_ms); v = ael2xxx_get_module_type(phy, delay_ms); return (v == -ETIMEDOUT ? phy_modtype_none : v); } static int ael1002_reset(struct cphy *phy, int wait) { int err; if ((err = ael1002_power_down(phy, 0)) || (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) || (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) || (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) || (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) || (err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN, 0, 1 << 5))) return err; err = ael1002_get_module_type(phy, 300); if (err >= 0) phy->modtype = err; return 0; } static int ael1002_intr_noop(struct cphy *phy) { return 0; } /* * Get link status for a 10GBASE-R device. 
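 *
 * (Editor's sketch, hedged; not part of the upstream commit.)  The MFC
 * below widens the old boolean *link_ok into the tri-state *link_state.
 * Assuming the PHY_LINK_* values this commit introduces, the decode of
 * the three MDIO status reads amounts to:
 *
 *	int sd  = stat0 & 1;		(PMA/PMD receive signal detect)
 *	int pcs = stat1 & 1;		(10GBASE-R PCS block lock)
 *	int xs  = (stat2 >> 12) & 1;	(XGXS lane alignment)
 *
 *	if (sd && pcs && xs)
 *		*link_state = PHY_LINK_UP;
 *	else if (sd && !pcs && xs)	(signal and lanes OK, no block lock)
 *		*link_state = PHY_LINK_PARTIAL;
 *	else
 *		*link_state = PHY_LINK_DOWN;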
*/ -static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed, +static int get_link_status_r(struct cphy *phy, int *link_state, int *speed, int *duplex, int *fc) { - if (link_ok) { + if (link_state) { unsigned int stat0, stat1, stat2; int err = mdio_read(phy, MDIO_DEV_PMA_PMD, PMD_RSD, &stat0); if (!err) err = mdio_read(phy, MDIO_DEV_PCS, PCS_STAT1_R, &stat1); if (!err) err = mdio_read(phy, MDIO_DEV_XGXS, XS_LN_STAT, &stat2); if (err) return err; - *link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1; - if (*link_ok == 0) - return (0); + stat0 &= 1; + stat1 &= 1; + stat2 = (stat2 >> 12) & 1; + if (stat0 & stat1 & stat2) + *link_state = PHY_LINK_UP; + else if (stat0 == 1 && stat1 == 0 && stat2 == 1) + *link_state = PHY_LINK_PARTIAL; + else + *link_state = PHY_LINK_DOWN; } if (speed) *speed = SPEED_10000; if (duplex) *duplex = DUPLEX_FULL; return 0; } #ifdef C99_NOT_SUPPORTED static struct cphy_ops ael1002_ops = { ael1002_reset, ael1002_intr_noop, ael1002_intr_noop, ael1002_intr_noop, ael1002_intr_noop, NULL, NULL, NULL, NULL, NULL, get_link_status_r, ael1002_power_down, }; #else static struct cphy_ops ael1002_ops = { .reset = ael1002_reset, .intr_enable = ael1002_intr_noop, .intr_disable = ael1002_intr_noop, .intr_clear = ael1002_intr_noop, .intr_handler = ael1002_intr_noop, .get_link_status = get_link_status_r, .power_down = ael1002_power_down, }; #endif int t3_ael1002_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { int err; struct cphy *phy = &pinfo->phy; cphy_init(phy, pinfo->adapter, pinfo, phy_addr, &ael1002_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, "10GBASE-R"); ael100x_txon(phy); ael_laser_down(phy, 0); err = ael1002_get_module_type(phy, 0); if (err >= 0) phy->modtype = err; return 0; } static int ael1006_reset(struct cphy *phy, int wait) { int err; err = t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait); if (err) return err; t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, F_GPIO6_OUT_VAL, 0); msleep(125); t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, F_GPIO6_OUT_VAL, F_GPIO6_OUT_VAL); msleep(125); err = t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait); if (err) return err; msleep(125); err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR, 1, 1); if (err) return err; msleep(125); err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR, 1, 0); return err; } #ifdef C99_NOT_SUPPORTED static struct cphy_ops ael1006_ops = { ael1006_reset, t3_phy_lasi_intr_enable, t3_phy_lasi_intr_disable, t3_phy_lasi_intr_clear, t3_phy_lasi_intr_handler, NULL, NULL, NULL, NULL, NULL, get_link_status_r, ael1002_power_down, }; #else static struct cphy_ops ael1006_ops = { .reset = ael1006_reset, .intr_enable = t3_phy_lasi_intr_enable, .intr_disable = t3_phy_lasi_intr_disable, .intr_clear = t3_phy_lasi_intr_clear, .intr_handler = t3_phy_lasi_intr_handler, .get_link_status = get_link_status_r, .power_down = ael1002_power_down, }; #endif int t3_ael1006_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { struct cphy *phy = &pinfo->phy; cphy_init(phy, pinfo->adapter, pinfo, phy_addr, &ael1006_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, "10GBASE-SR"); phy->modtype = phy_modtype_sr; ael100x_txon(phy); return 0; } /* * Decode our module type. 
*/ static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms) { int v; if (delay_ms) msleep(delay_ms); v = get_phytrans_type(phy); if (v == phy_transtype_sfp) { /* SFP: see SFF-8472 for below */ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3); if (v < 0) return v; if (v == 0x1) goto twinax; if (v == 0x10) return phy_modtype_sr; if (v == 0x20) return phy_modtype_lr; if (v == 0x40) return phy_modtype_lrm; v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 8); if (v < 0) return v; if (v == 4) { v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 60); if (v < 0) return v; if (v & 0x1) goto twinax; } v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6); if (v < 0) return v; if (v != 4) return phy_modtype_unknown; v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10); if (v < 0) return v; if (v & 0x80) { twinax: v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12); if (v < 0) return v; return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax; } } else if (v == phy_transtype_xfp) { /* XFP: See INF-8077i for details. */ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 127); if (v < 0) return v; if (v != 1) { /* XXX: set page select to table 1 yourself */ return phy_modtype_unknown; } v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 131); if (v < 0) return v; v &= 0xf0; if (v == 0x10) return phy_modtype_lrm; if (v == 0x40) return phy_modtype_lr; if (v == 0x80) return phy_modtype_sr; } return phy_modtype_unknown; } /* * Code to support the Aeluros/NetLogic 2005 10Gb PHY. */ static int ael2005_setup_sr_edc(struct cphy *phy) { static struct reg_val regs[] = { { MDIO_DEV_PMA_PMD, 0xc003, 0xffff, 0x181 }, { MDIO_DEV_PMA_PMD, 0xc010, 0xffff, 0x448a }, { MDIO_DEV_PMA_PMD, 0xc04a, 0xffff, 0x5200 }, { 0, 0, 0, 0 } }; static u16 sr_edc[] = { 0xcc00, 0x2ff4, 0xcc01, 0x3cd4, 0xcc02, 0x2015, 0xcc03, 0x3105, 0xcc04, 0x6524, 0xcc05, 0x27ff, 0xcc06, 0x300f, 0xcc07, 0x2c8b, 0xcc08, 0x300b, 0xcc09, 0x4009, 0xcc0a, 0x400e, 0xcc0b, 0x2f72, 0xcc0c, 0x3002, 0xcc0d, 0x1002, 0xcc0e, 0x2172, 0xcc0f, 0x3012, 0xcc10, 0x1002, 0xcc11, 0x25d2, 0xcc12, 0x3012, 0xcc13, 0x1002, 0xcc14, 0xd01e, 0xcc15, 0x27d2, 0xcc16, 0x3012, 0xcc17, 0x1002, 0xcc18, 0x2004, 0xcc19, 0x3c84, 0xcc1a, 0x6436, 0xcc1b, 0x2007, 0xcc1c, 0x3f87, 0xcc1d, 0x8676, 0xcc1e, 0x40b7, 0xcc1f, 0xa746, 0xcc20, 0x4047, 0xcc21, 0x5673, 0xcc22, 0x2982, 0xcc23, 0x3002, 0xcc24, 0x13d2, 0xcc25, 0x8bbd, 0xcc26, 0x2862, 0xcc27, 0x3012, 0xcc28, 0x1002, 0xcc29, 0x2092, 0xcc2a, 0x3012, 0xcc2b, 0x1002, 0xcc2c, 0x5cc3, 0xcc2d, 0x314, 0xcc2e, 0x2942, 0xcc2f, 0x3002, 0xcc30, 0x1002, 0xcc31, 0xd019, 0xcc32, 0x2032, 0xcc33, 0x3012, 0xcc34, 0x1002, 0xcc35, 0x2a04, 0xcc36, 0x3c74, 0xcc37, 0x6435, 0xcc38, 0x2fa4, 0xcc39, 0x3cd4, 0xcc3a, 0x6624, 0xcc3b, 0x5563, 0xcc3c, 0x2d42, 0xcc3d, 0x3002, 0xcc3e, 0x13d2, 0xcc3f, 0x464d, 0xcc40, 0x2862, 0xcc41, 0x3012, 0xcc42, 0x1002, 0xcc43, 0x2032, 0xcc44, 0x3012, 0xcc45, 0x1002, 0xcc46, 0x2fb4, 0xcc47, 0x3cd4, 0xcc48, 0x6624, 0xcc49, 0x5563, 0xcc4a, 0x2d42, 0xcc4b, 0x3002, 0xcc4c, 0x13d2, 0xcc4d, 0x2ed2, 0xcc4e, 0x3002, 0xcc4f, 0x1002, 0xcc50, 0x2fd2, 0xcc51, 0x3002, 0xcc52, 0x1002, 0xcc53, 0x004, 0xcc54, 0x2942, 0xcc55, 0x3002, 0xcc56, 0x1002, 0xcc57, 0x2092, 0xcc58, 0x3012, 0xcc59, 0x1002, 0xcc5a, 0x5cc3, 0xcc5b, 0x317, 0xcc5c, 0x2f72, 0xcc5d, 0x3002, 0xcc5e, 0x1002, 0xcc5f, 0x2942, 0xcc60, 0x3002, 0xcc61, 0x1002, 0xcc62, 0x22cd, 0xcc63, 0x301d, 0xcc64, 0x2862, 0xcc65, 0x3012, 0xcc66, 0x1002, 0xcc67, 0x2ed2, 0xcc68, 0x3002, 0xcc69, 0x1002, 0xcc6a, 0x2d72, 0xcc6b, 0x3002, 0xcc6c, 0x1002, 0xcc6d, 0x628f, 0xcc6e, 0x2112, 0xcc6f, 0x3012, 0xcc70, 0x1002, 0xcc71, 0x5aa3, 0xcc72, 0x2dc2, 0xcc73, 0x3002, 0xcc74, 0x1312, 0xcc75, 0x6f72, 
0xcc76, 0x1002, 0xcc77, 0x2807, 0xcc78, 0x31a7, 0xcc79, 0x20c4, 0xcc7a, 0x3c24, 0xcc7b, 0x6724, 0xcc7c, 0x1002, 0xcc7d, 0x2807, 0xcc7e, 0x3187, 0xcc7f, 0x20c4, 0xcc80, 0x3c24, 0xcc81, 0x6724, 0xcc82, 0x1002, 0xcc83, 0x2514, 0xcc84, 0x3c64, 0xcc85, 0x6436, 0xcc86, 0xdff4, 0xcc87, 0x6436, 0xcc88, 0x1002, 0xcc89, 0x40a4, 0xcc8a, 0x643c, 0xcc8b, 0x4016, 0xcc8c, 0x8c6c, 0xcc8d, 0x2b24, 0xcc8e, 0x3c24, 0xcc8f, 0x6435, 0xcc90, 0x1002, 0xcc91, 0x2b24, 0xcc92, 0x3c24, 0xcc93, 0x643a, 0xcc94, 0x4025, 0xcc95, 0x8a5a, 0xcc96, 0x1002, 0xcc97, 0x2731, 0xcc98, 0x3011, 0xcc99, 0x1001, 0xcc9a, 0xc7a0, 0xcc9b, 0x100, 0xcc9c, 0xc502, 0xcc9d, 0x53ac, 0xcc9e, 0xc503, 0xcc9f, 0xd5d5, 0xcca0, 0xc600, 0xcca1, 0x2a6d, 0xcca2, 0xc601, 0xcca3, 0x2a4c, 0xcca4, 0xc602, 0xcca5, 0x111, 0xcca6, 0xc60c, 0xcca7, 0x5900, 0xcca8, 0xc710, 0xcca9, 0x700, 0xccaa, 0xc718, 0xccab, 0x700, 0xccac, 0xc720, 0xccad, 0x4700, 0xccae, 0xc801, 0xccaf, 0x7f50, 0xccb0, 0xc802, 0xccb1, 0x7760, 0xccb2, 0xc803, 0xccb3, 0x7fce, 0xccb4, 0xc804, 0xccb5, 0x5700, 0xccb6, 0xc805, 0xccb7, 0x5f11, 0xccb8, 0xc806, 0xccb9, 0x4751, 0xccba, 0xc807, 0xccbb, 0x57e1, 0xccbc, 0xc808, 0xccbd, 0x2700, 0xccbe, 0xc809, 0xccbf, 0x000, 0xccc0, 0xc821, 0xccc1, 0x002, 0xccc2, 0xc822, 0xccc3, 0x014, 0xccc4, 0xc832, 0xccc5, 0x1186, 0xccc6, 0xc847, 0xccc7, 0x1e02, 0xccc8, 0xc013, 0xccc9, 0xf341, 0xccca, 0xc01a, 0xcccb, 0x446, 0xcccc, 0xc024, 0xcccd, 0x1000, 0xccce, 0xc025, 0xcccf, 0xa00, 0xccd0, 0xc026, 0xccd1, 0xc0c, 0xccd2, 0xc027, 0xccd3, 0xc0c, 0xccd4, 0xc029, 0xccd5, 0x0a0, 0xccd6, 0xc030, 0xccd7, 0xa00, 0xccd8, 0xc03c, 0xccd9, 0x01c, 0xccda, 0xc005, 0xccdb, 0x7a06, 0xccdc, 0x000, 0xccdd, 0x2731, 0xccde, 0x3011, 0xccdf, 0x1001, 0xcce0, 0xc620, 0xcce1, 0x000, 0xcce2, 0xc621, 0xcce3, 0x03f, 0xcce4, 0xc622, 0xcce5, 0x000, 0xcce6, 0xc623, 0xcce7, 0x000, 0xcce8, 0xc624, 0xcce9, 0x000, 0xccea, 0xc625, 0xcceb, 0x000, 0xccec, 0xc627, 0xcced, 0x000, 0xccee, 0xc628, 0xccef, 0x000, 0xccf0, 0xc62c, 0xccf1, 0x000, 0xccf2, 0x000, 0xccf3, 0x2806, 0xccf4, 0x3cb6, 0xccf5, 0xc161, 0xccf6, 0x6134, 0xccf7, 0x6135, 0xccf8, 0x5443, 0xccf9, 0x303, 0xccfa, 0x6524, 0xccfb, 0x00b, 0xccfc, 0x1002, 0xccfd, 0x2104, 0xccfe, 0x3c24, 0xccff, 0x2105, 0xcd00, 0x3805, 0xcd01, 0x6524, 0xcd02, 0xdff4, 0xcd03, 0x4005, 0xcd04, 0x6524, 0xcd05, 0x1002, 0xcd06, 0x5dd3, 0xcd07, 0x306, 0xcd08, 0x2ff7, 0xcd09, 0x38f7, 0xcd0a, 0x60b7, 0xcd0b, 0xdffd, 0xcd0c, 0x00a, 0xcd0d, 0x1002, 0xcd0e, 0 }; int i, err; err = set_phy_regs(phy, regs); if (err) return err; msleep(50); for (i = 0; i < ARRAY_SIZE(sr_edc) && !err; i += 2) err = mdio_write(phy, MDIO_DEV_PMA_PMD, sr_edc[i], sr_edc[i + 1]); if (!err) phy->priv = edc_sr; return err; } static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype) { static struct reg_val regs[] = { { MDIO_DEV_PMA_PMD, 0xc04a, 0xffff, 0x5a00 }, { 0, 0, 0, 0 } }; static struct reg_val preemphasis[] = { { MDIO_DEV_PMA_PMD, 0xc014, 0xffff, 0xfe16 }, { MDIO_DEV_PMA_PMD, 0xc015, 0xffff, 0xa000 }, { 0, 0, 0, 0 } }; static u16 twinax_edc[] = { 0xcc00, 0x4009, 0xcc01, 0x27ff, 0xcc02, 0x300f, 0xcc03, 0x40aa, 0xcc04, 0x401c, 0xcc05, 0x401e, 0xcc06, 0x2ff4, 0xcc07, 0x3cd4, 0xcc08, 0x2035, 0xcc09, 0x3145, 0xcc0a, 0x6524, 0xcc0b, 0x26a2, 0xcc0c, 0x3012, 0xcc0d, 0x1002, 0xcc0e, 0x29c2, 0xcc0f, 0x3002, 0xcc10, 0x1002, 0xcc11, 0x2072, 0xcc12, 0x3012, 0xcc13, 0x1002, 0xcc14, 0x22cd, 0xcc15, 0x301d, 0xcc16, 0x2e52, 0xcc17, 0x3012, 0xcc18, 0x1002, 0xcc19, 0x28e2, 0xcc1a, 0x3002, 0xcc1b, 0x1002, 0xcc1c, 0x628f, 0xcc1d, 0x2ac2, 0xcc1e, 0x3012, 0xcc1f, 0x1002, 0xcc20, 0x5553, 0xcc21, 0x2ae2, 0xcc22, 
0x3002, 0xcc23, 0x1302, 0xcc24, 0x401e, 0xcc25, 0x2be2, 0xcc26, 0x3012, 0xcc27, 0x1002, 0xcc28, 0x2da2, 0xcc29, 0x3012, 0xcc2a, 0x1002, 0xcc2b, 0x2ba2, 0xcc2c, 0x3002, 0xcc2d, 0x1002, 0xcc2e, 0x5ee3, 0xcc2f, 0x305, 0xcc30, 0x400e, 0xcc31, 0x2bc2, 0xcc32, 0x3002, 0xcc33, 0x1002, 0xcc34, 0x2b82, 0xcc35, 0x3012, 0xcc36, 0x1002, 0xcc37, 0x5663, 0xcc38, 0x302, 0xcc39, 0x401e, 0xcc3a, 0x6f72, 0xcc3b, 0x1002, 0xcc3c, 0x628f, 0xcc3d, 0x2be2, 0xcc3e, 0x3012, 0xcc3f, 0x1002, 0xcc40, 0x22cd, 0xcc41, 0x301d, 0xcc42, 0x2e52, 0xcc43, 0x3012, 0xcc44, 0x1002, 0xcc45, 0x2522, 0xcc46, 0x3012, 0xcc47, 0x1002, 0xcc48, 0x2da2, 0xcc49, 0x3012, 0xcc4a, 0x1002, 0xcc4b, 0x2ca2, 0xcc4c, 0x3012, 0xcc4d, 0x1002, 0xcc4e, 0x2fa4, 0xcc4f, 0x3cd4, 0xcc50, 0x6624, 0xcc51, 0x410b, 0xcc52, 0x56b3, 0xcc53, 0x3c4, 0xcc54, 0x2fb2, 0xcc55, 0x3002, 0xcc56, 0x1002, 0xcc57, 0x220b, 0xcc58, 0x303b, 0xcc59, 0x56b3, 0xcc5a, 0x3c3, 0xcc5b, 0x866b, 0xcc5c, 0x400c, 0xcc5d, 0x23a2, 0xcc5e, 0x3012, 0xcc5f, 0x1002, 0xcc60, 0x2da2, 0xcc61, 0x3012, 0xcc62, 0x1002, 0xcc63, 0x2ca2, 0xcc64, 0x3012, 0xcc65, 0x1002, 0xcc66, 0x2fb4, 0xcc67, 0x3cd4, 0xcc68, 0x6624, 0xcc69, 0x56b3, 0xcc6a, 0x3c3, 0xcc6b, 0x866b, 0xcc6c, 0x401c, 0xcc6d, 0x2205, 0xcc6e, 0x3035, 0xcc6f, 0x5b53, 0xcc70, 0x2c52, 0xcc71, 0x3002, 0xcc72, 0x13c2, 0xcc73, 0x5cc3, 0xcc74, 0x317, 0xcc75, 0x2522, 0xcc76, 0x3012, 0xcc77, 0x1002, 0xcc78, 0x2da2, 0xcc79, 0x3012, 0xcc7a, 0x1002, 0xcc7b, 0x2b82, 0xcc7c, 0x3012, 0xcc7d, 0x1002, 0xcc7e, 0x5663, 0xcc7f, 0x303, 0xcc80, 0x401e, 0xcc81, 0x004, 0xcc82, 0x2c42, 0xcc83, 0x3012, 0xcc84, 0x1002, 0xcc85, 0x6f72, 0xcc86, 0x1002, 0xcc87, 0x628f, 0xcc88, 0x2304, 0xcc89, 0x3c84, 0xcc8a, 0x6436, 0xcc8b, 0xdff4, 0xcc8c, 0x6436, 0xcc8d, 0x2ff5, 0xcc8e, 0x3005, 0xcc8f, 0x8656, 0xcc90, 0xdfba, 0xcc91, 0x56a3, 0xcc92, 0xd05a, 0xcc93, 0x21c2, 0xcc94, 0x3012, 0xcc95, 0x1392, 0xcc96, 0xd05a, 0xcc97, 0x56a3, 0xcc98, 0xdfba, 0xcc99, 0x383, 0xcc9a, 0x6f72, 0xcc9b, 0x1002, 0xcc9c, 0x28c5, 0xcc9d, 0x3005, 0xcc9e, 0x4178, 0xcc9f, 0x5653, 0xcca0, 0x384, 0xcca1, 0x22b2, 0xcca2, 0x3012, 0xcca3, 0x1002, 0xcca4, 0x2be5, 0xcca5, 0x3005, 0xcca6, 0x41e8, 0xcca7, 0x5653, 0xcca8, 0x382, 0xcca9, 0x002, 0xccaa, 0x4258, 0xccab, 0x2474, 0xccac, 0x3c84, 0xccad, 0x6437, 0xccae, 0xdff4, 0xccaf, 0x6437, 0xccb0, 0x2ff5, 0xccb1, 0x3c05, 0xccb2, 0x8757, 0xccb3, 0xb888, 0xccb4, 0x9787, 0xccb5, 0xdff4, 0xccb6, 0x6724, 0xccb7, 0x866a, 0xccb8, 0x6f72, 0xccb9, 0x1002, 0xccba, 0x2d01, 0xccbb, 0x3011, 0xccbc, 0x1001, 0xccbd, 0xc620, 0xccbe, 0x14e5, 0xccbf, 0xc621, 0xccc0, 0xc53d, 0xccc1, 0xc622, 0xccc2, 0x3cbe, 0xccc3, 0xc623, 0xccc4, 0x4452, 0xccc5, 0xc624, 0xccc6, 0xc5c5, 0xccc7, 0xc625, 0xccc8, 0xe01e, 0xccc9, 0xc627, 0xccca, 0x000, 0xcccb, 0xc628, 0xcccc, 0x000, 0xcccd, 0xc62b, 0xccce, 0x000, 0xcccf, 0xc62c, 0xccd0, 0x000, 0xccd1, 0x000, 0xccd2, 0x2d01, 0xccd3, 0x3011, 0xccd4, 0x1001, 0xccd5, 0xc620, 0xccd6, 0x000, 0xccd7, 0xc621, 0xccd8, 0x000, 0xccd9, 0xc622, 0xccda, 0x0ce, 0xccdb, 0xc623, 0xccdc, 0x07f, 0xccdd, 0xc624, 0xccde, 0x032, 0xccdf, 0xc625, 0xcce0, 0x000, 0xcce1, 0xc627, 0xcce2, 0x000, 0xcce3, 0xc628, 0xcce4, 0x000, 0xcce5, 0xc62b, 0xcce6, 0x000, 0xcce7, 0xc62c, 0xcce8, 0x000, 0xcce9, 0x000, 0xccea, 0x2d01, 0xcceb, 0x3011, 0xccec, 0x1001, 0xcced, 0xc502, 0xccee, 0x609f, 0xccef, 0xc600, 0xccf0, 0x2a6e, 0xccf1, 0xc601, 0xccf2, 0x2a2c, 0xccf3, 0xc60c, 0xccf4, 0x5400, 0xccf5, 0xc710, 0xccf6, 0x700, 0xccf7, 0xc718, 0xccf8, 0x700, 0xccf9, 0xc720, 0xccfa, 0x4700, 0xccfb, 0xc728, 0xccfc, 0x700, 0xccfd, 0xc729, 0xccfe, 0x1207, 0xccff, 0xc801, 0xcd00, 0x7f50, 0xcd01, 0xc802, 0xcd02, 
0x7760, 0xcd03, 0xc803, 0xcd04, 0x7fce, 0xcd05, 0xc804, 0xcd06, 0x520e, 0xcd07, 0xc805, 0xcd08, 0x5c11, 0xcd09, 0xc806, 0xcd0a, 0x3c51, 0xcd0b, 0xc807, 0xcd0c, 0x4061, 0xcd0d, 0xc808, 0xcd0e, 0x49c1, 0xcd0f, 0xc809, 0xcd10, 0x3840, 0xcd11, 0xc80a, 0xcd12, 0x000, 0xcd13, 0xc821, 0xcd14, 0x002, 0xcd15, 0xc822, 0xcd16, 0x046, 0xcd17, 0xc844, 0xcd18, 0x182f, 0xcd19, 0xc013, 0xcd1a, 0xf341, 0xcd1b, 0xc01a, 0xcd1c, 0x446, 0xcd1d, 0xc024, 0xcd1e, 0x1000, 0xcd1f, 0xc025, 0xcd20, 0xa00, 0xcd21, 0xc026, 0xcd22, 0xc0c, 0xcd23, 0xc027, 0xcd24, 0xc0c, 0xcd25, 0xc029, 0xcd26, 0x0a0, 0xcd27, 0xc030, 0xcd28, 0xa00, 0xcd29, 0xc03c, 0xcd2a, 0x01c, 0xcd2b, 0x000, 0xcd2c, 0x2b84, 0xcd2d, 0x3c74, 0xcd2e, 0x6435, 0xcd2f, 0xdff4, 0xcd30, 0x6435, 0xcd31, 0x2806, 0xcd32, 0x3006, 0xcd33, 0x8565, 0xcd34, 0x2b24, 0xcd35, 0x3c24, 0xcd36, 0x6436, 0xcd37, 0x1002, 0xcd38, 0x2b24, 0xcd39, 0x3c24, 0xcd3a, 0x6436, 0xcd3b, 0x4045, 0xcd3c, 0x8656, 0xcd3d, 0x1002, 0xcd3e, 0x2807, 0xcd3f, 0x31a7, 0xcd40, 0x20c4, 0xcd41, 0x3c24, 0xcd42, 0x6724, 0xcd43, 0x1002, 0xcd44, 0x2807, 0xcd45, 0x3187, 0xcd46, 0x20c4, 0xcd47, 0x3c24, 0xcd48, 0x6724, 0xcd49, 0x1002, 0xcd4a, 0x2514, 0xcd4b, 0x3c64, 0xcd4c, 0x6436, 0xcd4d, 0xdff4, 0xcd4e, 0x6436, 0xcd4f, 0x1002, 0xcd50, 0x2806, 0xcd51, 0x3cb6, 0xcd52, 0xc161, 0xcd53, 0x6134, 0xcd54, 0x6135, 0xcd55, 0x5443, 0xcd56, 0x303, 0xcd57, 0x6524, 0xcd58, 0x00b, 0xcd59, 0x1002, 0xcd5a, 0xd019, 0xcd5b, 0x2104, 0xcd5c, 0x3c24, 0xcd5d, 0x2105, 0xcd5e, 0x3805, 0xcd5f, 0x6524, 0xcd60, 0xdff4, 0xcd61, 0x4005, 0xcd62, 0x6524, 0xcd63, 0x2e8d, 0xcd64, 0x303d, 0xcd65, 0x5dd3, 0xcd66, 0x306, 0xcd67, 0x2ff7, 0xcd68, 0x38f7, 0xcd69, 0x60b7, 0xcd6a, 0xdffd, 0xcd6b, 0x00a, 0xcd6c, 0x1002, 0xcd6d, 0 }; int i, err; err = set_phy_regs(phy, regs); if (!err && modtype == phy_modtype_twinax_long) err = set_phy_regs(phy, preemphasis); if (err) return err; msleep(50); for (i = 0; i < ARRAY_SIZE(twinax_edc) && !err; i += 2) err = mdio_write(phy, MDIO_DEV_PMA_PMD, twinax_edc[i], twinax_edc[i + 1]); if (!err) phy->priv = edc_twinax; return err; } static int ael2005_get_module_type(struct cphy *phy, int delay_ms) { int v; unsigned int stat; v = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, &stat); if (v) return v; if (stat & (1 << 8)) /* module absent */ return phy_modtype_none; return ael2xxx_get_module_type(phy, delay_ms); } static int ael2005_intr_enable(struct cphy *phy) { int err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0x200); return err ? err : t3_phy_lasi_intr_enable(phy); } static int ael2005_intr_disable(struct cphy *phy) { int err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0x100); return err ? err : t3_phy_lasi_intr_disable(phy); } static int ael2005_intr_clear(struct cphy *phy) { int err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0xd00); return err ? 
err : t3_phy_lasi_intr_clear(phy); } static int ael2005_reset(struct cphy *phy, int wait) { static struct reg_val regs0[] = { { MDIO_DEV_PMA_PMD, 0xc001, 0, 1 << 5 }, { MDIO_DEV_PMA_PMD, 0xc017, 0, 1 << 5 }, { MDIO_DEV_PMA_PMD, 0xc013, 0xffff, 0xf341 }, { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8000 }, { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8100 }, { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0x8000 }, { MDIO_DEV_PMA_PMD, 0xc210, 0xffff, 0 }, { 0, 0, 0, 0 } }; static struct reg_val regs1[] = { { MDIO_DEV_PMA_PMD, 0xca00, 0xffff, 0x0080 }, { MDIO_DEV_PMA_PMD, 0xca12, 0xffff, 0 }, { 0, 0, 0, 0 } }; int err; unsigned int lasi_ctrl; err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, &lasi_ctrl); if (err) return err; err = t3_phy_reset(phy, MDIO_DEV_PMA_PMD, 0); if (err) return err; msleep(125); phy->priv = edc_none; err = set_phy_regs(phy, regs0); if (err) return err; msleep(50); err = ael2005_get_module_type(phy, 0); if (err < 0) return err; phy->modtype = (u8)err; if (err == phy_modtype_none) err = 0; else if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) err = ael2005_setup_twinax_edc(phy, err); else err = ael2005_setup_sr_edc(phy); if (err) return err; err = set_phy_regs(phy, regs1); if (err) return err; /* reset wipes out interrupts, reenable them if they were on */ if (lasi_ctrl & 1) err = ael2005_intr_enable(phy); return err; } static int ael2005_intr_handler(struct cphy *phy) { unsigned int stat; int ret, edc_needed, cause = 0; ret = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_STAT, &stat); if (ret) return ret; if (stat & AEL2005_MODDET_IRQ) { ret = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL2005_GPIO_CTRL, 0xd00); if (ret) return ret; /* modules have max 300 ms init time after hot plug */ ret = ael2005_get_module_type(phy, 300); if (ret < 0) return ret; phy->modtype = (u8)ret; if (ret == phy_modtype_none) edc_needed = phy->priv; /* on unplug retain EDC */ else if (ret == phy_modtype_twinax || ret == phy_modtype_twinax_long) edc_needed = edc_twinax; else edc_needed = edc_sr; if (edc_needed != phy->priv) { ret = ael2005_reset(phy, 0); return ret ? ret : cphy_cause_module_change; } cause = cphy_cause_module_change; } ret = t3_phy_lasi_intr_handler(phy); if (ret < 0) return ret; ret |= cause; - if (!ret) { - (void) ael2005_reset(phy, 0); + if (!ret) ret |= cphy_cause_link_change; - } return ret; } static struct cphy_ops ael2005_ops = { #ifdef C99_NOT_SUPPORTED ael2005_reset, ael2005_intr_enable, ael2005_intr_disable, ael2005_intr_clear, ael2005_intr_handler, NULL, NULL, NULL, NULL, NULL, get_link_status_r, ael1002_power_down, #else .reset = ael2005_reset, .intr_enable = ael2005_intr_enable, .intr_disable = ael2005_intr_disable, .intr_clear = ael2005_intr_clear, .intr_handler = ael2005_intr_handler, .get_link_status = get_link_status_r, .power_down = ael1002_power_down, #endif }; int t3_ael2005_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { int err; struct cphy *phy = &pinfo->phy; cphy_init(phy, pinfo->adapter, pinfo, phy_addr, &ael2005_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | SUPPORTED_IRQ, "10GBASE-R"); msleep(125); ael_laser_down(phy, 0); err = ael2005_get_module_type(phy, 0); if (err >= 0) phy->modtype = err; return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL_OPT_SETTINGS, 0, 1 << 5); } /* * Setup EDC and other parameters for operation with an optical module. 
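 */

/*
 * (Editor's sketch, hedged; load_edc_image() is a hypothetical helper,
 * not part of this file.)  Each EDC image in this driver -- the AEL2005
 * sr_edc[]/twinax_edc[] arrays above and the AEL2020 twinax_edc[] below
 * -- is a flat u16 array of {register, value} pairs that is simply
 * streamed to the PMA/PMD device:
 */
#if 0	/* illustrative only */
static int
load_edc_image(struct cphy *phy, const u16 *img, int nwords)
{
	int i, err = 0;

	/* consecutive (register, value) pairs, as in the ARRAY_SIZE loops */
	for (i = 0; i < nwords && !err; i += 2)
		err = mdio_write(phy, MDIO_DEV_PMA_PMD, img[i], img[i + 1]);
	return (err);
}
#endif

/*
 * Setup for the optical-module (SR) case follows.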
*/ static int ael2020_setup_sr_edc(struct cphy *phy) { static struct reg_val regs[] = { { MDIO_DEV_PMA_PMD, 0xcc01, 0xffff, 0x488a }, { MDIO_DEV_PMA_PMD, 0xcb1b, 0xffff, 0x0200 }, { MDIO_DEV_PMA_PMD, 0xcb1c, 0xffff, 0x00f0 }, { MDIO_DEV_PMA_PMD, 0xcc06, 0xffff, 0x00e0 }, /* end */ { 0, 0, 0, 0 } }; int err; err = set_phy_regs(phy, regs); msleep(50); if (err) return err; phy->priv = edc_sr; return 0; } /* * Setup EDC and other parameters for operation with an TWINAX module. */ static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype) { static struct reg_val uCclock40MHz[] = { { MDIO_DEV_PMA_PMD, 0xff28, 0xffff, 0x4001 }, { MDIO_DEV_PMA_PMD, 0xff2a, 0xffff, 0x0002 }, { 0, 0, 0, 0 } }; static struct reg_val uCclockActivate[] = { { MDIO_DEV_PMA_PMD, 0xd000, 0xffff, 0x5200 }, { 0, 0, 0, 0 } }; static struct reg_val uCactivate[] = { { MDIO_DEV_PMA_PMD, 0xd080, 0xffff, 0x0100 }, { MDIO_DEV_PMA_PMD, 0xd092, 0xffff, 0x0000 }, { 0, 0, 0, 0 } }; static u16 twinax_edc[] = { 0xd800, 0x4009, 0xd801, 0x2fff, 0xd802, 0x300f, 0xd803, 0x40aa, 0xd804, 0x401c, 0xd805, 0x401e, 0xd806, 0x20c5, 0xd807, 0x3c05, 0xd808, 0x6536, 0xd809, 0x2fe4, 0xd80a, 0x3dc4, 0xd80b, 0x6624, 0xd80c, 0x2ff4, 0xd80d, 0x3dc4, 0xd80e, 0x2035, 0xd80f, 0x30a5, 0xd810, 0x6524, 0xd811, 0x2ca2, 0xd812, 0x3012, 0xd813, 0x1002, 0xd814, 0x27e2, 0xd815, 0x3022, 0xd816, 0x1002, 0xd817, 0x28d2, 0xd818, 0x3022, 0xd819, 0x1002, 0xd81a, 0x2892, 0xd81b, 0x3012, 0xd81c, 0x1002, 0xd81d, 0x24e2, 0xd81e, 0x3022, 0xd81f, 0x1002, 0xd820, 0x27e2, 0xd821, 0x3012, 0xd822, 0x1002, 0xd823, 0x2422, 0xd824, 0x3022, 0xd825, 0x1002, 0xd826, 0x22cd, 0xd827, 0x301d, 0xd828, 0x28f2, 0xd829, 0x3022, 0xd82a, 0x1002, 0xd82b, 0x5553, 0xd82c, 0x0307, 0xd82d, 0x2572, 0xd82e, 0x3022, 0xd82f, 0x1002, 0xd830, 0x21a2, 0xd831, 0x3012, 0xd832, 0x1002, 0xd833, 0x4016, 0xd834, 0x5e63, 0xd835, 0x0344, 0xd836, 0x21a2, 0xd837, 0x3012, 0xd838, 0x1002, 0xd839, 0x400e, 0xd83a, 0x2572, 0xd83b, 0x3022, 0xd83c, 0x1002, 0xd83d, 0x2b22, 0xd83e, 0x3012, 0xd83f, 0x1002, 0xd840, 0x2842, 0xd841, 0x3022, 0xd842, 0x1002, 0xd843, 0x26e2, 0xd844, 0x3022, 0xd845, 0x1002, 0xd846, 0x2fa4, 0xd847, 0x3dc4, 0xd848, 0x6624, 0xd849, 0x2e8b, 0xd84a, 0x303b, 0xd84b, 0x56b3, 0xd84c, 0x03c6, 0xd84d, 0x866b, 0xd84e, 0x400c, 0xd84f, 0x2782, 0xd850, 0x3012, 0xd851, 0x1002, 0xd852, 0x2c4b, 0xd853, 0x309b, 0xd854, 0x56b3, 0xd855, 0x03c3, 0xd856, 0x866b, 0xd857, 0x400c, 0xd858, 0x22a2, 0xd859, 0x3022, 0xd85a, 0x1002, 0xd85b, 0x2842, 0xd85c, 0x3022, 0xd85d, 0x1002, 0xd85e, 0x26e2, 0xd85f, 0x3022, 0xd860, 0x1002, 0xd861, 0x2fb4, 0xd862, 0x3dc4, 0xd863, 0x6624, 0xd864, 0x56b3, 0xd865, 0x03c3, 0xd866, 0x866b, 0xd867, 0x401c, 0xd868, 0x2c45, 0xd869, 0x3095, 0xd86a, 0x5b53, 0xd86b, 0x23d2, 0xd86c, 0x3012, 0xd86d, 0x13c2, 0xd86e, 0x5cc3, 0xd86f, 0x2782, 0xd870, 0x3012, 0xd871, 0x1312, 0xd872, 0x2b22, 0xd873, 0x3012, 0xd874, 0x1002, 0xd875, 0x2842, 0xd876, 0x3022, 0xd877, 0x1002, 0xd878, 0x2622, 0xd879, 0x3022, 0xd87a, 0x1002, 0xd87b, 0x21a2, 0xd87c, 0x3012, 0xd87d, 0x1002, 0xd87e, 0x628f, 0xd87f, 0x2985, 0xd880, 0x33a5, 0xd881, 0x26e2, 0xd882, 0x3022, 0xd883, 0x1002, 0xd884, 0x5653, 0xd885, 0x03d2, 0xd886, 0x401e, 0xd887, 0x6f72, 0xd888, 0x1002, 0xd889, 0x628f, 0xd88a, 0x2304, 0xd88b, 0x3c84, 0xd88c, 0x6436, 0xd88d, 0xdff4, 0xd88e, 0x6436, 0xd88f, 0x2ff5, 0xd890, 0x3005, 0xd891, 0x8656, 0xd892, 0xdfba, 0xd893, 0x56a3, 0xd894, 0xd05a, 0xd895, 0x29e2, 0xd896, 0x3012, 0xd897, 0x1392, 0xd898, 0xd05a, 0xd899, 0x56a3, 0xd89a, 0xdfba, 0xd89b, 0x0383, 0xd89c, 0x6f72, 0xd89d, 0x1002, 0xd89e, 0x2a64, 0xd89f, 0x3014, 0xd8a0, 
0x2005, 0xd8a1, 0x3d75, 0xd8a2, 0xc451, 0xd8a3, 0x29a2, 0xd8a4, 0x3022, 0xd8a5, 0x1002, 0xd8a6, 0x178c, 0xd8a7, 0x1898, 0xd8a8, 0x19a4, 0xd8a9, 0x1ab0, 0xd8aa, 0x1bbc, 0xd8ab, 0x1cc8, 0xd8ac, 0x1dd3, 0xd8ad, 0x1ede, 0xd8ae, 0x1fe9, 0xd8af, 0x20f4, 0xd8b0, 0x21ff, 0xd8b1, 0x0000, 0xd8b2, 0x2741, 0xd8b3, 0x3021, 0xd8b4, 0x1001, 0xd8b5, 0xc620, 0xd8b6, 0x0000, 0xd8b7, 0xc621, 0xd8b8, 0x0000, 0xd8b9, 0xc622, 0xd8ba, 0x00e2, 0xd8bb, 0xc623, 0xd8bc, 0x007f, 0xd8bd, 0xc624, 0xd8be, 0x00ce, 0xd8bf, 0xc625, 0xd8c0, 0x0000, 0xd8c1, 0xc627, 0xd8c2, 0x0000, 0xd8c3, 0xc628, 0xd8c4, 0x0000, 0xd8c5, 0xc90a, 0xd8c6, 0x3a7c, 0xd8c7, 0xc62c, 0xd8c8, 0x0000, 0xd8c9, 0x0000, 0xd8ca, 0x2741, 0xd8cb, 0x3021, 0xd8cc, 0x1001, 0xd8cd, 0xc502, 0xd8ce, 0x53ac, 0xd8cf, 0xc503, 0xd8d0, 0x2cd3, 0xd8d1, 0xc600, 0xd8d2, 0x2a6e, 0xd8d3, 0xc601, 0xd8d4, 0x2a2c, 0xd8d5, 0xc605, 0xd8d6, 0x5557, 0xd8d7, 0xc60c, 0xd8d8, 0x5400, 0xd8d9, 0xc710, 0xd8da, 0x0700, 0xd8db, 0xc711, 0xd8dc, 0x0f06, 0xd8dd, 0xc718, 0xd8de, 0x700, 0xd8df, 0xc719, 0xd8e0, 0x0f06, 0xd8e1, 0xc720, 0xd8e2, 0x4700, 0xd8e3, 0xc721, 0xd8e4, 0x0f06, 0xd8e5, 0xc728, 0xd8e6, 0x0700, 0xd8e7, 0xc729, 0xd8e8, 0x1207, 0xd8e9, 0xc801, 0xd8ea, 0x7f50, 0xd8eb, 0xc802, 0xd8ec, 0x7760, 0xd8ed, 0xc803, 0xd8ee, 0x7fce, 0xd8ef, 0xc804, 0xd8f0, 0x520e, 0xd8f1, 0xc805, 0xd8f2, 0x5c11, 0xd8f3, 0xc806, 0xd8f4, 0x3c51, 0xd8f5, 0xc807, 0xd8f6, 0x4061, 0xd8f7, 0xc808, 0xd8f8, 0x49c1, 0xd8f9, 0xc809, 0xd8fa, 0x3840, 0xd8fb, 0xc80a, 0xd8fc, 0x0000, 0xd8fd, 0xc821, 0xd8fe, 0x0002, 0xd8ff, 0xc822, 0xd900, 0x0046, 0xd901, 0xc844, 0xd902, 0x182f, 0xd903, 0xc849, 0xd904, 0x0400, 0xd905, 0xc84a, 0xd906, 0x0002, 0xd907, 0xc013, 0xd908, 0xf341, 0xd909, 0xc084, 0xd90a, 0x0030, 0xd90b, 0xc904, 0xd90c, 0x1401, 0xd90d, 0xcb0c, 0xd90e, 0x0004, 0xd90f, 0xcb0e, 0xd910, 0xa00a, 0xd911, 0xcb0f, 0xd912, 0xc0c0, 0xd913, 0xcb10, 0xd914, 0xc0c0, 0xd915, 0xcb11, 0xd916, 0x00a0, 0xd917, 0xcb12, 0xd918, 0x0007, 0xd919, 0xc241, 0xd91a, 0xa000, 0xd91b, 0xc243, 0xd91c, 0x7fe0, 0xd91d, 0xc604, 0xd91e, 0x000e, 0xd91f, 0xc609, 0xd920, 0x00f5, 0xd921, 0xc611, 0xd922, 0x000e, 0xd923, 0xc660, 0xd924, 0x9600, 0xd925, 0xc687, 0xd926, 0x0004, 0xd927, 0xc60a, 0xd928, 0x04f5, 0xd929, 0x0000, 0xd92a, 0x2741, 0xd92b, 0x3021, 0xd92c, 0x1001, 0xd92d, 0xc620, 0xd92e, 0x14e5, 0xd92f, 0xc621, 0xd930, 0xc53d, 0xd931, 0xc622, 0xd932, 0x3cbe, 0xd933, 0xc623, 0xd934, 0x4452, 0xd935, 0xc624, 0xd936, 0xc5c5, 0xd937, 0xc625, 0xd938, 0xe01e, 0xd939, 0xc627, 0xd93a, 0x0000, 0xd93b, 0xc628, 0xd93c, 0x0000, 0xd93d, 0xc62c, 0xd93e, 0x0000, 0xd93f, 0xc90a, 0xd940, 0x3a7c, 0xd941, 0x0000, 0xd942, 0x2b84, 0xd943, 0x3c74, 0xd944, 0x6435, 0xd945, 0xdff4, 0xd946, 0x6435, 0xd947, 0x2806, 0xd948, 0x3006, 0xd949, 0x8565, 0xd94a, 0x2b24, 0xd94b, 0x3c24, 0xd94c, 0x6436, 0xd94d, 0x1002, 0xd94e, 0x2b24, 0xd94f, 0x3c24, 0xd950, 0x6436, 0xd951, 0x4045, 0xd952, 0x8656, 0xd953, 0x5663, 0xd954, 0x0302, 0xd955, 0x401e, 0xd956, 0x1002, 0xd957, 0x2807, 0xd958, 0x31a7, 0xd959, 0x20c4, 0xd95a, 0x3c24, 0xd95b, 0x6724, 0xd95c, 0x2ff7, 0xd95d, 0x30f7, 0xd95e, 0x20c4, 0xd95f, 0x3c04, 0xd960, 0x6724, 0xd961, 0x1002, 0xd962, 0x2807, 0xd963, 0x3187, 0xd964, 0x20c4, 0xd965, 0x3c24, 0xd966, 0x6724, 0xd967, 0x2fe4, 0xd968, 0x3dc4, 0xd969, 0x6437, 0xd96a, 0x20c4, 0xd96b, 0x3c04, 0xd96c, 0x6724, 0xd96d, 0x1002, 0xd96e, 0x24f4, 0xd96f, 0x3c64, 0xd970, 0x6436, 0xd971, 0xdff4, 0xd972, 0x6436, 0xd973, 0x1002, 0xd974, 0x2006, 0xd975, 0x3d76, 0xd976, 0xc161, 0xd977, 0x6134, 0xd978, 0x6135, 0xd979, 0x5443, 0xd97a, 0x0303, 0xd97b, 0x6524, 0xd97c, 0x00fb, 0xd97d, 0x1002, 0xd97e, 
0x20d4, 0xd97f, 0x3c24, 0xd980, 0x2025, 0xd981, 0x3005, 0xd982, 0x6524, 0xd983, 0x1002, 0xd984, 0xd019, 0xd985, 0x2104, 0xd986, 0x3c24, 0xd987, 0x2105, 0xd988, 0x3805, 0xd989, 0x6524, 0xd98a, 0xdff4, 0xd98b, 0x4005, 0xd98c, 0x6524, 0xd98d, 0x2e8d, 0xd98e, 0x303d, 0xd98f, 0x2408, 0xd990, 0x35d8, 0xd991, 0x5dd3, 0xd992, 0x0307, 0xd993, 0x8887, 0xd994, 0x63a7, 0xd995, 0x8887, 0xd996, 0x63a7, 0xd997, 0xdffd, 0xd998, 0x00f9, 0xd999, 0x1002, 0xd99a, 0x866a, 0xd99b, 0x6138, 0xd99c, 0x5883, 0xd99d, 0x2aa2, 0xd99e, 0x3022, 0xd99f, 0x1302, 0xd9a0, 0x2ff7, 0xd9a1, 0x3007, 0xd9a2, 0x8785, 0xd9a3, 0xb887, 0xd9a4, 0x8786, 0xd9a5, 0xb8c6, 0xd9a6, 0x5a53, 0xd9a7, 0x29b2, 0xd9a8, 0x3022, 0xd9a9, 0x13c2, 0xd9aa, 0x2474, 0xd9ab, 0x3c84, 0xd9ac, 0x64d7, 0xd9ad, 0x64d7, 0xd9ae, 0x2ff5, 0xd9af, 0x3c05, 0xd9b0, 0x8757, 0xd9b1, 0xb886, 0xd9b2, 0x9767, 0xd9b3, 0x67c4, 0xd9b4, 0x6f72, 0xd9b5, 0x1002, 0xd9b6, 0x0000, }; int i, err; /* set uC clock and activate it */ err = set_phy_regs(phy, uCclock40MHz); msleep(500); if (err) return err; err = set_phy_regs(phy, uCclockActivate); msleep(500); if (err) return err; for (i = 0; i < ARRAY_SIZE(twinax_edc) && !err; i += 2) err = mdio_write(phy, MDIO_DEV_PMA_PMD, twinax_edc[i], twinax_edc[i + 1]); /* activate uC */ err = set_phy_regs(phy, uCactivate); if (!err) phy->priv = edc_twinax; return err; } /* * Return Module Type. */ static int ael2020_get_module_type(struct cphy *phy, int delay_ms) { int v; unsigned int stat; v = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL2020_GPIO_STAT, &stat); if (v) return v; if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) { /* module absent */ return phy_modtype_none; } return ael2xxx_get_module_type(phy, delay_ms); } /* * Enable PHY interrupts. We enable "Module Detection" interrupts (on any * state transition) and then generic Link Alarm Status Interrupt (LASI). */ static int ael2020_intr_enable(struct cphy *phy) { struct reg_val regs[] = { { MDIO_DEV_PMA_PMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT, 0xffff, 0x4 }, { MDIO_DEV_PMA_PMD, AEL2020_GPIO_CTRL, 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) }, { MDIO_DEV_PMA_PMD, AEL2020_GPIO_CTRL, 0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) }, /* end */ { 0, 0, 0, 0 } }; int err; err = set_phy_regs(phy, regs); if (err) return err; /* enable standard Link Alarm Status Interrupts */ err = t3_phy_lasi_intr_enable(phy); if (err) return err; return 0; } /* * Disable PHY interrupts. The mirror of the above ... */ static int ael2020_intr_disable(struct cphy *phy) { struct reg_val regs[] = { { MDIO_DEV_PMA_PMD, AEL2020_GPIO_CTRL, 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) }, { MDIO_DEV_PMA_PMD, AEL2020_GPIO_CTRL, 0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) }, /* end */ { 0, 0, 0, 0 } }; int err; err = set_phy_regs(phy, regs); if (err) return err; /* disable standard Link Alarm Status Interrupts */ return t3_phy_lasi_intr_disable(phy); } /* * Clear PHY interrupt state. */ static int ael2020_intr_clear(struct cphy *phy) { unsigned int stat; int err = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL2020_GPIO_INTR, &stat); return err ? err : t3_phy_lasi_intr_clear(phy); } /* * Common register settings for the AEL2020 when it comes out of reset. 
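 */

/*
 * (Editor's note, hedged; the example table below is illustrative only.)
 * Tables like ael2020_reset_regs[] are consumed by set_phy_regs() near
 * the top of this file: clear_bits == 0xffff asks for a plain write of
 * set_bits, any other mask turns into a read-modify-write through
 * t3_mdio_change_bits(), and a zero mmd_addr terminates the walk:
 */
#if 0	/* illustrative only */
static struct reg_val example_regs[] = {
	{ MDIO_DEV_PMA_PMD, 0xc003, 0xffff, 0x3101 },	/* write 0x3101 */
	{ MDIO_DEV_PMA_PMD, 0xc017, 0, 1 << 5 },	/* set bit 5 only */
	{ 0, 0, 0, 0 }					/* terminator */
};
#endif

/*
 * Common register settings for the AEL2020 when it comes out of reset: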
*/ static struct reg_val ael2020_reset_regs[] = { { MDIO_DEV_PMA_PMD, 0xc003, 0xffff, 0x3101 }, { MDIO_DEV_PMA_PMD, 0xcd40, 0xffff, 0x0001 }, { MDIO_DEV_PMA_PMD, 0xca12, 0xffff, 0x0100 }, { MDIO_DEV_PMA_PMD, 0xca22, 0xffff, 0x0100 }, { MDIO_DEV_PMA_PMD, 0xca42, 0xffff, 0x0100 }, { MDIO_DEV_PMA_PMD, 0xff02, 0xffff, 0x0023 }, { MDIO_DEV_PMA_PMD, 0xff03, 0xffff, 0x0000 }, { MDIO_DEV_PMA_PMD, 0xff04, 0xffff, 0x0000 }, { MDIO_DEV_PMA_PMD, 0xc20d, 0xffff, 0x0002 }, /* end */ { 0, 0, 0, 0 } }; /* * Reset the PHY and put it into a canonical operating state. */ static int ael2020_reset(struct cphy *phy, int wait) { int err; unsigned int lasi_ctrl; /* grab current interrupt state */ err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, &lasi_ctrl); if (err) return err; err = t3_phy_reset(phy, MDIO_DEV_PMA_PMD, 125); if (err) return err; msleep(100); /* basic initialization for all module types */ phy->priv = edc_none; err = set_phy_regs(phy, ael2020_reset_regs); if (err) return err; msleep(100); /* determine module type and perform appropriate initialization */ err = ael2020_get_module_type(phy, 0); if (err < 0) return err; phy->modtype = (u8)err; if (err == phy_modtype_none) err = 0; else if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) err = ael2020_setup_twinax_edc(phy, err); else err = ael2020_setup_sr_edc(phy); if (err) return err; /* reset wipes out interrupts, reenable them if they were on */ if (lasi_ctrl & 1) err = ael2020_intr_enable(phy); return err; } /* * Handle a PHY interrupt. */ static int ael2020_intr_handler(struct cphy *phy) { unsigned int stat; int ret, edc_needed, cause = 0; ret = mdio_read(phy, MDIO_DEV_PMA_PMD, AEL2020_GPIO_INTR, &stat); if (ret) return ret; if (stat & (0x1 << AEL2020_GPIO_MODDET)) { /* modules have max 300 ms init time after hot plug */ ret = ael2020_get_module_type(phy, 300); if (ret < 0) return ret; phy->modtype = (u8)ret; if (ret == phy_modtype_none) edc_needed = phy->priv; /* on unplug retain EDC */ else if (ret == phy_modtype_twinax || ret == phy_modtype_twinax_long) edc_needed = edc_twinax; else edc_needed = edc_sr; if (edc_needed != phy->priv) { ret = ael2020_reset(phy, 0); return ret ? ret : cphy_cause_module_change; } cause = cphy_cause_module_change; } ret = t3_phy_lasi_intr_handler(phy); if (ret < 0) return ret; ret |= cause; if (!ret) ret |= cphy_cause_link_change; return ret; } static struct cphy_ops ael2020_ops = { #ifdef C99_NOT_SUPPORTED ael2020_reset, ael2020_intr_enable, ael2020_intr_disable, ael2020_intr_clear, ael2020_intr_handler, NULL, NULL, NULL, NULL, NULL, get_link_status_r, ael1002_power_down, #else .reset = ael2020_reset, .intr_enable = ael2020_intr_enable, .intr_disable = ael2020_intr_disable, .intr_clear = ael2020_intr_clear, .intr_handler = ael2020_intr_handler, .get_link_status = get_link_status_r, .power_down = ael1002_power_down, #endif }; int t3_ael2020_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { int err; struct cphy *phy = &pinfo->phy; cphy_init(phy, pinfo->adapter, pinfo, phy_addr, &ael2020_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | SUPPORTED_IRQ, "10GBASE-R"); msleep(125); err = set_phy_regs(phy, ael2020_reset_regs); if (err) return err; msleep(100); err = ael2020_get_module_type(phy, 0); if (err >= 0) phy->modtype = err; ael_laser_down(phy, 0); return 0; } /* * Get link status for a 10GBASE-X device. 
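 *
 * (Editor's note, hedged.)  Unlike the 10GBASE-R path earlier in this
 * file, the -X decode samples bit 12 of both the PCS and XS status
 * registers (hence the two ">> 12" shifts below), and the MFC maps the
 * result onto PHY_LINK_UP or PHY_LINK_DOWN only; no PHY_LINK_PARTIAL
 * state is derived for 10GBASE-X.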
*/ -static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed, +static int get_link_status_x(struct cphy *phy, int *link_state, int *speed, int *duplex, int *fc) { - if (link_ok) { + if (link_state) { unsigned int stat0, stat1, stat2; int err = mdio_read(phy, MDIO_DEV_PMA_PMD, PMD_RSD, &stat0); if (!err) err = mdio_read(phy, MDIO_DEV_PCS, PCS_STAT1_X, &stat1); if (!err) err = mdio_read(phy, MDIO_DEV_XGXS, XS_LN_STAT, &stat2); if (err) return err; - *link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1; + if ((stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1) + *link_state = PHY_LINK_UP; + else + *link_state = PHY_LINK_DOWN; } if (speed) *speed = SPEED_10000; if (duplex) *duplex = DUPLEX_FULL; return 0; } #ifdef C99_NOT_SUPPORTED static struct cphy_ops qt2045_ops = { ael1006_reset, t3_phy_lasi_intr_enable, t3_phy_lasi_intr_disable, t3_phy_lasi_intr_clear, t3_phy_lasi_intr_handler, NULL, NULL, NULL, NULL, NULL, get_link_status_x, ael1002_power_down, }; #else static struct cphy_ops qt2045_ops = { .reset = ael1006_reset, .intr_enable = t3_phy_lasi_intr_enable, .intr_disable = t3_phy_lasi_intr_disable, .intr_clear = t3_phy_lasi_intr_clear, .intr_handler = t3_phy_lasi_intr_handler, .get_link_status = get_link_status_x, .power_down = ael1002_power_down, }; #endif int t3_qt2045_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { unsigned int stat; struct cphy *phy = &pinfo->phy; cphy_init(phy, pinfo->adapter, pinfo, phy_addr, &qt2045_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, "10GBASE-CX4"); /* * Some cards where the PHY is supposed to be at address 0 actually * have it at 1. */ if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) && stat == 0xffff) phy->addr = 1; return 0; } static int xaui_direct_reset(struct cphy *phy, int wait) { return 0; } -static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok, +static int xaui_direct_get_link_status(struct cphy *phy, int *link_state, int *speed, int *duplex, int *fc) { - if (link_ok) { + if (link_state) { unsigned int status; adapter_t *adapter = phy->adapter; status = t3_read_reg(adapter, XGM_REG(A_XGM_SERDES_STAT0, phy->addr)) | t3_read_reg(adapter, XGM_REG(A_XGM_SERDES_STAT1, phy->addr)) | t3_read_reg(adapter, XGM_REG(A_XGM_SERDES_STAT2, phy->addr)) | t3_read_reg(adapter, XGM_REG(A_XGM_SERDES_STAT3, phy->addr)); - *link_ok = !(status & F_LOWSIG0); + *link_state = status & F_LOWSIG0 ? 
PHY_LINK_DOWN : PHY_LINK_UP; } if (speed) *speed = SPEED_10000; if (duplex) *duplex = DUPLEX_FULL; return 0; } static int xaui_direct_power_down(struct cphy *phy, int enable) { return 0; } #ifdef C99_NOT_SUPPORTED static struct cphy_ops xaui_direct_ops = { xaui_direct_reset, ael1002_intr_noop, ael1002_intr_noop, ael1002_intr_noop, ael1002_intr_noop, NULL, NULL, NULL, NULL, NULL, xaui_direct_get_link_status, xaui_direct_power_down, }; #else static struct cphy_ops xaui_direct_ops = { .reset = xaui_direct_reset, .intr_enable = ael1002_intr_noop, .intr_disable = ael1002_intr_noop, .intr_clear = ael1002_intr_noop, .intr_handler = ael1002_intr_noop, .get_link_status = xaui_direct_get_link_status, .power_down = xaui_direct_power_down, }; #endif int t3_xaui_direct_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { cphy_init(&pinfo->phy, pinfo->adapter, pinfo, phy_addr, &xaui_direct_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, "10GBASE-CX4"); return 0; } Index: stable/9/sys/dev/cxgb/common/cxgb_aq100x.c =================================================================== --- stable/9/sys/dev/cxgb/common/cxgb_aq100x.c (revision 277343) +++ stable/9/sys/dev/cxgb/common/cxgb_aq100x.c (revision 277344) @@ -1,542 +1,542 @@ /************************************************************************** Copyright (c) 2009 Chelsio Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Chelsio Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <cxgb_include.h>

#undef msleep
#define msleep t3_os_sleep

enum {
	/* MDIO_DEV_PMA_PMD registers */
	AQ_LINK_STAT = 0xe800,

	/* MDIO_DEV_XGXS registers */
	AQ_XAUI_RX_CFG = 0xc400,
	AQ_XAUI_KX_CFG = 0xc440,
	AQ_XAUI_TX_CFG = 0xe400,

	/* MDIO_DEV_ANEG registers */
	AQ_100M_CTRL = 0x0010,
	AQ_10G_CTRL = 0x0020,
	AQ_1G_CTRL = 0xc400,
	AQ_ANEG_STAT = 0xc800,

	/* MDIO_DEV_VEND1 registers */
	AQ_FW_VERSION = 0x0020,
	AQ_THERMAL_THR = 0xc421,
	AQ_THERMAL1 = 0xc820,
	AQ_THERMAL2 = 0xc821,
	AQ_IFLAG_GLOBAL = 0xfc00,
	AQ_IMASK_GLOBAL = 0xff00,
};

#define AQBIT(x)	(1 << (0x##x))
#define ADV_1G_FULL	AQBIT(f)
#define ADV_1G_HALF	AQBIT(e)
#define ADV_10G_FULL	AQBIT(c)

#define AQ_WRITE_REGS(phy, regs) do { \
	int i; \
	for (i = 0; i < ARRAY_SIZE(regs); i++) { \
		(void) mdio_write(phy, regs[i].mmd, regs[i].reg, regs[i].val); \
	} \
} while (0)

#define AQ_READ_REGS(phy, regs) do { \
	unsigned i, v; \
	for (i = 0; i < ARRAY_SIZE(regs); i++) { \
		(void) mdio_read(phy, regs[i].mmd, regs[i].reg, &v); \
	} \
} while (0)

/*
 * Return value is temperature in Celsius, 0xffff for error or don't know.
 */
static int
aq100x_temperature(struct cphy *phy)
{
	unsigned int v;

	if (mdio_read(phy, MDIO_DEV_VEND1, AQ_THERMAL2, &v) ||
	    v == 0xffff || (v & 1) != 1)
		return (0xffff);

	if (mdio_read(phy, MDIO_DEV_VEND1, AQ_THERMAL1, &v))
		return (0xffff);

	return ((int)((signed char)(v >> 8)));
}

static int
aq100x_set_defaults(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_VEND1, AQ_THERMAL_THR, 0x6c00);
}

static int
aq100x_reset(struct cphy *phy, int wait)
{
	int err;

	err = t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
	if (!err)
		err = aq100x_set_defaults(phy);

	return (err);
}

static int
aq100x_intr_enable(struct cphy *phy)
{
	struct {
		int mmd;
		int reg;
		int val;
	} imasks[] = {
		{MDIO_DEV_VEND1, 0xd400, AQBIT(e)},
		{MDIO_DEV_VEND1, 0xff01, AQBIT(2)},
		{MDIO_DEV_VEND1, AQ_IMASK_GLOBAL, AQBIT(0)}
	};

	AQ_WRITE_REGS(phy, imasks);
	return (0);
}

static int
aq100x_intr_disable(struct cphy *phy)
{
	struct {
		int mmd;
		int reg;
		int val;
	} imasks[] = {
		{MDIO_DEV_VEND1, 0xd400, 0},
		{MDIO_DEV_VEND1, 0xff01, 0},
		{MDIO_DEV_VEND1, AQ_IMASK_GLOBAL, 0}
	};

	AQ_WRITE_REGS(phy, imasks);
	return (0);
}

static int
aq100x_intr_clear(struct cphy *phy)
{
	struct {
		int mmd;
		int reg;
	} iclr[] = {
		{MDIO_DEV_VEND1, 0xcc00},
		{MDIO_DEV_VEND1, AQ_IMASK_GLOBAL} /* needed? */
	};

	AQ_READ_REGS(phy, iclr);
	return (0);
}

static int
aq100x_vendor_intr(struct cphy *phy, int *rc)
{
	int err;
	unsigned int cause, v;

	err = mdio_read(phy, MDIO_DEV_VEND1, 0xfc01, &cause);
	if (err)
		return (err);

	if (cause & AQBIT(2)) {
		err = mdio_read(phy, MDIO_DEV_VEND1, 0xcc00, &v);
		if (err)
			return (err);

		if (v & AQBIT(e)) {
			CH_WARN(phy->adapter, "PHY%d: temperature is now %dC\n",
			    phy->addr, aq100x_temperature(phy));

			t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN,
			    phy->addr ?
F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL, 0); *rc |= cphy_cause_alarm; } cause &= ~4; } if (cause) CH_WARN(phy->adapter, "PHY%d: unhandled vendor interrupt" " (0x%x)\n", phy->addr, cause); return (0); } static int aq100x_intr_handler(struct cphy *phy) { int err, rc = 0; unsigned int cause; err = mdio_read(phy, MDIO_DEV_VEND1, AQ_IFLAG_GLOBAL, &cause); if (err) return (err); if (cause & AQBIT(0)) { err = aq100x_vendor_intr(phy, &rc); if (err) return (err); cause &= ~AQBIT(0); } if (cause) CH_WARN(phy->adapter, "PHY%d: unhandled interrupt (0x%x)\n", phy->addr, cause); return (rc); } static int aq100x_power_down(struct cphy *phy, int off) { int err, wait = 500; unsigned int v; err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR, BMCR_PDOWN, off ? BMCR_PDOWN : 0); if (err || off) return (err); msleep(300); do { err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMCR, &v); if (err) return (err); v &= BMCR_RESET; if (v) msleep(10); } while (v && --wait); if (v) { CH_WARN(phy->adapter, "PHY%d: power-up timed out (0x%x).\n", phy->addr, v); return (ETIMEDOUT); } return (0); } static int aq100x_autoneg_enable(struct cphy *phy) { int err; err = aq100x_power_down(phy, 0); if (!err) err = t3_mdio_change_bits(phy, MDIO_DEV_ANEG, MII_BMCR, BMCR_RESET, BMCR_ANENABLE | BMCR_ANRESTART); return (err); } static int aq100x_autoneg_restart(struct cphy *phy) { return aq100x_autoneg_enable(phy); } static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map) { unsigned int adv; int err; /* 10G advertisement */ adv = 0; if (advertise_map & ADVERTISED_10000baseT_Full) adv |= ADV_10G_FULL; err = t3_mdio_change_bits(phy, MDIO_DEV_ANEG, AQ_10G_CTRL, ADV_10G_FULL, adv); if (err) return (err); /* 1G advertisement */ adv = 0; if (advertise_map & ADVERTISED_1000baseT_Full) adv |= ADV_1G_FULL; if (advertise_map & ADVERTISED_1000baseT_Half) adv |= ADV_1G_HALF; err = t3_mdio_change_bits(phy, MDIO_DEV_ANEG, AQ_1G_CTRL, ADV_1G_FULL | ADV_1G_HALF, adv); if (err) return (err); /* 100M, pause advertisement */ adv = 0; if (advertise_map & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise_map & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (advertise_map & ADVERTISED_Pause) adv |= ADVERTISE_PAUSE_CAP; if (advertise_map & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; err = t3_mdio_change_bits(phy, MDIO_DEV_ANEG, AQ_100M_CTRL, 0xfe0, adv); return (err); } static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable) { return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR, BMCR_LOOPBACK, enable ? 
BMCR_LOOPBACK : 0); } static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex) { int err, set; if (speed == SPEED_100) set = BMCR_SPEED100; else if (speed == SPEED_1000) set = BMCR_SPEED1000; else if (speed == SPEED_10000) set = BMCR_SPEED1000 | BMCR_SPEED100; else return (EINVAL); if (duplex != DUPLEX_FULL) return (EINVAL); err = t3_mdio_change_bits(phy, MDIO_DEV_ANEG, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART, 0); if (err) return (err); err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR, BMCR_SPEED1000 | BMCR_SPEED100, set); if (err) return (err); return (0); } static int -aq100x_get_link_status(struct cphy *phy, int *link_ok, int *speed, int *duplex, +aq100x_get_link_status(struct cphy *phy, int *link_state, int *speed, int *duplex, int *fc) { int err; unsigned int v, link = 0; err = mdio_read(phy, MDIO_DEV_PMA_PMD, AQ_LINK_STAT, &v); if (err) return (err); if (v == 0xffff || !(v & 1)) goto done; err = mdio_read(phy, MDIO_DEV_ANEG, MII_BMCR, &v); if (err) return (err); if (v & 0x8000) goto done; if (v & BMCR_ANENABLE) { err = mdio_read(phy, MDIO_DEV_ANEG, 1, &v); if (err) return (err); if ((v & 0x20) == 0) goto done; err = mdio_read(phy, MDIO_DEV_ANEG, AQ_ANEG_STAT, &v); if (err) return (err); if (speed) { switch (v & 0x6) { case 0x6: *speed = SPEED_10000; break; case 0x4: *speed = SPEED_1000; break; case 0x2: *speed = SPEED_100; break; case 0x0: *speed = SPEED_10; break; } } if (duplex) *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF; if (fc) { unsigned int lpa, adv; err = mdio_read(phy, MDIO_DEV_ANEG, 0x13, &lpa); if (!err) err = mdio_read(phy, MDIO_DEV_ANEG, AQ_100M_CTRL, &adv); if (err) return err; if (lpa & adv & ADVERTISE_PAUSE_CAP) *fc = PAUSE_RX | PAUSE_TX; else if (lpa & ADVERTISE_PAUSE_CAP && lpa & ADVERTISE_PAUSE_ASYM && adv & ADVERTISE_PAUSE_ASYM) *fc = PAUSE_TX; else if (lpa & ADVERTISE_PAUSE_ASYM && adv & ADVERTISE_PAUSE_CAP) *fc = PAUSE_RX; else *fc = 0; } } else { err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMCR, &v); if (err) return (err); v &= BMCR_SPEED1000 | BMCR_SPEED100; if (speed) { if (v == (BMCR_SPEED1000 | BMCR_SPEED100)) *speed = SPEED_10000; else if (v == BMCR_SPEED1000) *speed = SPEED_1000; else if (v == BMCR_SPEED100) *speed = SPEED_100; else *speed = SPEED_10; } if (duplex) *duplex = DUPLEX_FULL; } link = 1; done: - if (link_ok) - *link_ok = link; + if (link_state) + *link_state = link ? PHY_LINK_UP : PHY_LINK_DOWN; return (0); } static struct cphy_ops aq100x_ops = { .reset = aq100x_reset, .intr_enable = aq100x_intr_enable, .intr_disable = aq100x_intr_disable, .intr_clear = aq100x_intr_clear, .intr_handler = aq100x_intr_handler, .autoneg_enable = aq100x_autoneg_enable, .autoneg_restart = aq100x_autoneg_restart, .advertise = aq100x_advertise, .set_loopback = aq100x_set_loopback, .set_speed_duplex = aq100x_set_speed_duplex, .get_link_status = aq100x_get_link_status, .power_down = aq100x_power_down, }; int t3_aq100x_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { struct cphy *phy = &pinfo->phy; unsigned int v, v2, gpio, wait; int err; adapter_t *adapter = pinfo->adapter; cphy_init(&pinfo->phy, adapter, pinfo, phy_addr, &aq100x_ops, mdio_ops, SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI | SUPPORTED_MISC_IRQ, "1000/10GBASE-T"); /* * Hard reset the PHY. */ gpio = phy_addr ? 
F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL; t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0); msleep(1); t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio); /* * Give it enough time to load the firmware and get ready for mdio. */ msleep(1000); wait = 500; /* in 10ms increments */ do { err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMCR, &v); if (err || v == 0xffff) { /* Allow prep_adapter to succeed when ffff is read */ CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n", phy_addr, err, v); goto done; } v &= BMCR_RESET; if (v) msleep(10); } while (v && --wait); if (v) { CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n", phy_addr, v); goto done; /* let prep_adapter succeed */ } /* Firmware version check. */ (void) mdio_read(phy, MDIO_DEV_VEND1, AQ_FW_VERSION, &v); if (v < 0x115) CH_WARN(adapter, "PHY%d: unknown firmware %d.%d\n", phy_addr, v >> 8, v & 0xff); /* The PHY should start in really-low-power mode. */ (void) mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMCR, &v); if ((v & BMCR_PDOWN) == 0) CH_WARN(adapter, "PHY%d does not start in low power mode.\n", phy_addr); /* * Verify XAUI and 1000-X settings, but let prep succeed no matter what. */ v = v2 = 0; (void) mdio_read(phy, MDIO_DEV_XGXS, AQ_XAUI_RX_CFG, &v); (void) mdio_read(phy, MDIO_DEV_XGXS, AQ_XAUI_TX_CFG, &v2); if (v != 0x1b || v2 != 0x1b) CH_WARN(adapter, "PHY%d: incorrect XAUI settings " "(0x%x, 0x%x).\n", phy_addr, v, v2); v = 0; (void) mdio_read(phy, MDIO_DEV_XGXS, AQ_XAUI_KX_CFG, &v); if ((v & 0xf) != 0xf) CH_WARN(adapter, "PHY%d: incorrect 1000-X settings " "(0x%x).\n", phy_addr, v); (void) aq100x_set_defaults(phy); done: return (err); } Index: stable/9/sys/dev/cxgb/common/cxgb_common.h =================================================================== --- stable/9/sys/dev/cxgb/common/cxgb_common.h (revision 277343) +++ stable/9/sys/dev/cxgb/common/cxgb_common.h (revision 277344) @@ -1,859 +1,866 @@ /************************************************************************** Copyright (c) 2007-2009, Chelsio Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Chelsio Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
$FreeBSD$ ***************************************************************************/ #ifndef __CHELSIO_COMMON_H #define __CHELSIO_COMMON_H #include enum { MAX_FRAME_SIZE = 10240, /* max MAC frame size, includes header + FCS */ EEPROMSIZE = 8192, /* Serial EEPROM size */ SERNUM_LEN = 16, /* Serial # length */ ECNUM_LEN = 16, /* EC # length */ RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */ TCB_SIZE = 128, /* TCB size */ NMTUS = 16, /* size of MTU table */ NCCTRL_WIN = 32, /* # of congestion control windows */ NTX_SCHED = 8, /* # of HW Tx scheduling queues */ PROTO_SRAM_LINES = 128, /* size of protocol sram */ EXACT_ADDR_FILTERS = 8, /* # of HW exact match filters */ }; #define MAX_RX_COALESCING_LEN 12288U enum { PAUSE_RX = 1 << 0, PAUSE_TX = 1 << 1, PAUSE_AUTONEG = 1 << 2 }; enum { SUPPORTED_LINK_IRQ = 1 << 24, /* skip 25 */ SUPPORTED_MISC_IRQ = 1 << 26, SUPPORTED_IRQ = (SUPPORTED_LINK_IRQ | SUPPORTED_MISC_IRQ), }; enum { /* adapter interrupt-maintained statistics */ STAT_ULP_CH0_PBL_OOB, STAT_ULP_CH1_PBL_OOB, STAT_PCI_CORR_ECC, IRQ_NUM_STATS /* keep last */ }; enum { TP_VERSION_MAJOR = 1, TP_VERSION_MINOR = 1, TP_VERSION_MICRO = 0 }; #define S_TP_VERSION_MAJOR 16 #define M_TP_VERSION_MAJOR 0xFF #define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR) #define G_TP_VERSION_MAJOR(x) \ (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR) #define S_TP_VERSION_MINOR 8 #define M_TP_VERSION_MINOR 0xFF #define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR) #define G_TP_VERSION_MINOR(x) \ (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR) #define S_TP_VERSION_MICRO 0 #define M_TP_VERSION_MICRO 0xFF #define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO) #define G_TP_VERSION_MICRO(x) \ (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO) enum { FW_VERSION_MAJOR = 7, FW_VERSION_MINOR = 11, FW_VERSION_MICRO = 0 }; enum { LA_CTRL = 0x80, LA_DATA = 0x84, LA_ENTRIES = 512 }; enum { IOQ_ENTRIES = 7 }; enum { SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */ SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */ SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */ }; enum sge_context_type { /* SGE egress context types */ SGE_CNTXT_RDMA = 0, SGE_CNTXT_ETH = 2, SGE_CNTXT_OFLD = 4, SGE_CNTXT_CTRL = 5 }; enum { AN_PKT_SIZE = 32, /* async notification packet size */ IMMED_PKT_SIZE = 48 /* packet size for immediate data */ }; struct sg_ent { /* SGE scatter/gather entry */ __be32 len[2]; __be64 addr[2]; }; #ifndef SGE_NUM_GENBITS /* Must be 1 or 2 */ # define SGE_NUM_GENBITS 2 #endif #define TX_DESC_FLITS 16U #define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS) #define MAX_PHYINTRS 4 struct cphy; struct mdio_ops { int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int *val); int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int val); }; struct adapter_info { unsigned char nports0; /* # of ports on channel 0 */ unsigned char nports1; /* # of ports on channel 1 */ unsigned char phy_base_addr; /* MDIO PHY base address */ unsigned int gpio_out; /* GPIO output settings */ unsigned char gpio_intr[MAX_PHYINTRS]; /* GPIO PHY IRQ pins */ unsigned long caps; /* adapter capabilities */ const struct mdio_ops *mdio_ops; /* MDIO operations */ const char *desc; /* product description */ }; struct mc5_stats { unsigned long parity_err; unsigned long active_rgn_full; unsigned long nfa_srch_err; unsigned long unknown_cmd; unsigned long reqq_parity_err; unsigned long dispq_parity_err; unsigned long del_act_empty; }; struct mc7_stats { unsigned long corr_err; 
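/*
 * An illustration (not part of the header) of the S_/M_/V_/G_ shift-and-mask
 * convention defined above: V_*() places a field value at its bit position
 * and G_*() extracts it back out of a packed word, so the two round-trip.
 */
static inline u32 tp_version_pack(void)
{
	/* G_TP_VERSION_MAJOR() applied to the result recovers the major part */
	return V_TP_VERSION_MAJOR(TP_VERSION_MAJOR) |
	       V_TP_VERSION_MINOR(TP_VERSION_MINOR) |
	       V_TP_VERSION_MICRO(TP_VERSION_MICRO);
}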
unsigned long uncorr_err; unsigned long parity_err; unsigned long addr_err; }; struct mac_stats { u64 tx_octets; /* total # of octets in good frames */ u64 tx_octets_bad; /* total # of octets in error frames */ u64 tx_frames; /* all good frames */ u64 tx_mcast_frames; /* good multicast frames */ u64 tx_bcast_frames; /* good broadcast frames */ u64 tx_pause; /* # of transmitted pause frames */ u64 tx_deferred; /* frames with deferred transmissions */ u64 tx_late_collisions; /* # of late collisions */ u64 tx_total_collisions; /* # of total collisions */ u64 tx_excess_collisions; /* frame errors from excessive collisions */ u64 tx_underrun; /* # of Tx FIFO underruns */ u64 tx_len_errs; /* # of Tx length errors */ u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */ u64 tx_excess_deferral; /* # of frames with excessive deferral */ u64 tx_fcs_errs; /* # of frames with bad FCS */ u64 tx_frames_64; /* # of Tx frames in a particular range */ u64 tx_frames_65_127; u64 tx_frames_128_255; u64 tx_frames_256_511; u64 tx_frames_512_1023; u64 tx_frames_1024_1518; u64 tx_frames_1519_max; u64 rx_octets; /* total # of octets in good frames */ u64 rx_octets_bad; /* total # of octets in error frames */ u64 rx_frames; /* all good frames */ u64 rx_mcast_frames; /* good multicast frames */ u64 rx_bcast_frames; /* good broadcast frames */ u64 rx_pause; /* # of received pause frames */ u64 rx_fcs_errs; /* # of received frames with bad FCS */ u64 rx_align_errs; /* alignment errors */ u64 rx_symbol_errs; /* symbol errors */ u64 rx_data_errs; /* data errors */ u64 rx_sequence_errs; /* sequence errors */ u64 rx_runt; /* # of runt frames */ u64 rx_jabber; /* # of jabber frames */ u64 rx_short; /* # of short frames */ u64 rx_too_long; /* # of oversized frames */ u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */ u64 rx_frames_64; /* # of Rx frames in a particular range */ u64 rx_frames_65_127; u64 rx_frames_128_255; u64 rx_frames_256_511; u64 rx_frames_512_1023; u64 rx_frames_1024_1518; u64 rx_frames_1519_max; u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */ unsigned long tx_fifo_parity_err; unsigned long rx_fifo_parity_err; unsigned long tx_fifo_urun; unsigned long rx_fifo_ovfl; unsigned long serdes_signal_loss; unsigned long xaui_pcs_ctc_err; unsigned long xaui_pcs_align_change; unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */ unsigned long num_resets; /* # times reset due to stuck TX */ unsigned long link_faults; /* # detected link faults */ }; struct tp_mib_stats { u32 ipInReceive_hi; u32 ipInReceive_lo; u32 ipInHdrErrors_hi; u32 ipInHdrErrors_lo; u32 ipInAddrErrors_hi; u32 ipInAddrErrors_lo; u32 ipInUnknownProtos_hi; u32 ipInUnknownProtos_lo; u32 ipInDiscards_hi; u32 ipInDiscards_lo; u32 ipInDelivers_hi; u32 ipInDelivers_lo; u32 ipOutRequests_hi; u32 ipOutRequests_lo; u32 ipOutDiscards_hi; u32 ipOutDiscards_lo; u32 ipOutNoRoutes_hi; u32 ipOutNoRoutes_lo; u32 ipReasmTimeout; u32 ipReasmReqds; u32 ipReasmOKs; u32 ipReasmFails; u32 reserved[8]; u32 tcpActiveOpens; u32 tcpPassiveOpens; u32 tcpAttemptFails; u32 tcpEstabResets; u32 tcpOutRsts; u32 tcpCurrEstab; u32 tcpInSegs_hi; u32 tcpInSegs_lo; u32 tcpOutSegs_hi; u32 tcpOutSegs_lo; u32 tcpRetransSeg_hi; u32 tcpRetransSeg_lo; u32 tcpInErrs_hi; u32 tcpInErrs_lo; u32 tcpRtoMin; u32 tcpRtoMax; }; struct tp_params { unsigned int nchan; /* # of channels */ unsigned int pmrx_size; /* total PMRX capacity */ unsigned int pmtx_size; /* total PMTX capacity */ unsigned int cm_size; /* total CM capacity */ unsigned int
chan_rx_size; /* per channel Rx size */ unsigned int chan_tx_size; /* per channel Tx size */ unsigned int rx_pg_size; /* Rx page size */ unsigned int tx_pg_size; /* Tx page size */ unsigned int rx_num_pgs; /* # of Rx pages */ unsigned int tx_num_pgs; /* # of Tx pages */ unsigned int ntimer_qs; /* # of timer queues */ unsigned int tre; /* log2 of core clocks per TP tick */ unsigned int dack_re; /* DACK timer resolution */ }; struct qset_params { /* SGE queue set parameters */ unsigned int polling; /* polling/interrupt service for rspq */ unsigned int lro; /* large receive offload */ unsigned int coalesce_usecs; /* irq coalescing timer */ unsigned int rspq_size; /* # of entries in response queue */ unsigned int fl_size; /* # of entries in regular free list */ unsigned int jumbo_size; /* # of entries in jumbo free list */ unsigned int jumbo_buf_size; /* buffer size of jumbo entry */ unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */ unsigned int cong_thres; /* FL congestion threshold */ unsigned int vector; /* Interrupt (line or vector) number */ }; struct sge_params { unsigned int max_pkt_size; /* max offload pkt size */ struct qset_params qset[SGE_QSETS]; }; struct mc5_params { unsigned int mode; /* selects MC5 width */ unsigned int nservers; /* size of server region */ unsigned int nfilters; /* size of filter region */ unsigned int nroutes; /* size of routing region */ }; /* Default MC5 region sizes */ enum { DEFAULT_NSERVERS = 512, DEFAULT_NFILTERS = 128 }; /* MC5 modes, these must be non-0 */ enum { MC5_MODE_144_BIT = 1, MC5_MODE_72_BIT = 2 }; /* MC5 min active region size */ enum { MC5_MIN_TIDS = 16 }; struct vpd_params { unsigned int cclk; unsigned int mclk; unsigned int uclk; unsigned int mdc; unsigned int mem_timing; u8 sn[SERNUM_LEN + 1]; u8 ec[ECNUM_LEN + 1]; u8 eth_base[6]; u8 port_type[MAX_NPORTS]; unsigned short xauicfg[2]; }; struct generic_vpd { u32 offset; u32 len; u8 *data; }; enum { MAX_VPD_BYTES = 32000 }; struct pci_params { unsigned int vpd_cap_addr; unsigned int pcie_cap_addr; unsigned short speed; unsigned char width; unsigned char variant; }; enum { PCI_VARIANT_PCI, PCI_VARIANT_PCIX_MODE1_PARITY, PCI_VARIANT_PCIX_MODE1_ECC, PCI_VARIANT_PCIX_266_MODE2, PCI_VARIANT_PCIE }; struct adapter_params { struct sge_params sge; struct mc5_params mc5; struct tp_params tp; struct vpd_params vpd; struct pci_params pci; const struct adapter_info *info; unsigned short mtus[NMTUS]; unsigned short a_wnd[NCCTRL_WIN]; unsigned short b_wnd[NCCTRL_WIN]; unsigned int nports; /* # of ethernet ports */ unsigned int chan_map; /* bitmap of in-use Tx channels */ unsigned int stats_update_period; /* MAC stats accumulation period */ unsigned int linkpoll_period; /* link poll period in 0.1s */ unsigned int rev; /* chip revision */ unsigned int offload; }; enum { /* chip revisions */ T3_REV_A = 0, T3_REV_B = 2, T3_REV_B2 = 3, T3_REV_C = 4, }; struct trace_params { u32 sip; u32 sip_mask; u32 dip; u32 dip_mask; u16 sport; u16 sport_mask; u16 dport; u16 dport_mask; u32 vlan:12; u32 vlan_mask:12; u32 intf:4; u32 intf_mask:4; u8 proto; u8 proto_mask; }; struct link_config { unsigned int supported; /* link capabilities */ unsigned int advertising; /* advertised capabilities */ unsigned short requested_speed; /* speed user has requested */ unsigned short speed; /* actual link speed */ unsigned char requested_duplex; /* duplex user has requested */ unsigned char duplex; /* actual link duplex */ unsigned char requested_fc; /* flow control user has requested */ unsigned char fc; /* actual link flow 
control */ unsigned char autoneg; /* autonegotiating? */ unsigned int link_ok; /* link up? */ }; #define SPEED_INVALID 0xffff #define DUPLEX_INVALID 0xff struct mc5 { adapter_t *adapter; unsigned int tcam_size; unsigned char part_type; unsigned char parity_enabled; unsigned char mode; struct mc5_stats stats; }; static inline unsigned int t3_mc5_size(const struct mc5 *p) { return p->tcam_size; } struct mc7 { adapter_t *adapter; /* backpointer to adapter */ unsigned int size; /* memory size in bytes */ unsigned int width; /* MC7 interface width */ unsigned int offset; /* register address offset for MC7 instance */ const char *name; /* name of MC7 instance */ struct mc7_stats stats; /* MC7 statistics */ }; static inline unsigned int t3_mc7_size(const struct mc7 *p) { return p->size; } struct cmac { adapter_t *adapter; unsigned int offset; unsigned char nucast; /* # of address filters for unicast MACs */ unsigned char multiport; /* multiple ports connected to this MAC */ unsigned char ext_port; /* external MAC port */ unsigned char promisc_map; /* which external ports are promiscuous */ unsigned int tx_tcnt; unsigned int tx_xcnt; u64 tx_mcnt; unsigned int rx_xcnt; unsigned int rx_ocnt; u64 rx_mcnt; unsigned int toggle_cnt; unsigned int txen; unsigned int was_reset; u64 rx_pause; struct mac_stats stats; }; enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2, MAC_RXFIFO_SIZE = 32768 }; /* IEEE 802.3 specified MDIO devices */ enum { MDIO_DEV_PMA_PMD = 1, MDIO_DEV_WIS = 2, MDIO_DEV_PCS = 3, MDIO_DEV_XGXS = 4, MDIO_DEV_ANEG = 7, MDIO_DEV_VEND1 = 30, MDIO_DEV_VEND2 = 31 }; /* LASI control and status registers */ enum { RX_ALARM_CTRL = 0x9000, TX_ALARM_CTRL = 0x9001, LASI_CTRL = 0x9002, RX_ALARM_STAT = 0x9003, TX_ALARM_STAT = 0x9004, LASI_STAT = 0x9005 }; /* PHY loopback direction */ enum { PHY_LOOPBACK_TX = 1, PHY_LOOPBACK_RX = 2 }; /* PHY interrupt types */ enum { cphy_cause_link_change = 1, cphy_cause_fifo_error = 2, cphy_cause_module_change = 4, cphy_cause_alarm = 8, }; /* PHY module types */ enum { phy_modtype_none, phy_modtype_sr, phy_modtype_lr, phy_modtype_lrm, phy_modtype_twinax, phy_modtype_twinax_long, phy_modtype_unknown }; +enum { + PHY_LINK_DOWN = 0, + PHY_LINK_UP, + PHY_LINK_PARTIAL +}; + /* PHY operations */ struct cphy_ops { int (*reset)(struct cphy *phy, int wait); int (*intr_enable)(struct cphy *phy); int (*intr_disable)(struct cphy *phy); int (*intr_clear)(struct cphy *phy); int (*intr_handler)(struct cphy *phy); int (*autoneg_enable)(struct cphy *phy); int (*autoneg_restart)(struct cphy *phy); int (*advertise)(struct cphy *phy, unsigned int advertise_map); int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable); int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex); - int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed, + int (*get_link_status)(struct cphy *phy, int *link_state, int *speed, int *duplex, int *fc); int (*power_down)(struct cphy *phy, int enable); }; /* A PHY instance */ struct cphy { u8 addr; /* PHY address */ u8 modtype; /* PHY module type */ + u8 rst; unsigned int priv; /* scratch pad */ unsigned int caps; /* PHY capabilities */ adapter_t *adapter; /* associated adapter */ pinfo_t *pinfo; /* associated port */ const char *desc; /* PHY description */ unsigned long fifo_errors; /* FIFO over/under-flows */ const struct cphy_ops *ops; /* PHY operations */ int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int *val); int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, 
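/*
 * A sketch of consuming the tri-state link_state introduced above in place
 * of the old boolean link_ok. The meaning of PHY_LINK_PARTIAL is assumed
 * here (the PHY sees a link that is not yet fully usable); collapsing the
 * enum back to a boolean is therefore lossy.
 */
static inline int link_state_is_up(int link_state)
{
	return (link_state == PHY_LINK_UP);
}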
unsigned int val); }; /* Convenience MDIO read/write wrappers */ static inline int mdio_read(struct cphy *phy, int mmd, int reg, unsigned int *valp) { return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp); } static inline int mdio_write(struct cphy *phy, int mmd, int reg, unsigned int val) { return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val); } /* Convenience initializer */ static inline void cphy_init(struct cphy *phy, adapter_t *adapter, pinfo_t *pinfo, int phy_addr, struct cphy_ops *phy_ops, const struct mdio_ops *mdio_ops, unsigned int caps, const char *desc) { phy->addr = (u8)phy_addr; phy->caps = caps; phy->adapter = adapter; phy->pinfo = pinfo; phy->desc = desc; phy->ops = phy_ops; if (mdio_ops) { phy->mdio_read = mdio_ops->read; phy->mdio_write = mdio_ops->write; } } /* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */ #define MAC_STATS_ACCUM_SECS 180 /* The external MAC needs accumulation every 30 seconds */ #define VSC_STATS_ACCUM_SECS 30 #define XGM_REG(reg_addr, idx) \ ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR)) struct addr_val_pair { unsigned int reg_addr; unsigned int val; }; #include #ifndef PCI_VENDOR_ID_CHELSIO # define PCI_VENDOR_ID_CHELSIO 0x1425 #endif #define for_each_port(adapter, iter) \ for (iter = 0; iter < (adapter)->params.nports; ++iter) #define adapter_info(adap) ((adap)->params.info) static inline int uses_xaui(const adapter_t *adap) { return adapter_info(adap)->caps & SUPPORTED_AUI; } static inline int is_10G(const adapter_t *adap) { return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full; } static inline int is_offload(const adapter_t *adap) { return adap->params.offload; } static inline unsigned int core_ticks_per_usec(const adapter_t *adap) { return adap->params.vpd.cclk / 1000; } static inline unsigned int dack_ticks_to_usec(const adapter_t *adap, unsigned int ticks) { return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap); } static inline unsigned int is_pcie(const adapter_t *adap) { return adap->params.pci.variant == PCI_VARIANT_PCIE; } void t3_set_reg_field(adapter_t *adap, unsigned int addr, u32 mask, u32 val); void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n, unsigned int offset); int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity, int attempts, int delay, u32 *valp); static inline int t3_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, int attempts, int delay) { return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts, delay, NULL); } int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear, unsigned int set); int t3_phy_reset(struct cphy *phy, int mmd, int wait); int t3_phy_advertise(struct cphy *phy, unsigned int advert); int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert); int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex); int t3_phy_lasi_intr_enable(struct cphy *phy); int t3_phy_lasi_intr_disable(struct cphy *phy); int t3_phy_lasi_intr_clear(struct cphy *phy); int t3_phy_lasi_intr_handler(struct cphy *phy); void t3_intr_enable(adapter_t *adapter); void t3_intr_disable(adapter_t *adapter); void t3_intr_clear(adapter_t *adapter); void t3_xgm_intr_enable(adapter_t *adapter, int idx); void t3_xgm_intr_disable(adapter_t *adapter, int idx); void t3_port_intr_enable(adapter_t *adapter, int idx); void t3_port_intr_disable(adapter_t *adapter, int idx); void t3_port_intr_clear(adapter_t *adapter, int idx); int t3_slow_intr_handler(adapter_t 
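/*
 * A usage sketch (helper name hypothetical) for the mdio_read()/mdio_write()
 * wrappers above: a clause-22 read-modify-write in the same style as
 * t3_mdio_change_bits().
 */
static inline int cphy_set_pdown(struct cphy *phy, int enable)
{
	unsigned int v;
	int err = mdio_read(phy, 0, MII_BMCR, &v);

	if (!err)
		err = mdio_write(phy, 0, MII_BMCR,
				 enable ? (v | BMCR_PDOWN) : (v & ~BMCR_PDOWN));
	return err;
}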
*adapter); void t3_link_changed(adapter_t *adapter, int port_id); int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); const struct adapter_info *t3_get_adapter_info(unsigned int board_id); int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data); int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data); int t3_seeprom_wp(adapter_t *adapter, int enable); int t3_get_vpd_len(adapter_t *adapter, struct generic_vpd *vpd); int t3_read_vpd(adapter_t *adapter, struct generic_vpd *vpd); int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t3_get_tp_version(adapter_t *adapter, u32 *vers); int t3_check_tpsram_version(adapter_t *adapter); int t3_check_tpsram(adapter_t *adapter, const u8 *tp_ram, unsigned int size); int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size); int t3_get_fw_version(adapter_t *adapter, u32 *vers); int t3_check_fw_version(adapter_t *adapter); int t3_load_boot(adapter_t *adapter, u8 *fw_data, unsigned int size); int t3_init_hw(adapter_t *adapter, u32 fw_params); void mac_prep(struct cmac *mac, adapter_t *adapter, int index); void early_hw_init(adapter_t *adapter, const struct adapter_info *ai); int t3_reset_adapter(adapter_t *adapter); int t3_prep_adapter(adapter_t *adapter, const struct adapter_info *ai, int reset); int t3_reinit_adapter(adapter_t *adap); void t3_led_ready(adapter_t *adapter); void t3_fatal_err(adapter_t *adapter); void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on); void t3_enable_filters(adapter_t *adap); void t3_disable_filters(adapter_t *adap); void t3_tp_set_offload_mode(adapter_t *adap, int enable); void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus, const u16 *rspq); int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map); int t3_set_proto_sram(adapter_t *adap, const u8 *data); int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask); void t3_port_failover(adapter_t *adapter, int port); void t3_failover_done(adapter_t *adapter, int port); void t3_failover_clear(adapter_t *adapter); int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n, unsigned int *valp); int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, u64 *buf); int t3_mac_init(struct cmac *mac); void t3b_pcs_reset(struct cmac *mac); void t3c_pcs_force_los(struct cmac *mac); void t3_mac_disable_exact_filters(struct cmac *mac); void t3_mac_enable_exact_filters(struct cmac *mac); int t3_mac_enable(struct cmac *mac, int which); int t3_mac_disable(struct cmac *mac, int which); int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu); int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm); int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]); int t3_mac_set_num_ucast(struct cmac *mac, unsigned char n); const struct mac_stats *t3_mac_update_stats(struct cmac *mac); int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc); int t3b2_mac_watchdog_task(struct cmac *mac); void t3_mc5_prep(adapter_t *adapter, struct mc5 *mc5, int mode); int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, unsigned int nroutes); void t3_mc5_intr_handler(struct mc5 *mc5); int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n, u32 *buf); int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh); void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size); void t3_tp_get_mib_stats(adapter_t *adap, struct 
tp_mib_stats *tps); void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS], unsigned short alpha[NCCTRL_WIN], unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap); void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS]); void t3_get_cong_cntl_tab(adapter_t *adap, unsigned short incr[NMTUS][NCCTRL_WIN]); void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp, int filter_index, int invert, int enable); void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp, int filter_index, int *inverted, int *enabled); int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched); int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg); void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps, unsigned int *ipg); void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED]); void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals, unsigned int start, unsigned int n); int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index, u32 *size, void *data); int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data); void t3_sge_prep(adapter_t *adap, struct sge_params *p); void t3_sge_init(adapter_t *adap, struct sge_params *p); int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable, enum sge_context_type type, int respq, u64 base_addr, unsigned int size, unsigned int token, int gen, unsigned int cidx); int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable, u64 base_addr, unsigned int size, unsigned int esize, unsigned int cong_thres, int gen, unsigned int cidx); int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx, u64 base_addr, unsigned int size, unsigned int fl_thres, int gen, unsigned int cidx); int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr, unsigned int size, int rspq, int ovfl_mode, unsigned int credits, unsigned int credit_thres); int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable); int t3_sge_disable_fl(adapter_t *adapter, unsigned int id); int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id); int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id); int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4]); int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4]); int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4]); int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4]); int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op, unsigned int credits); int t3_elmr_blk_write(adapter_t *adap, int start, const u32 *vals, int n); int t3_elmr_blk_read(adapter_t *adap, int start, u32 *vals, int n); int t3_vsc7323_init(adapter_t *adap, int nports); int t3_vsc7323_set_speed_fc(adapter_t *adap, int speed, int fc, int port); int t3_vsc7323_set_mtu(adapter_t *adap, unsigned int mtu, int port); int t3_vsc7323_set_addr(adapter_t *adap, u8 addr[6], int port); int t3_vsc7323_enable(adapter_t *adap, int port, int which); int t3_vsc7323_disable(adapter_t *adap, int port, int which); const struct mac_stats *t3_vsc7323_update_stats(struct cmac *mac); int t3_i2c_read8(adapter_t *adapter, int chained, u8 *valp); int t3_i2c_write8(adapter_t *adapter, int chained, u8 val); int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int *valp); int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int val); int t3_mv88e1xxx_phy_prep(pinfo_t 
*pinfo, int phy_addr, const struct mdio_ops *mdio_ops); int t3_vsc8211_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops); int t3_vsc8211_fifo_depth(adapter_t *adap, unsigned int mtu, int port); int t3_ael1002_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops); int t3_ael1006_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops); int t3_ael2005_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops); int t3_ael2020_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops); int t3_qt2045_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops); int t3_tn1010_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops); int t3_xaui_direct_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops); int t3_aq100x_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops); #endif /* __CHELSIO_COMMON_H */ Index: stable/9/sys/dev/cxgb/common/cxgb_mv88e1xxx.c =================================================================== --- stable/9/sys/dev/cxgb/common/cxgb_mv88e1xxx.c (revision 277343) +++ stable/9/sys/dev/cxgb/common/cxgb_mv88e1xxx.c (revision 277344) @@ -1,316 +1,317 @@ /************************************************************************** Copyright (c) 2007, Chelsio Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Chelsio Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************************************************************************/ #include __FBSDID("$FreeBSD$"); #include /* Marvell PHY interrupt status bits. */ #define MV_INTR_JABBER 0x0001 #define MV_INTR_POLARITY_CHNG 0x0002 #define MV_INTR_ENG_DETECT_CHNG 0x0010 #define MV_INTR_DOWNSHIFT 0x0020 #define MV_INTR_MDI_XOVER_CHNG 0x0040 #define MV_INTR_FIFO_OVER_UNDER 0x0080 #define MV_INTR_FALSE_CARRIER 0x0100 #define MV_INTR_SYMBOL_ERROR 0x0200 #define MV_INTR_LINK_CHNG 0x0400 #define MV_INTR_AUTONEG_DONE 0x0800 #define MV_INTR_PAGE_RECV 0x1000 #define MV_INTR_DUPLEX_CHNG 0x2000 #define MV_INTR_SPEED_CHNG 0x4000 #define MV_INTR_AUTONEG_ERR 0x8000 /* Marvell PHY specific registers. 
*/ #define MV88E1XXX_SPECIFIC_CNTRL 16 #define MV88E1XXX_SPECIFIC_STATUS 17 #define MV88E1XXX_INTR_ENABLE 18 #define MV88E1XXX_INTR_STATUS 19 #define MV88E1XXX_EXT_SPECIFIC_CNTRL 20 #define MV88E1XXX_RECV_ERR 21 #define MV88E1XXX_EXT_ADDR 22 #define MV88E1XXX_GLOBAL_STATUS 23 #define MV88E1XXX_LED_CNTRL 24 #define MV88E1XXX_LED_OVERRIDE 25 #define MV88E1XXX_EXT_SPECIFIC_CNTRL2 26 #define MV88E1XXX_EXT_SPECIFIC_STATUS 27 #define MV88E1XXX_VIRTUAL_CABLE_TESTER 28 #define MV88E1XXX_EXTENDED_ADDR 29 #define MV88E1XXX_EXTENDED_DATA 30 /* PHY specific control register fields */ #define S_PSCR_MDI_XOVER_MODE 5 #define M_PSCR_MDI_XOVER_MODE 0x3 #define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE) /* Extended PHY specific control register fields */ #define S_DOWNSHIFT_ENABLE 8 #define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE) #define S_DOWNSHIFT_CNT 9 #define M_DOWNSHIFT_CNT 0x7 #define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT) /* PHY specific status register fields */ #define S_PSSR_JABBER 0 #define V_PSSR_JABBER (1 << S_PSSR_JABBER) #define S_PSSR_POLARITY 1 #define V_PSSR_POLARITY (1 << S_PSSR_POLARITY) #define S_PSSR_RX_PAUSE 2 #define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE) #define S_PSSR_TX_PAUSE 3 #define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE) #define S_PSSR_ENERGY_DETECT 4 #define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT) #define S_PSSR_DOWNSHIFT_STATUS 5 #define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS) #define S_PSSR_MDI 6 #define V_PSSR_MDI (1 << S_PSSR_MDI) #define S_PSSR_CABLE_LEN 7 #define M_PSSR_CABLE_LEN 0x7 #define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN) #define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN) #define S_PSSR_LINK 10 #define V_PSSR_LINK (1 << S_PSSR_LINK) #define S_PSSR_STATUS_RESOLVED 11 #define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED) #define S_PSSR_PAGE_RECEIVED 12 #define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED) #define S_PSSR_DUPLEX 13 #define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX) #define S_PSSR_SPEED 14 #define M_PSSR_SPEED 0x3 #define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED) #define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED) /* MV88E1XXX MDI crossover register values */ #define CROSSOVER_MDI 0 #define CROSSOVER_MDIX 1 #define CROSSOVER_AUTO 3 #define INTR_ENABLE_MASK (MV_INTR_SPEED_CHNG | MV_INTR_DUPLEX_CHNG | \ MV_INTR_AUTONEG_DONE | MV_INTR_LINK_CHNG | MV_INTR_FIFO_OVER_UNDER | \ MV_INTR_ENG_DETECT_CHNG) /* * Reset the PHY. If 'wait' is set wait until the reset completes. */ static int mv88e1xxx_reset(struct cphy *cphy, int wait) { return t3_phy_reset(cphy, 0, wait); } static int mv88e1xxx_intr_enable(struct cphy *cphy) { return mdio_write(cphy, 0, MV88E1XXX_INTR_ENABLE, INTR_ENABLE_MASK); } static int mv88e1xxx_intr_disable(struct cphy *cphy) { return mdio_write(cphy, 0, MV88E1XXX_INTR_ENABLE, 0); } static int mv88e1xxx_intr_clear(struct cphy *cphy) { u32 val; /* Clear PHY interrupts by reading the register. 
*/ return mdio_read(cphy, 0, MV88E1XXX_INTR_STATUS, &val); } static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover) { return t3_mdio_change_bits(cphy, 0, MV88E1XXX_SPECIFIC_CNTRL, V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE), V_PSCR_MDI_XOVER_MODE(crossover)); } static int mv88e1xxx_autoneg_enable(struct cphy *cphy) { mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO); /* restart autoneg for change to take effect */ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, BMCR_ANENABLE | BMCR_ANRESTART); } static int mv88e1xxx_autoneg_restart(struct cphy *cphy) { return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, BMCR_ANRESTART); } static int mv88e1xxx_set_loopback(struct cphy *cphy, int mmd, int dir, int on) { return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_LOOPBACK, on ? BMCR_LOOPBACK : 0); } -static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok, +static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_state, int *speed, int *duplex, int *fc) { u32 status; int sp = -1, dplx = -1, pause = 0; mdio_read(cphy, 0, MV88E1XXX_SPECIFIC_STATUS, &status); if ((status & V_PSSR_STATUS_RESOLVED) != 0) { if (status & V_PSSR_RX_PAUSE) pause |= PAUSE_RX; if (status & V_PSSR_TX_PAUSE) pause |= PAUSE_TX; dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; sp = G_PSSR_SPEED(status); if (sp == 0) sp = SPEED_10; else if (sp == 1) sp = SPEED_100; else sp = SPEED_1000; } - if (link_ok) - *link_ok = (status & V_PSSR_LINK) != 0; + if (link_state) + *link_state = status & V_PSSR_LINK ? PHY_LINK_UP : + PHY_LINK_DOWN; if (speed) *speed = sp; if (duplex) *duplex = dplx; if (fc) *fc = pause; return 0; } static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex) { int err = t3_set_phy_speed_duplex(phy, speed, duplex); /* PHY needs reset for new settings to take effect */ if (!err) err = mv88e1xxx_reset(phy, 0); return err; } static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable) { /* * Set the downshift counter to 2 so we try to establish Gb link * twice before downshifting. */ return t3_mdio_change_bits(cphy, 0, MV88E1XXX_EXT_SPECIFIC_CNTRL, V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT), downshift_enable ? V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2) : 0); } static int mv88e1xxx_power_down(struct cphy *cphy, int enable) { return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN, enable ? 
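/*
 * Two sketches (helper names hypothetical) of idioms in this file: the PSSR
 * speed decode from mv88e1xxx_get_link_status() above, where codes 0 and 1
 * select 10 and 100 Mbps and the remaining codes resolve to 1000 Mbps, and
 * the extended-register indirection used later by t3_mv88e1xxx_phy_prep(),
 * which writes a page number to MV88E1XXX_EXTENDED_ADDR before the payload
 * goes to MV88E1XXX_EXTENDED_DATA.
 */
static int pssr_speed(u32 status)
{
	switch (G_PSSR_SPEED(status)) {
	case 0:
		return SPEED_10;
	case 1:
		return SPEED_100;
	default:
		return SPEED_1000;
	}
}

static int mv88e1xxx_ext_write(struct cphy *phy, int page, unsigned int val)
{
	int err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_ADDR, page);

	if (!err)
		err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_DATA, val);
	return err;
}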
BMCR_PDOWN : 0); } static int mv88e1xxx_intr_handler(struct cphy *cphy) { const u32 link_change_intrs = MV_INTR_LINK_CHNG | MV_INTR_AUTONEG_DONE | MV_INTR_DUPLEX_CHNG | MV_INTR_SPEED_CHNG | MV_INTR_DOWNSHIFT; u32 cause; int cphy_cause = 0; mdio_read(cphy, 0, MV88E1XXX_INTR_STATUS, &cause); cause &= INTR_ENABLE_MASK; if (cause & link_change_intrs) cphy_cause |= cphy_cause_link_change; if (cause & MV_INTR_FIFO_OVER_UNDER) cphy_cause |= cphy_cause_fifo_error; return cphy_cause; } #ifdef C99_NOT_SUPPORTED static struct cphy_ops mv88e1xxx_ops = { mv88e1xxx_reset, mv88e1xxx_intr_enable, mv88e1xxx_intr_disable, mv88e1xxx_intr_clear, mv88e1xxx_intr_handler, mv88e1xxx_autoneg_enable, mv88e1xxx_autoneg_restart, t3_phy_advertise, mv88e1xxx_set_loopback, mv88e1xxx_set_speed_duplex, mv88e1xxx_get_link_status, mv88e1xxx_power_down, }; #else static struct cphy_ops mv88e1xxx_ops = { .reset = mv88e1xxx_reset, .intr_enable = mv88e1xxx_intr_enable, .intr_disable = mv88e1xxx_intr_disable, .intr_clear = mv88e1xxx_intr_clear, .intr_handler = mv88e1xxx_intr_handler, .autoneg_enable = mv88e1xxx_autoneg_enable, .autoneg_restart = mv88e1xxx_autoneg_restart, .advertise = t3_phy_advertise, .set_loopback = mv88e1xxx_set_loopback, .set_speed_duplex = mv88e1xxx_set_speed_duplex, .get_link_status = mv88e1xxx_get_link_status, .power_down = mv88e1xxx_power_down, }; #endif int t3_mv88e1xxx_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { struct cphy *phy = &pinfo->phy; int err; cphy_init(phy, pinfo->adapter, pinfo, phy_addr, &mv88e1xxx_ops, mdio_ops, SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T"); /* Configure copper PHY transmitter as class A to reduce EMI. */ err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_ADDR, 0xb); if (!err) err = mdio_write(phy, 0, MV88E1XXX_EXTENDED_DATA, 0x8004); if (!err) err = mv88e1xxx_downshift_set(phy, 1); /* Enable downshift */ return err; } Index: stable/9/sys/dev/cxgb/common/cxgb_t3_hw.c =================================================================== --- stable/9/sys/dev/cxgb/common/cxgb_t3_hw.c (revision 277343) +++ stable/9/sys/dev/cxgb/common/cxgb_t3_hw.c (revision 277344) @@ -1,4791 +1,4798 @@ /************************************************************************** Copyright (c) 2007-2009, Chelsio Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Chelsio Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #undef msleep #define msleep t3_os_sleep /** * t3_wait_op_done_val - wait until an operation is completed * @adapter: the adapter performing the operation * @reg: the register to check for completion * @mask: a single-bit field within @reg that indicates completion * @polarity: the value of the field when the operation is completed * @attempts: number of check iterations * @delay: delay in usecs between iterations * @valp: where to store the value of the register at completion time * * Wait until an operation is completed by checking a bit in a register * up to @attempts times. If @valp is not NULL the value of the register * at the time it indicated completion is stored there. Returns 0 if the * operation completes and -EAGAIN otherwise. */ int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity, int attempts, int delay, u32 *valp) { while (1) { u32 val = t3_read_reg(adapter, reg); if (!!(val & mask) == polarity) { if (valp) *valp = val; return 0; } if (--attempts == 0) return -EAGAIN; if (delay) udelay(delay); } } /** * t3_write_regs - write a bunch of registers * @adapter: the adapter to program * @p: an array of register address/register value pairs * @n: the number of address/value pairs * @offset: register address offset * * Takes an array of register address/register value pairs and writes each * value to the corresponding register. Register addresses are adjusted * by the supplied offset. */ void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n, unsigned int offset) { while (n--) { t3_write_reg(adapter, p->reg_addr + offset, p->val); p++; } } /** * t3_set_reg_field - set a register field to a value * @adapter: the adapter to program * @addr: the register address * @mask: specifies the portion of the register to modify * @val: the new value for the register field * * Sets a register field specified by the supplied mask to the * given value. */ void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val) { u32 v = t3_read_reg(adapter, addr) & ~mask; t3_write_reg(adapter, addr, v | val); (void) t3_read_reg(adapter, addr); /* flush */ } /** * t3_read_indirect - read indirectly addressed registers * @adap: the adapter * @addr_reg: register holding the indirect address * @data_reg: register holding the value of the indirect register * @vals: where the read register values are stored * @start_idx: index of first indirect register to read * @nregs: how many indirect registers to read * * Reads registers that are accessed indirectly through an address/data * register pair. 
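/*
 * A usage sketch (helper name hypothetical) for t3_wait_op_done_val()
 * above: poll a BUSY-style bit until it deasserts, discarding the register
 * value at completion time; this is exactly what the t3_wait_op_done()
 * wrapper in cxgb_common.h does.
 */
static int wait_not_busy(adapter_t *adap, int reg, u32 busy_bit)
{
	/* up to 100 polls, 10 usecs apart, waiting for busy_bit to read 0 */
	return t3_wait_op_done_val(adap, reg, busy_bit, 0, 100, 10, NULL);
}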
*/ static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg, unsigned int data_reg, u32 *vals, unsigned int nregs, unsigned int start_idx) { while (nregs--) { t3_write_reg(adap, addr_reg, start_idx); *vals++ = t3_read_reg(adap, data_reg); start_idx++; } } /** * t3_mc7_bd_read - read from MC7 through backdoor accesses * @mc7: identifies MC7 to read from * @start: index of first 64-bit word to read * @n: number of 64-bit words to read * @buf: where to store the read result * * Read n 64-bit words from MC7 starting at word start, using backdoor * accesses. */ int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, u64 *buf) { static int shift[] = { 0, 0, 16, 24 }; static int step[] = { 0, 32, 16, 8 }; unsigned int size64 = mc7->size / 8; /* # of 64-bit words */ adapter_t *adap = mc7->adapter; if (start >= size64 || start + n > size64) return -EINVAL; start *= (8 << mc7->width); while (n--) { int i; u64 val64 = 0; for (i = (1 << mc7->width) - 1; i >= 0; --i) { int attempts = 10; u32 val; t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start); t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0); val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP); while ((val & F_BUSY) && attempts--) val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP); if (val & F_BUSY) return -EIO; val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1); if (mc7->width == 0) { val64 = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA0); val64 |= (u64)val << 32; } else { if (mc7->width > 1) val >>= shift[mc7->width]; val64 |= (u64)val << (step[mc7->width] * i); } start += 8; } *buf++ = val64; } return 0; } /* * Low-level I2C read and write routines. These simply read and write a * single byte with the option of indicating a "continue" if another operation * is to be chained. Generally most code will use higher-level routines to * read and write to I2C Slave Devices. */ #define I2C_ATTEMPTS 100 /* * Read an 8-bit value from the I2C bus. If the "chained" parameter is * non-zero then a STOP bit will not be written after the read command. On * error (the read timed out, etc.), a negative errno will be returned (e.g. * -EAGAIN, etc.). On success, the 8-bit value read from the I2C bus is * stored into the buffer *valp and the value of the I2C ACK bit is returned * as a 0/1 value. */ int t3_i2c_read8(adapter_t *adapter, int chained, u8 *valp) { int ret; u32 opval; MDIO_LOCK(adapter); t3_write_reg(adapter, A_I2C_OP, F_I2C_READ | (chained ? F_I2C_CONT : 0)); ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0, I2C_ATTEMPTS, 10, &opval); if (ret >= 0) { ret = ((opval & F_I2C_ACK) == F_I2C_ACK); *valp = G_I2C_DATA(t3_read_reg(adapter, A_I2C_DATA)); } MDIO_UNLOCK(adapter); return ret; } /* * Write an 8-bit value to the I2C bus. If the "chained" parameter is * non-zero, then a STOP bit will not be written after the write command. On * error (the write timed out, etc.), a negative errno will be returned (e.g. * -EAGAIN, etc.). On success, the value of the I2C ACK bit is returned as a * 0/1 value. */ int t3_i2c_write8(adapter_t *adapter, int chained, u8 val) { int ret; u32 opval; MDIO_LOCK(adapter); t3_write_reg(adapter, A_I2C_DATA, V_I2C_DATA(val)); t3_write_reg(adapter, A_I2C_OP, F_I2C_WRITE | (chained ? F_I2C_CONT : 0)); ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0, I2C_ATTEMPTS, 10, &opval); if (ret >= 0) ret = ((opval & F_I2C_ACK) == F_I2C_ACK); MDIO_UNLOCK(adapter); return ret; } /* * Initialize MI1. 
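/*
 * A usage sketch (helper name and byte order hypothetical) for the
 * low-level I2C helpers above: read two bytes in one bus transaction by
 * chaining the first read (no STOP bit) into the second.
 */
static int t3_i2c_read16(adapter_t *adapter, u16 *valp)
{
	u8 hi, lo;
	int ret = t3_i2c_read8(adapter, 1, &hi);	/* chained, no STOP */

	if (ret >= 0)
		ret = t3_i2c_read8(adapter, 0, &lo);	/* final, with STOP */
	if (ret >= 0)
		*valp = (hi << 8) | lo;
	return ret;
}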
*/ static void mi1_init(adapter_t *adap, const struct adapter_info *ai) { u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1; u32 val = F_PREEN | V_CLKDIV(clkdiv); t3_write_reg(adap, A_MI1_CFG, val); } #define MDIO_ATTEMPTS 20 /* * MI1 read/write operations for clause 22 PHYs. */ int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int *valp) { int ret; u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr); if (mmd_addr) return -EINVAL; MDIO_LOCK(adapter); t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1)); t3_write_reg(adapter, A_MI1_ADDR, addr); t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2)); ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); if (!ret) *valp = t3_read_reg(adapter, A_MI1_DATA); MDIO_UNLOCK(adapter); return ret; } int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int val) { int ret; u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr); if (mmd_addr) return -EINVAL; MDIO_LOCK(adapter); t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1)); t3_write_reg(adapter, A_MI1_ADDR, addr); t3_write_reg(adapter, A_MI1_DATA, val); t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1)); ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); MDIO_UNLOCK(adapter); return ret; } static struct mdio_ops mi1_mdio_ops = { t3_mi1_read, t3_mi1_write }; /* * MI1 read/write operations for clause 45 PHYs. */ static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int *valp) { int ret; u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr); MDIO_LOCK(adapter); t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0); t3_write_reg(adapter, A_MI1_ADDR, addr); t3_write_reg(adapter, A_MI1_DATA, reg_addr); t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0)); ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); if (!ret) { t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3)); ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); if (!ret) *valp = t3_read_reg(adapter, A_MI1_DATA); } MDIO_UNLOCK(adapter); return ret; } static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr, int reg_addr, unsigned int val) { int ret; u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr); MDIO_LOCK(adapter); t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0); t3_write_reg(adapter, A_MI1_ADDR, addr); t3_write_reg(adapter, A_MI1_DATA, reg_addr); t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0)); ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); if (!ret) { t3_write_reg(adapter, A_MI1_DATA, val); t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1)); ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); } MDIO_UNLOCK(adapter); return ret; } static struct mdio_ops mi1_mdio_ext_ops = { mi1_ext_read, mi1_ext_write }; /** * t3_mdio_change_bits - modify the value of a PHY register * @phy: the PHY to operate on * @mmd: the device address * @reg: the register address * @clear: what part of the register value to mask off * @set: what part of the register value to set * * Changes the value of a PHY register by applying a mask to its current * value and ORing the result with a new value. 
*/ int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear, unsigned int set) { int ret; unsigned int val; ret = mdio_read(phy, mmd, reg, &val); if (!ret) { val &= ~clear; ret = mdio_write(phy, mmd, reg, val | set); } return ret; } /** * t3_phy_reset - reset a PHY block * @phy: the PHY to operate on * @mmd: the device address of the PHY block to reset * @wait: how long to wait for the reset to complete in 1ms increments * * Resets a PHY block and optionally waits for the reset to complete. * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset * for 10G PHYs. */ int t3_phy_reset(struct cphy *phy, int mmd, int wait) { int err; unsigned int ctl; err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET); if (err || !wait) return err; do { err = mdio_read(phy, mmd, MII_BMCR, &ctl); if (err) return err; ctl &= BMCR_RESET; if (ctl) msleep(1); } while (ctl && --wait); return ctl ? -1 : 0; } /** * t3_phy_advertise - set the PHY advertisement registers for autoneg * @phy: the PHY to operate on * @advert: bitmap of capabilities the PHY should advertise * * Sets a 10/100/1000 PHY's advertisement registers to advertise the * requested capabilities. */ int t3_phy_advertise(struct cphy *phy, unsigned int advert) { int err; unsigned int val = 0; err = mdio_read(phy, 0, MII_CTRL1000, &val); if (err) return err; val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); if (advert & ADVERTISED_1000baseT_Half) val |= ADVERTISE_1000HALF; if (advert & ADVERTISED_1000baseT_Full) val |= ADVERTISE_1000FULL; err = mdio_write(phy, 0, MII_CTRL1000, val); if (err) return err; val = 1; if (advert & ADVERTISED_10baseT_Half) val |= ADVERTISE_10HALF; if (advert & ADVERTISED_10baseT_Full) val |= ADVERTISE_10FULL; if (advert & ADVERTISED_100baseT_Half) val |= ADVERTISE_100HALF; if (advert & ADVERTISED_100baseT_Full) val |= ADVERTISE_100FULL; if (advert & ADVERTISED_Pause) val |= ADVERTISE_PAUSE_CAP; if (advert & ADVERTISED_Asym_Pause) val |= ADVERTISE_PAUSE_ASYM; return mdio_write(phy, 0, MII_ADVERTISE, val); } /** * t3_phy_advertise_fiber - set fiber PHY advertisement register * @phy: the PHY to operate on * @advert: bitmap of capabilities the PHY should advertise * * Sets a fiber PHY's advertisement register to advertise the * requested capabilities. */ int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert) { unsigned int val = 0; if (advert & ADVERTISED_1000baseT_Half) val |= ADVERTISE_1000XHALF; if (advert & ADVERTISED_1000baseT_Full) val |= ADVERTISE_1000XFULL; if (advert & ADVERTISED_Pause) val |= ADVERTISE_1000XPAUSE; if (advert & ADVERTISED_Asym_Pause) val |= ADVERTISE_1000XPSE_ASYM; return mdio_write(phy, 0, MII_ADVERTISE, val); } /** * t3_set_phy_speed_duplex - force PHY speed and duplex * @phy: the PHY to operate on * @speed: requested PHY speed * @duplex: requested PHY duplex * * Force a 10/100/1000 PHY's speed and duplex. This also disables * auto-negotiation except for GigE, where auto-negotiation is mandatory. 
*/ int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex) { int err; unsigned int ctl; err = mdio_read(phy, 0, MII_BMCR, &ctl); if (err) return err; if (speed >= 0) { ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); if (speed == SPEED_100) ctl |= BMCR_SPEED100; else if (speed == SPEED_1000) ctl |= BMCR_SPEED1000; } if (duplex >= 0) { ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE); if (duplex == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; } if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */ ctl |= BMCR_ANENABLE; return mdio_write(phy, 0, MII_BMCR, ctl); } int t3_phy_lasi_intr_enable(struct cphy *phy) { return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1); } int t3_phy_lasi_intr_disable(struct cphy *phy) { return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0); } int t3_phy_lasi_intr_clear(struct cphy *phy) { u32 val; return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val); } int t3_phy_lasi_intr_handler(struct cphy *phy) { unsigned int status; int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status); if (err) return err; return (status & 1) ? cphy_cause_link_change : 0; } static struct adapter_info t3_adap_info[] = { { 1, 1, 0, F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, &mi1_mdio_ops, "Chelsio PE9000" }, { 1, 1, 0, F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, &mi1_mdio_ops, "Chelsio T302" }, { 1, 0, 0, F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, &mi1_mdio_ext_ops, "Chelsio T310" }, { 1, 1, 0, F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, &mi1_mdio_ext_ops, "Chelsio T320" }, { 4, 0, 0, F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL, { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI, &mi1_mdio_ops, "Chelsio T304" }, { 0 }, { 1, 0, 0, F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, &mi1_mdio_ext_ops, "Chelsio T310" }, { 1, 0, 0, F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL, { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, &mi1_mdio_ext_ops, "Chelsio N320E-G2" }, }; /* * Return the adapter_info structure with a given index. Out-of-range indices * return NULL. */ const struct adapter_info *t3_get_adapter_info(unsigned int id) { return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL; } struct port_type_info { int (*phy_prep)(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *ops); }; static struct port_type_info port_types[] = { { NULL }, { t3_ael1002_phy_prep }, { t3_vsc8211_phy_prep }, { t3_mv88e1xxx_phy_prep }, { t3_xaui_direct_phy_prep }, { t3_ael2005_phy_prep }, { t3_qt2045_phy_prep }, { t3_ael1006_phy_prep }, { t3_tn1010_phy_prep }, { t3_aq100x_phy_prep }, { t3_ael2020_phy_prep }, }; #define VPD_ENTRY(name, len) \ u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] /* * Partial EEPROM Vital Product Data structure. Includes only the ID and * VPD-R sections. 
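/*
 * A sketch (helper name hypothetical) of how a VPD-derived port type index
 * selects a PHY prep routine from port_types[] above; the bounds and NULL
 * checks are added here for illustration.
 */
static int prep_port_phy(pinfo_t *pinfo, unsigned int port_type, int phy_addr,
			 const struct mdio_ops *ops)
{
	if (port_type >= ARRAY_SIZE(port_types) ||
	    port_types[port_type].phy_prep == NULL)
		return -EINVAL;
	return port_types[port_type].phy_prep(pinfo, phy_addr, ops);
}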
struct t3_vpd { u8 id_tag; u8 id_len[2]; u8 id_data[16]; u8 vpdr_tag; u8 vpdr_len[2]; VPD_ENTRY(pn, 16); /* part number */ VPD_ENTRY(ec, ECNUM_LEN); /* EC level */ VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ VPD_ENTRY(na, 12); /* MAC address base */ VPD_ENTRY(cclk, 6); /* core clock */ VPD_ENTRY(mclk, 6); /* mem clock */ VPD_ENTRY(uclk, 6); /* uP clk */ VPD_ENTRY(mdc, 6); /* MDIO clk */ VPD_ENTRY(mt, 2); /* mem timing */ VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */ VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */ VPD_ENTRY(port0, 2); /* PHY0 complex */ VPD_ENTRY(port1, 2); /* PHY1 complex */ VPD_ENTRY(port2, 2); /* PHY2 complex */ VPD_ENTRY(port3, 2); /* PHY3 complex */ VPD_ENTRY(rv, 1); /* csum */ u32 pad; /* for multiple-of-4 sizing and alignment */ }; #define EEPROM_MAX_POLL 40 #define EEPROM_STAT_ADDR 0x4000 #define VPD_BASE 0xc00 /** * t3_seeprom_read - read a VPD EEPROM location * @adapter: adapter to read * @addr: EEPROM address * @data: where to store the read data * * Read a 32-bit word from a location in VPD EEPROM using the card's PCI * VPD ROM capability. A zero is written to the flag bit when the * address is written to the control register. The hardware device will * set the flag to 1 when 4 bytes have been read into the data register. */ int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data) { u16 val; int attempts = EEPROM_MAX_POLL; unsigned int base = adapter->params.pci.vpd_cap_addr; if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) return -EINVAL; t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr); do { udelay(10); t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val); } while (!(val & PCI_VPD_ADDR_F) && --attempts); if (!(val & PCI_VPD_ADDR_F)) { CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr); return -EIO; } t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data); *data = le32_to_cpu(*data); return 0; } /** * t3_seeprom_write - write a VPD EEPROM location * @adapter: adapter to write * @addr: EEPROM address * @data: value to write * * Write a 32-bit word to a location in VPD EEPROM using the card's PCI * VPD ROM capability. */ int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data) { u16 val; int attempts = EEPROM_MAX_POLL; unsigned int base = adapter->params.pci.vpd_cap_addr; if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) return -EINVAL; t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA, cpu_to_le32(data)); t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr | PCI_VPD_ADDR_F); do { msleep(1); t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val); } while ((val & PCI_VPD_ADDR_F) && --attempts); if (val & PCI_VPD_ADDR_F) { CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr); return -EIO; } return 0; } /** * t3_seeprom_wp - enable/disable EEPROM write protection * @adapter: the adapter * @enable: 1 to enable write protection, 0 to disable it * * Enables or disables write protection on the serial EEPROM. */ int t3_seeprom_wp(adapter_t *adapter, int enable) { return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); } /* * Convert a character holding a hex digit to a number. */ static unsigned int hex2int(unsigned char c) { return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10; } /** * get_desc_len - get the length of a vpd descriptor. * @adapter: the adapter * @offset: first byte offset of the vpd descriptor * * Retrieves the length of the small/large resource * data type starting at offset.
*/ static int get_desc_len(adapter_t *adapter, u32 offset) { u32 read_offset, tmp, shift, len = 0; u8 tag, buf[8]; int ret; read_offset = offset & 0xfffffffc; shift = offset & 0x03; ret = t3_seeprom_read(adapter, read_offset, &tmp); if (ret < 0) return ret; *((u32 *)buf) = cpu_to_le32(tmp); tag = buf[shift]; if (tag & 0x80) { ret = t3_seeprom_read(adapter, read_offset + 4, &tmp); if (ret < 0) return ret; *((u32 *)(&buf[4])) = cpu_to_le32(tmp); len = (buf[shift + 1] & 0xff) + ((buf[shift+2] << 8) & 0xff00) + 3; } else len = (tag & 0x07) + 1; return len; } /** * is_end_tag - Check if a vpd tag is the end tag. * @adapter: the adapter * @offset: first byte offset of the tag * * Checks if the tag located at offset is the end tag. */ static int is_end_tag(adapter_t * adapter, u32 offset) { u32 read_offset, shift, ret, tmp; u8 buf[4]; read_offset = offset & 0xfffffffc; shift = offset & 0x03; ret = t3_seeprom_read(adapter, read_offset, &tmp); if (ret) return ret; *((u32 *)buf) = cpu_to_le32(tmp); if (buf[shift] == 0x78) return 1; else return 0; } /** * t3_get_vpd_len - computes the length of a vpd structure * @adapter: the adapter * @vpd: contains the offset of first byte of vpd * * Computes the length of the vpd structure starting at vpd->offset. */ int t3_get_vpd_len(adapter_t * adapter, struct generic_vpd *vpd) { u32 len=0, offset; int inc, ret; offset = vpd->offset; while (offset < (vpd->offset + MAX_VPD_BYTES)) { ret = is_end_tag(adapter, offset); if (ret < 0) return ret; else if (ret == 1) break; inc = get_desc_len(adapter, offset); if (inc < 0) return inc; len += inc; offset += inc; } return (len + 1); } /** * t3_read_vpd - reads the stream of bytes containing a vpd structure * @adapter: the adapter * @vpd: contains a buffer that would hold the stream of bytes * * Reads the vpd structure starting at vpd->offset into vpd->data, * the length of the byte stream to read is vpd->len. */ int t3_read_vpd(adapter_t *adapter, struct generic_vpd *vpd) { u32 i, ret; for (i = 0; i < vpd->len; i += 4) { ret = t3_seeprom_read(adapter, vpd->offset + i, (u32 *) &(vpd->data[i])); if (ret) return ret; } return 0; } /** * get_vpd_params - read VPD parameters from VPD EEPROM * @adapter: adapter to read * @p: where to store the parameters * * Reads card parameters stored in VPD EEPROM. */ static int get_vpd_params(adapter_t *adapter, struct vpd_params *p) { int i, addr, ret; struct t3_vpd vpd; /* * Card information is normally at VPD_BASE but some early cards had * it at 0. */ ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd); if (ret) return ret; addr = vpd.id_tag == 0x82 ? VPD_BASE : 0; for (i = 0; i < sizeof(vpd); i += 4) { ret = t3_seeprom_read(adapter, addr + i, (u32 *)((u8 *)&vpd + i)); if (ret) return ret; } p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10); p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10); p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10); p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10); memcpy(p->sn, vpd.sn_data, SERNUM_LEN); memcpy(p->ec, vpd.ec_data, ECNUM_LEN); /* Old eeproms didn't have port information */ if (adapter->params.rev == 0 && !vpd.port0_data[0]) { p->port_type[0] = uses_xaui(adapter) ? 1 : 2; p->port_type[1] = uses_xaui(adapter) ?
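/*
 * The VPD resource-tag arithmetic implemented by get_desc_len() and
 * is_end_tag() above, condensed into a hypothetical helper: 0x78 is the
 * end tag, a large resource (bit 7 set) occupies 3 header bytes plus a
 * 16-bit length, and a small resource encodes its payload length in its
 * low 3 bits.
 */
static int vpd_desc_len(u8 tag, u8 len_lo, u8 len_hi)
{
	if (tag == 0x78)
		return 0;				/* end tag */
	if (tag & 0x80)
		return 3 + (len_lo | (len_hi << 8));	/* large resource */
	return 1 + (tag & 0x07);			/* small resource */
}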
6 : 2; } else { p->port_type[0] = (u8)hex2int(vpd.port0_data[0]); p->port_type[1] = (u8)hex2int(vpd.port1_data[0]); p->port_type[2] = (u8)hex2int(vpd.port2_data[0]); p->port_type[3] = (u8)hex2int(vpd.port3_data[0]); p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); } for (i = 0; i < 6; i++) p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 + hex2int(vpd.na_data[2 * i + 1]); return 0; } /* BIOS boot header */ typedef struct boot_header_s { u8 signature[2]; /* signature */ u8 length; /* image length (include header) */ u8 offset[4]; /* initialization vector */ u8 reserved[19]; /* reserved */ u8 exheader[2]; /* offset to expansion header */ } boot_header_t; /* serial flash and firmware constants */ enum { SF_ATTEMPTS = 5, /* max retries for SF1 operations */ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */ /* flash command opcodes */ SF_PROG_PAGE = 2, /* program page */ SF_WR_DISABLE = 4, /* disable writes */ SF_RD_STATUS = 5, /* read status register */ SF_WR_ENABLE = 6, /* enable writes */ SF_RD_DATA_FAST = 0xb, /* read flash */ SF_ERASE_SECTOR = 0xd8, /* erase sector */ FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */ FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */ FW_VERS_ADDR_PRE8 = 0x77ffc,/* flash address holding FW version pre8 */ FW_MIN_SIZE = 8, /* at least version and csum */ FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR, FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR, BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */ BOOT_SIGNATURE = 0xaa55, /* signature of BIOS boot ROM */ BOOT_SIZE_INC = 512, /* image size measured in 512B chunks */ BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */ BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC /* 1 byte * length increment */ }; /** * sf1_read - read data from the serial flash * @adapter: the adapter * @byte_cnt: number of bytes to read * @cont: whether another operation will be chained * @valp: where to store the read data * * Reads up to 4 bytes of data from the serial flash. The location of * the read needs to be specified prior to calling this by issuing the * appropriate commands to the serial flash. */ static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont, u32 *valp) { int ret; if (!byte_cnt || byte_cnt > 4) return -EINVAL; if (t3_read_reg(adapter, A_SF_OP) & F_BUSY) return -EBUSY; t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10); if (!ret) *valp = t3_read_reg(adapter, A_SF_DATA); return ret; } /** * sf1_write - write data to the serial flash * @adapter: the adapter * @byte_cnt: number of bytes to write * @cont: whether another operation will be chained * @val: value to write * * Writes up to 4 bytes of data to the serial flash. The location of * the write needs to be specified prior to calling this by issuing the * appropriate commands to the serial flash. 
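 *
 * A minimal usage sketch, mirroring flash_wait_op() below: issue the
 * READ STATUS opcode and chain a one-byte read of the result via @cont:
 *
 *	u32 status;
 *	if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) == 0)
 *		ret = sf1_read(adapter, 1, 0, &status);
 *
 * Bit 0 of the returned status byte is the flash busy bit polled by
 * flash_wait_op().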
*/ static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont, u32 val) { if (!byte_cnt || byte_cnt > 4) return -EINVAL; if (t3_read_reg(adapter, A_SF_OP) & F_BUSY) return -EBUSY; t3_write_reg(adapter, A_SF_DATA, val); t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10); } /** * flash_wait_op - wait for a flash operation to complete * @adapter: the adapter * @attempts: max number of polls of the status register * @delay: delay between polls in ms * * Wait for a flash operation to complete by polling the status register. */ static int flash_wait_op(adapter_t *adapter, int attempts, int delay) { int ret; u32 status; while (1) { if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 || (ret = sf1_read(adapter, 1, 0, &status)) != 0) return ret; if (!(status & 1)) return 0; if (--attempts == 0) return -EAGAIN; if (delay) msleep(delay); } } /** * t3_read_flash - read words from serial flash * @adapter: the adapter * @addr: the start address for the read * @nwords: how many 32-bit words to read * @data: where to store the read data * @byte_oriented: whether to store data as bytes or as words * * Read the specified number of 32-bit words from the serial flash. * If @byte_oriented is set the read data is stored as a byte array * (i.e., big-endian), otherwise as 32-bit words in the platform's * natural endianness. */ int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented) { int ret; if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3)) return -EINVAL; addr = swab32(addr) | SF_RD_DATA_FAST; if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 || (ret = sf1_read(adapter, 1, 1, data)) != 0) return ret; for ( ; nwords; nwords--, data++) { ret = sf1_read(adapter, 4, nwords > 1, data); if (ret) return ret; if (byte_oriented) *data = htonl(*data); } return 0; } /** * t3_write_flash - write up to a page of data to the serial flash * @adapter: the adapter * @addr: the start address to write * @n: length of data to write * @data: the data to write * @byte_oriented: whether to store data as bytes or as words * * Writes up to a page of data (256 bytes) to the serial flash starting * at the given address. * If @byte_oriented is set the write data is stored as a 32-bit * big-endian array, otherwise in the processor's native endianness. * */ static int t3_write_flash(adapter_t *adapter, unsigned int addr, unsigned int n, const u8 *data, int byte_oriented) { int ret; u32 buf[64]; unsigned int c, left, val, offset = addr & 0xff; if (addr + n > SF_SIZE || offset + n > 256) return -EINVAL; val = swab32(addr) | SF_PROG_PAGE; if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 1, val)) != 0) return ret; for (left = n; left; left -= c) { c = min(left, 4U); val = *(const u32*)data; data += c; if (byte_oriented) val = htonl(val); ret = sf1_write(adapter, c, c != left, val); if (ret) return ret; } if ((ret = flash_wait_op(adapter, 5, 1)) != 0) return ret; /* Read the page to verify the write succeeded */ ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, byte_oriented); if (ret) return ret; if (memcmp(data - n, (u8 *)buf + offset, n)) return -EIO; return 0; } /** * t3_get_tp_version - read the tp sram version * @adapter: the adapter * @vers: where to place the version * * Reads the protocol sram version from sram.
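 *
 * A caller typically splits the version word with the existing field
 * macros, e.g. (sketch; the warning is purely illustrative):
 *
 *	u32 vers;
 *	if (t3_get_tp_version(adapter, &vers) == 0)
 *		CH_WARN(adapter, "TP %u.%u\n", G_TP_VERSION_MAJOR(vers),
 *			G_TP_VERSION_MINOR(vers));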
*/ int t3_get_tp_version(adapter_t *adapter, u32 *vers) { int ret; /* Get version loaded in SRAM */ t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0); ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1); if (ret) return ret; *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1); return 0; } /** * t3_check_tpsram_version - read the tp sram version * @adapter: the adapter * */ int t3_check_tpsram_version(adapter_t *adapter) { int ret; u32 vers; unsigned int major, minor; if (adapter->params.rev == T3_REV_A) return 0; ret = t3_get_tp_version(adapter, &vers); if (ret) return ret; vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1); major = G_TP_VERSION_MAJOR(vers); minor = G_TP_VERSION_MINOR(vers); if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) return 0; else { CH_ERR(adapter, "found wrong TP version (%u.%u), " "driver compiled for version %d.%d\n", major, minor, TP_VERSION_MAJOR, TP_VERSION_MINOR); } return -EINVAL; } /** * t3_check_tpsram - check if provided protocol SRAM * is compatible with this driver * @adapter: the adapter * @tp_sram: the firmware image to write * @size: image size * * Checks if an adapter's tp sram is compatible with the driver. * Returns 0 if the versions are compatible, a negative error otherwise. */ int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size) { u32 csum; unsigned int i; const u32 *p = (const u32 *)tp_sram; /* Verify checksum */ for (csum = 0, i = 0; i < size / sizeof(csum); i++) csum += ntohl(p[i]); if (csum != 0xffffffff) { CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n", csum); return -EINVAL; } return 0; } enum fw_version_type { FW_VERSION_N3, FW_VERSION_T3 }; /** * t3_get_fw_version - read the firmware version * @adapter: the adapter * @vers: where to place the version * * Reads the FW version from flash. Note that we had to move the version * due to FW size. If we don't find a valid FW version in the new location * we fall back and read the old location. */ int t3_get_fw_version(adapter_t *adapter, u32 *vers) { int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0); if (!ret && *vers != 0xffffffff) return 0; else return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0); } /** * t3_check_fw_version - check if the FW is compatible with this driver * @adapter: the adapter * * Checks if an adapter's FW is compatible with the driver. Returns 0 * if the versions are compatible, a negative error otherwise. */ int t3_check_fw_version(adapter_t *adapter) { int ret; u32 vers; unsigned int type, major, minor; ret = t3_get_fw_version(adapter, &vers); if (ret) return ret; type = G_FW_VERSION_TYPE(vers); major = G_FW_VERSION_MAJOR(vers); minor = G_FW_VERSION_MINOR(vers); if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR && minor == FW_VERSION_MINOR) return 0; else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR) CH_WARN(adapter, "found old FW minor version(%u.%u), " "driver compiled for version %u.%u\n", major, minor, FW_VERSION_MAJOR, FW_VERSION_MINOR); else { CH_WARN(adapter, "found newer FW version(%u.%u), " "driver compiled for version %u.%u\n", major, minor, FW_VERSION_MAJOR, FW_VERSION_MINOR); return 0; } return -EINVAL; } /** * t3_flash_erase_sectors - erase a range of flash sectors * @adapter: the adapter * @start: the first sector to erase * @end: the last sector to erase * * Erases the sectors in the given range. 
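 *
 * Sector numbers are byte addresses divided by SF_SEC_SIZE (64KB), i.e.
 * addr >> 16. For example, erasing the firmware region as t3_load_fw()
 * does below:
 *
 *	int fw_sector = FW_FLASH_BOOT_ADDR >> 16;
 *	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);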
*/ static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end) { while (start <= end) { int ret; if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 0, SF_ERASE_SECTOR | (start << 8))) != 0 || (ret = flash_wait_op(adapter, 5, 500)) != 0) return ret; start++; } return 0; } /* * t3_load_fw - download firmware * @adapter: the adapter * @fw_data: the firmware image to write * @size: image size * * Write the supplied firmware image to the card's serial flash. * The FW image has the following sections: @size - 8 bytes of code and * data, followed by 4 bytes of FW version, followed by the 32-bit * 1's complement checksum of the whole image. */ int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size) { u32 version, csum, fw_version_addr; unsigned int i; const u32 *p = (const u32 *)fw_data; int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16; if ((size & 3) || size < FW_MIN_SIZE) return -EINVAL; if (size - 8 > FW_MAX_SIZE) return -EFBIG; version = ntohl(*(const u32 *)(fw_data + size - 8)); if (G_FW_VERSION_MAJOR(version) < 8) { fw_version_addr = FW_VERS_ADDR_PRE8; if (size - 8 > FW_MAX_SIZE_PRE8) return -EFBIG; } else fw_version_addr = FW_VERS_ADDR; for (csum = 0, i = 0; i < size / sizeof(csum); i++) csum += ntohl(p[i]); if (csum != 0xffffffff) { CH_ERR(adapter, "corrupted firmware image, checksum %u\n", csum); return -EINVAL; } ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector); if (ret) goto out; size -= 8; /* trim off version and checksum */ for (addr = FW_FLASH_BOOT_ADDR; size; ) { unsigned int chunk_size = min(size, 256U); ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1); if (ret) goto out; addr += chunk_size; fw_data += chunk_size; size -= chunk_size; } ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1); out: if (ret) CH_ERR(adapter, "firmware download failed, error %d\n", ret); return ret; } /* * t3_load_boot - download boot flash * @adapter: the adapter * @boot_data: the boot image to write * @size: image size * * Write the supplied boot image to the card's serial flash. * The boot image has the following sections: a 28-byte header and the * boot image. */ int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size) { boot_header_t *header = (boot_header_t *)boot_data; int ret; unsigned int addr; unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16; unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16; /* * Perform some primitive sanity testing to avoid accidentally * writing garbage over the boot sectors. We ought to check for * more but it's not worth it for now ... 
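 * (For reference: a PC expansion ROM image begins with the signature
 * bytes 0x55, 0xaa and stores its length in 512-byte units at byte 2,
 * which is exactly what the signature and length checks below verify.)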
*/ if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) { CH_ERR(adapter, "boot image too small/large\n"); return -EFBIG; } if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) { CH_ERR(adapter, "boot image missing signature\n"); return -EINVAL; } if (header->length * BOOT_SIZE_INC != size) { CH_ERR(adapter, "boot image header length != image length\n"); return -EINVAL; } ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end); if (ret) goto out; for (addr = BOOT_FLASH_BOOT_ADDR; size; ) { unsigned int chunk_size = min(size, 256U); ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0); if (ret) goto out; addr += chunk_size; boot_data += chunk_size; size -= chunk_size; } out: if (ret) CH_ERR(adapter, "boot image download failed, error %d\n", ret); return ret; } #define CIM_CTL_BASE 0x2000 /** * t3_cim_ctl_blk_read - read a block from CIM control region * @adap: the adapter * @addr: the start address within the CIM control region * @n: number of words to read * @valp: where to store the result * * Reads a block of 4-byte words from the CIM control region. */ int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n, unsigned int *valp) { int ret = 0; if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) return -EBUSY; for ( ; !ret && n--; addr += 4) { t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr); ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 0, 5, 2); if (!ret) *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA); } return ret; } static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg, u32 *rx_hash_high, u32 *rx_hash_low) { /* stop Rx unicast traffic */ t3_mac_disable_exact_filters(mac); /* stop broadcast, multicast, promiscuous mode traffic */ *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG + mac->offset); t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset, F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES, F_DISBCAST); *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset); t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset, 0); *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset); t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset, 0); /* Leave time to drain max RX fifo */ msleep(1); } static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg, u32 rx_hash_high, u32 rx_hash_low) { t3_mac_enable_exact_filters(mac); t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset, F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES, rx_cfg); t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset, rx_hash_high); t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset, rx_hash_low); } static int t3_detect_link_fault(adapter_t *adapter, int port_id) { struct port_info *pi = adap2pinfo(adapter, port_id); struct cmac *mac = &pi->mac; uint32_t rx_cfg, rx_hash_high, rx_hash_low; int link_fault; /* stop rx */ t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low); t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0); /* clear status and make sure intr is enabled */ (void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset); t3_xgm_intr_enable(adapter, port_id); /* restart rx */ t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN); t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low); link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset); return (link_fault & F_LINKFAULTCHANGE ? 
1 : 0); } static void t3_clear_faults(adapter_t *adapter, int port_id) { struct port_info *pi = adap2pinfo(adapter, port_id); struct cmac *mac = &pi->mac; if (adapter->params.nports <= 2) { t3_xgm_intr_disable(adapter, pi->port_id); t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset); t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT); t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset, F_XGM_INT, F_XGM_INT); t3_xgm_intr_enable(adapter, pi->port_id); } } /** * t3_link_changed - handle interface link changes * @adapter: the adapter * @port_id: the port index that changed link state * * Called when a port's link settings change to propagate the new values * to the associated PHY and MAC. After performing the common tasks it * invokes an OS-specific handler. */ void t3_link_changed(adapter_t *adapter, int port_id) { - int link_ok, speed, duplex, fc, link_fault; + int link_ok, speed, duplex, fc, link_fault, link_state; struct port_info *pi = adap2pinfo(adapter, port_id); struct cphy *phy = &pi->phy; struct cmac *mac = &pi->mac; struct link_config *lc = &pi->link_config; link_ok = lc->link_ok; speed = lc->speed; duplex = lc->duplex; fc = lc->fc; link_fault = 0; - phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); + phy->ops->get_link_status(phy, &link_state, &speed, &duplex, &fc); + link_ok = (link_state == PHY_LINK_UP); + if (link_state != PHY_LINK_PARTIAL) + phy->rst = 0; + else if (++phy->rst == 3) { + phy->ops->reset(phy, 0); + phy->rst = 0; + } if (link_ok == 0) pi->link_fault = LF_NO; if (lc->requested_fc & PAUSE_AUTONEG) fc &= lc->requested_fc; else fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); /* Update mac speed before checking for link fault. */ if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE && (speed != lc->speed || duplex != lc->duplex || fc != lc->fc)) t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc); /* * Check for link faults if any of these is true: * a) A link fault is suspected, and PHY says link ok * b) PHY link transitioned from down -> up */ if (adapter->params.nports <= 2 && ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) { link_fault = t3_detect_link_fault(adapter, port_id); if (link_fault) { if (pi->link_fault != LF_YES) { mac->stats.link_faults++; pi->link_fault = LF_YES; } if (uses_xaui(adapter)) { if (adapter->params.rev >= T3_REV_C) t3c_pcs_force_los(mac); else t3b_pcs_reset(mac); } /* Don't report link up */ link_ok = 0; } else { /* clear faults here if this was a false alarm. */ if (pi->link_fault == LF_MAYBE && link_ok && lc->link_ok) t3_clear_faults(adapter, port_id); pi->link_fault = LF_NO; } } if (link_ok == lc->link_ok && speed == lc->speed && duplex == lc->duplex && fc == lc->fc) return; /* nothing changed */ lc->link_ok = (unsigned char)link_ok; lc->speed = speed < 0 ? SPEED_INVALID : speed; lc->duplex = duplex < 0 ? 
DUPLEX_INVALID : duplex; lc->fc = fc; if (link_ok) { /* down -> up, or up -> up with changed settings */ if (adapter->params.rev > 0 && uses_xaui(adapter)) { if (adapter->params.rev >= T3_REV_C) t3c_pcs_force_los(mac); else t3b_pcs_reset(mac); t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, F_TXACTENABLE | F_RXEN); } /* disable TX FIFO drain */ t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset, F_ENDROPPKT, 0); t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset, F_CLRSTATS, 1); t3_clear_faults(adapter, port_id); } else { /* up -> down */ if (adapter->params.rev > 0 && uses_xaui(adapter)) { t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0); } t3_xgm_intr_disable(adapter, pi->port_id); if (adapter->params.nports <= 2) { t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset, F_XGM_INT, 0); t3_mac_disable(mac, MAC_DIRECTION_RX); /* * Make sure Tx FIFO continues to drain, even as rxen is * left high to help detect and indicate remote faults. */ t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset, 0, F_ENDROPPKT); t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0); t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset, F_TXEN); t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN); } } t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc, mac->was_reset); mac->was_reset = 0; } /** * t3_link_start - apply link configuration to MAC/PHY * @phy: the PHY to setup * @mac: the MAC to setup * @lc: the requested link configuration * * Set up a port's MAC and PHY according to a desired link configuration. * - If the PHY can auto-negotiate first decide what to advertise, then * enable/disable auto-negotiation as desired, and reset. * - If the PHY does not auto-negotiate just reset it. * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. */ int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) { unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); lc->link_ok = 0; if (lc->supported & SUPPORTED_Autoneg) { lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause); if (fc) { lc->advertising |= ADVERTISED_Asym_Pause; if (fc & PAUSE_RX) lc->advertising |= ADVERTISED_Pause; } phy->ops->advertise(phy, lc->advertising); if (lc->autoneg == AUTONEG_DISABLE) { lc->speed = lc->requested_speed; lc->duplex = lc->requested_duplex; lc->fc = (unsigned char)fc; t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex, fc); /* Also disables autoneg */ phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); /* PR 5666. Power phy up when doing an ifup */ if (!is_10G(phy->adapter)) phy->ops->power_down(phy, 0); } else phy->ops->autoneg_enable(phy); } else { t3_mac_set_speed_duplex_fc(mac, -1, -1, fc); lc->fc = (unsigned char)fc; phy->ops->reset(phy, 0); } return 0; } /** * t3_set_vlan_accel - control HW VLAN extraction * @adapter: the adapter * @ports: bitmap of adapter ports to operate on * @on: enable (1) or disable (0) HW VLAN extraction * * Enables or disables HW extraction of VLAN tags for the given port. */ void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on) { t3_set_reg_field(adapter, A_TP_OUT_CONFIG, ports << S_VLANEXTRACTIONENABLE, on ? 
(ports << S_VLANEXTRACTIONENABLE) : 0); } struct intr_info { unsigned int mask; /* bits to check in interrupt status */ const char *msg; /* message to print or NULL */ short stat_idx; /* stat counter to increment or -1 */ unsigned short fatal; /* whether the condition reported is fatal */ }; /** * t3_handle_intr_status - table-driven interrupt handler * @adapter: the adapter that generated the interrupt * @reg: the interrupt status register to process * @mask: a mask to apply to the interrupt status * @acts: table of interrupt actions * @stats: statistics counters tracking interrupt occurrences * * A table-driven interrupt handler that applies a set of masks to an * interrupt status word and performs the corresponding actions if the * interrupts described by the mask have occurred. The actions include * optionally printing a warning or alert message, and optionally * incrementing a stat counter. The table is terminated by an entry * specifying mask 0. Returns the number of fatal interrupt conditions. */ static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg, unsigned int mask, const struct intr_info *acts, unsigned long *stats) { int fatal = 0; unsigned int status = t3_read_reg(adapter, reg) & mask; for ( ; acts->mask; ++acts) { if (!(status & acts->mask)) continue; if (acts->fatal) { fatal++; CH_ALERT(adapter, "%s (0x%x)\n", acts->msg, status & acts->mask); status &= ~acts->mask; } else if (acts->msg) CH_WARN(adapter, "%s (0x%x)\n", acts->msg, status & acts->mask); if (acts->stat_idx >= 0) stats[acts->stat_idx]++; } if (status) /* clear processed interrupts */ t3_write_reg(adapter, reg, status); return fatal; } #define SGE_INTR_MASK (F_RSPQDISABLED | \ F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \ F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ F_HIRCQPARITYERROR) #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ F_NFASRCHFAIL) #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE)) #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \ V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \ F_TXFIFO_UNDERRUN) #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \ F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \ F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \ F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \ V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \ V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */) #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\ F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \ /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \ F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \ F_TXPARERR | V_BISTERR(M_BISTERR)) #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \ F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \ F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0) #define ULPTX_INTR_MASK 0xfc #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \ F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \ F_ZERO_SWITCH_ERROR) #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \ F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \ F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \ F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \ F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \ F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \ F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR) #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR)) #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR)) #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \ V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \ V_RXTPPARERRENB(M_RXTPPARERRENB) | \ V_MCAPARERRENB(M_MCAPARERRENB)) #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE) #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \ F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \ F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \ F_MPS0 | F_CPL_SWITCH) /* * Interrupt handler for the PCIX1 module. */ static void pci_intr_handler(adapter_t *adapter) { static struct intr_info pcix1_intr_info[] = { { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 }, { F_SIGTARABT, "PCI signaled target abort", -1, 1 }, { F_RCVTARABT, "PCI received target abort", -1, 1 }, { F_RCVMSTABT, "PCI received master abort", -1, 1 }, { F_SIGSYSERR, "PCI signaled system error", -1, 1 }, { F_DETPARERR, "PCI detected parity error", -1, 1 }, { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 }, { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 }, { F_RCVSPLCMPERR, "PCI received split completion error", -1, 1 }, { F_DETCORECCERR, "PCI correctable ECC error", STAT_PCI_CORR_ECC, 0 }, { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 }, { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 }, { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, 1 }, { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, 1 }, { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1, 1 }, { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity " "error", -1, 1 }, { 0 } }; if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK, pcix1_intr_info, adapter->irq_stats)) t3_fatal_err(adapter); } /* * Interrupt handler for the PCIE module. */ static void pcie_intr_handler(adapter_t *adapter) { static struct intr_info pcie_intr_info[] = { { F_PEXERR, "PCI PEX error", -1, 1 }, { F_UNXSPLCPLERRR, "PCI unexpected split completion DMA read error", -1, 1 }, { F_UNXSPLCPLERRC, "PCI unexpected split completion DMA command error", -1, 1 }, { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 }, { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 }, { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 }, { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 }, { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR), "PCI MSI-X table/PBA parity error", -1, 1 }, { F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 }, { F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 }, { F_RXPARERR, "PCI Rx parity error", -1, 1 }, { F_TXPARERR, "PCI Tx parity error", -1, 1 }, { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 }, { 0 } }; if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR) CH_ALERT(adapter, "PEX error code 0x%x\n", t3_read_reg(adapter, A_PCIE_PEX_ERR)); if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK, pcie_intr_info, adapter->irq_stats)) t3_fatal_err(adapter); } /* * TP interrupt handler. 
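 *
 * The tp_intr_info tables below follow the acts[] layout consumed by
 * t3_handle_intr_status() above: each entry is { mask, message,
 * stat_idx, fatal } and a zero mask terminates the table, e.g.
 *
 *	{ F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
 *	{ 0 }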
*/ static void tp_intr_handler(adapter_t *adapter) { static struct intr_info tp_intr_info[] = { { 0xffffff, "TP parity error", -1, 1 }, { 0x1000000, "TP out of Rx pages", -1, 1 }, { 0x2000000, "TP out of Tx pages", -1, 1 }, { 0 } }; static struct intr_info tp_intr_info_t3c[] = { { 0x1fffffff, "TP parity error", -1, 1 }, { F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 }, { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, { 0 } }; if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff, adapter->params.rev < T3_REV_C ? tp_intr_info : tp_intr_info_t3c, NULL)) t3_fatal_err(adapter); } /* * CIM interrupt handler. */ static void cim_intr_handler(adapter_t *adapter) { static struct intr_info cim_intr_info[] = { { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 }, { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 }, { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 }, { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 }, { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 }, { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 }, { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 }, { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 }, { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 }, { F_DRAMPARERR, "CIM DRAM parity error", -1, 1 }, { F_ICACHEPARERR, "CIM icache parity error", -1, 1 }, { F_DCACHEPARERR, "CIM dcache parity error", -1, 1 }, { F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 }, { F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 }, { F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 }, { F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 }, { F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 }, { F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 }, { F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 }, { F_ITAGPARERR, "CIM itag parity error", -1, 1 }, { F_DTAGPARERR, "CIM dtag parity error", -1, 1 }, { 0 } }; if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK, cim_intr_info, NULL)) t3_fatal_err(adapter); } /* * ULP RX interrupt handler. */ static void ulprx_intr_handler(adapter_t *adapter) { static struct intr_info ulprx_intr_info[] = { { F_PARERRDATA, "ULP RX data parity error", -1, 1 }, { F_PARERRPCMD, "ULP RX command parity error", -1, 1 }, { F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 }, { F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 }, { F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 }, { F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 }, { F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 }, { F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 }, { 0 } }; if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff, ulprx_intr_info, NULL)) t3_fatal_err(adapter); } /* * ULP TX interrupt handler. 
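 *
 * Note that the two PBL out-of-bounds entries below are non-fatal; they
 * only bump adapter->irq_stats[STAT_ULP_CH0_PBL_OOB] (or the CH1
 * counterpart) through the stat_idx field, while the 0xfc parity mask
 * remains fatal.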
*/ static void ulptx_intr_handler(adapter_t *adapter) { static struct intr_info ulptx_intr_info[] = { { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds", STAT_ULP_CH0_PBL_OOB, 0 }, { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds", STAT_ULP_CH1_PBL_OOB, 0 }, { 0xfc, "ULP TX parity error", -1, 1 }, { 0 } }; if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff, ulptx_intr_info, adapter->irq_stats)) t3_fatal_err(adapter); } #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \ F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \ F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \ F_ICSPI1_TX_FRAMING_ERROR) #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \ F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \ F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \ F_OESPI1_OFIFO2X_TX_FRAMING_ERROR) /* * PM TX interrupt handler. */ static void pmtx_intr_handler(adapter_t *adapter) { static struct intr_info pmtx_intr_info[] = { { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 }, { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 }, { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR), "PMTX ispi parity error", -1, 1 }, { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR), "PMTX ospi parity error", -1, 1 }, { 0 } }; if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff, pmtx_intr_info, NULL)) t3_fatal_err(adapter); } #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \ F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \ F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \ F_IESPI1_TX_FRAMING_ERROR) #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \ F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \ F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \ F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR) /* * PM RX interrupt handler. */ static void pmrx_intr_handler(adapter_t *adapter) { static struct intr_info pmrx_intr_info[] = { { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 }, { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 }, { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR), "PMRX ispi parity error", -1, 1 }, { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR), "PMRX ospi parity error", -1, 1 }, { 0 } }; if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff, pmrx_intr_info, NULL)) t3_fatal_err(adapter); } /* * CPL switch interrupt handler. */ static void cplsw_intr_handler(adapter_t *adapter) { static struct intr_info cplsw_intr_info[] = { { F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 }, { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 }, { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 }, { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 }, { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 }, { 0 } }; if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff, cplsw_intr_info, NULL)) t3_fatal_err(adapter); } /* * MPS interrupt handler. */ static void mps_intr_handler(adapter_t *adapter) { static struct intr_info mps_intr_info[] = { { 0x1ff, "MPS parity error", -1, 1 }, { 0 } }; if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff, mps_intr_info, NULL)) t3_fatal_err(adapter); } #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE) /* * MC7 interrupt handler. 
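 *
 * MC7_INTR_FATAL above deliberately omits F_CE: correctable ECC errors
 * are counted and logged by the handler below but do not bring the
 * adapter down.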
*/ static void mc7_intr_handler(struct mc7 *mc7) { adapter_t *adapter = mc7->adapter; u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE); if (cause & F_CE) { mc7->stats.corr_err++; CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, " "data 0x%x 0x%x 0x%x\n", mc7->name, t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR), t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0), t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1), t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2)); } if (cause & F_UE) { mc7->stats.uncorr_err++; CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, " "data 0x%x 0x%x 0x%x\n", mc7->name, t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR), t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0), t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1), t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2)); } if (G_PE(cause)) { mc7->stats.parity_err++; CH_ALERT(adapter, "%s MC7 parity error 0x%x\n", mc7->name, G_PE(cause)); } if (cause & F_AE) { u32 addr = 0; if (adapter->params.rev > 0) addr = t3_read_reg(adapter, mc7->offset + A_MC7_ERR_ADDR); mc7->stats.addr_err++; CH_ALERT(adapter, "%s MC7 address error: 0x%x\n", mc7->name, addr); } if (cause & MC7_INTR_FATAL) t3_fatal_err(adapter); t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause); } #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \ V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) /* * XGMAC interrupt handler. */ static int mac_intr_handler(adapter_t *adap, unsigned int idx) { u32 cause; struct port_info *pi; struct cmac *mac; idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */ pi = adap2pinfo(adap, idx); mac = &pi->mac; /* * We mask out interrupt causes for which we're not taking interrupts. * This allows us to use polling logic to monitor some of the other * conditions when taking interrupts would impose too much load on the * system. */ cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) & ~(F_RXFIFO_OVERFLOW)); if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) { mac->stats.tx_fifo_parity_err++; CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx); } if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) { mac->stats.rx_fifo_parity_err++; CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx); } if (cause & F_TXFIFO_UNDERRUN) mac->stats.tx_fifo_urun++; if (cause & F_RXFIFO_OVERFLOW) mac->stats.rx_fifo_ovfl++; if (cause & V_SERDES_LOS(M_SERDES_LOS)) mac->stats.serdes_signal_loss++; if (cause & F_XAUIPCSCTCERR) mac->stats.xaui_pcs_ctc_err++; if (cause & F_XAUIPCSALIGNCHANGE) mac->stats.xaui_pcs_align_change++; if (cause & F_XGM_INT & t3_read_reg(adap, A_XGM_INT_ENABLE + mac->offset)) { t3_set_reg_field(adap, A_XGM_INT_ENABLE + mac->offset, F_XGM_INT, 0); /* link fault suspected */ pi->link_fault = LF_MAYBE; t3_os_link_intr(pi); } if (cause & XGM_INTR_FATAL) t3_fatal_err(adap); t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause); return cause != 0; } /* * Interrupt handler for PHY events. 
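 *
 * Each port's PHY interrupt arrives on a board-specific GPIO pin: the
 * gpio_intr[] table in the adapter info maps port index to pin, and
 * calc_gpio_intr() further below builds the matching T3DBG enable mask.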
*/ static int phy_intr_handler(adapter_t *adapter) { u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); for_each_port(adapter, i) { struct port_info *p = adap2pinfo(adapter, i); if (!(p->phy.caps & SUPPORTED_IRQ)) continue; if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) { int phy_cause = p->phy.ops->intr_handler(&p->phy); if (phy_cause & cphy_cause_link_change) t3_os_link_intr(p); if (phy_cause & cphy_cause_fifo_error) p->phy.fifo_errors++; if (phy_cause & cphy_cause_module_change) t3_os_phymod_changed(adapter, i); if (phy_cause & cphy_cause_alarm) CH_WARN(adapter, "Operation affected due to " "adverse environment. Check the spec " "sheet for corrective action."); } } t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause); return 0; } /** * t3_slow_intr_handler - control path interrupt handler * @adapter: the adapter * * T3 interrupt handler for non-data interrupt events, e.g., errors. * The designation 'slow' is because it involves register reads, while * data interrupts typically don't involve any MMIOs. */ int t3_slow_intr_handler(adapter_t *adapter) { u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0); cause &= adapter->slow_intr_mask; if (!cause) return 0; if (cause & F_PCIM0) { if (is_pcie(adapter)) pcie_intr_handler(adapter); else pci_intr_handler(adapter); } if (cause & F_SGE3) t3_sge_err_intr_handler(adapter); if (cause & F_MC7_PMRX) mc7_intr_handler(&adapter->pmrx); if (cause & F_MC7_PMTX) mc7_intr_handler(&adapter->pmtx); if (cause & F_MC7_CM) mc7_intr_handler(&adapter->cm); if (cause & F_CIM) cim_intr_handler(adapter); if (cause & F_TP1) tp_intr_handler(adapter); if (cause & F_ULP2_RX) ulprx_intr_handler(adapter); if (cause & F_ULP2_TX) ulptx_intr_handler(adapter); if (cause & F_PM1_RX) pmrx_intr_handler(adapter); if (cause & F_PM1_TX) pmtx_intr_handler(adapter); if (cause & F_CPL_SWITCH) cplsw_intr_handler(adapter); if (cause & F_MPS0) mps_intr_handler(adapter); if (cause & F_MC5A) t3_mc5_intr_handler(&adapter->mc5); if (cause & F_XGMAC0_0) mac_intr_handler(adapter, 0); if (cause & F_XGMAC0_1) mac_intr_handler(adapter, 1); if (cause & F_T3DBG) phy_intr_handler(adapter); /* Clear the interrupts just processed. */ t3_write_reg(adapter, A_PL_INT_CAUSE0, cause); (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ return 1; } static unsigned int calc_gpio_intr(adapter_t *adap) { unsigned int i, gpi_intr = 0; for_each_port(adap, i) if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) && adapter_info(adap)->gpio_intr[i]) gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i]; return gpi_intr; } /** * t3_intr_enable - enable interrupts * @adapter: the adapter whose interrupts should be enabled * * Enable interrupts by setting the interrupt enable registers of the * various HW modules and then enabling the top-level interrupt * concentrator. */ void t3_intr_enable(adapter_t *adapter) { static struct addr_val_pair intr_en_avp[] = { { A_MC7_INT_ENABLE, MC7_INTR_MASK }, { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, MC7_INTR_MASK }, { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, MC7_INTR_MASK }, { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK }, { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK }, { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK }, { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK }, { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK }, { A_MPS_INT_ENABLE, MPS_INTR_MASK }, }; adapter->slow_intr_mask = PL_INTR_MASK; t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0); t3_write_reg(adapter, A_TP_INT_ENABLE, adapter->params.rev >= T3_REV_C ? 
0x2bfffff : 0x3bfffff); t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK); if (adapter->params.rev > 0) { t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK | F_CIM_OVFL_ERROR); t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 | F_PBL_BOUND_ERR_CH1); } else { t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK); t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK); } t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter)); if (is_pcie(adapter)) t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK); else t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK); t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask); (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ } /** * t3_intr_disable - disable a card's interrupts * @adapter: the adapter whose interrupts should be disabled * * Disable interrupts. We only disable the top-level interrupt * concentrator and the SGE data interrupts. */ void t3_intr_disable(adapter_t *adapter) { t3_write_reg(adapter, A_PL_INT_ENABLE0, 0); (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ adapter->slow_intr_mask = 0; } /** * t3_intr_clear - clear all interrupts * @adapter: the adapter whose interrupts should be cleared * * Clears all interrupts. */ void t3_intr_clear(adapter_t *adapter) { static const unsigned int cause_reg_addr[] = { A_SG_INT_CAUSE, A_SG_RSPQ_FL_STATUS, A_PCIX_INT_CAUSE, A_MC7_INT_CAUSE, A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, A_CIM_HOST_INT_CAUSE, A_TP_INT_CAUSE, A_MC5_DB_INT_CAUSE, A_ULPRX_INT_CAUSE, A_ULPTX_INT_CAUSE, A_CPL_INTR_CAUSE, A_PM1_TX_INT_CAUSE, A_PM1_RX_INT_CAUSE, A_MPS_INT_CAUSE, A_T3DBG_INT_CAUSE, }; unsigned int i; /* Clear PHY and MAC interrupts for each port. */ for_each_port(adapter, i) t3_port_intr_clear(adapter, i); for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i) t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff); if (is_pcie(adapter)) t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff); t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff); (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ } void t3_xgm_intr_enable(adapter_t *adapter, int idx) { struct port_info *pi = adap2pinfo(adapter, idx); t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset, XGM_EXTRA_INTR_MASK); } void t3_xgm_intr_disable(adapter_t *adapter, int idx) { struct port_info *pi = adap2pinfo(adapter, idx); t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset, 0x7ff); } /** * t3_port_intr_enable - enable port-specific interrupts * @adapter: associated adapter * @idx: index of port whose interrupts should be enabled * * Enable port-specific (i.e., MAC and PHY) interrupts for the given * adapter port. */ void t3_port_intr_enable(adapter_t *adapter, int idx) { struct port_info *pi = adap2pinfo(adapter, idx); t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK); pi->phy.ops->intr_enable(&pi->phy); } /** * t3_port_intr_disable - disable port-specific interrupts * @adapter: associated adapter * @idx: index of port whose interrupts should be disabled * * Disable port-specific (i.e., MAC and PHY) interrupts for the given * adapter port. 
*/ void t3_port_intr_disable(adapter_t *adapter, int idx) { struct port_info *pi = adap2pinfo(adapter, idx); t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0); pi->phy.ops->intr_disable(&pi->phy); } /** * t3_port_intr_clear - clear port-specific interrupts * @adapter: associated adapter * @idx: index of port whose interrupts to clear * * Clear port-specific (i.e., MAC and PHY) interrupts for the given * adapter port. */ void t3_port_intr_clear(adapter_t *adapter, int idx) { struct port_info *pi = adap2pinfo(adapter, idx); t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff); pi->phy.ops->intr_clear(&pi->phy); } #define SG_CONTEXT_CMD_ATTEMPTS 100 /** * t3_sge_write_context - write an SGE context * @adapter: the adapter * @id: the context id * @type: the context type * * Program an SGE context with the values already loaded in the * CONTEXT_DATA? registers. */ static int t3_sge_write_context(adapter_t *adapter, unsigned int id, unsigned int type) { if (type == F_RESPONSEQ) { /* * Can't write the Response Queue Context bits for * Interrupt Armed or the Reserve bits after the chip * has been initialized out of reset. Writing to these * bits can confuse the hardware. */ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff); t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); } else { t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff); t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); } t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** * clear_sge_ctxt - completely clear an SGE context * @adapter: the adapter * @id: the context id * @type: the context type * * Completely clear an SGE context. Used predominantly at post-reset * initialization. Note in particular that we don't skip writing to any * "sensitive bits" in the contexts the way that t3_sge_write_context() * does ... */ static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type) { t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0); t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0); t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0); t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0); t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff); t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff); t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff); t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff); t3_write_reg(adap, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** * t3_sge_init_ecntxt - initialize an SGE egress context * @adapter: the adapter to configure * @id: the context id * @gts_enable: whether to enable GTS for the context * @type: the egress context type * @respq: associated response queue * @base_addr: base address of queue * @size: number of queue entries * @token: uP token * @gen: initial generation value for the context * @cidx: consumer pointer * * Initialize an SGE egress context and make it ready for use. If the * platform allows concurrent context operations, the caller is * responsible for appropriate locking. 
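 *
 * The 4KB-aligned @base_addr is programmed as a page number spread over
 * the CONTEXT_DATA registers: after the initial shift by 12, bits 15:0
 * go into DATA1, bits 47:16 into DATA2, and the remaining high bits
 * into DATA3 (see the successive shifts below).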
*/ int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable, enum sge_context_type type, int respq, u64 base_addr, unsigned int size, unsigned int token, int gen, unsigned int cidx) { unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM; if (base_addr & 0xfff) /* must be 4K aligned */ return -EINVAL; if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; base_addr >>= 12; t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) | V_EC_CREDITS(credits) | V_EC_GTS(gts_enable)); t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) | V_EC_BASE_LO((u32)base_addr & 0xffff)); base_addr >>= 16; t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr); base_addr >>= 32; t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) | V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) | F_EC_VALID); return t3_sge_write_context(adapter, id, F_EGRESS); } /** * t3_sge_init_flcntxt - initialize an SGE free-buffer list context * @adapter: the adapter to configure * @id: the context id * @gts_enable: whether to enable GTS for the context * @base_addr: base address of queue * @size: number of queue entries * @bsize: size of each buffer for this queue * @cong_thres: threshold to signal congestion to upstream producers * @gen: initial generation value for the context * @cidx: consumer pointer * * Initialize an SGE free list context and make it ready for use. The * caller is responsible for ensuring only one context operation occurs * at a time. */ int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable, u64 base_addr, unsigned int size, unsigned int bsize, unsigned int cong_thres, int gen, unsigned int cidx) { if (base_addr & 0xfff) /* must be 4K aligned */ return -EINVAL; if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; base_addr >>= 12; t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr); base_addr >>= 32; t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_FL_BASE_HI((u32)base_addr) | V_FL_INDEX_LO(cidx & M_FL_INDEX_LO)); t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) | V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) | V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO)); t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) | V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable)); return t3_sge_write_context(adapter, id, F_FREELIST); } /** * t3_sge_init_rspcntxt - initialize an SGE response queue context * @adapter: the adapter to configure * @id: the context id * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ * @base_addr: base address of queue * @size: number of queue entries * @fl_thres: threshold for selecting the normal or jumbo free list * @gen: initial generation value for the context * @cidx: consumer pointer * * Initialize an SGE response queue context and make it ready for use. * The caller is responsible for ensuring only one context operation * occurs at a time. 
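 *
 * @irq_vec_idx encodes three cases, decoded below:
 *
 *	irq_vec_idx < 0		no IRQ; RQ_INTR_EN stays clear
 *	irq_vec_idx == 0	no MSI-X; the interrupt is enabled unless
 *				the SGE is in one-interrupt-multiple-queue
 *				mode (F_ONEINTMULTQ)
 *	irq_vec_idx > 0		MSI-X; the interrupt is enabled and the
 *				vector number is programmed into the context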
*/ int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx, u64 base_addr, unsigned int size, unsigned int fl_thres, int gen, unsigned int cidx) { unsigned int ctrl, intr = 0; if (base_addr & 0xfff) /* must be 4K aligned */ return -EINVAL; if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; base_addr >>= 12; t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) | V_CQ_INDEX(cidx)); t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr); base_addr >>= 32; ctrl = t3_read_reg(adapter, A_SG_CONTROL); if ((irq_vec_idx > 0) || ((irq_vec_idx == 0) && !(ctrl & F_ONEINTMULTQ))) intr = F_RQ_INTR_EN; if (irq_vec_idx >= 0) intr |= V_RQ_MSI_VEC(irq_vec_idx); t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen)); t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres); return t3_sge_write_context(adapter, id, F_RESPONSEQ); } /** * t3_sge_init_cqcntxt - initialize an SGE completion queue context * @adapter: the adapter to configure * @id: the context id * @base_addr: base address of queue * @size: number of queue entries * @rspq: response queue for async notifications * @ovfl_mode: CQ overflow mode * @credits: completion queue credits * @credit_thres: the credit threshold * * Initialize an SGE completion queue context and make it ready for use. * The caller is responsible for ensuring only one context operation * occurs at a time. */ int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr, unsigned int size, int rspq, int ovfl_mode, unsigned int credits, unsigned int credit_thres) { if (base_addr & 0xfff) /* must be 4K aligned */ return -EINVAL; if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; base_addr >>= 12; t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size)); t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr); base_addr >>= 32; t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) | V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) | V_CQ_ERR(ovfl_mode)); t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) | V_CQ_CREDIT_THRES(credit_thres)); return t3_sge_write_context(adapter, id, F_CQ); } /** * t3_sge_enable_ecntxt - enable/disable an SGE egress context * @adapter: the adapter * @id: the egress context id * @enable: enable (1) or disable (0) the context * * Enable or disable an SGE egress context. The caller is responsible for * ensuring only one context operation occurs at a time. */ int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable) { if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID); t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable)); t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** * t3_sge_disable_fl - disable an SGE free-buffer list * @adapter: the adapter * @id: the free list context id * * Disable an SGE free-buffer list. The caller is responsible for * ensuring only one context operation occurs at a time. 
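 *
 * Disabling works by zeroing only the FL_SIZE field: the CONTEXT_MASK
 * registers written below select which context bits the command may
 * modify, so every other field of the free-list context is preserved.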
*/ int t3_sge_disable_fl(adapter_t *adapter, unsigned int id) { if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE)); t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0); t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** * t3_sge_disable_rspcntxt - disable an SGE response queue * @adapter: the adapter * @id: the response queue context id * * Disable an SGE response queue. The caller is responsible for * ensuring only one context operation occurs at a time. */ int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id) { if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** * t3_sge_disable_cqcntxt - disable an SGE completion queue * @adapter: the adapter * @id: the completion queue context id * * Disable an SGE completion queue. The caller is responsible for * ensuring only one context operation occurs at a time. */ int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id) { if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** * t3_sge_cqcntxt_op - perform an operation on a completion queue context * @adapter: the adapter * @id: the context id * @op: the operation to perform * @credits: credits to return to the CQ * * Perform the selected operation on an SGE completion queue context. * The caller is responsible for ensuring only one context operation * occurs at a time. * * For most operations the function returns the current HW position in * the completion queue. 
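 *
 * Usage sketch (the op code and credit count here are illustrative):
 *
 *	int pos = t3_sge_cqcntxt_op(adapter, id, 3, 16);
 *
 * For ops 2-6 a non-negative return is the current CQ index; other ops
 * return 0 on success, and all return a negative errno on failure.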
*/ int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op, unsigned int credits) { u32 val; if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16); t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) | V_CONTEXT(id) | F_CQ); if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val)) return -EIO; if (op >= 2 && op < 7) { if (adapter->params.rev > 0) return G_CQ_INDEX(val); t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id)); if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1)) return -EIO; return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0)); } return 0; } /** * t3_sge_read_context - read an SGE context * @type: the context type * @adapter: the adapter * @id: the context id * @data: holds the retrieved context * * Read an SGE context of the given type. The caller is responsible for * ensuring only one context operation occurs at a time. */ static int t3_sge_read_context(unsigned int type, adapter_t *adapter, unsigned int id, u32 data[4]) { if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id)); if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, SG_CONTEXT_CMD_ATTEMPTS, 1)) return -EIO; data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0); data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1); data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2); data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3); return 0; } /** * t3_sge_read_ecntxt - read an SGE egress context * @adapter: the adapter * @id: the context id * @data: holds the retrieved context * * Read an SGE egress context. The caller is responsible for ensuring * only one context operation occurs at a time. */ int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4]) { if (id >= 65536) return -EINVAL; return t3_sge_read_context(F_EGRESS, adapter, id, data); } /** * t3_sge_read_cq - read an SGE CQ context * @adapter: the adapter * @id: the context id * @data: holds the retrieved context * * Read an SGE CQ context. The caller is responsible for ensuring * only one context operation occurs at a time. */ int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4]) { if (id >= 65536) return -EINVAL; return t3_sge_read_context(F_CQ, adapter, id, data); } /** * t3_sge_read_fl - read an SGE free-list context * @adapter: the adapter * @id: the context id * @data: holds the retrieved context * * Read an SGE free-list context. The caller is responsible for ensuring * only one context operation occurs at a time. */ int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4]) { if (id >= SGE_QSETS * 2) return -EINVAL; return t3_sge_read_context(F_FREELIST, adapter, id, data); } /** * t3_sge_read_rspq - read an SGE response queue context * @adapter: the adapter * @id: the context id * @data: holds the retrieved context * * Read an SGE response queue context. The caller is responsible for * ensuring only one context operation occurs at a time. 
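 *
 * Usage sketch (queue id 0 is illustrative):
 *
 *	u32 ctx[4];
 *	int err = t3_sge_read_rspq(adapter, 0, ctx);
 *
 * On success ctx[0..3] hold SG_CONTEXT_DATA0..DATA3 for that queue.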
*/ int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4]) { if (id >= SGE_QSETS) return -EINVAL; return t3_sge_read_context(F_RESPONSEQ, adapter, id, data); } /** * t3_config_rss - configure Rx packet steering * @adapter: the adapter * @rss_config: RSS settings (written to TP_RSS_CONFIG) * @cpus: values for the CPU lookup table (0xff terminated) * @rspq: values for the response queue lookup table (0xffff terminated) * * Programs the receive packet steering logic. @cpus and @rspq provide * the values for the CPU and response queue lookup tables. If they * provide fewer values than the size of the tables the supplied values * are used repeatedly until the tables are fully populated. */ void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus, const u16 *rspq) { int i, j, cpu_idx = 0, q_idx = 0; if (cpus) for (i = 0; i < RSS_TABLE_SIZE; ++i) { u32 val = i << 16; for (j = 0; j < 2; ++j) { val |= (cpus[cpu_idx++] & 0x3f) << (8 * j); if (cpus[cpu_idx] == 0xff) cpu_idx = 0; } t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val); } if (rspq) for (i = 0; i < RSS_TABLE_SIZE; ++i) { t3_write_reg(adapter, A_TP_RSS_MAP_TABLE, (i << 16) | rspq[q_idx++]); if (rspq[q_idx] == 0xffff) q_idx = 0; } t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config); } /** * t3_read_rss - read the contents of the RSS tables * @adapter: the adapter * @lkup: holds the contents of the RSS lookup table * @map: holds the contents of the RSS map table * * Reads the contents of the receive packet steering tables. */ int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map) { int i; u32 val; if (lkup) for (i = 0; i < RSS_TABLE_SIZE; ++i) { t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, 0xffff0000 | i); val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE); if (!(val & 0x80000000)) return -EAGAIN; *lkup++ = (u8)val; *lkup++ = (u8)(val >> 8); } if (map) for (i = 0; i < RSS_TABLE_SIZE; ++i) { t3_write_reg(adapter, A_TP_RSS_MAP_TABLE, 0xffff0000 | i); val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE); if (!(val & 0x80000000)) return -EAGAIN; *map++ = (u16)val; } return 0; } /** * t3_tp_set_offload_mode - put TP in NIC/offload mode * @adap: the adapter * @enable: 1 to select offload mode, 0 for regular NIC * * Switches TP to NIC/offload mode. */ void t3_tp_set_offload_mode(adapter_t *adap, int enable) { if (is_offload(adap) || !enable) t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, V_NICMODE(!enable)); } /** * tp_wr_bits_indirect - set/clear bits in an indirect TP register * @adap: the adapter * @addr: the indirect TP register address * @mask: specifies the field within the register to modify * @val: new value for the field * * Sets a field of an indirect TP register to the given value. */ static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr, unsigned int mask, unsigned int val) { t3_write_reg(adap, A_TP_PIO_ADDR, addr); val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask; t3_write_reg(adap, A_TP_PIO_DATA, val); } /** * t3_enable_filters - enable the HW filters * @adap: the adapter * * Enables the HW filters for NIC traffic. */ void t3_enable_filters(adapter_t *adap) { t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0); t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN); t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3)); tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT); } /** * t3_disable_filters - disable the HW filters * @adap: the adapter * * Disables the HW filters for NIC traffic. 
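 *
 * This is the inverse of t3_enable_filters above; note that it leaves
 * NIC mode alone, so filtering can later be turned back on with a
 * plain t3_enable_filters(adap) call.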
*/ void t3_disable_filters(adapter_t *adap) { /* note that we don't want to revert to NIC-only mode */ t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0); t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0); tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0); } /** * pm_num_pages - calculate the number of pages of the payload memory * @mem_size: the size of the payload memory * @pg_size: the size of each payload memory page * * Calculate the number of pages, each of the given size, that fit in a * memory of the specified size, respecting the HW requirement that the * number of pages must be a multiple of 24. */ static inline unsigned int pm_num_pages(unsigned int mem_size, unsigned int pg_size) { unsigned int n = mem_size / pg_size; return n - n % 24; } #define mem_region(adap, start, size, reg) \ t3_write_reg((adap), A_ ## reg, (start)); \ start += size /** * partition_mem - partition memory and configure TP memory settings * @adap: the adapter * @p: the TP parameters * * Partitions context and payload memory and configures TP's memory * registers. */ static void partition_mem(adapter_t *adap, const struct tp_params *p) { unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5); unsigned int timers = 0, timers_shift = 22; if (adap->params.rev > 0) { if (tids <= 16 * 1024) { timers = 1; timers_shift = 16; } else if (tids <= 64 * 1024) { timers = 2; timers_shift = 18; } else if (tids <= 256 * 1024) { timers = 3; timers_shift = 20; } } t3_write_reg(adap, A_TP_PMM_SIZE, p->chan_rx_size | (p->chan_tx_size >> 16)); t3_write_reg(adap, A_TP_PMM_TX_BASE, 0); t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size); t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs); t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX), V_TXDATAACKIDX(fls(p->tx_pg_size) - 12)); t3_write_reg(adap, A_TP_PMM_RX_BASE, 0); t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size); t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs); pstructs = p->rx_num_pgs + p->tx_num_pgs; /* Add a bit of headroom and make multiple of 24 */ pstructs += 48; pstructs -= pstructs % 24; t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs); m = tids * TCB_SIZE; mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR); mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR); t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m); m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22); mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE); mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE); mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE); mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE); m = (m + 4095) & ~0xfff; t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m); t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m); tids = (p->cm_size - m - (3 << 20)) / 3072 - 32; m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - adap->params.mc5.nfilters - adap->params.mc5.nroutes; if (tids < m) adap->params.mc5.nservers += m - tids; } static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val) { t3_write_reg(adap, A_TP_PIO_ADDR, addr); t3_write_reg(adap, A_TP_PIO_DATA, val); } static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr) { t3_write_reg(adap, A_TP_PIO_ADDR, addr); return t3_read_reg(adap, A_TP_PIO_DATA); } static void tp_config(adapter_t *adap, const struct tp_params *p) { t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU | F_IPCHECKSUMOFFLOAD 
| F_UDPCHECKSUMOFFLOAD | F_TCPCHECKSUMOFFLOAD | V_IPTTL(64)); t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) | F_MTUENABLE | V_WINDOWSCALEMODE(1) | V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1)); t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) | V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) | F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1)); t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO, F_IPV6ENABLE | F_NICMODE); t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814); t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105); t3_set_reg_field(adap, A_TP_PARA_REG6, 0, adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND); t3_set_reg_field(adap, A_TP_PC_CONFIG, F_ENABLEEPCMDAFULL, F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE | F_RXCONGESTIONMODE); t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN | F_ENABLEARPMISS | F_DISBLEDAPARBIT0); t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080); t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000); if (adap->params.rev > 0) { tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE); t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTO | F_TXPACEAUTOSTRICT); t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID); tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50); tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688); tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688); } else t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED); if (adap->params.rev == T3_REV_C) t3_set_reg_field(adap, A_TP_PC_CONFIG, V_TABLELATENCYDELTA(M_TABLELATENCYDELTA), V_TABLELATENCYDELTA(4)); t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0); t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0); t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0); t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000); if (adap->params.nports > 2) { t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0, F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA | F_ENABLERXPORTFROMADDR); tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE, V_RXMAPMODE(M_RXMAPMODE), 0); tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) | V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) | F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD | F_ENABLEINSERTION | F_ENABLEINSERTIONSFD); tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000); tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5); tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT); } } /* TCP timer values in ms */ #define TP_DACK_TIMER 50 #define TP_RTO_MIN 250 /** * tp_set_timers - set TP timing parameters * @adap: the adapter to set * @core_clk: the core clock frequency in Hz * * Set TP's timing parameters, such as the various timer resolutions and * the TCP timer values. 
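 *
 * The resolutions are powers of two of the core clock.  For example
 * (numbers purely illustrative), with core_clk = 200 MHz and tre = 10
 * the TCP timer runs at tps = core_clk >> tre, i.e. one tick is
 * 2^10 / 200e6 s, about 5.12 us.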
*/ static void tp_set_timers(adapter_t *adap, unsigned int core_clk) { unsigned int tre = adap->params.tp.tre; unsigned int dack_re = adap->params.tp.dack_re; unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */ unsigned int tps = core_clk >> tre; t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) | V_DELAYEDACKRESOLUTION(dack_re) | V_TIMESTAMPRESOLUTION(tstamp_re)); t3_write_reg(adap, A_TP_DACK_TIMER, (core_clk >> dack_re) / (1000 / TP_DACK_TIMER)); t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100); t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504); t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908); t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c); t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | V_KEEPALIVEMAX(9)); #define SECONDS * tps t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS); t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN)); t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS); t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS); t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS); t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS); t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS); t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS); t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS); #undef SECONDS } /** * t3_tp_set_coalescing_size - set receive coalescing size * @adap: the adapter * @size: the receive coalescing size * @psh: whether a set PSH bit should deliver coalesced data * * Set the receive coalescing size and PSH bit handling. */ int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh) { u32 val; if (size > MAX_RX_COALESCING_LEN) return -EINVAL; val = t3_read_reg(adap, A_TP_PARA_REG3); val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN); if (size) { val |= F_RXCOALESCEENABLE; if (psh) val |= F_RXCOALESCEPSHEN; size = min(MAX_RX_COALESCING_LEN, size); t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) | V_MAXRXDATA(MAX_RX_COALESCING_LEN)); } t3_write_reg(adap, A_TP_PARA_REG3, val); return 0; } /** * t3_tp_set_max_rxsize - set the max receive size * @adap: the adapter * @size: the max receive size * * Set TP's max receive size. This is the limit that applies when * receive coalescing is disabled. */ void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size) { t3_write_reg(adap, A_TP_PARA_REG7, V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size)); } static void __devinit init_mtus(unsigned short mtus[]) { /* * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so * it can accommodate max size TCP/IP headers when SACK and timestamps * are enabled and still have at least 8 bytes of payload. */ mtus[0] = 88; mtus[1] = 88; mtus[2] = 256; mtus[3] = 512; mtus[4] = 576; mtus[5] = 1024; mtus[6] = 1280; mtus[7] = 1492; mtus[8] = 1500; mtus[9] = 2002; mtus[10] = 2048; mtus[11] = 4096; mtus[12] = 4352; mtus[13] = 8192; mtus[14] = 9000; mtus[15] = 9600; } /** * init_cong_ctrl - initialize congestion control parameters * @a: the alpha values for congestion control * @b: the beta values for congestion control * * Initialize the congestion control parameters. 
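 *
 * The alpha values feed the additive-increment computation in
 * t3_load_mtus below (increment = (mtu - 40) * alpha / avg_pkts),
 * while each beta value occupies a 3-bit field of the corresponding
 * congestion control table entry.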
*/ static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) { a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; a[9] = 2; a[10] = 3; a[11] = 4; a[12] = 5; a[13] = 6; a[14] = 7; a[15] = 8; a[16] = 9; a[17] = 10; a[18] = 14; a[19] = 17; a[20] = 21; a[21] = 25; a[22] = 30; a[23] = 35; a[24] = 45; a[25] = 60; a[26] = 80; a[27] = 100; a[28] = 200; a[29] = 300; a[30] = 400; a[31] = 500; b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; b[9] = b[10] = 1; b[11] = b[12] = 2; b[13] = b[14] = b[15] = b[16] = 3; b[17] = b[18] = b[19] = b[20] = b[21] = 4; b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; b[28] = b[29] = 6; b[30] = b[31] = 7; } /* The minimum additive increment value for the congestion control table */ #define CC_MIN_INCR 2U /** * t3_load_mtus - write the MTU and congestion control HW tables * @adap: the adapter * @mtus: the unrestricted values for the MTU table * @alpha: the values for the congestion control alpha parameter * @beta: the values for the congestion control beta parameter * @mtu_cap: the maximum permitted effective MTU * * Write the MTU table with the supplied MTUs capping each at &mtu_cap. * Update the high-speed congestion control table with the supplied alpha, * beta, and MTUs. */ void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS], unsigned short alpha[NCCTRL_WIN], unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap) { static const unsigned int avg_pkts[NCCTRL_WIN] = { 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 28672, 40960, 57344, 81920, 114688, 163840, 229376 }; unsigned int i, w; for (i = 0; i < NMTUS; ++i) { unsigned int mtu = min(mtus[i], mtu_cap); unsigned int log2 = fls(mtu); if (!(mtu & ((1 << log2) >> 2))) /* round */ log2--; t3_write_reg(adap, A_TP_MTU_TABLE, (i << 24) | (log2 << 16) | mtu); for (w = 0; w < NCCTRL_WIN; ++w) { unsigned int inc; inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], CC_MIN_INCR); t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | (w << 16) | (beta[w] << 13) | inc); } } } /** * t3_read_hw_mtus - returns the values in the HW MTU table * @adap: the adapter * @mtus: where to store the HW MTU values * * Reads the HW MTU table. */ void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS]) { int i; for (i = 0; i < NMTUS; ++i) { unsigned int val; t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i); val = t3_read_reg(adap, A_TP_MTU_TABLE); mtus[i] = val & 0x3fff; } } /** * t3_get_cong_cntl_tab - reads the congestion control table * @adap: the adapter * @incr: where to store the alpha values * * Reads the additive increments programmed into the HW congestion * control table. */ void t3_get_cong_cntl_tab(adapter_t *adap, unsigned short incr[NMTUS][NCCTRL_WIN]) { unsigned int mtu, w; for (mtu = 0; mtu < NMTUS; ++mtu) for (w = 0; w < NCCTRL_WIN; ++w) { t3_write_reg(adap, A_TP_CCTRL_TABLE, 0xffff0000 | (mtu << 5) | w); incr[mtu][w] = (unsigned short)t3_read_reg(adap, A_TP_CCTRL_TABLE) & 0x1fff; } } /** * t3_tp_get_mib_stats - read TP's MIB counters * @adap: the adapter * @tps: holds the returned counter values * * Returns the values of TP's MIB counters. */ void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps) { t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps, sizeof(*tps) / sizeof(u32), 0); } /** * t3_read_pace_tbl - read the pace table * @adap: the adapter * @pace_vals: holds the returned values * * Returns the values of TP's pace table in nanoseconds. 
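 *
 * Usage sketch:
 *
 *	unsigned int pace[NTX_SCHED];
 *
 *	t3_read_pace_tbl(adap, pace);
 *
 * after which pace[i] is the i-th pace table entry in ns.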
*/ void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED]) { unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000); for (i = 0; i < NTX_SCHED; i++) { t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i); pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns; } } /** * t3_set_pace_tbl - set the pace table * @adap: the adapter * @pace_vals: the pace values in nanoseconds * @start: index of the first entry in the HW pace table to set * @n: how many entries to set * * Sets (a subset of the) HW pace table. */ void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals, unsigned int start, unsigned int n) { unsigned int tick_ns = dack_ticks_to_usec(adap, 1000); for ( ; n; n--, start++, pace_vals++) t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | ((*pace_vals + tick_ns / 2) / tick_ns)); } #define ulp_region(adap, name, start, len) \ t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \ t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \ (start) + (len) - 1); \ start += len #define ulptx_region(adap, name, start, len) \ t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \ t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \ (start) + (len) - 1) static void ulp_config(adapter_t *adap, const struct tp_params *p) { unsigned int m = p->chan_rx_size; ulp_region(adap, ISCSI, m, p->chan_rx_size / 8); ulp_region(adap, TDDP, m, p->chan_rx_size / 8); ulptx_region(adap, TPT, m, p->chan_rx_size / 4); ulp_region(adap, STAG, m, p->chan_rx_size / 4); ulp_region(adap, RQ, m, p->chan_rx_size / 4); ulptx_region(adap, PBL, m, p->chan_rx_size / 4); ulp_region(adap, PBL, m, p->chan_rx_size / 4); t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff); } /** * t3_set_proto_sram - set the contents of the protocol sram * @adapter: the adapter * @data: the protocol image * * Write the contents of the protocol SRAM. */ int t3_set_proto_sram(adapter_t *adap, const u8 *data) { int i; const u32 *buf = (const u32 *)data; for (i = 0; i < PROTO_SRAM_LINES; i++) { t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++)); t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++)); t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++)); t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++)); t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++)); t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31); if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1)) return -EIO; } return 0; } /** * t3_config_trace_filter - configure one of the tracing filters * @adapter: the adapter * @tp: the desired trace filter parameters * @filter_index: which filter to configure * @invert: if set non-matching packets are traced instead of matching ones * @enable: whether to enable or disable the filter * * Configures one of the tracing filters available in HW. */ void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp, int filter_index, int invert, int enable) { u32 addr, key[4], mask[4]; key[0] = tp->sport | (tp->sip << 16); key[1] = (tp->sip >> 16) | (tp->dport << 16); key[2] = tp->dip; key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20); mask[0] = tp->sport_mask | (tp->sip_mask << 16); mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16); mask[2] = tp->dip_mask; mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20); if (invert) key[3] |= (1 << 29); if (enable) key[3] |= (1 << 28); addr = filter_index ? 
A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0; tp_wr_indirect(adapter, addr++, key[0]); tp_wr_indirect(adapter, addr++, mask[0]); tp_wr_indirect(adapter, addr++, key[1]); tp_wr_indirect(adapter, addr++, mask[1]); tp_wr_indirect(adapter, addr++, key[2]); tp_wr_indirect(adapter, addr++, mask[2]); tp_wr_indirect(adapter, addr++, key[3]); tp_wr_indirect(adapter, addr, mask[3]); (void) t3_read_reg(adapter, A_TP_PIO_DATA); } /** * t3_query_trace_filter - query a tracing filter * @adapter: the adapter * @tp: the current trace filter parameters * @filter_index: which filter to query * @inverted: non-zero if the filter is inverted * @enabled: non-zero if the filter is enabled * * Returns the current settings of the specified HW tracing filter. */ void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp, int filter_index, int *inverted, int *enabled) { u32 addr, key[4], mask[4]; addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0; key[0] = tp_rd_indirect(adapter, addr++); mask[0] = tp_rd_indirect(adapter, addr++); key[1] = tp_rd_indirect(adapter, addr++); mask[1] = tp_rd_indirect(adapter, addr++); key[2] = tp_rd_indirect(adapter, addr++); mask[2] = tp_rd_indirect(adapter, addr++); key[3] = tp_rd_indirect(adapter, addr++); mask[3] = tp_rd_indirect(adapter, addr); tp->sport = key[0] & 0xffff; tp->sip = (key[0] >> 16) | ((key[1] & 0xffff) << 16); tp->dport = key[1] >> 16; tp->dip = key[2]; tp->proto = key[3] & 0xff; tp->vlan = key[3] >> 8; tp->intf = key[3] >> 20; tp->sport_mask = mask[0] & 0xffff; tp->sip_mask = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16); tp->dport_mask = mask[1] >> 16; tp->dip_mask = mask[2]; tp->proto_mask = mask[3] & 0xff; tp->vlan_mask = mask[3] >> 8; tp->intf_mask = mask[3] >> 20; *inverted = key[3] & (1 << 29); *enabled = key[3] & (1 << 28); } /** * t3_config_sched - configure a HW traffic scheduler * @adap: the adapter * @kbps: target rate in Kbps * @sched: the scheduler index * * Configure a Tx HW scheduler for the target rate. */ int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched) { unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; unsigned int clk = adap->params.vpd.cclk * 1000; unsigned int selected_cpt = 0, selected_bpt = 0; if (kbps > 0) { kbps *= 125; /* -> bytes */ for (cpt = 1; cpt <= 255; cpt++) { tps = clk / cpt; bpt = (kbps + tps / 2) / tps; if (bpt > 0 && bpt <= 255) { v = bpt * tps; delta = v >= kbps ? v - kbps : kbps - v; if (delta < mindelta) { mindelta = delta; selected_cpt = cpt; selected_bpt = bpt; } } else if (selected_cpt) break; } if (!selected_cpt) return -EINVAL; } t3_write_reg(adap, A_TP_TM_PIO_ADDR, A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); v = t3_read_reg(adap, A_TP_TM_PIO_DATA); if (sched & 1) v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24); else v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8); t3_write_reg(adap, A_TP_TM_PIO_DATA, v); return 0; } /** * t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler * @adap: the adapter * @sched: the scheduler index * @ipg: the interpacket delay in tenths of nanoseconds * * Set the interpacket delay for a HW packet rate scheduler. 
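 *
 * Worked example of the conversion below (clock value illustrative):
 * with a 200 MHz core clock, core_ticks_per_usec() is 200, so an ipg
 * of 10000 tenths-of-ns (1 us) becomes (10000 * 200 + 5000) / 10000 =
 * 200 core clocks, which is the value programmed into the register.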
*/ int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg) { unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; /* convert ipg to nearest number of core clocks */ ipg *= core_ticks_per_usec(adap); ipg = (ipg + 5000) / 10000; if (ipg > 0xffff) return -EINVAL; t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr); v = t3_read_reg(adap, A_TP_TM_PIO_DATA); if (sched & 1) v = (v & 0xffff) | (ipg << 16); else v = (v & 0xffff0000) | ipg; t3_write_reg(adap, A_TP_TM_PIO_DATA, v); t3_read_reg(adap, A_TP_TM_PIO_DATA); return 0; } /** * t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler * @adap: the adapter * @sched: the scheduler index * @kbps: the byte rate in Kbps * @ipg: the interpacket delay in tenths of nanoseconds * * Return the current configuration of a HW Tx scheduler. */ void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps, unsigned int *ipg) { unsigned int v, addr, bpt, cpt; if (kbps) { addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr); v = t3_read_reg(adap, A_TP_TM_PIO_DATA); if (sched & 1) v >>= 16; bpt = (v >> 8) & 0xff; cpt = v & 0xff; if (!cpt) *kbps = 0; /* scheduler disabled */ else { v = (adap->params.vpd.cclk * 1000) / cpt; *kbps = (v * bpt) / 125; } } if (ipg) { addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr); v = t3_read_reg(adap, A_TP_TM_PIO_DATA); if (sched & 1) v >>= 16; v &= 0xffff; *ipg = (10000 * v) / core_ticks_per_usec(adap); } } /** * tp_init - configure TP * @adap: the adapter * @p: TP configuration parameters * * Initializes the TP HW module. */ static int tp_init(adapter_t *adap, const struct tp_params *p) { int busy = 0; tp_config(adap, p); t3_set_vlan_accel(adap, 3, 0); if (is_offload(adap)) { tp_set_timers(adap, adap->params.vpd.cclk * 1000); t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE); busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE, 0, 1000, 5); if (busy) CH_ERR(adap, "TP initialization timed out\n"); } if (!busy) t3_write_reg(adap, A_TP_RESET, F_TPRESET); return busy; } /** * t3_mps_set_active_ports - configure port failover * @adap: the adapter * @port_mask: bitmap of active ports * * Sets the active ports according to the supplied bitmap. */ int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask) { if (port_mask & ~((1 << adap->params.nports) - 1)) return -EINVAL; t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE, port_mask << S_PORT0ACTIVE); return 0; } /** * chan_init_hw - channel-dependent HW initialization * @adap: the adapter * @chan_map: bitmap of Tx channels being used * * Perform the bits of HW initialization that are dependent on the Tx * channels being used. */ static void chan_init_hw(adapter_t *adap, unsigned int chan_map) { int i; if (chan_map != 3) { /* one channel */ t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0); t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0); t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT | (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE : F_TPTXPORT1EN | F_PORT1ACTIVE)); t3_write_reg(adap, A_PM1_TX_CFG, chan_map == 1 ? 
0xffffffff : 0); if (chan_map == 2) t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP, V_TX_MOD_QUEUE_REQ_MAP(0xff)); t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8); t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea); } else { /* two channels */ t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN); t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB); t3_write_reg(adap, A_ULPTX_DMA_WEIGHT, V_D1_WEIGHT(16) | V_D0_WEIGHT(16)); t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN | F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE | F_ENFORCEPKT); t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000); t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE); t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP, V_TX_MOD_QUEUE_REQ_MAP(0xaa)); for (i = 0; i < 16; i++) t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (i << 16) | 0x1010); t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98); t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc); } } static int calibrate_xgm(adapter_t *adapter) { if (uses_xaui(adapter)) { unsigned int v, i; for (i = 0; i < 5; ++i) { t3_write_reg(adapter, A_XGM_XAUI_IMP, 0); (void) t3_read_reg(adapter, A_XGM_XAUI_IMP); msleep(1); v = t3_read_reg(adapter, A_XGM_XAUI_IMP); if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) { t3_write_reg(adapter, A_XGM_XAUI_IMP, V_XAUIIMP(G_CALIMP(v) >> 2)); return 0; } } CH_ERR(adapter, "MAC calibration failed\n"); return -1; } else { t3_write_reg(adapter, A_XGM_RGMII_IMP, V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3)); t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, F_XGM_IMPSETUPDATE); } return 0; } static void calibrate_xgm_t3b(adapter_t *adapter) { if (!uses_xaui(adapter)) { t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET | F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3)); t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0); t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_XGM_IMPSETUPDATE); t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, 0); t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0); t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE); } } struct mc7_timing_params { unsigned char ActToPreDly; unsigned char ActToRdWrDly; unsigned char PreCyc; unsigned char RefCyc[5]; unsigned char BkCyc; unsigned char WrToRdDly; unsigned char RdToWrDly; }; /* * Write a value to a register and check that the write completed. These * writes normally complete in a cycle or two, so one read should suffice. * The very first read exists to flush the posted write to the device. 
*/ static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val) { t3_write_reg(adapter, addr, val); (void) t3_read_reg(adapter, addr); /* flush */ if (!(t3_read_reg(adapter, addr) & F_BUSY)) return 0; CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr); return -EIO; } static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type) { static const unsigned int mc7_mode[] = { 0x632, 0x642, 0x652, 0x432, 0x442 }; static const struct mc7_timing_params mc7_timings[] = { { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 }, { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 }, { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 }, { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 }, { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 } }; u32 val; unsigned int width, density, slow, attempts; adapter_t *adapter = mc7->adapter; const struct mc7_timing_params *p = &mc7_timings[mem_type]; if (!mc7->size) return 0; val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); slow = val & F_SLOW; width = G_WIDTH(val); density = G_DEN(val); t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN); val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */ msleep(1); if (!slow) { t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN); (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL); msleep(1); if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) & (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) { CH_ERR(adapter, "%s MC7 calibration timed out\n", mc7->name); goto out_fail; } } t3_write_reg(adapter, mc7->offset + A_MC7_PARM, V_ACTTOPREDLY(p->ActToPreDly) | V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) | V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) | V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly)); t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_CLKEN | F_TERM150); (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */ if (!slow) t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB, F_DLLENB); udelay(1); val = slow ? 
3 : 6; if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) || wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) || wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) || wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val)) goto out_fail; if (!slow) { t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100); t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0); udelay(5); } if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) || wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) || wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) || wrreg_wait(adapter, mc7->offset + A_MC7_MODE, mc7_mode[mem_type]) || wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) || wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val)) goto out_fail; /* clock value is in KHz */ mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */ mc7_clock /= 1000000; /* KHz->MHz, ns->us */ t3_write_reg(adapter, mc7->offset + A_MC7_REF, F_PERREFEN | V_PREREFDIV(mc7_clock)); (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */ t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN); t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0); t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0); t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END, (mc7->size << width) - 1); t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1)); (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */ attempts = 50; do { msleep(250); val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); } while ((val & F_BUSY) && --attempts); if (val & F_BUSY) { CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name); goto out_fail; } /* Enable normal memory accesses. */ t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY); return 0; out_fail: return -1; } static void config_pcie(adapter_t *adap) { static const u16 ack_lat[4][6] = { { 237, 416, 559, 1071, 2095, 4143 }, { 128, 217, 289, 545, 1057, 2081 }, { 73, 118, 154, 282, 538, 1050 }, { 67, 107, 86, 150, 278, 534 } }; static const u16 rpl_tmr[4][6] = { { 711, 1248, 1677, 3213, 6285, 12429 }, { 384, 651, 867, 1635, 3171, 6243 }, { 219, 354, 462, 846, 1614, 3150 }, { 201, 321, 258, 450, 834, 1602 } }; u16 val, devid; unsigned int log2_width, pldsize; unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt; t3_os_pci_read_config_2(adap, adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL, &val); pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5; /* * Gen2 adapter pcie bridge compatibility requires minimum * Max_Read_Request_size */ t3_os_pci_read_config_2(adap, 0x2, &devid); if (devid == 0x37) { t3_os_pci_write_config_2(adap, adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL, val & ~PCI_EXP_DEVCTL_READRQ & ~PCI_EXP_DEVCTL_PAYLOAD); pldsize = 0; } t3_os_pci_read_config_2(adap, adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL, &val); fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0)); fst_trn_rx = adap->params.rev == 0 ? 
fst_trn_tx : G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE)); log2_width = fls(adap->params.pci.width) - 1; acklat = ack_lat[log2_width][pldsize]; if (val & 1) /* check LOsEnable */ acklat += fst_trn_tx * 4; rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4; if (adap->params.rev == 0) t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_T3A_ACKLAT(M_T3A_ACKLAT), V_T3A_ACKLAT(acklat)); else t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT), V_ACKLAT(acklat)); t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT), V_REPLAYLMT(rpllmt)); t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff); t3_set_reg_field(adap, A_PCIE_CFG, 0, F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST | F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN); } /** * t3_init_hw - initialize and configure T3 HW modules * @adapter: the adapter * @fw_params: initial parameters to pass to firmware (optional) * * Initialize and configure T3 HW modules. This performs the * initialization steps that need to be done once after a card is reset. * MAC and PHY initialization is handled separately whenever a port is * enabled. * * @fw_params are passed to FW and their value is platform dependent. * Only the top 8 bits are available for use, the rest must be 0. */ int t3_init_hw(adapter_t *adapter, u32 fw_params) { int err = -EIO, attempts, i; const struct vpd_params *vpd = &adapter->params.vpd; if (adapter->params.rev > 0) calibrate_xgm_t3b(adapter); else if (calibrate_xgm(adapter)) goto out_err; if (adapter->params.nports > 2) t3_mac_init(&adap2pinfo(adapter, 0)->mac); if (vpd->mclk) { partition_mem(adapter, &adapter->params.tp); if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) || mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) || mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) || t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers, adapter->params.mc5.nfilters, adapter->params.mc5.nroutes)) goto out_err; for (i = 0; i < 32; i++) if (clear_sge_ctxt(adapter, i, F_CQ)) goto out_err; } if (tp_init(adapter, &adapter->params.tp)) goto out_err; t3_tp_set_coalescing_size(adapter, min(adapter->params.sge.max_pkt_size, MAX_RX_COALESCING_LEN), 1); t3_tp_set_max_rxsize(adapter, min(adapter->params.sge.max_pkt_size, 16384U)); ulp_config(adapter, &adapter->params.tp); if (is_pcie(adapter)) config_pcie(adapter); else t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_DMASTOPEN | F_CLIDECEN); if (adapter->params.rev == T3_REV_C) t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0, F_CFG_CQE_SOP_MASK); t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff); t3_write_reg(adapter, A_PM1_RX_MODE, 0); t3_write_reg(adapter, A_PM1_TX_MODE, 0); chan_init_hw(adapter, adapter->params.chan_map); t3_sge_init(adapter, &adapter->params.sge); t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN); t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter)); t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params); t3_write_reg(adapter, A_CIM_BOOT_CFG, V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2)); (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */ attempts = 100; do { /* wait for uP to initialize */ msleep(20); } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts); if (!attempts) { CH_ERR(adapter, "uP initialization timed out\n"); goto out_err; } err = 0; out_err: return err; } /** * get_pci_mode - determine a card's PCI mode * @adapter: the adapter * @p: where to store the PCI settings * * Determines a card's PCI mode and associated parameters, such as speed * and width. 
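 *
 * For PCIe cards the negotiated link width is taken from bits 9:4 of
 * the Link Status register, so e.g. an x8 link stores 8 in p->width;
 * for PCI/PCI-X the mode register supplies both speed and width.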
*/ static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p) { static unsigned short speed_map[] = { 33, 66, 100, 133 }; u32 pci_mode, pcie_cap; pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); if (pcie_cap) { u16 val; p->variant = PCI_VARIANT_PCIE; p->pcie_cap_addr = pcie_cap; t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val); p->width = (val >> 4) & 0x3f; return; } pci_mode = t3_read_reg(adapter, A_PCIX_MODE); p->speed = speed_map[G_PCLKRANGE(pci_mode)]; p->width = (pci_mode & F_64BIT) ? 64 : 32; pci_mode = G_PCIXINITPAT(pci_mode); if (pci_mode == 0) p->variant = PCI_VARIANT_PCI; else if (pci_mode < 4) p->variant = PCI_VARIANT_PCIX_MODE1_PARITY; else if (pci_mode < 8) p->variant = PCI_VARIANT_PCIX_MODE1_ECC; else p->variant = PCI_VARIANT_PCIX_266_MODE2; } /** * init_link_config - initialize a link's SW state * @lc: structure holding the link state * @caps: link capabilities * * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/duplex/flow-control/autonegotiation * settings. */ static void __devinit init_link_config(struct link_config *lc, unsigned int caps) { lc->supported = caps; lc->requested_speed = lc->speed = SPEED_INVALID; lc->requested_duplex = lc->duplex = DUPLEX_INVALID; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; if (lc->supported & SUPPORTED_Autoneg) { lc->advertising = lc->supported; lc->autoneg = AUTONEG_ENABLE; lc->requested_fc |= PAUSE_AUTONEG; } else { lc->advertising = 0; lc->autoneg = AUTONEG_DISABLE; } } /** * mc7_calc_size - calculate MC7 memory size * @cfg: the MC7 configuration * * Calculates the size of an MC7 memory in bytes from the value of its * configuration register. */ static unsigned int __devinit mc7_calc_size(u32 cfg) { unsigned int width = G_WIDTH(cfg); unsigned int banks = !!(cfg & F_BKS) + 1; unsigned int org = !!(cfg & F_ORG) + 1; unsigned int density = G_DEN(cfg); unsigned int MBs = ((256 << density) * banks) / (org << width); return MBs << 20; } static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7, unsigned int base_addr, const char *name) { u32 cfg; mc7->adapter = adapter; mc7->name = name; mc7->offset = base_addr - MC7_PMRX_BASE_ADDR; cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg); mc7->width = G_WIDTH(cfg); } void mac_prep(struct cmac *mac, adapter_t *adapter, int index) { u16 devid; mac->adapter = adapter; mac->multiport = adapter->params.nports > 2; if (mac->multiport) { mac->ext_port = (unsigned char)index; mac->nucast = 8; } else mac->nucast = 1; /* Gen2 adapter uses VPD xauicfg[] to notify driver which MAC is connected to each port; it's supposed to be using xgmac0 for both ports */ t3_os_pci_read_config_2(adapter, 0x2, &devid); if (mac->multiport || (!adapter->params.vpd.xauicfg[1] && (devid==0x37))) index = 0; mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index; if (adapter->params.rev == 0 && uses_xaui(adapter)) { t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset, is_10G(adapter) ? 0x2901c04 : 0x2301c04); t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset, F_ENRGMII, 0); } } /** * early_hw_init - HW initialization done at card detection time * @adapter: the adapter * @ai: contains information about the adapter type and properties * * Performs the part of HW initialization that is done early on when the * driver first detects the card. Most of the HW state is initialized * lazily later on when a port or an offload function is first used. 
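 *
 * As one example of what "early" covers, the I2C clock divider is set
 * up here: with the core clock expressed in kHz, cclk / 80 - 1 yields
 * an I2C bus clock of roughly 80 kHz.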
*/ void early_hw_init(adapter_t *adapter, const struct adapter_info *ai) { u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ? 3 : 2); u32 gpio_out = ai->gpio_out; mi1_init(adapter, ai); t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */ V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1)); t3_write_reg(adapter, A_T3DBG_GPIO_EN, gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL); t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0); t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff)); if (adapter->params.rev == 0 || !uses_xaui(adapter)) val |= F_ENRGMII; /* Enable MAC clocks so we can access the registers */ t3_write_reg(adapter, A_XGM_PORT_CFG, val); (void) t3_read_reg(adapter, A_XGM_PORT_CFG); val |= F_CLKDIVRESET_; t3_write_reg(adapter, A_XGM_PORT_CFG, val); (void) t3_read_reg(adapter, A_XGM_PORT_CFG); t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val); (void) t3_read_reg(adapter, A_XGM_PORT_CFG); } /** * t3_reset_adapter - reset the adapter * @adapter: the adapter * * Reset the adapter. */ int t3_reset_adapter(adapter_t *adapter) { int i, save_and_restore_pcie = adapter->params.rev < T3_REV_B2 && is_pcie(adapter); uint16_t devid = 0; if (save_and_restore_pcie) t3_os_pci_save_state(adapter); t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE); /* * Delay: give the device some time to reset fully. * XXX The delay time may need tuning. */ for (i = 0; i < 10; i++) { msleep(50); t3_os_pci_read_config_2(adapter, 0x00, &devid); if (devid == 0x1425) break; } if (devid != 0x1425) return -1; if (save_and_restore_pcie) t3_os_pci_restore_state(adapter); return 0; } static int init_parity(adapter_t *adap) { int i, err, addr; if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) return -EBUSY; for (err = i = 0; !err && i < 16; i++) err = clear_sge_ctxt(adap, i, F_EGRESS); for (i = 0xfff0; !err && i <= 0xffff; i++) err = clear_sge_ctxt(adap, i, F_EGRESS); for (i = 0; !err && i < SGE_QSETS; i++) err = clear_sge_ctxt(adap, i, F_RESPONSEQ); if (err) return err; t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0); for (i = 0; i < 4; i++) for (addr = 0; addr <= M_IBQDBGADDR; addr++) { t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN | F_IBQDBGWR | V_IBQDBGQID(i) | V_IBQDBGADDR(addr)); err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0, 2, 1); if (err) return err; } return 0; } /** * t3_prep_adapter - prepare SW and HW for operation * @adapter: the adapter * @ai: contains information about the adapter type and properties * * Initialize adapter SW state for the various HW modules, set initial * values for some adapter tunables, take PHYs out of reset, and * initialize the MDIO interface. */ int __devinit t3_prep_adapter(adapter_t *adapter, const struct adapter_info *ai, int reset) { int ret; unsigned int i, j = 0; get_pci_mode(adapter, &adapter->params.pci); adapter->params.info = ai; adapter->params.nports = ai->nports0 + ai->nports1; adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1); adapter->params.rev = t3_read_reg(adapter, A_PL_REV); /* * We used to only run the "adapter check task" once a second if * we had PHYs which didn't support interrupts (we would check * their link status once a second). Now we check other conditions * in that routine which would [potentially] impose a very high * interrupt load on the system. As such, we now always scan the * adapter state once a second ... 
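 * (The value 10 assigned below is consistent with linkpoll_period
 * being kept in tenths of a second, i.e. a 1 second scan interval.)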
*/ adapter->params.linkpoll_period = 10; if (adapter->params.nports > 2) adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS; else adapter->params.stats_update_period = is_10G(adapter) ? MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10); adapter->params.pci.vpd_cap_addr = t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD); ret = get_vpd_params(adapter, &adapter->params.vpd); if (ret < 0) return ret; if (reset && t3_reset_adapter(adapter)) return -1; if (adapter->params.vpd.mclk) { struct tp_params *p = &adapter->params.tp; mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX"); mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX"); mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM"); p->nchan = adapter->params.chan_map == 3 ? 2 : 1; p->pmrx_size = t3_mc7_size(&adapter->pmrx); p->pmtx_size = t3_mc7_size(&adapter->pmtx); p->cm_size = t3_mc7_size(&adapter->cm); p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */ p->chan_tx_size = p->pmtx_size / p->nchan; p->rx_pg_size = 64 * 1024; p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024; p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size); p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size); p->ntimer_qs = p->cm_size >= (128 << 20) || adapter->params.rev > 0 ? 12 : 6; p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) - 1; p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */ } adapter->params.offload = t3_mc7_size(&adapter->pmrx) && t3_mc7_size(&adapter->pmtx) && t3_mc7_size(&adapter->cm); t3_sge_prep(adapter, &adapter->params.sge); if (is_offload(adapter)) { adapter->params.mc5.nservers = DEFAULT_NSERVERS; /* PR 6487. TOE and filtering are mutually exclusive */ adapter->params.mc5.nfilters = 0; adapter->params.mc5.nroutes = 0; t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT); init_mtus(adapter->params.mtus); init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); } early_hw_init(adapter, ai); ret = init_parity(adapter); if (ret) return ret; if (adapter->params.nports > 2 && (ret = t3_vsc7323_init(adapter, adapter->params.nports))) return ret; for_each_port(adapter, i) { u8 hw_addr[6]; const struct port_type_info *pti; struct port_info *p = adap2pinfo(adapter, i); for (;;) { unsigned port_type = adapter->params.vpd.port_type[j]; if (port_type) { if (port_type < ARRAY_SIZE(port_types)) { pti = &port_types[port_type]; break; } else return -EINVAL; } j++; if (j >= ARRAY_SIZE(adapter->params.vpd.port_type)) return -EINVAL; } ret = pti->phy_prep(p, ai->phy_base_addr + j, ai->mdio_ops); if (ret) return ret; mac_prep(&p->mac, adapter, j); ++j; /* * The VPD EEPROM stores the base Ethernet address for the * card. A port's address is derived from the base by adding * the port's index to the base's low octet. */ memcpy(hw_addr, adapter->params.vpd.eth_base, 5); hw_addr[5] = adapter->params.vpd.eth_base[5] + i; t3_os_set_hw_addr(adapter, i, hw_addr); init_link_config(&p->link_config, p->phy.caps); p->phy.ops->power_down(&p->phy, 1); /* * If the PHY doesn't support interrupts for link status * changes, schedule a scan of the adapter links at least * once a second. */ if (!(p->phy.caps & SUPPORTED_IRQ) && adapter->params.linkpoll_period > 10) adapter->params.linkpoll_period = 10; } return 0; } /** * t3_reinit_adapter - prepare HW for operation again * @adapter: the adapter * * Put HW in the same state as @t3_prep_adapter without any changes to * SW state. 
This is a cut down version of @t3_prep_adapter intended * to be used after events that wipe out HW state but preserve SW state, * e.g., EEH. The device must be reset before calling this. */ int t3_reinit_adapter(adapter_t *adap) { unsigned int i; int ret, j = 0; early_hw_init(adap, adap->params.info); ret = init_parity(adap); if (ret) return ret; if (adap->params.nports > 2 && (ret = t3_vsc7323_init(adap, adap->params.nports))) return ret; for_each_port(adap, i) { const struct port_type_info *pti; struct port_info *p = adap2pinfo(adap, i); for (;;) { unsigned port_type = adap->params.vpd.port_type[j]; if (port_type) { if (port_type < ARRAY_SIZE(port_types)) { pti = &port_types[port_type]; break; } else return -EINVAL; } j++; if (j >= ARRAY_SIZE(adap->params.vpd.port_type)) return -EINVAL; } ret = pti->phy_prep(p, p->phy.addr, NULL); if (ret) return ret; p->phy.ops->power_down(&p->phy, 1); } return 0; } void t3_led_ready(adapter_t *adapter) { t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, F_GPIO0_OUT_VAL); } void t3_port_failover(adapter_t *adapter, int port) { u32 val; val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE; t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE, val); } void t3_failover_done(adapter_t *adapter, int port) { t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE, F_PORT0ACTIVE | F_PORT1ACTIVE); } void t3_failover_clear(adapter_t *adapter) { t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE, F_PORT0ACTIVE | F_PORT1ACTIVE); } static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val) { u32 v; t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr); if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 0, 10, 10, &v)) return -EIO; *val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA); return 0; } static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val) { u32 v; t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val); addr |= F_HOSTWRITE; t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr); if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 0, 10, 5, &v)) return -EIO; return 0; } int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index, u32 *size, void *data) { u32 v, *buf = data; int i, cnt, ret; if (*size < LA_ENTRIES * 4) return -EINVAL; ret = t3_cim_hac_read(adapter, LA_CTRL, &v); if (ret) goto out; *stopped = !(v & 1); /* Freeze LA */ if (!*stopped) { ret = t3_cim_hac_write(adapter, LA_CTRL, 0); if (ret) goto out; } for (i = 0; i < LA_ENTRIES; i++) { v = (i << 2) | (1 << 1); ret = t3_cim_hac_write(adapter, LA_CTRL, v); if (ret) goto out; ret = t3_cim_hac_read(adapter, LA_CTRL, &v); if (ret) goto out; cnt = 20; while ((v & (1 << 1)) && cnt) { udelay(5); --cnt; ret = t3_cim_hac_read(adapter, LA_CTRL, &v); if (ret) goto out; } if (v & (1 << 1)) return -EIO; ret = t3_cim_hac_read(adapter, LA_DATA, &v); if (ret) goto out; *buf++ = v; } ret = t3_cim_hac_read(adapter, LA_CTRL, &v); if (ret) goto out; *index = (v >> 16) + 4; *size = LA_ENTRIES * 4; out: /* Unfreeze LA */ t3_cim_hac_write(adapter, LA_CTRL, 1); return ret; } int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data) { u32 v, *buf = data; int i, j, ret; if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry)) return -EINVAL; for (i = 0; i < 4; i++) { ret = t3_cim_hac_read(adapter, (4 * i), &v); if (ret) goto out; *buf++ = v; } for (i = 0; i < IOQ_ENTRIES; i++) { u32 base_addr = 0x10 * (i + 1); for (j = 0; j < 4; j++) { ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v); if (ret) goto out; *buf++ = v; } } *size = IOQ_ENTRIES * 
sizeof(struct t3_ioq_entry); out: return ret; } Index: stable/9/sys/dev/cxgb/common/cxgb_tn1010.c =================================================================== --- stable/9/sys/dev/cxgb/common/cxgb_tn1010.c (revision 277343) +++ stable/9/sys/dev/cxgb/common/cxgb_tn1010.c (revision 277344) @@ -1,221 +1,222 @@ /************************************************************************** Copyright (c) 2008, Chelsio Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Chelsio Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #undef msleep #define msleep t3_os_sleep /* TN1010 PHY specific registers. */ enum { TN1010_VEND1_STAT = 1, }; /* IEEE auto-negotiation 10GBASE-T registers */ enum { ANEG_ADVER = 16, ANEG_LPA = 19, ANEG_10G_CTRL = 32, ANEG_10G_STAT = 33 }; #define ADVERTISE_ENPAGE (1 << 12) #define ADVERTISE_10000FULL (1 << 12) #define ADVERTISE_LOOP_TIMING (1 << 0) /* vendor specific status register fields */ #define F_XS_LANE_ALIGN_STAT (1 << 0) #define F_PCS_BLK_LOCK (1 << 1) #define F_PMD_SIGNAL_OK (1 << 2) #define F_LINK_STAT (1 << 3) #define F_ANEG_SPEED_1G (1 << 4) #define F_ANEG_MASTER (1 << 5) #define S_ANEG_STAT 6 #define M_ANEG_STAT 0x3 #define G_ANEG_STAT(x) (((x) >> S_ANEG_STAT) & M_ANEG_STAT) enum { /* autonegotiation status */ ANEG_IN_PROGR = 0, ANEG_COMPLETE = 1, ANEG_FAILED = 3 }; /* * Reset the PHY. May take up to 500ms to complete. */ static int tn1010_reset(struct cphy *phy, int wait) { int err = t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait); msleep(500); return err; } static int tn1010_power_down(struct cphy *phy, int enable) { return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR, BMCR_PDOWN, enable ? 
BMCR_PDOWN : 0); } static int tn1010_autoneg_enable(struct cphy *phy) { int err; err = tn1010_power_down(phy, 0); if (!err) err = t3_mdio_change_bits(phy, MDIO_DEV_ANEG, MII_BMCR, 0, BMCR_ANENABLE | BMCR_ANRESTART); return err; } static int tn1010_autoneg_restart(struct cphy *phy) { int err; err = tn1010_power_down(phy, 0); if (!err) err = t3_mdio_change_bits(phy, MDIO_DEV_ANEG, MII_BMCR, 0, BMCR_ANRESTART); return err; } static int tn1010_advertise(struct cphy *phy, unsigned int advert) { int err, val; if (!(advert & ADVERTISED_1000baseT_Full)) return -EINVAL; /* PHY can't disable 1000BASE-T */ val = ADVERTISE_CSMA | ADVERTISE_ENPAGE | ADVERTISE_NPAGE; if (advert & ADVERTISED_Pause) val |= ADVERTISE_PAUSE_CAP; if (advert & ADVERTISED_Asym_Pause) val |= ADVERTISE_PAUSE_ASYM; err = mdio_write(phy, MDIO_DEV_ANEG, ANEG_ADVER, val); if (err) return err; val = (advert & ADVERTISED_10000baseT_Full) ? ADVERTISE_10000FULL : 0; return mdio_write(phy, MDIO_DEV_ANEG, ANEG_10G_CTRL, val | ADVERTISE_LOOP_TIMING); } -static int tn1010_get_link_status(struct cphy *phy, int *link_ok, +static int tn1010_get_link_status(struct cphy *phy, int *link_state, int *speed, int *duplex, int *fc) { unsigned int status, lpa, adv; int err, sp = -1, pause = 0; err = mdio_read(phy, MDIO_DEV_VEND1, TN1010_VEND1_STAT, &status); if (err) return err; - if (link_ok) - *link_ok = (status & F_LINK_STAT) != 0; + if (link_state) + *link_state = status & F_LINK_STAT ? PHY_LINK_UP : + PHY_LINK_DOWN; if (G_ANEG_STAT(status) == ANEG_COMPLETE) { sp = (status & F_ANEG_SPEED_1G) ? SPEED_1000 : SPEED_10000; if (fc) { err = mdio_read(phy, MDIO_DEV_ANEG, ANEG_LPA, &lpa); if (!err) err = mdio_read(phy, MDIO_DEV_ANEG, ANEG_ADVER, &adv); if (err) return err; if (lpa & adv & ADVERTISE_PAUSE_CAP) pause = PAUSE_RX | PAUSE_TX; else if ((lpa & ADVERTISE_PAUSE_CAP) && (lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_ASYM)) pause = PAUSE_TX; else if ((lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_CAP)) pause = PAUSE_RX; } } if (speed) *speed = sp; if (duplex) *duplex = DUPLEX_FULL; if (fc) *fc = pause; return 0; } static int tn1010_set_speed_duplex(struct cphy *phy, int speed, int duplex) { return -EINVAL; /* require autoneg */ } #ifdef C99_NOT_SUPPORTED static struct cphy_ops tn1010_ops = { tn1010_reset, t3_phy_lasi_intr_enable, t3_phy_lasi_intr_disable, t3_phy_lasi_intr_clear, t3_phy_lasi_intr_handler, tn1010_autoneg_enable, tn1010_autoneg_restart, tn1010_advertise, NULL, tn1010_set_speed_duplex, tn1010_get_link_status, tn1010_power_down, }; #else static struct cphy_ops tn1010_ops = { .reset = tn1010_reset, .intr_enable = t3_phy_lasi_intr_enable, .intr_disable = t3_phy_lasi_intr_disable, .intr_clear = t3_phy_lasi_intr_clear, .intr_handler = t3_phy_lasi_intr_handler, .autoneg_enable = tn1010_autoneg_enable, .autoneg_restart = tn1010_autoneg_restart, .advertise = tn1010_advertise, .set_speed_duplex = tn1010_set_speed_duplex, .get_link_status = tn1010_get_link_status, .power_down = tn1010_power_down, }; #endif int t3_tn1010_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { cphy_init(&pinfo->phy, pinfo->adapter, pinfo, phy_addr, &tn1010_ops, mdio_ops, SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_AUI | SUPPORTED_TP, "1000/10GBASE-T"); msleep(500); /* PHY needs up to 500ms to start responding to MDIO */ return 0; } Index: stable/9/sys/dev/cxgb/common/cxgb_vsc8211.c =================================================================== --- 
stable/9/sys/dev/cxgb/common/cxgb_vsc8211.c (revision 277343) +++ stable/9/sys/dev/cxgb/common/cxgb_vsc8211.c (revision 277344) @@ -1,463 +1,465 @@ /************************************************************************** Copyright (c) 2007, Chelsio Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Chelsio Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #undef msleep #define msleep t3_os_sleep /* VSC8211 PHY specific registers. */ enum { VSC8211_SIGDET_CTRL = 19, VSC8211_EXT_CTRL = 23, VSC8211_PHY_CTRL = 24, VSC8211_INTR_ENABLE = 25, VSC8211_INTR_STATUS = 26, VSC8211_LED_CTRL = 27, VSC8211_AUX_CTRL_STAT = 28, VSC8211_EXT_PAGE_AXS = 31, }; enum { VSC_INTR_RX_ERR = 1 << 0, VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */ VSC_INTR_CABLE = 1 << 2, /* cable impairment */ VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */ VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */ VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */ VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */ VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */ VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */ VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */ VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */ VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */ VSC_INTR_LINK_CHG = 1 << 13, /* link change */ VSC_INTR_SPD_CHG = 1 << 14, /* speed change */ VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */ }; enum { VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */ VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */ }; #define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \ VSC_INTR_NEG_DONE) #define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ VSC_INTR_ENABLE) /* PHY specific auxiliary control & status register fields */ #define S_ACSR_ACTIPHY_TMR 0 #define M_ACSR_ACTIPHY_TMR 0x3 #define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR) #define S_ACSR_SPEED 3 #define M_ACSR_SPEED 0x3 #define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED) #define S_ACSR_DUPLEX 5 #define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX) #define S_ACSR_ACTIPHY 6 #define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY) /* * Reset the PHY. This PHY completes reset immediately so we never wait. 
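 * (Hence the zero wait passed to t3_phy_reset() below, regardless of the
 * caller's wait argument.)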
*/ static int vsc8211_reset(struct cphy *cphy, int wait) { return t3_phy_reset(cphy, 0, 0); } static int vsc8211_intr_enable(struct cphy *cphy) { return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK); } static int vsc8211_intr_disable(struct cphy *cphy) { return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0); } static int vsc8211_intr_clear(struct cphy *cphy) { u32 val; /* Clear PHY interrupts by reading the register. */ return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val); } static int vsc8211_autoneg_enable(struct cphy *cphy) { return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, BMCR_ANENABLE | BMCR_ANRESTART); } static int vsc8211_autoneg_restart(struct cphy *cphy) { return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, BMCR_ANRESTART); } -static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok, +static int vsc8211_get_link_status(struct cphy *cphy, int *link_state, int *speed, int *duplex, int *fc) { unsigned int bmcr, status, lpa, adv; int err, sp = -1, dplx = -1, pause = 0; err = mdio_read(cphy, 0, MII_BMCR, &bmcr); if (!err) err = mdio_read(cphy, 0, MII_BMSR, &status); if (err) return err; - if (link_ok) { + if (link_state) { /* * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it * once more to get the current link state. */ if (!(status & BMSR_LSTATUS)) err = mdio_read(cphy, 0, MII_BMSR, &status); if (err) return err; - *link_ok = (status & BMSR_LSTATUS) != 0; + *link_state = status & BMSR_LSTATUS ? PHY_LINK_UP : + PHY_LINK_DOWN; } if (!(bmcr & BMCR_ANENABLE)) { dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; if (bmcr & BMCR_SPEED1000) sp = SPEED_1000; else if (bmcr & BMCR_SPEED100) sp = SPEED_100; else sp = SPEED_10; } else if (status & BMSR_ANEGCOMPLETE) { err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status); if (err) return err; dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; sp = G_ACSR_SPEED(status); if (sp == 0) sp = SPEED_10; else if (sp == 1) sp = SPEED_100; else sp = SPEED_1000; if (fc && dplx == DUPLEX_FULL) { err = mdio_read(cphy, 0, MII_LPA, &lpa); if (!err) err = mdio_read(cphy, 0, MII_ADVERTISE, &adv); if (err) return err; if (lpa & adv & ADVERTISE_PAUSE_CAP) pause = PAUSE_RX | PAUSE_TX; else if ((lpa & ADVERTISE_PAUSE_CAP) && (lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_ASYM)) pause = PAUSE_TX; else if ((lpa & ADVERTISE_PAUSE_ASYM) && (adv & ADVERTISE_PAUSE_CAP)) pause = PAUSE_RX; } } if (speed) *speed = sp; if (duplex) *duplex = dplx; if (fc) *fc = pause; return 0; } -static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok, +static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_state, int *speed, int *duplex, int *fc) { unsigned int bmcr, status, lpa, adv; int err, sp = -1, dplx = -1, pause = 0; err = mdio_read(cphy, 0, MII_BMCR, &bmcr); if (!err) err = mdio_read(cphy, 0, MII_BMSR, &status); if (err) return err; - if (link_ok) { + if (link_state) { /* * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it * once more to get the current link state. */ if (!(status & BMSR_LSTATUS)) err = mdio_read(cphy, 0, MII_BMSR, &status); if (err) return err; - *link_ok = (status & BMSR_LSTATUS) != 0; + *link_state = status & BMSR_LSTATUS ? PHY_LINK_UP : + PHY_LINK_DOWN; } if (!(bmcr & BMCR_ANENABLE)) { dplx = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; if (bmcr & BMCR_SPEED1000) sp = SPEED_1000; else if (bmcr & BMCR_SPEED100) sp = SPEED_100; else sp = SPEED_10; } else if (status & BMSR_ANEGCOMPLETE) { err = mdio_read(cphy, 0, MII_LPA, &lpa); if (!err) err = mdio_read(cphy, 0, MII_ADVERTISE, &adv); if (err) return err; if (adv & lpa & ADVERTISE_1000XFULL) { dplx = DUPLEX_FULL; sp = SPEED_1000; } else if (adv & lpa & ADVERTISE_1000XHALF) { dplx = DUPLEX_HALF; sp = SPEED_1000; } if (fc && dplx == DUPLEX_FULL) { if (lpa & adv & ADVERTISE_1000XPAUSE) pause = PAUSE_RX | PAUSE_TX; else if ((lpa & ADVERTISE_1000XPAUSE) && (adv & lpa & ADVERTISE_1000XPSE_ASYM)) pause = PAUSE_TX; else if ((lpa & ADVERTISE_1000XPSE_ASYM) && (adv & ADVERTISE_1000XPAUSE)) pause = PAUSE_RX; } } if (speed) *speed = sp; if (duplex) *duplex = dplx; if (fc) *fc = pause; return 0; } /* * Enable/disable auto MDI/MDI-X in forced link speed mode. */ static int vsc8211_set_automdi(struct cphy *phy, int enable) { int err; if ((err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0x52b5)) != 0 || (err = mdio_write(phy, 0, 18, 0x12)) != 0 || (err = mdio_write(phy, 0, 17, enable ? 0x2803 : 0x3003)) != 0 || (err = mdio_write(phy, 0, 16, 0x87fa)) != 0 || (err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0)) != 0) return err; return 0; } static int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex) { int err; err = t3_set_phy_speed_duplex(phy, speed, duplex); if (!err) err = vsc8211_set_automdi(phy, 1); return err; } static int vsc8211_power_down(struct cphy *cphy, int enable) { return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN, enable ? BMCR_PDOWN : 0); } static int vsc8211_intr_handler(struct cphy *cphy) { unsigned int cause; int err, cphy_cause = 0; err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause); if (err) return err; cause &= INTR_MASK; if (cause & CFG_CHG_INTR_MASK) cphy_cause |= cphy_cause_link_change; if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO)) cphy_cause |= cphy_cause_fifo_error; return cphy_cause; } #ifdef C99_NOT_SUPPORTED static struct cphy_ops vsc8211_ops = { vsc8211_reset, vsc8211_intr_enable, vsc8211_intr_disable, vsc8211_intr_clear, vsc8211_intr_handler, vsc8211_autoneg_enable, vsc8211_autoneg_restart, t3_phy_advertise, NULL, vsc8211_set_speed_duplex, vsc8211_get_link_status, vsc8211_power_down, }; static struct cphy_ops vsc8211_fiber_ops = { vsc8211_reset, vsc8211_intr_enable, vsc8211_intr_disable, vsc8211_intr_clear, vsc8211_intr_handler, vsc8211_autoneg_enable, vsc8211_autoneg_restart, t3_phy_advertise_fiber, NULL, t3_set_phy_speed_duplex, vsc8211_get_link_status_fiber, vsc8211_power_down, }; #else static struct cphy_ops vsc8211_ops = { .reset = vsc8211_reset, .intr_enable = vsc8211_intr_enable, .intr_disable = vsc8211_intr_disable, .intr_clear = vsc8211_intr_clear, .intr_handler = vsc8211_intr_handler, .autoneg_enable = vsc8211_autoneg_enable, .autoneg_restart = vsc8211_autoneg_restart, .advertise = t3_phy_advertise, .set_speed_duplex = vsc8211_set_speed_duplex, .get_link_status = vsc8211_get_link_status, .power_down = vsc8211_power_down, }; static struct cphy_ops vsc8211_fiber_ops = { .reset = vsc8211_reset, .intr_enable = vsc8211_intr_enable, .intr_disable = vsc8211_intr_disable, .intr_clear = vsc8211_intr_clear, .intr_handler = vsc8211_intr_handler, .autoneg_enable = vsc8211_autoneg_enable, .autoneg_restart = vsc8211_autoneg_restart, .advertise = t3_phy_advertise_fiber, .set_speed_duplex = t3_set_phy_speed_duplex, .get_link_status = vsc8211_get_link_status_fiber, .power_down = vsc8211_power_down, }; #endif 
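/*
 * A minimal illustrative sketch (not part of this revision): with the
 * reworked contract above, get_link_status() reports a PHY_LINK_* state
 * through its first out-parameter instead of a link_ok boolean.  A caller
 * that only needs a yes/no answer could adapt along these lines; the
 * helper name is hypothetical.
 */
static int
example_link_is_up(struct cphy *phy)
{
	int state, speed, duplex, fc, err;

	err = phy->ops->get_link_status(phy, &state, &speed, &duplex, &fc);
	if (err)
		return (0);	/* treat an MDIO error as link down */
	return (state == PHY_LINK_UP);
}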
#define VSC8211_PHY_CTRL 24 #define S_VSC8211_TXFIFODEPTH 7 #define M_VSC8211_TXFIFODEPTH 0x7 #define V_VSC8211_TXFIFODEPTH(x) ((x) << S_VSC8211_TXFIFODEPTH) #define G_VSC8211_TXFIFODEPTH(x) (((x) >> S_VSC8211_TXFIFODEPTH) & M_VSC8211_TXFIFODEPTH) #define S_VSC8211_RXFIFODEPTH 4 #define M_VSC8211_RXFIFODEPTH 0x7 #define V_VSC8211_RXFIFODEPTH(x) ((x) << S_VSC8211_RXFIFODEPTH) #define G_VSC8211_RXFIFODEPTH(x) (((x) >> S_VSC8211_RXFIFODEPTH) & M_VSC8211_RXFIFODEPTH) int t3_vsc8211_fifo_depth(adapter_t *adap, unsigned int mtu, int port) { /* TX FIFO Depth set bits 9:7 to 100 (IEEE mode) */ unsigned int val = 4; unsigned int currentregval; unsigned int regval; int err; /* Retrieve the port info structure from adapter_t */ struct port_info *portinfo = adap2pinfo(adap, port); /* Which PHY is this? */ struct cphy *phy = &portinfo->phy; /* Read the current value of the PHY control register */ err = mdio_read(phy, 0, VSC8211_PHY_CTRL, &currentregval); if (err) return err; /* IEEE mode supports up to 1518 bytes */ /* mtu does not contain the header + FCS (18 bytes) */ if (mtu > 1500) /* * If using a packet size > 1500 set TX FIFO Depth bits * 9:7 to 011 (Jumbo packet mode) */ val = 3; regval = V_VSC8211_TXFIFODEPTH(val) | V_VSC8211_RXFIFODEPTH(val) | (currentregval & ~V_VSC8211_TXFIFODEPTH(M_VSC8211_TXFIFODEPTH) & ~V_VSC8211_RXFIFODEPTH(M_VSC8211_RXFIFODEPTH)); return mdio_write(phy, 0, VSC8211_PHY_CTRL, regval); } int t3_vsc8211_phy_prep(pinfo_t *pinfo, int phy_addr, const struct mdio_ops *mdio_ops) { struct cphy *phy = &pinfo->phy; int err; unsigned int val; cphy_init(&pinfo->phy, pinfo->adapter, pinfo, phy_addr, &vsc8211_ops, mdio_ops, SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T"); msleep(20); /* PHY needs ~10ms to start responding to MDIO */ err = mdio_read(phy, 0, VSC8211_EXT_CTRL, &val); if (err) return err; if (val & VSC_CTRL_MEDIA_MODE_HI) { /* copper interface, just need to configure the LEDs */ return mdio_write(phy, 0, VSC8211_LED_CTRL, 0x100); } phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ; phy->desc = "1000BASE-X"; phy->ops = &vsc8211_fiber_ops; if ((err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 1)) != 0 || (err = mdio_write(phy, 0, VSC8211_SIGDET_CTRL, 1)) != 0 || (err = mdio_write(phy, 0, VSC8211_EXT_PAGE_AXS, 0)) != 0 || (err = mdio_write(phy, 0, VSC8211_EXT_CTRL, val | VSC_CTRL_CLAUSE37_VIEW)) != 0 || (err = vsc8211_reset(phy, 0)) != 0) return err; udelay(5); /* delay after reset before next SMI */ return 0; } Index: stable/9/sys/dev/cxgb/cxgb_main.c =================================================================== --- stable/9/sys/dev/cxgb/cxgb_main.c (revision 277343) +++ stable/9/sys/dev/cxgb/cxgb_main.c (revision 277344) @@ -1,3529 +1,3530 @@ /************************************************************************** Copyright (c) 2007-2009, Chelsio Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Chelsio Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************************************************************************/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PRIV_SUPPORTED #include #endif static int cxgb_setup_interrupts(adapter_t *); static void cxgb_teardown_interrupts(adapter_t *); static void cxgb_init(void *); static int cxgb_init_locked(struct port_info *); static int cxgb_uninit_locked(struct port_info *); static int cxgb_uninit_synchronized(struct port_info *); static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t); static int cxgb_media_change(struct ifnet *); static int cxgb_ifm_type(int); static void cxgb_build_medialist(struct port_info *); static void cxgb_media_status(struct ifnet *, struct ifmediareq *); static int setup_sge_qsets(adapter_t *); static void cxgb_async_intr(void *); static void cxgb_tick_handler(void *, int); static void cxgb_tick(void *); static void link_check_callout(void *); static void check_link_status(void *, int); static void setup_rss(adapter_t *sc); static int alloc_filters(struct adapter *); static int setup_hw_filters(struct adapter *); static int set_filter(struct adapter *, int, const struct filter_info *); static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int, unsigned int, u64, u64); static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int, unsigned int, u64, u64); #ifdef TCP_OFFLOAD static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *); #endif /* Attachment glue for the PCI controller end of the device. Each port of * the device is attached separately, as defined later. 
*/ static int cxgb_controller_probe(device_t); static int cxgb_controller_attach(device_t); static int cxgb_controller_detach(device_t); static void cxgb_free(struct adapter *); static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, unsigned int end); static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf); static int cxgb_get_regs_len(void); static void touch_bars(device_t dev); static void cxgb_update_mac_settings(struct port_info *p); #ifdef TCP_OFFLOAD static int toe_capability(struct port_info *, int); #endif static device_method_t cxgb_controller_methods[] = { DEVMETHOD(device_probe, cxgb_controller_probe), DEVMETHOD(device_attach, cxgb_controller_attach), DEVMETHOD(device_detach, cxgb_controller_detach), DEVMETHOD_END }; static driver_t cxgb_controller_driver = { "cxgbc", cxgb_controller_methods, sizeof(struct adapter) }; static int cxgbc_mod_event(module_t, int, void *); static devclass_t cxgb_controller_devclass; DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, cxgbc_mod_event, 0); MODULE_VERSION(cxgbc, 1); MODULE_DEPEND(cxgbc, firmware, 1, 1, 1); /* * Attachment glue for the ports. Attachment is done directly to the * controller device. */ static int cxgb_port_probe(device_t); static int cxgb_port_attach(device_t); static int cxgb_port_detach(device_t); static device_method_t cxgb_port_methods[] = { DEVMETHOD(device_probe, cxgb_port_probe), DEVMETHOD(device_attach, cxgb_port_attach), DEVMETHOD(device_detach, cxgb_port_detach), { 0, 0 } }; static driver_t cxgb_port_driver = { "cxgb", cxgb_port_methods, 0 }; static d_ioctl_t cxgb_extension_ioctl; static d_open_t cxgb_extension_open; static d_close_t cxgb_extension_close; static struct cdevsw cxgb_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = cxgb_extension_open, .d_close = cxgb_extension_close, .d_ioctl = cxgb_extension_ioctl, .d_name = "cxgb", }; static devclass_t cxgb_port_devclass; DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0); MODULE_VERSION(cxgb, 1); static struct mtx t3_list_lock; static SLIST_HEAD(, adapter) t3_list; #ifdef TCP_OFFLOAD static struct mtx t3_uld_list_lock; static SLIST_HEAD(, uld_info) t3_uld_list; #endif /* * The driver uses the best interrupt scheme available on a platform in the * order MSI-X, MSI, legacy pin interrupts. This parameter determines which * of these schemes the driver may consider as follows: * * msi = 2: choose from among all three options * msi = 1 : only consider MSI and pin interrupts * msi = 0: force pin interrupts */ static int msi_allowed = 2; TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed); SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters"); SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0, "MSI-X, MSI, INTx selector"); /* * The driver uses an auto-queue algorithm by default. 
* To disable it and force a single queue-set per port, use multiq = 0 */ static int multiq = 1; TUNABLE_INT("hw.cxgb.multiq", &multiq); SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0, "use min(ncpus/ports, 8) queue-sets per port"); /* * By default the driver will not update the firmware unless * it was compiled against a newer version * */ static int force_fw_update = 0; TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update); SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0, "update firmware even if up to date"); int cxgb_use_16k_clusters = -1; TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters); SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN, &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue "); static int nfilters = -1; TUNABLE_INT("hw.cxgb.nfilters", &nfilters); SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN, &nfilters, 0, "max number of entries in the filter table"); enum { MAX_TXQ_ENTRIES = 16384, MAX_CTRL_TXQ_ENTRIES = 1024, MAX_RSPQ_ENTRIES = 16384, MAX_RX_BUFFERS = 16384, MAX_RX_JUMBO_BUFFERS = 16384, MIN_TXQ_ENTRIES = 4, MIN_CTRL_TXQ_ENTRIES = 4, MIN_RSPQ_ENTRIES = 32, MIN_FL_ENTRIES = 32, MIN_FL_JUMBO_ENTRIES = 32 }; struct filter_info { u32 sip; u32 sip_mask; u32 dip; u16 sport; u16 dport; u32 vlan:12; u32 vlan_prio:3; u32 mac_hit:1; u32 mac_idx:4; u32 mac_vld:1; u32 pkt_type:2; u32 report_filter_id:1; u32 pass:1; u32 rss:1; u32 qset:3; u32 locked:1; u32 valid:1; }; enum { FILTER_NO_VLAN_PRI = 7 }; #define EEPROM_MAGIC 0x38E2F10C #define PORT_MASK ((1 << MAX_NPORTS) - 1) /* Table for probing the cards. The desc field isn't actually used */ struct cxgb_ident { uint16_t vendor; uint16_t device; int index; char *desc; } cxgb_identifiers[] = { {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"}, {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"}, {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"}, {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"}, {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"}, {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"}, {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"}, {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"}, {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"}, {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"}, {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"}, {PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"}, {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"}, {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"}, {0, 0, 0, NULL} }; static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset); static __inline char t3rev2char(struct adapter *adapter) { char rev = 'z'; switch(adapter->params.rev) { case T3_REV_A: rev = 'a'; break; case T3_REV_B: case T3_REV_B2: rev = 'b'; break; case T3_REV_C: rev = 'c'; break; } return rev; } static struct cxgb_ident * cxgb_get_ident(device_t dev) { struct cxgb_ident *id; for (id = cxgb_identifiers; id->desc != NULL; id++) { if ((id->vendor == pci_get_vendor(dev)) && (id->device == pci_get_device(dev))) { return (id); } } return (NULL); } static const struct adapter_info * cxgb_get_adapter_info(device_t dev) { struct cxgb_ident *id; const struct adapter_info *ai; id = cxgb_get_ident(dev); if (id == NULL) return (NULL); ai = t3_get_adapter_info(id->index); return (ai); } static int cxgb_controller_probe(device_t dev) { const struct adapter_info *ai; char *ports, buf[80]; int nports; ai = cxgb_get_adapter_info(dev); if (ai == NULL) return (ENXIO); nports = ai->nports0 + ai->nports1; if (nports == 1) ports = "port"; else ports = "ports"; snprintf(buf, sizeof(buf), "%s, 
%d %s", ai->desc, nports, ports); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } #define FW_FNAME "cxgb_t3fw" #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom" #define TPSRAM_NAME "cxgb_t3%c_protocol_sram" static int upgrade_fw(adapter_t *sc) { const struct firmware *fw; int status; u32 vers; if ((fw = firmware_get(FW_FNAME)) == NULL) { device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME); return (ENOENT); } else device_printf(sc->dev, "installing firmware on card\n"); status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize); if (status != 0) { device_printf(sc->dev, "failed to install firmware: %d\n", status); } else { t3_get_fw_version(sc, &vers); snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers)); } firmware_put(fw, FIRMWARE_UNLOAD); return (status); } /* * The cxgb_controller_attach function is responsible for the initial * bringup of the device. Its responsibilities include: * * 1. Determine if the device supports MSI or MSI-X. * 2. Allocate bus resources so that we can access the Base Address Register * 3. Create and initialize mutexes for the controller and its control * logic such as SGE and MDIO. * 4. Call hardware specific setup routine for the adapter as a whole. * 5. Allocate the BAR for doing MSI-X. * 6. Setup the line interrupt iff MSI-X is not supported. * 7. Create the driver's taskq. * 8. Start one task queue service thread. * 9. Check if the firmware and SRAM are up-to-date. They will be * auto-updated later (before FULL_INIT_DONE), if required. * 10. Create a child device for each MAC (port) * 11. Initialize T3 private state. * 12. Trigger the LED * 13. Setup offload iff supported. * 14. Reset/restart the tick callout. * 15. Attach sysctls * * NOTE: Any modification or deviation from this list MUST be reflected in * the above comment. Failure to do so will result in problems on various * error conditions including link flapping. */ static int cxgb_controller_attach(device_t dev) { device_t child; const struct adapter_info *ai; struct adapter *sc; int i, error = 0; uint32_t vers; int port_qsets = 1; int msi_needed, reg; char buf[80]; sc = device_get_softc(dev); sc->dev = dev; sc->msi_count = 0; ai = cxgb_get_adapter_info(dev); snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d", device_get_unit(dev)); ADAPTER_LOCK_INIT(sc, sc->lockbuf); snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d", device_get_unit(dev)); snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d", device_get_unit(dev)); snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d", device_get_unit(dev)); MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN); MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF); MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF); mtx_lock(&t3_list_lock); SLIST_INSERT_HEAD(&t3_list, sc, link); mtx_unlock(&t3_list_lock); /* find the PCIe link width and set max read request to 4KB*/ if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { uint16_t lnk; lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2); sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4; if (sc->link_width < 8 && (ai->caps & SUPPORTED_10000baseT_Full)) { device_printf(sc->dev, "PCIe x%d Link, expect reduced performance\n", sc->link_width); } pci_set_max_read_req(dev, 4096); } touch_bars(dev); pci_enable_busmaster(dev); /* * Allocate the registers and make them available to the driver. 
* The registers that we care about for NIC mode are in BAR 0 */ sc->regs_rid = PCIR_BAR(0); if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate BAR region 0\n"); error = ENXIO; goto out; } sc->bt = rman_get_bustag(sc->regs_res); sc->bh = rman_get_bushandle(sc->regs_res); sc->mmio_len = rman_get_size(sc->regs_res); for (i = 0; i < MAX_NPORTS; i++) sc->port[i].adapter = sc; if (t3_prep_adapter(sc, ai, 1) < 0) { printf("prep adapter failed\n"); error = ENODEV; goto out; } sc->udbs_rid = PCIR_BAR(2); sc->udbs_res = NULL; if (is_offload(sc) && ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->udbs_rid, RF_ACTIVE)) == NULL)) { device_printf(dev, "Cannot allocate BAR region 1\n"); error = ENXIO; goto out; } /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate * enough messages for the queue sets. If that fails, try falling * back to MSI. If that fails, then try falling back to the legacy * interrupt pin model. */ sc->msix_regs_rid = 0x20; if ((msi_allowed >= 2) && (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->msix_regs_rid, RF_ACTIVE)) != NULL) { if (multiq) port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus); msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1; if (pci_msix_count(dev) == 0 || (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 || sc->msi_count != msi_needed) { device_printf(dev, "alloc msix failed - " "msi_count=%d, msi_needed=%d, err=%d; " "will try MSI\n", sc->msi_count, msi_needed, error); sc->msi_count = 0; port_qsets = 1; pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_regs_rid, sc->msix_regs_res); sc->msix_regs_res = NULL; } else { sc->flags |= USING_MSIX; sc->cxgb_intr = cxgb_async_intr; device_printf(dev, "using MSI-X interrupts (%u vectors)\n", sc->msi_count); } } if ((msi_allowed >= 1) && (sc->msi_count == 0)) { sc->msi_count = 1; if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) { device_printf(dev, "alloc msi failed - " "err=%d; will try INTx\n", error); sc->msi_count = 0; port_qsets = 1; pci_release_msi(dev); } else { sc->flags |= USING_MSI; sc->cxgb_intr = t3_intr_msi; device_printf(dev, "using MSI interrupts\n"); } } if (sc->msi_count == 0) { device_printf(dev, "using line interrupts\n"); sc->cxgb_intr = t3b_intr; } /* Create a private taskqueue thread for handling driver events */ sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->tq); if (sc->tq == NULL) { device_printf(dev, "failed to allocate controller task queue\n"); goto out; } taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev)); TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc); /* Create a periodic callout for checking adapter status */ callout_init(&sc->cxgb_tick_ch, TRUE); if (t3_check_fw_version(sc) < 0 || force_fw_update) { /* * Warn user that a firmware update will be attempted in init. */ device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n", FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); sc->flags &= ~FW_UPTODATE; } else { sc->flags |= FW_UPTODATE; } if (t3_check_tpsram_version(sc) < 0) { /* * Warn user that a firmware update will be attempted in init. */ device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n", t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); sc->flags &= ~TPS_UPTODATE; } else { sc->flags |= TPS_UPTODATE; } /* * Create a child device for each MAC. 
The ethernet attachment * will be done in these children. */ for (i = 0; i < (sc)->params.nports; i++) { struct port_info *pi; if ((child = device_add_child(dev, "cxgb", -1)) == NULL) { device_printf(dev, "failed to add child port\n"); error = EINVAL; goto out; } pi = &sc->port[i]; pi->adapter = sc; pi->nqsets = port_qsets; pi->first_qset = i*port_qsets; pi->port_id = i; pi->tx_chan = i >= ai->nports0; pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i; sc->rxpkt_map[pi->txpkt_intf] = i; sc->port[i].tx_chan = i >= ai->nports0; sc->portdev[i] = child; device_set_softc(child, pi); } if ((error = bus_generic_attach(dev)) != 0) goto out; /* initialize sge private state */ t3_sge_init_adapter(sc); t3_led_ready(sc); error = t3_get_fw_version(sc, &vers); if (error) goto out; snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers)); snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s", ai->desc, is_offload(sc) ? "R" : "", sc->params.vpd.ec, sc->params.vpd.sn); device_set_desc_copy(dev, buf); snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x", sc->params.vpd.port_type[0], sc->params.vpd.port_type[1], sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]); device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]); callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); t3_add_attach_sysctls(sc); #ifdef TCP_OFFLOAD for (i = 0; i < NUM_CPL_HANDLERS; i++) sc->cpl_handler[i] = cpl_not_handled; #endif t3_intr_clear(sc); error = cxgb_setup_interrupts(sc); out: if (error) cxgb_free(sc); return (error); } /* * The cxgb_controller_detach routine is called with the device is * unloaded from the system. */ static int cxgb_controller_detach(device_t dev) { struct adapter *sc; sc = device_get_softc(dev); cxgb_free(sc); return (0); } /* * The cxgb_free() is called by the cxgb_controller_detach() routine * to tear down the structures that were built up in * cxgb_controller_attach(), and should be the final piece of work * done when fully unloading the driver. * * * 1. Shutting down the threads started by the cxgb_controller_attach() * routine. * 2. Stopping the lower level device and all callouts (cxgb_down_locked()). * 3. Detaching all of the port devices created during the * cxgb_controller_attach() routine. * 4. Removing the device children created via cxgb_controller_attach(). * 5. Releasing PCI resources associated with the device. * 6. Turning off the offload support, iff it was turned on. * 7. Destroying the mutexes created in cxgb_controller_attach(). * */ static void cxgb_free(struct adapter *sc) { int i, nqsets = 0; ADAPTER_LOCK(sc); sc->flags |= CXGB_SHUTDOWN; ADAPTER_UNLOCK(sc); /* * Make sure all child devices are gone. */ bus_generic_detach(sc->dev); for (i = 0; i < (sc)->params.nports; i++) { if (sc->portdev[i] && device_delete_child(sc->dev, sc->portdev[i]) != 0) device_printf(sc->dev, "failed to delete child port\n"); nqsets += sc->port[i].nqsets; } /* * At this point, it is as if cxgb_port_detach has run on all ports, and * cxgb_down has run on the adapter. All interrupts have been silenced, * all open devices have been closed. */ KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)", __func__, sc->open_device_map)); for (i = 0; i < sc->params.nports; i++) { KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!", __func__, i)); } /* * Finish off the adapter's callouts. 
*/ callout_drain(&sc->cxgb_tick_ch); callout_drain(&sc->sge_timer_ch); /* * Release resources grabbed under FULL_INIT_DONE by cxgb_up. The * sysctls are cleaned up by the kernel linker. */ if (sc->flags & FULL_INIT_DONE) { t3_free_sge_resources(sc, nqsets); sc->flags &= ~FULL_INIT_DONE; } /* * Release all interrupt resources. */ cxgb_teardown_interrupts(sc); if (sc->flags & (USING_MSI | USING_MSIX)) { device_printf(sc->dev, "releasing msi message(s)\n"); pci_release_msi(sc->dev); } else { device_printf(sc->dev, "no msi message to release\n"); } if (sc->msix_regs_res != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid, sc->msix_regs_res); } /* * Free the adapter's taskqueue. */ if (sc->tq != NULL) { taskqueue_free(sc->tq); sc->tq = NULL; } free(sc->filters, M_DEVBUF); t3_sge_free(sc); if (sc->udbs_res != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid, sc->udbs_res); if (sc->regs_res != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid, sc->regs_res); MTX_DESTROY(&sc->mdio_lock); MTX_DESTROY(&sc->sge.reg_lock); MTX_DESTROY(&sc->elmer_lock); mtx_lock(&t3_list_lock); SLIST_REMOVE(&t3_list, sc, adapter, link); mtx_unlock(&t3_list_lock); ADAPTER_LOCK_DEINIT(sc); } /** * setup_sge_qsets - configure SGE Tx/Rx/response queues * @sc: the controller softc * * Determines how many sets of SGE queues to use and initializes them. * We support multiple queue sets per port if we have MSI-X, otherwise * just one queue set per port. */ static int setup_sge_qsets(adapter_t *sc) { int i, j, err, irq_idx = 0, qset_idx = 0; u_int ntxq = SGE_TXQ_PER_SET; if ((err = t3_sge_alloc(sc)) != 0) { device_printf(sc->dev, "t3_sge_alloc returned %d\n", err); return (err); } if (sc->params.rev > 0 && !(sc->flags & USING_MSI)) irq_idx = -1; for (i = 0; i < (sc)->params.nports; i++) { struct port_info *pi = &sc->port[i]; for (j = 0; j < pi->nqsets; j++, qset_idx++) { err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports, (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx, &sc->params.sge.qset[qset_idx], ntxq, pi); if (err) { t3_free_sge_resources(sc, qset_idx); device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n", err); return (err); } } } return (0); } static void cxgb_teardown_interrupts(adapter_t *sc) { int i; for (i = 0; i < SGE_QSETS; i++) { if (sc->msix_intr_tag[i] == NULL) { /* Should have been setup fully or not at all */ KASSERT(sc->msix_irq_res[i] == NULL && sc->msix_irq_rid[i] == 0, ("%s: half-done interrupt (%d).", __func__, i)); continue; } bus_teardown_intr(sc->dev, sc->msix_irq_res[i], sc->msix_intr_tag[i]); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i], sc->msix_irq_res[i]); sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL; sc->msix_irq_rid[i] = 0; } if (sc->intr_tag) { KASSERT(sc->irq_res != NULL, ("%s: half-done interrupt.", __func__)); bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = sc->intr_tag = NULL; sc->irq_rid = 0; } } static int cxgb_setup_interrupts(adapter_t *sc) { struct resource *res; void *tag; int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX); sc->irq_rid = intr_flag ? 
1 : 0; sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n", intr_flag, sc->irq_rid); err = EINVAL; sc->irq_rid = 0; } else { err = bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE | INTR_TYPE_NET, NULL, sc->cxgb_intr, sc, &sc->intr_tag); if (err) { device_printf(sc->dev, "Cannot set up interrupt (%x, %u, %d)\n", intr_flag, sc->irq_rid, err); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = sc->intr_tag = NULL; sc->irq_rid = 0; } } /* That's all for INTx or MSI */ if (!(intr_flag & USING_MSIX) || err) return (err); bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err"); for (i = 0; i < sc->msi_count - 1; i++) { rid = i + 2; res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (res == NULL) { device_printf(sc->dev, "Cannot allocate interrupt " "for message %d\n", rid); err = EINVAL; break; } err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET, NULL, t3_intr_msix, &sc->sge.qs[i], &tag); if (err) { device_printf(sc->dev, "Cannot set up interrupt " "for message %d (%d)\n", rid, err); bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res); break; } sc->msix_irq_rid[i] = rid; sc->msix_irq_res[i] = res; sc->msix_intr_tag[i] = tag; bus_describe_intr(sc->dev, res, tag, "qs%d", i); } if (err) cxgb_teardown_interrupts(sc); return (err); } static int cxgb_port_probe(device_t dev) { struct port_info *p; char buf[80]; const char *desc; p = device_get_softc(dev); desc = p->phy.desc; snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc); device_set_desc_copy(dev, buf); return (0); } static int cxgb_makedev(struct port_info *pi) { pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit, UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp)); if (pi->port_cdev == NULL) return (ENOMEM); pi->port_cdev->si_drv1 = (void *)pi; return (0); } #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6) #define CXGB_CAP_ENABLE (CXGB_CAP) static int cxgb_port_attach(device_t dev) { struct port_info *p; struct ifnet *ifp; int err; struct adapter *sc; p = device_get_softc(dev); sc = p->adapter; snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d", device_get_unit(device_get_parent(dev)), p->port_id); PORT_LOCK_INIT(p, p->lockbuf); callout_init(&p->link_check_ch, CALLOUT_MPSAFE); TASK_INIT(&p->link_check_task, 0, check_link_status, p); /* Allocate an ifnet object and set it up */ ifp = p->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "Cannot allocate ifnet\n"); return (ENOMEM); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = cxgb_init; ifp->if_softc = p; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = cxgb_ioctl; ifp->if_transmit = cxgb_transmit; ifp->if_qflush = cxgb_qflush; ifp->if_capabilities = CXGB_CAP; #ifdef TCP_OFFLOAD if (is_offload(sc)) ifp->if_capabilities |= IFCAP_TOE4; #endif ifp->if_capenable = CXGB_CAP_ENABLE; ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6; /* * Disable TSO on 4-port - it isn't supported by the firmware. 
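 * (The capability, enable, and if_hwassist CSUM_TSO bits are all cleared
 * together below.)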
*/ if (sc->params.nports > 2) { ifp->if_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO); ifp->if_capenable &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO); ifp->if_hwassist &= ~CSUM_TSO; } ether_ifattach(ifp, p->hw_addr); #ifdef DEFAULT_JUMBO if (sc->params.nports <= 2) ifp->if_mtu = ETHERMTU_JUMBO; #endif if ((err = cxgb_makedev(p)) != 0) { printf("makedev failed %d\n", err); return (err); } /* Create a list of media supported by this port */ ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change, cxgb_media_status); cxgb_build_medialist(p); t3_sge_init_port(p); return (err); } /* * cxgb_port_detach() is called via the device_detach methods when * cxgb_free() calls the bus_generic_detach. It is responsible for * removing the device from the view of the kernel, i.e. from all * interfaces lists etc. This routine is only called when the driver is * being unloaded, not when the link goes down. */ static int cxgb_port_detach(device_t dev) { struct port_info *p; struct adapter *sc; int i; p = device_get_softc(dev); sc = p->adapter; /* Tell cxgb_ioctl and if_init that the port is going away */ ADAPTER_LOCK(sc); SET_DOOMED(p); wakeup(&sc->flags); while (IS_BUSY(sc)) mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0); SET_BUSY(sc); ADAPTER_UNLOCK(sc); if (p->port_cdev != NULL) destroy_dev(p->port_cdev); cxgb_uninit_synchronized(p); ether_ifdetach(p->ifp); for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { struct sge_qset *qs = &sc->sge.qs[i]; struct sge_txq *txq = &qs->txq[TXQ_ETH]; callout_drain(&txq->txq_watchdog); callout_drain(&txq->txq_timer); } PORT_LOCK_DEINIT(p); if_free(p->ifp); p->ifp = NULL; ADAPTER_LOCK(sc); CLR_BUSY(sc); wakeup_one(&sc->flags); ADAPTER_UNLOCK(sc); return (0); } void t3_fatal_err(struct adapter *sc) { u_int fw_status[4]; if (sc->flags & FULL_INIT_DONE) { t3_sge_stop(sc); t3_write_reg(sc, A_XGM_TX_CTRL, 0); t3_write_reg(sc, A_XGM_RX_CTRL, 0); t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0); t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0); t3_intr_disable(sc); } device_printf(sc->dev,"encountered fatal error, operation suspended\n"); if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status)) device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n", fw_status[0], fw_status[1], fw_status[2], fw_status[3]); } int t3_os_find_pci_capability(adapter_t *sc, int cap) { device_t dev; struct pci_devinfo *dinfo; pcicfgregs *cfg; uint32_t status; uint8_t ptr; dev = sc->dev; dinfo = device_get_ivars(dev); cfg = &dinfo->cfg; status = pci_read_config(dev, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (0); switch (cfg->hdrtype & PCIM_HDRTYPE) { case 0: case 1: ptr = PCIR_CAP_PTR; break; case 2: ptr = PCIR_CAP_PTR_2; break; default: return (0); break; } ptr = pci_read_config(dev, ptr, 1); while (ptr != 0) { if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap) return (ptr); ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1); } return (0); } int t3_os_pci_save_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); return (0); } int t3_os_pci_restore_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); return (0); } /** * t3_os_link_changed - handle link status changes * @sc: the adapter associated with the link change * @port_id: the port index whose link status has changed * @link_status: the new status of the link * @speed: the new speed setting * @duplex: the new duplex setting * @fc: the new 
flow-control setting * * This is the OS-dependent handler for link status changes. The OS * neutral handler takes care of most of the processing for these events, * then calls this handler for any OS-specific processing. */ void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed, int duplex, int fc, int mac_was_reset) { struct port_info *pi = &adapter->port[port_id]; struct ifnet *ifp = pi->ifp; /* no race with detach, so ifp should always be good */ KASSERT(ifp, ("%s: if detached.", __func__)); /* Reapply mac settings if they were lost due to a reset */ if (mac_was_reset) { PORT_LOCK(pi); cxgb_update_mac_settings(pi); PORT_UNLOCK(pi); } if (link_status) { ifp->if_baudrate = IF_Mbps(speed); if_link_state_change(ifp, LINK_STATE_UP); } else if_link_state_change(ifp, LINK_STATE_DOWN); } /** * t3_os_phymod_changed - handle PHY module changes * @phy: the PHY reporting the module change * @mod_type: new module type * * This is the OS-dependent handler for PHY module changes. It is * invoked when a PHY module is removed or inserted for any OS-specific * processing. */ void t3_os_phymod_changed(struct adapter *adap, int port_id) { static const char *mod_str[] = { NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown" }; struct port_info *pi = &adap->port[port_id]; int mod = pi->phy.modtype; if (mod != pi->media.ifm_cur->ifm_data) cxgb_build_medialist(pi); if (mod == phy_modtype_none) if_printf(pi->ifp, "PHY module unplugged\n"); else { KASSERT(mod < ARRAY_SIZE(mod_str), ("invalid PHY module type %d", mod)); if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]); } } void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]) { /* * The ifnet might not be allocated before this gets called, * as this is called early on in attach by t3_prep_adapter * save the address off in the port structure */ if (cxgb_debug) printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":"); bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN); } /* * Programs the XGMAC based on the settings in the ifnet. These settings * include MTU, MAC address, mcast addresses, etc. 
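 * Callers must hold the port lock; the pattern at the call sites in this
 * file (t3_os_link_changed() above and cxgb_ioctl() below) is:
 *
 *	PORT_LOCK(p);
 *	cxgb_update_mac_settings(p);
 *	PORT_UNLOCK(p);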
*/ static void cxgb_update_mac_settings(struct port_info *p) { struct ifnet *ifp = p->ifp; struct t3_rx_mode rm; struct cmac *mac = &p->mac; int mtu, hwtagging; PORT_LOCK_ASSERT_OWNED(p); bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN); mtu = ifp->if_mtu; if (ifp->if_capenable & IFCAP_VLAN_MTU) mtu += ETHER_VLAN_ENCAP_LEN; hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0; t3_mac_set_mtu(mac, mtu); t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging); t3_mac_set_address(mac, 0, p->hw_addr); t3_init_rx_mode(&rm, p); t3_mac_set_rx_mode(mac, &rm); } static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, unsigned long n) { int attempts = 5; while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { if (!--attempts) return (ETIMEDOUT); t3_os_sleep(10); } return 0; } static int init_tp_parity(struct adapter *adap) { int i; struct mbuf *m; struct cpl_set_tcb_field *greq; unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; t3_tp_set_offload_mode(adap, 1); for (i = 0; i < 16; i++) { struct cpl_smt_write_req *req; m = m_gethdr(M_WAITOK, MT_DATA); req = mtod(m, struct cpl_smt_write_req *); m->m_len = m->m_pkthdr.len = sizeof(*req); memset(req, 0, sizeof(*req)); req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); req->iff = i; t3_mgmt_tx(adap, m); } for (i = 0; i < 2048; i++) { struct cpl_l2t_write_req *req; m = m_gethdr(M_WAITOK, MT_DATA); req = mtod(m, struct cpl_l2t_write_req *); m->m_len = m->m_pkthdr.len = sizeof(*req); memset(req, 0, sizeof(*req)); req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); req->params = htonl(V_L2T_W_IDX(i)); t3_mgmt_tx(adap, m); } for (i = 0; i < 2048; i++) { struct cpl_rte_write_req *req; m = m_gethdr(M_WAITOK, MT_DATA); req = mtod(m, struct cpl_rte_write_req *); m->m_len = m->m_pkthdr.len = sizeof(*req); memset(req, 0, sizeof(*req)); req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); req->l2t_idx = htonl(V_L2T_W_IDX(i)); t3_mgmt_tx(adap, m); } m = m_gethdr(M_WAITOK, MT_DATA); greq = mtod(m, struct cpl_set_tcb_field *); m->m_len = m->m_pkthdr.len = sizeof(*greq); memset(greq, 0, sizeof(*greq)); greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0)); greq->mask = htobe64(1); t3_mgmt_tx(adap, m); i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); t3_tp_set_offload_mode(adap, 0); return (i); } /** * setup_rss - configure Receive Side Steering (per-queue connection demux) * @adap: the adapter * * Sets up RSS to distribute packets to multiple receive queues. We * configure the RSS CPU lookup table to distribute to the number of HW * receive queues, and the response queue lookup table to narrow that * down to the response queues actually configured for each port. * We always configure the RSS mapping for two ports since the mapping * table has plenty of entries. */ static void setup_rss(adapter_t *adap) { int i; u_int nq[2]; uint8_t cpus[SGE_QSETS + 1]; uint16_t rspq_map[RSS_TABLE_SIZE]; for (i = 0; i < SGE_QSETS; ++i) cpus[i] = i; cpus[SGE_QSETS] = 0xff; nq[0] = nq[1] = 0; for_each_port(adap, i) { const struct port_info *pi = adap2pinfo(adap, i); nq[pi->tx_chan] += pi->nqsets; } for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) { rspq_map[i] = nq[0] ? i % nq[0] : 0; rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? 
i % nq[1] + nq[0] : 0; } /* Calculate the reverse RSS map table */ for (i = 0; i < SGE_QSETS; ++i) adap->rrss_map[i] = 0xff; for (i = 0; i < RSS_TABLE_SIZE; ++i) if (adap->rrss_map[rspq_map[i]] == 0xff) adap->rrss_map[rspq_map[i]] = i; t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN | F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); } static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, int hi, int port) { struct mbuf *m; struct mngt_pktsched_wr *req; m = m_gethdr(M_NOWAIT, MT_DATA); if (m) { req = mtod(m, struct mngt_pktsched_wr *); req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; req->sched = sched; req->idx = qidx; req->min = lo; req->max = hi; req->binding = port; m->m_len = m->m_pkthdr.len = sizeof(*req); t3_mgmt_tx(adap, m); } } static void bind_qsets(adapter_t *sc) { int i, j; for (i = 0; i < (sc)->params.nports; ++i) { const struct port_info *pi = adap2pinfo(sc, i); for (j = 0; j < pi->nqsets; ++j) { send_pktsched_cmd(sc, 1, pi->first_qset + j, -1, -1, pi->tx_chan); } } } static void update_tpeeprom(struct adapter *adap) { const struct firmware *tpeeprom; uint32_t version; unsigned int major, minor; int ret, len; char rev, name[32]; t3_seeprom_read(adap, TP_SRAM_OFFSET, &version); major = G_TP_VERSION_MAJOR(version); minor = G_TP_VERSION_MINOR(version); if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) return; rev = t3rev2char(adap); snprintf(name, sizeof(name), TPEEPROM_NAME, rev); tpeeprom = firmware_get(name); if (tpeeprom == NULL) { device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n", name); return; } len = tpeeprom->datasize - 4; ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize); if (ret) goto release_tpeeprom; if (len != TP_SRAM_LEN) { device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n", name, len, TP_SRAM_LEN); return; } ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize, TP_SRAM_OFFSET); if (!ret) { device_printf(adap->dev, "Protocol SRAM image updated in EEPROM to %d.%d.%d\n", TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); } else device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n"); release_tpeeprom: firmware_put(tpeeprom, FIRMWARE_UNLOAD); return; } static int update_tpsram(struct adapter *adap) { const struct firmware *tpsram; int ret; char rev, name[32]; rev = t3rev2char(adap); snprintf(name, sizeof(name), TPSRAM_NAME, rev); update_tpeeprom(adap); tpsram = firmware_get(name); if (tpsram == NULL){ device_printf(adap->dev, "could not load TP SRAM\n"); return (EINVAL); } else device_printf(adap->dev, "updating TP SRAM\n"); ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize); if (ret) goto release_tpsram; ret = t3_set_proto_sram(adap, tpsram->data); if (ret) device_printf(adap->dev, "loading protocol SRAM failed\n"); release_tpsram: firmware_put(tpsram, FIRMWARE_UNLOAD); return ret; } /** * cxgb_up - enable the adapter * @adap: adapter being enabled * * Called when the first port is enabled, this function performs the * actions necessary to make an adapter operational, such as completing * the initialization of HW modules, and enabling interrupts. 
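 *
 * The expensive one-time initialization is guarded by FULL_INIT_DONE, so
 * the call from cxgb_init_locked() below reduces to this sketch:
 *
 *	if (sc->open_device_map == 0)
 *		rc = cxgb_up(sc);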
*/ static int cxgb_up(struct adapter *sc) { int err = 0; unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS; KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)", __func__, sc->open_device_map)); if ((sc->flags & FULL_INIT_DONE) == 0) { ADAPTER_LOCK_ASSERT_NOTOWNED(sc); if ((sc->flags & FW_UPTODATE) == 0) if ((err = upgrade_fw(sc))) goto out; if ((sc->flags & TPS_UPTODATE) == 0) if ((err = update_tpsram(sc))) goto out; if (is_offload(sc) && nfilters != 0) { sc->params.mc5.nservers = 0; if (nfilters < 0) sc->params.mc5.nfilters = mxf; else sc->params.mc5.nfilters = min(nfilters, mxf); } err = t3_init_hw(sc, 0); if (err) goto out; t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT); t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); err = setup_sge_qsets(sc); if (err) goto out; alloc_filters(sc); setup_rss(sc); t3_add_configured_sysctls(sc); sc->flags |= FULL_INIT_DONE; } t3_intr_clear(sc); t3_sge_start(sc); t3_intr_enable(sc); if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) && is_offload(sc) && init_tp_parity(sc) == 0) sc->flags |= TP_PARITY_INIT; if (sc->flags & TP_PARITY_INIT) { t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR); t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff); } if (!(sc->flags & QUEUES_BOUND)) { bind_qsets(sc); setup_hw_filters(sc); sc->flags |= QUEUES_BOUND; } t3_sge_reset_adapter(sc); out: return (err); } /* * Called when the last open device is closed. Does NOT undo all of cxgb_up's * work. Specifically, the resources grabbed under FULL_INIT_DONE are released * during controller_detach, not here. */ static void cxgb_down(struct adapter *sc) { t3_sge_stop(sc); t3_intr_disable(sc); } /* * if_init for cxgb ports. */ static void cxgb_init(void *arg) { struct port_info *p = arg; struct adapter *sc = p->adapter; ADAPTER_LOCK(sc); cxgb_init_locked(p); /* releases adapter lock */ ADAPTER_LOCK_ASSERT_NOTOWNED(sc); } static int cxgb_init_locked(struct port_info *p) { struct adapter *sc = p->adapter; struct ifnet *ifp = p->ifp; struct cmac *mac = &p->mac; int i, rc = 0, may_sleep = 0, gave_up_lock = 0; ADAPTER_LOCK_ASSERT_OWNED(sc); while (!IS_DOOMED(p) && IS_BUSY(sc)) { gave_up_lock = 1; if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) { rc = EINTR; goto done; } } if (IS_DOOMED(p)) { rc = ENXIO; goto done; } KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); /* * The code that runs during one-time adapter initialization can sleep * so it's important not to hold any locks across it. */ may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1; if (may_sleep) { SET_BUSY(sc); gave_up_lock = 1; ADAPTER_UNLOCK(sc); } if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0)) goto done; PORT_LOCK(p); if (isset(&sc->open_device_map, p->port_id) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { PORT_UNLOCK(p); goto done; } t3_port_intr_enable(sc, p->port_id); if (!mac->multiport) t3_mac_init(mac); cxgb_update_mac_settings(p); t3_link_start(&p->phy, mac, &p->link_config); t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; PORT_UNLOCK(p); for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { struct sge_qset *qs = &sc->sge.qs[i]; struct sge_txq *txq = &qs->txq[TXQ_ETH]; callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs, txq->txq_watchdog.c_cpu); } /* all ok */ setbit(&sc->open_device_map, p->port_id); callout_reset(&p->link_check_ch, p->phy.caps & SUPPORTED_LINK_IRQ ? 
hz * 3 : hz / 4, link_check_callout, p); done: if (may_sleep) { ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); } if (gave_up_lock) wakeup_one(&sc->flags); ADAPTER_UNLOCK(sc); return (rc); } static int cxgb_uninit_locked(struct port_info *p) { struct adapter *sc = p->adapter; int rc; ADAPTER_LOCK_ASSERT_OWNED(sc); while (!IS_DOOMED(p) && IS_BUSY(sc)) { if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) { rc = EINTR; goto done; } } if (IS_DOOMED(p)) { rc = ENXIO; goto done; } KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); SET_BUSY(sc); ADAPTER_UNLOCK(sc); rc = cxgb_uninit_synchronized(p); ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); wakeup_one(&sc->flags); done: ADAPTER_UNLOCK(sc); return (rc); } /* * Called on "ifconfig down", and from port_detach */ static int cxgb_uninit_synchronized(struct port_info *pi) { struct adapter *sc = pi->adapter; struct ifnet *ifp = pi->ifp; /* * taskqueue_drain may cause a deadlock if the adapter lock is held. */ ADAPTER_LOCK_ASSERT_NOTOWNED(sc); /* * Clear this port's bit from the open device map, and then drain all * the tasks that can access/manipulate this port's port_info or ifp. * We disable this port's interrupts here and so the slow/ext * interrupt tasks won't be enqueued. The tick task will continue to * be enqueued every second but the runs after this drain will not see * this port in the open device map. * * A well behaved task must take open_device_map into account and ignore * ports that are not open. */ clrbit(&sc->open_device_map, pi->port_id); t3_port_intr_disable(sc, pi->port_id); taskqueue_drain(sc->tq, &sc->slow_intr_task); taskqueue_drain(sc->tq, &sc->tick_task); callout_drain(&pi->link_check_ch); taskqueue_drain(sc->tq, &pi->link_check_task); PORT_LOCK(pi); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* disable pause frames */ t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0); /* Reset RX FIFO HWM */ t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset, V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0); DELAY(100 * 1000); /* Wait for TXFIFO empty */ t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset, F_TXFIFO_EMPTY, 1, 20, 5); DELAY(100 * 1000); t3_mac_disable(&pi->mac, MAC_DIRECTION_RX); pi->phy.ops->power_down(&pi->phy, 1); PORT_UNLOCK(pi); pi->link_config.link_ok = 0; t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0); if (sc->open_device_map == 0) cxgb_down(pi->adapter); return (0); } /* * Mark lro enabled or disabled in all qsets for this port */ static int cxgb_set_lro(struct port_info *p, int enabled) { int i; struct adapter *adp = p->adapter; struct sge_qset *q; for (i = 0; i < p->nqsets; i++) { q = &adp->sge.qs[p->first_qset + i]; q->lro.enabled = (enabled != 0); } return (0); } static int cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data) { struct port_info *p = ifp->if_softc; struct adapter *sc = p->adapter; struct ifreq *ifr = (struct ifreq *)data; int flags, error = 0, mtu; uint32_t mask; switch (command) { case SIOCSIFMTU: ADAPTER_LOCK(sc); error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? 
EBUSY : 0); if (error) { fail: ADAPTER_UNLOCK(sc); return (error); } mtu = ifr->ifr_mtu; if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) { error = EINVAL; } else { ifp->if_mtu = mtu; PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } ADAPTER_UNLOCK(sc); break; case SIOCSIFFLAGS: ADAPTER_LOCK(sc); if (IS_DOOMED(p)) { error = ENXIO; goto fail; } if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { flags = p->if_flags; if (((ifp->if_flags ^ flags) & IFF_PROMISC) || ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) { if (IS_BUSY(sc)) { error = EBUSY; goto fail; } PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } ADAPTER_UNLOCK(sc); } else error = cxgb_init_locked(p); p->if_flags = ifp->if_flags; } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) error = cxgb_uninit_locked(p); else ADAPTER_UNLOCK(sc); ADAPTER_LOCK_ASSERT_NOTOWNED(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: ADAPTER_LOCK(sc); error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); if (error) goto fail; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } ADAPTER_UNLOCK(sc); break; case SIOCSIFCAP: ADAPTER_LOCK(sc); error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); if (error) goto fail; mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO4 & ifp->if_capenable && !(IFCAP_TXCSUM & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO4; if_printf(ifp, "tso4 disabled due to -txcsum.\n"); } } if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (IFCAP_TSO6 & ifp->if_capenable && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO6; if_printf(ifp, "tso6 disabled due to -txcsum6.\n"); } } if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; /* * Note that we leave CSUM_TSO alone (it is always set). The * kernel takes both IFCAP_TSOx and CSUM_TSO into account before * sending a TSO request our way, so it's sufficient to toggle * IFCAP_TSOx only. 
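 */

/*
 * Illustrative sketch, not part of this commit: SIOCSIFCAP above works on
 * the XOR of the requested and current capability bits, so only the
 * capabilities that actually change get toggled, and disabling tx
 * checksums drags the dependent TSO capability down with it.  Hypothetical
 * flag values:
 */
#define SKETCH_CAP_TXCSUM	0x01
#define SKETCH_CAP_TSO4		0x02

static int
sketch_update_caps(int capenable, int reqcap)
{
    int mask = reqcap ^ capenable;	/* bits that change */

    if (mask & SKETCH_CAP_TXCSUM) {
        capenable ^= SKETCH_CAP_TXCSUM;
        /* TSO requires tx checksumming; disable it on -txcsum. */
        if ((capenable & SKETCH_CAP_TSO4) &&
            !(capenable & SKETCH_CAP_TXCSUM))
            capenable &= ~SKETCH_CAP_TSO4;
    }
    if (mask & SKETCH_CAP_TSO4)
        capenable ^= SKETCH_CAP_TSO4;
    return (capenable);
}

/*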
*/ if (mask & IFCAP_TSO4) { if (!(IFCAP_TSO4 & ifp->if_capenable) && !(IFCAP_TXCSUM & ifp->if_capenable)) { if_printf(ifp, "enable txcsum first.\n"); error = EAGAIN; goto fail; } ifp->if_capenable ^= IFCAP_TSO4; } if (mask & IFCAP_TSO6) { if (!(IFCAP_TSO6 & ifp->if_capenable) && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { if_printf(ifp, "enable txcsum6 first.\n"); error = EAGAIN; goto fail; } ifp->if_capenable ^= IFCAP_TSO6; } if (mask & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; /* Safe to do this even if cxgb_up not called yet */ cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO); } #ifdef TCP_OFFLOAD if (mask & IFCAP_TOE4) { int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE4; error = toe_capability(p, enable); if (error == 0) ifp->if_capenable ^= mask; } #endif if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } } if (mask & IFCAP_VLAN_MTU) { ifp->if_capenable ^= IFCAP_VLAN_MTU; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } } if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_VLAN_HWCSUM) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; #ifdef VLAN_CAPABILITIES VLAN_CAPABILITIES(ifp); #endif ADAPTER_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &p->media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } static int cxgb_media_change(struct ifnet *ifp) { return (EOPNOTSUPP); } /* * Translates phy->modtype to the correct Ethernet media subtype. */ static int cxgb_ifm_type(int mod) { switch (mod) { case phy_modtype_sr: return (IFM_10G_SR); case phy_modtype_lr: return (IFM_10G_LR); case phy_modtype_lrm: return (IFM_10G_LRM); case phy_modtype_twinax: return (IFM_10G_TWINAX); case phy_modtype_twinax_long: return (IFM_10G_TWINAX_LONG); case phy_modtype_none: return (IFM_NONE); case phy_modtype_unknown: return (IFM_UNKNOWN); } KASSERT(0, ("%s: modtype %d unknown", __func__, mod)); return (IFM_UNKNOWN); } /* * Rebuilds the ifmedia list for this port, and sets the current media. 
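 */

/*
 * Illustrative sketch, not part of this commit: cxgb_build_medialist()
 * below composes a media word by OR-ing the subtype from cxgb_ifm_type()
 * into an IFM_ETHER | IFM_FDX base, and drops IFM_FDX again when no module
 * is present.  Hypothetical constants standing in for the real IFM_*
 * values:
 */
enum {
    SKETCH_IFM_ETHER = 0x100,
    SKETCH_IFM_FDX   = 0x200,
    SKETCH_IFM_NONE  = 0x001	/* "no module present" subtype */
};

static int
sketch_media_word(int subtype)
{
    int m = SKETCH_IFM_ETHER | SKETCH_IFM_FDX | subtype;

    if (subtype == SKETCH_IFM_NONE)
        m &= ~SKETCH_IFM_FDX;	/* no duplex without a module */
    return (m);
}

/*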
 */
static void
cxgb_build_medialist(struct port_info *p)
{
    struct cphy *phy = &p->phy;
    struct ifmedia *media = &p->media;
    int mod = phy->modtype;
    int m = IFM_ETHER | IFM_FDX;

    PORT_LOCK(p);

    ifmedia_removeall(media);
    if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
        /* Copper (RJ45) */
        if (phy->caps & SUPPORTED_10000baseT_Full)
            ifmedia_add(media, m | IFM_10G_T, mod, NULL);
        if (phy->caps & SUPPORTED_1000baseT_Full)
            ifmedia_add(media, m | IFM_1000_T, mod, NULL);
        if (phy->caps & SUPPORTED_100baseT_Full)
            ifmedia_add(media, m | IFM_100_TX, mod, NULL);
        if (phy->caps & SUPPORTED_10baseT_Full)
            ifmedia_add(media, m | IFM_10_T, mod, NULL);
        ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
        ifmedia_set(media, IFM_ETHER | IFM_AUTO);
    } else if (phy->caps & SUPPORTED_TP) {
        /* Copper (CX4) */
        KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
            ("%s: unexpected cap 0x%x", __func__, phy->caps));
        ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
        ifmedia_set(media, m | IFM_10G_CX4);
    } else if (phy->caps & SUPPORTED_FIBRE &&
        phy->caps & SUPPORTED_10000baseT_Full) {
        /* 10G optical (but includes SFP+ twinax) */
        m |= cxgb_ifm_type(mod);
        if (IFM_SUBTYPE(m) == IFM_NONE)
            m &= ~IFM_FDX;
        ifmedia_add(media, m, mod, NULL);
        ifmedia_set(media, m);
    } else if (phy->caps & SUPPORTED_FIBRE &&
        phy->caps & SUPPORTED_1000baseT_Full) {
        /* 1G optical */
        /* XXX: Lie and claim to be SX, could actually be any 1G-X */
        ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
        ifmedia_set(media, m | IFM_1000_SX);
    } else {
        KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
            phy->caps));
    }

    PORT_UNLOCK(p);
}

static void
cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct port_info *p = ifp->if_softc;
    struct ifmedia_entry *cur = p->media.ifm_cur;
    int speed = p->link_config.speed;

    if (cur->ifm_data != p->phy.modtype) {
        cxgb_build_medialist(p);
        cur = p->media.ifm_cur;
    }

    ifmr->ifm_status = IFM_AVALID;
    if (!p->link_config.link_ok)
        return;

    ifmr->ifm_status |= IFM_ACTIVE;

    /*
     * active and current will differ iff current media is autoselect.  That
     * can happen only for copper RJ45.
     */
    if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
        return;
    KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
        ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));

    ifmr->ifm_active = IFM_ETHER | IFM_FDX;
    if (speed == SPEED_10000)
        ifmr->ifm_active |= IFM_10G_T;
    else if (speed == SPEED_1000)
        ifmr->ifm_active |= IFM_1000_T;
    else if (speed == SPEED_100)
        ifmr->ifm_active |= IFM_100_TX;
    else if (speed == SPEED_10)
        ifmr->ifm_active |= IFM_10_T;
    else
        KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, speed));
}

static void
cxgb_async_intr(void *data)
{
    adapter_t *sc = data;

    t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
    (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
    taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
}

static void
link_check_callout(void *arg)
{
    struct port_info *pi = arg;
    struct adapter *sc = pi->adapter;

    if (!isset(&sc->open_device_map, pi->port_id))
        return;

    taskqueue_enqueue(sc->tq, &pi->link_check_task);
}

static void
check_link_status(void *arg, int pending)
{
    struct port_info *pi = arg;
    struct adapter *sc = pi->adapter;

    if (!isset(&sc->open_device_map, pi->port_id))
        return;

    t3_link_changed(sc, pi->port_id);

-   if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ))
+   if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
+       pi->link_config.link_ok == 0)
        callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
}

void
t3_os_link_intr(struct port_info *pi)
{
    /*
     * Schedule a link check in the near future.
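 */

/*
 * Illustrative sketch, not part of this commit: with the change above,
 * check_link_status() re-arms the polling callout whenever the PHY has no
 * link interrupt, a link fault is pending, or the link is still down.  The
 * new link_ok test means a missed interrupt while the link is down can no
 * longer leave the port unchecked.  A reduction of that decision:
 */
static int
sketch_must_poll_again(int link_fault, int has_link_irq, int link_ok)
{
    return (link_fault || !has_link_irq || !link_ok);
}

/*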
If the link is flapping * rapidly we'll keep resetting the callout and delaying the check until * things stabilize a bit. */ callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi); } static void check_t3b2_mac(struct adapter *sc) { int i; if (sc->flags & CXGB_SHUTDOWN) return; for_each_port(sc, i) { struct port_info *p = &sc->port[i]; int status; #ifdef INVARIANTS struct ifnet *ifp = p->ifp; #endif if (!isset(&sc->open_device_map, p->port_id) || p->link_fault || !p->link_config.link_ok) continue; KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("%s: state mismatch (drv_flags %x, device_map %x)", __func__, ifp->if_drv_flags, sc->open_device_map)); PORT_LOCK(p); status = t3b2_mac_watchdog_task(&p->mac); if (status == 1) p->mac.stats.num_toggled++; else if (status == 2) { struct cmac *mac = &p->mac; cxgb_update_mac_settings(p); t3_link_start(&p->phy, mac, &p->link_config); t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); t3_port_intr_enable(sc, p->port_id); p->mac.stats.num_resets++; } PORT_UNLOCK(p); } } static void cxgb_tick(void *arg) { adapter_t *sc = (adapter_t *)arg; if (sc->flags & CXGB_SHUTDOWN) return; taskqueue_enqueue(sc->tq, &sc->tick_task); callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); } static void cxgb_tick_handler(void *arg, int count) { adapter_t *sc = (adapter_t *)arg; const struct adapter_params *p = &sc->params; int i; uint32_t cause, reset; if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE)) return; if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map) check_t3b2_mac(sc); cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY); if (cause) { struct sge_qset *qs = &sc->sge.qs[0]; uint32_t mask, v; v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00; mask = 1; for (i = 0; i < SGE_QSETS; i++) { if (v & mask) qs[i].rspq.starved++; mask <<= 1; } mask <<= SGE_QSETS; /* skip RSPQXDISABLED */ for (i = 0; i < SGE_QSETS * 2; i++) { if (v & mask) { qs[i / 2].fl[i % 2].empty++; } mask <<= 1; } /* clear */ t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v); t3_write_reg(sc, A_SG_INT_CAUSE, cause); } for (i = 0; i < sc->params.nports; i++) { struct port_info *pi = &sc->port[i]; struct ifnet *ifp = pi->ifp; struct cmac *mac = &pi->mac; struct mac_stats *mstats = &mac->stats; int drops, j; if (!isset(&sc->open_device_map, pi->port_id)) continue; PORT_LOCK(pi); t3_mac_update_stats(mac); PORT_UNLOCK(pi); ifp->if_opackets = mstats->tx_frames; ifp->if_ipackets = mstats->rx_frames; ifp->if_obytes = mstats->tx_octets; ifp->if_ibytes = mstats->rx_octets; ifp->if_omcasts = mstats->tx_mcast_frames; ifp->if_imcasts = mstats->rx_mcast_frames; ifp->if_collisions = mstats->tx_total_collisions; ifp->if_iqdrops = mstats->rx_cong_drops; drops = 0; for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; j++) drops += sc->sge.qs[j].txq[TXQ_ETH].txq_mr->br_drops; ifp->if_snd.ifq_drops = drops; ifp->if_oerrors = mstats->tx_excess_collisions + mstats->tx_underrun + mstats->tx_len_errs + mstats->tx_mac_internal_errs + mstats->tx_excess_deferral + mstats->tx_fcs_errs; ifp->if_ierrors = mstats->rx_jabber + mstats->rx_data_errs + mstats->rx_sequence_errs + mstats->rx_runt + mstats->rx_too_long + mstats->rx_mac_internal_errs + mstats->rx_short + mstats->rx_fcs_errs; if (mac->multiport) continue; /* Count rx fifo overflows, once per second */ cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset); reset = 0; if (cause & F_RXFIFO_OVERFLOW) { mac->stats.rx_fifo_ovfl++; reset |= F_RXFIFO_OVERFLOW; } t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset); } } static 
void touch_bars(device_t dev) { /* * Don't enable yet */ #if !defined(__LP64__) && 0 u32 v; pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v); pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v); pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v); pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v); pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v); pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v); #endif } static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset) { uint8_t *buf; int err = 0; u32 aligned_offset, aligned_len, *p; struct adapter *adapter = pi->adapter; aligned_offset = offset & ~3; aligned_len = (len + (offset & 3) + 3) & ~3; if (aligned_offset != offset || aligned_len != len) { buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO); if (!buf) return (ENOMEM); err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf); if (!err && aligned_len > 4) err = t3_seeprom_read(adapter, aligned_offset + aligned_len - 4, (u32 *)&buf[aligned_len - 4]); if (err) goto out; memcpy(buf + (offset & 3), data, len); } else buf = (uint8_t *)(uintptr_t)data; err = t3_seeprom_wp(adapter, 0); if (err) goto out; for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { err = t3_seeprom_write(adapter, aligned_offset, *p); aligned_offset += 4; } if (!err) err = t3_seeprom_wp(adapter, 1); out: if (buf != data) free(buf, M_DEVBUF); return err; } static int in_range(int val, int lo, int hi) { return val < 0 || (val <= hi && val >= lo); } static int cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td) { return (0); } static int cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } static int cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, struct thread *td) { int mmd, error = 0; struct port_info *pi = dev->si_drv1; adapter_t *sc = pi->adapter; #ifdef PRIV_SUPPORTED if (priv_check(td, PRIV_DRIVER)) { if (cxgb_debug) printf("user does not have access to privileged ioctls\n"); return (EPERM); } #else if (suser(td)) { if (cxgb_debug) printf("user does not have access to privileged ioctls\n"); return (EPERM); } #endif switch (cmd) { case CHELSIO_GET_MIIREG: { uint32_t val; struct cphy *phy = &pi->phy; struct ch_mii_data *mid = (struct ch_mii_data *)data; if (!phy->mdio_read) return (EOPNOTSUPP); if (is_10G(sc)) { mmd = mid->phy_id >> 8; if (!mmd) mmd = MDIO_DEV_PCS; else if (mmd > MDIO_DEV_VEND2) return (EINVAL); error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd, mid->reg_num, &val); } else error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0, mid->reg_num & 0x1f, &val); if (error == 0) mid->val_out = val; break; } case CHELSIO_SET_MIIREG: { struct cphy *phy = &pi->phy; struct ch_mii_data *mid = (struct ch_mii_data *)data; if (!phy->mdio_write) return (EOPNOTSUPP); if (is_10G(sc)) { mmd = mid->phy_id >> 8; if (!mmd) mmd = MDIO_DEV_PCS; else if (mmd > MDIO_DEV_VEND2) return (EINVAL); error = phy->mdio_write(sc, mid->phy_id & 0x1f, mmd, mid->reg_num, mid->val_in); } else error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0, mid->reg_num & 0x1f, mid->val_in); break; } case CHELSIO_SETREG: { struct ch_reg *edata = (struct ch_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); t3_write_reg(sc, edata->addr, edata->val); break; } case CHELSIO_GETREG: { struct ch_reg *edata = (struct ch_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); edata->val = t3_read_reg(sc, edata->addr); break; } case 
CHELSIO_GET_SGE_CONTEXT: { struct ch_cntxt *ecntxt = (struct ch_cntxt *)data; mtx_lock_spin(&sc->sge.reg_lock); switch (ecntxt->cntxt_type) { case CNTXT_TYPE_EGRESS: error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id, ecntxt->data); break; case CNTXT_TYPE_FL: error = -t3_sge_read_fl(sc, ecntxt->cntxt_id, ecntxt->data); break; case CNTXT_TYPE_RSP: error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id, ecntxt->data); break; case CNTXT_TYPE_CQ: error = -t3_sge_read_cq(sc, ecntxt->cntxt_id, ecntxt->data); break; default: error = EINVAL; break; } mtx_unlock_spin(&sc->sge.reg_lock); break; } case CHELSIO_GET_SGE_DESC: { struct ch_desc *edesc = (struct ch_desc *)data; int ret; if (edesc->queue_num >= SGE_QSETS * 6) return (EINVAL); ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6], edesc->queue_num % 6, edesc->idx, edesc->data); if (ret < 0) return (EINVAL); edesc->size = ret; break; } case CHELSIO_GET_QSET_PARAMS: { struct qset_params *q; struct ch_qset_params *t = (struct ch_qset_params *)data; int q1 = pi->first_qset; int nqsets = pi->nqsets; int i; if (t->qset_idx >= nqsets) return EINVAL; i = q1 + t->qset_idx; q = &sc->params.sge.qset[i]; t->rspq_size = q->rspq_size; t->txq_size[0] = q->txq_size[0]; t->txq_size[1] = q->txq_size[1]; t->txq_size[2] = q->txq_size[2]; t->fl_size[0] = q->fl_size; t->fl_size[1] = q->jumbo_size; t->polling = q->polling; t->lro = q->lro; t->intr_lat = q->coalesce_usecs; t->cong_thres = q->cong_thres; t->qnum = i; if ((sc->flags & FULL_INIT_DONE) == 0) t->vector = 0; else if (sc->flags & USING_MSIX) t->vector = rman_get_start(sc->msix_irq_res[i]); else t->vector = rman_get_start(sc->irq_res); break; } case CHELSIO_GET_QSET_NUM: { struct ch_reg *edata = (struct ch_reg *)data; edata->val = pi->nqsets; break; } case CHELSIO_LOAD_FW: { uint8_t *fw_data; uint32_t vers; struct ch_mem_range *t = (struct ch_mem_range *)data; /* * You're allowed to load a firmware only before FULL_INIT_DONE * * FW_UPTODATE is also set so the rest of the initialization * will not overwrite what was loaded here. This gives you the * flexibility to load any firmware (and maybe shoot yourself in * the foot). 
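 */

/*
 * Illustrative sketch, not part of this commit: CHELSIO_GET_SGE_DESC above
 * addresses one of the six rings in a qset (three tx queues, the response
 * queue and two free lists) through a single flat index, split by division
 * and remainder:
 */
#define SKETCH_RINGS_PER_QSET	6

static void
sketch_split_queue_num(int queue_num, int *qset, int *ring)
{
    *qset = queue_num / SKETCH_RINGS_PER_QSET;
    *ring = queue_num % SKETCH_RINGS_PER_QSET;
}

/*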
*/ ADAPTER_LOCK(sc); if (sc->open_device_map || sc->flags & FULL_INIT_DONE) { ADAPTER_UNLOCK(sc); return (EBUSY); } fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT); if (!fw_data) error = ENOMEM; else error = copyin(t->buf, fw_data, t->len); if (!error) error = -t3_load_fw(sc, fw_data, t->len); if (t3_get_fw_version(sc, &vers) == 0) { snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers)); } if (!error) sc->flags |= FW_UPTODATE; free(fw_data, M_DEVBUF); ADAPTER_UNLOCK(sc); break; } case CHELSIO_LOAD_BOOT: { uint8_t *boot_data; struct ch_mem_range *t = (struct ch_mem_range *)data; boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT); if (!boot_data) return ENOMEM; error = copyin(t->buf, boot_data, t->len); if (!error) error = -t3_load_boot(sc, boot_data, t->len); free(boot_data, M_DEVBUF); break; } case CHELSIO_GET_PM: { struct ch_pm *m = (struct ch_pm *)data; struct tp_params *p = &sc->params.tp; if (!is_offload(sc)) return (EOPNOTSUPP); m->tx_pg_sz = p->tx_pg_size; m->tx_num_pg = p->tx_num_pgs; m->rx_pg_sz = p->rx_pg_size; m->rx_num_pg = p->rx_num_pgs; m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; break; } case CHELSIO_SET_PM: { struct ch_pm *m = (struct ch_pm *)data; struct tp_params *p = &sc->params.tp; if (!is_offload(sc)) return (EOPNOTSUPP); if (sc->flags & FULL_INIT_DONE) return (EBUSY); if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) || !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1))) return (EINVAL); /* not power of 2 */ if (!(m->rx_pg_sz & 0x14000)) return (EINVAL); /* not 16KB or 64KB */ if (!(m->tx_pg_sz & 0x1554000)) return (EINVAL); if (m->tx_num_pg == -1) m->tx_num_pg = p->tx_num_pgs; if (m->rx_num_pg == -1) m->rx_num_pg = p->rx_num_pgs; if (m->tx_num_pg % 24 || m->rx_num_pg % 24) return (EINVAL); if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size || m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size) return (EINVAL); p->rx_pg_size = m->rx_pg_sz; p->tx_pg_size = m->tx_pg_sz; p->rx_num_pgs = m->rx_num_pg; p->tx_num_pgs = m->tx_num_pg; break; } case CHELSIO_SETMTUTAB: { struct ch_mtus *m = (struct ch_mtus *)data; int i; if (!is_offload(sc)) return (EOPNOTSUPP); if (offload_running(sc)) return (EBUSY); if (m->nmtus != NMTUS) return (EINVAL); if (m->mtus[0] < 81) /* accommodate SACK */ return (EINVAL); /* * MTUs must be in ascending order */ for (i = 1; i < NMTUS; ++i) if (m->mtus[i] < m->mtus[i - 1]) return (EINVAL); memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus)); break; } case CHELSIO_GETMTUTAB: { struct ch_mtus *m = (struct ch_mtus *)data; if (!is_offload(sc)) return (EOPNOTSUPP); memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus)); m->nmtus = NMTUS; break; } case CHELSIO_GET_MEM: { struct ch_mem_range *t = (struct ch_mem_range *)data; struct mc7 *mem; uint8_t *useraddr; u64 buf[32]; /* * Use these to avoid modifying len/addr in the return * struct */ uint32_t len = t->len, addr = t->addr; if (!is_offload(sc)) return (EOPNOTSUPP); if (!(sc->flags & FULL_INIT_DONE)) return (EIO); /* need the memory controllers */ if ((addr & 0x7) || (len & 0x7)) return (EINVAL); if (t->mem_id == MEM_CM) mem = &sc->cm; else if (t->mem_id == MEM_PMRX) mem = &sc->pmrx; else if (t->mem_id == MEM_PMTX) mem = &sc->pmtx; else return (EINVAL); /* * Version scheme: * bits 0..9: chip version * bits 10..15: chip revision */ t->version = 3 | (sc->params.rev << 10); /* * Read 256 bytes at a time as len can be large and we don't * want to use huge intermediate buffers. 
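 */

/*
 * Illustrative sketch, not part of this commit: the CHELSIO_GET_MEM loop
 * below bounds every transfer by a small on-stack buffer rather than
 * allocating t->len bytes at once.  A user-space analogue, with memcpy
 * standing in for t3_mc7_bd_read() and copyout():
 */
#include <stddef.h>
#include <string.h>

static void
sketch_chunked_copy(unsigned char *dst, const unsigned char *src, size_t len)
{
    unsigned char buf[256];	/* mirrors the u64 buf[32] on the stack */

    while (len) {
        size_t chunk = len < sizeof(buf) ? len : sizeof(buf);

        memcpy(buf, src, chunk);	/* "read from adapter memory" */
        memcpy(dst, buf, chunk);	/* "copy out to the user" */
        dst += chunk;
        src += chunk;
        len -= chunk;
    }
}

/*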
*/ useraddr = (uint8_t *)t->buf; while (len) { unsigned int chunk = min(len, sizeof(buf)); error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf); if (error) return (-error); if (copyout(buf, useraddr, chunk)) return (EFAULT); useraddr += chunk; addr += chunk; len -= chunk; } break; } case CHELSIO_READ_TCAM_WORD: { struct ch_tcam_word *t = (struct ch_tcam_word *)data; if (!is_offload(sc)) return (EOPNOTSUPP); if (!(sc->flags & FULL_INIT_DONE)) return (EIO); /* need MC5 */ return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf); break; } case CHELSIO_SET_TRACE_FILTER: { struct ch_trace *t = (struct ch_trace *)data; const struct trace_params *tp; tp = (const struct trace_params *)&t->sip; if (t->config_tx) t3_config_trace_filter(sc, tp, 0, t->invert_match, t->trace_tx); if (t->config_rx) t3_config_trace_filter(sc, tp, 1, t->invert_match, t->trace_rx); break; } case CHELSIO_SET_PKTSCHED: { struct ch_pktsched_params *p = (struct ch_pktsched_params *)data; if (sc->open_device_map == 0) return (EAGAIN); send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max, p->binding); break; } case CHELSIO_IFCONF_GETREGS: { struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data; int reglen = cxgb_get_regs_len(); uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT); if (buf == NULL) { return (ENOMEM); } if (regs->len > reglen) regs->len = reglen; else if (regs->len < reglen) error = ENOBUFS; if (!error) { cxgb_get_regs(sc, regs, buf); error = copyout(buf, regs->data, reglen); } free(buf, M_DEVBUF); break; } case CHELSIO_SET_HW_SCHED: { struct ch_hw_sched *t = (struct ch_hw_sched *)data; unsigned int ticks_per_usec = core_ticks_per_usec(sc); if ((sc->flags & FULL_INIT_DONE) == 0) return (EAGAIN); /* need TP to be initialized */ if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) || !in_range(t->channel, 0, 1) || !in_range(t->kbps, 0, 10000000) || !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) || !in_range(t->flow_ipg, 0, dack_ticks_to_usec(sc, 0x7ff))) return (EINVAL); if (t->kbps >= 0) { error = t3_config_sched(sc, t->kbps, t->sched); if (error < 0) return (-error); } if (t->class_ipg >= 0) t3_set_sched_ipg(sc, t->sched, t->class_ipg); if (t->flow_ipg >= 0) { t->flow_ipg *= 1000; /* us -> ns */ t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1); } if (t->mode >= 0) { int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched); t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, bit, t->mode ? 
bit : 0); } if (t->channel >= 0) t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, 1 << t->sched, t->channel << t->sched); break; } case CHELSIO_GET_EEPROM: { int i; struct ch_eeprom *e = (struct ch_eeprom *)data; uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT); if (buf == NULL) { return (ENOMEM); } e->magic = EEPROM_MAGIC; for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4) error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]); if (!error) error = copyout(buf + e->offset, e->data, e->len); free(buf, M_DEVBUF); break; } case CHELSIO_CLEAR_STATS: { if (!(sc->flags & FULL_INIT_DONE)) return EAGAIN; PORT_LOCK(pi); t3_mac_update_stats(&pi->mac); memset(&pi->mac.stats, 0, sizeof(pi->mac.stats)); PORT_UNLOCK(pi); break; } case CHELSIO_GET_UP_LA: { struct ch_up_la *la = (struct ch_up_la *)data; uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT); if (buf == NULL) { return (ENOMEM); } if (la->bufsize < LA_BUFSIZE) error = ENOBUFS; if (!error) error = -t3_get_up_la(sc, &la->stopped, &la->idx, &la->bufsize, buf); if (!error) error = copyout(buf, la->data, la->bufsize); free(buf, M_DEVBUF); break; } case CHELSIO_GET_UP_IOQS: { struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data; uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT); uint32_t *v; if (buf == NULL) { return (ENOMEM); } if (ioqs->bufsize < IOQS_BUFSIZE) error = ENOBUFS; if (!error) error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf); if (!error) { v = (uint32_t *)buf; ioqs->ioq_rx_enable = *v++; ioqs->ioq_tx_enable = *v++; ioqs->ioq_rx_status = *v++; ioqs->ioq_tx_status = *v++; error = copyout(v, ioqs->data, ioqs->bufsize); } free(buf, M_DEVBUF); break; } case CHELSIO_SET_FILTER: { struct ch_filter *f = (struct ch_filter *)data; struct filter_info *p; unsigned int nfilters = sc->params.mc5.nfilters; if (!is_offload(sc)) return (EOPNOTSUPP); /* No TCAM */ if (!(sc->flags & FULL_INIT_DONE)) return (EAGAIN); /* mc5 not setup yet */ if (nfilters == 0) return (EBUSY); /* TOE will use TCAM */ /* sanity checks */ if (f->filter_id >= nfilters || (f->val.dip && f->mask.dip != 0xffffffff) || (f->val.sport && f->mask.sport != 0xffff) || (f->val.dport && f->mask.dport != 0xffff) || (f->val.vlan && f->mask.vlan != 0xfff) || (f->val.vlan_prio && f->mask.vlan_prio != FILTER_NO_VLAN_PRI) || (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) || f->qset >= SGE_QSETS || sc->rrss_map[f->qset] >= RSS_TABLE_SIZE) return (EINVAL); /* Was allocated with M_WAITOK */ KASSERT(sc->filters, ("filter table NULL\n")); p = &sc->filters[f->filter_id]; if (p->locked) return (EPERM); bzero(p, sizeof(*p)); p->sip = f->val.sip; p->sip_mask = f->mask.sip; p->dip = f->val.dip; p->sport = f->val.sport; p->dport = f->val.dport; p->vlan = f->mask.vlan ? f->val.vlan : 0xfff; p->vlan_prio = f->mask.vlan_prio ? 
(f->val.vlan_prio & 6) : FILTER_NO_VLAN_PRI; p->mac_hit = f->mac_hit; p->mac_vld = f->mac_addr_idx != 0xffff; p->mac_idx = f->mac_addr_idx; p->pkt_type = f->proto; p->report_filter_id = f->want_filter_id; p->pass = f->pass; p->rss = f->rss; p->qset = f->qset; error = set_filter(sc, f->filter_id, p); if (error == 0) p->valid = 1; break; } case CHELSIO_DEL_FILTER: { struct ch_filter *f = (struct ch_filter *)data; struct filter_info *p; unsigned int nfilters = sc->params.mc5.nfilters; if (!is_offload(sc)) return (EOPNOTSUPP); if (!(sc->flags & FULL_INIT_DONE)) return (EAGAIN); if (nfilters == 0 || sc->filters == NULL) return (EINVAL); if (f->filter_id >= nfilters) return (EINVAL); p = &sc->filters[f->filter_id]; if (p->locked) return (EPERM); if (!p->valid) return (EFAULT); /* Read "Bad address" as "Bad index" */ bzero(p, sizeof(*p)); p->sip = p->sip_mask = 0xffffffff; p->vlan = 0xfff; p->vlan_prio = FILTER_NO_VLAN_PRI; p->pkt_type = 1; error = set_filter(sc, f->filter_id, p); break; } case CHELSIO_GET_FILTER: { struct ch_filter *f = (struct ch_filter *)data; struct filter_info *p; unsigned int i, nfilters = sc->params.mc5.nfilters; if (!is_offload(sc)) return (EOPNOTSUPP); if (!(sc->flags & FULL_INIT_DONE)) return (EAGAIN); if (nfilters == 0 || sc->filters == NULL) return (EINVAL); i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1; for (; i < nfilters; i++) { p = &sc->filters[i]; if (!p->valid) continue; bzero(f, sizeof(*f)); f->filter_id = i; f->val.sip = p->sip; f->mask.sip = p->sip_mask; f->val.dip = p->dip; f->mask.dip = p->dip ? 0xffffffff : 0; f->val.sport = p->sport; f->mask.sport = p->sport ? 0xffff : 0; f->val.dport = p->dport; f->mask.dport = p->dport ? 0xffff : 0; f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan; f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff; f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? 0 : p->vlan_prio; f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? 0 : FILTER_NO_VLAN_PRI; f->mac_hit = p->mac_hit; f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff; f->proto = p->pkt_type; f->want_filter_id = p->report_filter_id; f->pass = p->pass; f->rss = p->rss; f->qset = p->qset; break; } if (i == nfilters) f->filter_id = 0xffffffff; break; } default: return (EOPNOTSUPP); break; } return (error); } static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, unsigned int end) { uint32_t *p = (uint32_t *)(buf + start); for ( ; start <= end; start += sizeof(uint32_t)) *p++ = t3_read_reg(ap, start); } #define T3_REGMAP_SIZE (3 * 1024) static int cxgb_get_regs_len(void) { return T3_REGMAP_SIZE; } static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf) { /* * Version scheme: * bits 0..9: chip version * bits 10..15: chip revision * bit 31: set for PCIe cards */ regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31); /* * We skip the MAC statistics registers because they are clear-on-read. * Also reading multi-register stats would need to synchronize with the * periodic mac stats accumulation. Hard to justify the complexity. 
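 */

/*
 * Illustrative sketch, not part of this commit: reg_block_dump() above
 * stores each register's value at its own address inside the dump buffer,
 * so a register's offset in the dump equals its address and the skipped
 * clear-on-read ranges simply remain zero.  sketch_read_reg() is a
 * hypothetical stand-in for t3_read_reg():
 */
#include <stdint.h>

static uint32_t
sketch_read_reg(unsigned int addr)
{
    return (addr * 2654435761u);	/* arbitrary deterministic value */
}

static void
sketch_reg_block_dump(uint8_t *buf, unsigned int start, unsigned int end)
{
    uint32_t *p = (uint32_t *)(buf + start);

    for (; start <= end; start += sizeof(uint32_t))
        *p++ = sketch_read_reg(start);
}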
*/ memset(buf, 0, cxgb_get_regs_len()); reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN); reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0, XGM_REG(A_XGM_SERDES_STAT3, 1)); reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); } static int alloc_filters(struct adapter *sc) { struct filter_info *p; unsigned int nfilters = sc->params.mc5.nfilters; if (nfilters == 0) return (0); p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO); sc->filters = p; p = &sc->filters[nfilters - 1]; p->vlan = 0xfff; p->vlan_prio = FILTER_NO_VLAN_PRI; p->pass = p->rss = p->valid = p->locked = 1; return (0); } static int setup_hw_filters(struct adapter *sc) { int i, rc; unsigned int nfilters = sc->params.mc5.nfilters; if (!sc->filters) return (0); t3_enable_filters(sc); for (i = rc = 0; i < nfilters && !rc; i++) { if (sc->filters[i].locked) rc = set_filter(sc, i, &sc->filters[i]); } return (rc); } static int set_filter(struct adapter *sc, int id, const struct filter_info *f) { int len; struct mbuf *m; struct ulp_txpkt *txpkt; struct work_request_hdr *wr; struct cpl_pass_open_req *oreq; struct cpl_set_tcb_field *sreq; len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq); KASSERT(len <= MHLEN, ("filter request too big for an mbuf")); id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes - sc->params.mc5.nfilters; m = m_gethdr(M_WAITOK, MT_DATA); m->m_len = m->m_pkthdr.len = len; bzero(mtod(m, char *), len); wr = mtod(m, struct work_request_hdr *); wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC); oreq = (struct cpl_pass_open_req *)(wr + 1); txpkt = (struct ulp_txpkt *)oreq; txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8)); OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id)); oreq->local_port = htons(f->dport); oreq->peer_port = htons(f->sport); oreq->local_ip = htonl(f->dip); oreq->peer_ip = htonl(f->sip); oreq->peer_netmask = htonl(f->sip_mask); oreq->opt0h = 0; oreq->opt0l = htonl(F_NO_OFFLOAD); oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) | V_CONN_POLICY(CPL_CONN_POLICY_FILTER) | V_VLAN_PRI(f->vlan_prio >> 1) | V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) | V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) | V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4))); sreq = (struct cpl_set_tcb_field *)(oreq + 1); set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL, (f->report_filter_id << 15) | (1 << 23) | ((u64)f->pass << 35) | ((u64)!f->rss << 36)); set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1); t3_mgmt_tx(sc, m); if (f->pass && !f->rss) { len = sizeof(*sreq); m = m_gethdr(M_WAITOK, MT_DATA); m->m_len = m->m_pkthdr.len = len; bzero(mtod(m, char *), len); sreq = mtod(m, struct cpl_set_tcb_field *); sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); mk_set_tcb_field(sreq, id, 25, 0x3f80000, (u64)sc->rrss_map[f->qset] << 19); t3_mgmt_tx(sc, m); } return 0; } static inline void mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid, unsigned int word, u64 mask, u64 val) { OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(word); req->mask = htobe64(mask); req->val = htobe64(val); } static inline void set_tcb_field_ulp(struct 
cpl_set_tcb_field *req, unsigned int tid, unsigned int word, u64 mask, u64 val) { struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8)); mk_set_tcb_field(req, tid, word, mask, val); } void t3_iterate(void (*func)(struct adapter *, void *), void *arg) { struct adapter *sc; mtx_lock(&t3_list_lock); SLIST_FOREACH(sc, &t3_list, link) { /* * func should not make any assumptions about what state sc is * in - the only guarantee is that sc->sc_lock is a valid lock. */ func(sc, arg); } mtx_unlock(&t3_list_lock); } #ifdef TCP_OFFLOAD static int toe_capability(struct port_info *pi, int enable) { int rc; struct adapter *sc = pi->adapter; ADAPTER_LOCK_ASSERT_OWNED(sc); if (!is_offload(sc)) return (ENODEV); if (enable) { if (!(sc->flags & FULL_INIT_DONE)) { log(LOG_WARNING, "You must enable a cxgb interface first\n"); return (EAGAIN); } if (isset(&sc->offload_map, pi->port_id)) return (0); if (!(sc->flags & TOM_INIT_DONE)) { rc = t3_activate_uld(sc, ULD_TOM); if (rc == EAGAIN) { log(LOG_WARNING, "You must kldload t3_tom.ko before trying " "to enable TOE on a cxgb interface.\n"); } if (rc != 0) return (rc); KASSERT(sc->tom_softc != NULL, ("%s: TOM activated but softc NULL", __func__)); KASSERT(sc->flags & TOM_INIT_DONE, ("%s: TOM activated but flag not set", __func__)); } setbit(&sc->offload_map, pi->port_id); /* * XXX: Temporary code to allow iWARP to be enabled when TOE is * enabled on any port. Need to figure out how to enable, * disable, load, and unload iWARP cleanly. */ if (!isset(&sc->offload_map, MAX_NPORTS) && t3_activate_uld(sc, ULD_IWARP) == 0) setbit(&sc->offload_map, MAX_NPORTS); } else { if (!isset(&sc->offload_map, pi->port_id)) return (0); KASSERT(sc->flags & TOM_INIT_DONE, ("%s: TOM never initialized?", __func__)); clrbit(&sc->offload_map, pi->port_id); } return (0); } /* * Add an upper layer driver to the global list. */ int t3_register_uld(struct uld_info *ui) { int rc = 0; struct uld_info *u; mtx_lock(&t3_uld_list_lock); SLIST_FOREACH(u, &t3_uld_list, link) { if (u->uld_id == ui->uld_id) { rc = EEXIST; goto done; } } SLIST_INSERT_HEAD(&t3_uld_list, ui, link); ui->refcount = 0; done: mtx_unlock(&t3_uld_list_lock); return (rc); } int t3_unregister_uld(struct uld_info *ui) { int rc = EINVAL; struct uld_info *u; mtx_lock(&t3_uld_list_lock); SLIST_FOREACH(u, &t3_uld_list, link) { if (u == ui) { if (ui->refcount > 0) { rc = EBUSY; goto done; } SLIST_REMOVE(&t3_uld_list, ui, uld_info, link); rc = 0; goto done; } } done: mtx_unlock(&t3_uld_list_lock); return (rc); } int t3_activate_uld(struct adapter *sc, int id) { int rc = EAGAIN; struct uld_info *ui; mtx_lock(&t3_uld_list_lock); SLIST_FOREACH(ui, &t3_uld_list, link) { if (ui->uld_id == id) { rc = ui->activate(sc); if (rc == 0) ui->refcount++; goto done; } } done: mtx_unlock(&t3_uld_list_lock); return (rc); } int t3_deactivate_uld(struct adapter *sc, int id) { int rc = EINVAL; struct uld_info *ui; mtx_lock(&t3_uld_list_lock); SLIST_FOREACH(ui, &t3_uld_list, link) { if (ui->uld_id == id) { rc = ui->deactivate(sc); if (rc == 0) ui->refcount--; goto done; } } done: mtx_unlock(&t3_uld_list_lock); return (rc); } static int cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused, struct mbuf *m) { m_freem(m); return (EDOOFUS); } int t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) { uintptr_t *loc, new; if (opcode >= NUM_CPL_HANDLERS) return (EINVAL); new = h ? 
(uintptr_t)h : (uintptr_t)cpl_not_handled; loc = (uintptr_t *) &sc->cpl_handler[opcode]; atomic_store_rel_ptr(loc, new); return (0); } #endif static int cxgbc_mod_event(module_t mod, int cmd, void *arg) { int rc = 0; switch (cmd) { case MOD_LOAD: mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF); SLIST_INIT(&t3_list); #ifdef TCP_OFFLOAD mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF); SLIST_INIT(&t3_uld_list); #endif break; case MOD_UNLOAD: #ifdef TCP_OFFLOAD mtx_lock(&t3_uld_list_lock); if (!SLIST_EMPTY(&t3_uld_list)) { rc = EBUSY; mtx_unlock(&t3_uld_list_lock); break; } mtx_unlock(&t3_uld_list_lock); mtx_destroy(&t3_uld_list_lock); #endif mtx_lock(&t3_list_lock); if (!SLIST_EMPTY(&t3_list)) { rc = EBUSY; mtx_unlock(&t3_list_lock); break; } mtx_unlock(&t3_list_lock); mtx_destroy(&t3_list_lock); break; } return (rc); } Index: stable/9/sys/dev =================================================================== --- stable/9/sys/dev (revision 277343) +++ stable/9/sys/dev (revision 277344) Property changes on: stable/9/sys/dev ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/dev:r276959 Index: stable/9/sys =================================================================== --- stable/9/sys (revision 277343) +++ stable/9/sys (revision 277344) Property changes on: stable/9/sys ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys:r276959
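/*
 * Illustrative sketch, not part of this commit: t3_register_cpl_handler()
 * in the hunk above swaps handlers into the dispatch table with an atomic
 * release store, substituting a "not handled" stub for NULL so dispatchers
 * never observe a torn or missing pointer.  A minimal analogue using C11
 * atomics in place of FreeBSD's atomic_store_rel_ptr(); all names here are
 * hypothetical:
 */
#include <stdatomic.h>

typedef int (*sketch_handler_t)(void *msg);

#define SKETCH_NUM_HANDLERS	16

static _Atomic(sketch_handler_t) sketch_handlers[SKETCH_NUM_HANDLERS];

static int
sketch_not_handled(void *msg)
{
    (void)msg;
    return (-1);	/* analogous to freeing the mbuf and EDOOFUS */
}

static int
sketch_register_handler(int opcode, sketch_handler_t h)
{
    if (opcode < 0 || opcode >= SKETCH_NUM_HANDLERS)
        return (-1);	/* EINVAL in the driver */
    atomic_store_explicit(&sketch_handlers[opcode],
        h ? h : sketch_not_handled, memory_order_release);
    return (0);
}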