diff --git a/sys/arm/allwinner/if_awg.c b/sys/arm/allwinner/if_awg.c index 7236bd94335d..7f94b88e7035 100644 --- a/sys/arm/allwinner/if_awg.c +++ b/sys/arm/allwinner/if_awg.c @@ -1,2023 +1,2023 @@ /*- * Copyright (c) 2016 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Allwinner Gigabit Ethernet MAC (EMAC) controller */ #include "opt_device_polling.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "syscon_if.h" #include "miibus_if.h" #include "gpio_if.h" #define RD4(sc, reg) bus_read_4((sc)->res[_RES_EMAC], (reg)) #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_EMAC], (reg), (val)) #define AWG_LOCK(sc) mtx_lock(&(sc)->mtx) #define AWG_UNLOCK(sc) mtx_unlock(&(sc)->mtx); #define AWG_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define AWG_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) #define DESC_ALIGN 4 #define TX_DESC_COUNT 1024 #define TX_DESC_SIZE (sizeof(struct emac_desc) * TX_DESC_COUNT) #define RX_DESC_COUNT 256 #define RX_DESC_SIZE (sizeof(struct emac_desc) * RX_DESC_COUNT) #define DESC_OFF(n) ((n) * sizeof(struct emac_desc)) #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1)) #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1)) #define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1)) #define TX_MAX_SEGS 20 #define SOFT_RST_RETRY 1000 #define MII_BUSY_RETRY 1000 #define MDIO_FREQ 2500000 #define BURST_LEN_DEFAULT 8 #define RX_TX_PRI_DEFAULT 0 #define PAUSE_TIME_DEFAULT 0x400 #define TX_INTERVAL_DEFAULT 64 #define RX_BATCH_DEFAULT 64 /* syscon EMAC clock register */ #define EMAC_CLK_REG 0x30 #define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */ #define EMAC_CLK_EPHY_ADDR_SHIFT 20 #define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */ #define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */ #define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */ #define EMAC_CLK_RMII_EN (1 << 13) #define EMAC_CLK_ETXDC (0x7 << 10) #define EMAC_CLK_ETXDC_SHIFT 10 #define EMAC_CLK_ERXDC (0x1f << 5) #define EMAC_CLK_ERXDC_SHIFT 5 #define EMAC_CLK_PIT (0x1 << 2) #define EMAC_CLK_PIT_MII (0 << 2) #define EMAC_CLK_PIT_RGMII (1 << 2) #define EMAC_CLK_SRC (0x3 << 0) #define 
EMAC_CLK_SRC_MII (0 << 0) #define EMAC_CLK_SRC_EXT_RGMII (1 << 0) #define EMAC_CLK_SRC_RGMII (2 << 0) /* Burst length of RX and TX DMA transfers */ static int awg_burst_len = BURST_LEN_DEFAULT; TUNABLE_INT("hw.awg.burst_len", &awg_burst_len); /* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */ static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT; TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri); /* Pause time field in the transmitted control frame */ static int awg_pause_time = PAUSE_TIME_DEFAULT; TUNABLE_INT("hw.awg.pause_time", &awg_pause_time); /* Request a TX interrupt every N descriptors */ static int awg_tx_interval = TX_INTERVAL_DEFAULT; TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval); /* Maximum number of mbufs to send to if_input */ static int awg_rx_batch = RX_BATCH_DEFAULT; TUNABLE_INT("hw.awg.rx_batch", &awg_rx_batch); enum awg_type { EMAC_A83T = 1, EMAC_H3, EMAC_A64, }; static struct ofw_compat_data compat_data[] = { { "allwinner,sun8i-a83t-emac", EMAC_A83T }, { "allwinner,sun8i-h3-emac", EMAC_H3 }, { "allwinner,sun50i-a64-emac", EMAC_A64 }, { NULL, 0 } }; struct awg_bufmap { bus_dmamap_t map; struct mbuf *mbuf; }; struct awg_txring { bus_dma_tag_t desc_tag; bus_dmamap_t desc_map; struct emac_desc *desc_ring; bus_addr_t desc_ring_paddr; bus_dma_tag_t buf_tag; struct awg_bufmap buf_map[TX_DESC_COUNT]; u_int cur, next, queued; u_int segs; }; struct awg_rxring { bus_dma_tag_t desc_tag; bus_dmamap_t desc_map; struct emac_desc *desc_ring; bus_addr_t desc_ring_paddr; bus_dma_tag_t buf_tag; struct awg_bufmap buf_map[RX_DESC_COUNT]; bus_dmamap_t buf_spare_map; u_int cur; }; enum { _RES_EMAC, _RES_IRQ, _RES_SYSCON, _RES_NITEMS }; struct awg_softc { struct resource *res[_RES_NITEMS]; struct mtx mtx; if_t ifp; device_t dev; device_t miibus; struct callout stat_ch; void *ih; u_int mdc_div_ratio_m; int link; int if_flags; enum awg_type type; struct syscon *syscon; struct awg_txring tx; struct awg_rxring rx; }; static struct resource_spec awg_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_OPTIONAL }, { -1, 0 } }; static void awg_txeof(struct awg_softc *sc); static void awg_start_locked(struct awg_softc *sc); static void awg_tick(void *softc); static int awg_parse_delay(device_t dev, uint32_t *tx_delay, uint32_t *rx_delay); static uint32_t syscon_read_emac_clk_reg(device_t dev); static void syscon_write_emac_clk_reg(device_t dev, uint32_t val); static phandle_t awg_get_phy_node(device_t dev); static bool awg_has_internal_phy(device_t dev); /* * MII functions */ static int awg_miibus_readreg(device_t dev, int phy, int reg) { struct awg_softc *sc; int retry, val; sc = device_get_softc(dev); val = 0; WR4(sc, EMAC_MII_CMD, (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | (phy << PHY_ADDR_SHIFT) | (reg << PHY_REG_ADDR_SHIFT) | MII_BUSY); for (retry = MII_BUSY_RETRY; retry > 0; retry--) { if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) { val = RD4(sc, EMAC_MII_DATA); break; } DELAY(10); } if (retry == 0) device_printf(dev, "phy read timeout, phy=%d reg=%d\n", phy, reg); return (val); } static int awg_miibus_writereg(device_t dev, int phy, int reg, int val) { struct awg_softc *sc; int retry; sc = device_get_softc(dev); WR4(sc, EMAC_MII_DATA, val); WR4(sc, EMAC_MII_CMD, (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | (phy << PHY_ADDR_SHIFT) | (reg << PHY_REG_ADDR_SHIFT) | MII_WR | MII_BUSY); for (retry = MII_BUSY_RETRY; retry > 0; retry--) { if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) break; DELAY(10); } if (retry == 0)
device_printf(dev, "phy write timeout, phy=%d reg=%d\n", phy, reg); return (0); } static void awg_miibus_statchg(device_t dev) { struct awg_softc *sc; struct mii_data *mii; uint32_t val; sc = device_get_softc(dev); AWG_ASSERT_LOCKED(sc); if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) return; mii = device_get_softc(sc->miibus); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: case IFM_1000_SX: case IFM_100_TX: case IFM_10_T: sc->link = 1; break; default: sc->link = 0; break; } } else sc->link = 0; if (sc->link == 0) return; val = RD4(sc, EMAC_BASIC_CTL_0); val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT; else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT; else val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) val |= BASIC_CTL_DUPLEX; WR4(sc, EMAC_BASIC_CTL_0, val); val = RD4(sc, EMAC_RX_CTL_0); val &= ~RX_FLOW_CTL_EN; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) val |= RX_FLOW_CTL_EN; WR4(sc, EMAC_RX_CTL_0, val); val = RD4(sc, EMAC_TX_FLOW_CTL); val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) val |= TX_FLOW_CTL_EN; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) val |= awg_pause_time << PAUSE_TIME_SHIFT; WR4(sc, EMAC_TX_FLOW_CTL, val); } /* * Media functions */ static void awg_media_status(if_t ifp, struct ifmediareq *ifmr) { struct awg_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus); AWG_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; AWG_UNLOCK(sc); } static int awg_media_change(if_t ifp) { struct awg_softc *sc; struct mii_data *mii; int error; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus); AWG_LOCK(sc); error = mii_mediachg(mii); AWG_UNLOCK(sc); return (error); } /* * Core functions */ /* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */ static uint32_t bitrev32(uint32_t x) { x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); return (x >> 16) | (x << 16); } static u_int awg_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t crc, hashreg, hashbit, *hash = arg; crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7f; crc = bitrev32(~crc) >> 26; hashreg = (crc >> 5); hashbit = (crc & 0x1f); hash[hashreg] |= (1 << hashbit); return (1); } static void awg_setup_rxfilter(struct awg_softc *sc) { uint32_t val, hash[2], machi, maclo; uint8_t *eaddr; if_t ifp; AWG_ASSERT_LOCKED(sc); ifp = sc->ifp; val = 0; hash[0] = hash[1] = 0; if (if_getflags(ifp) & IFF_PROMISC) val |= DIS_ADDR_FILTER; else if (if_getflags(ifp) & IFF_ALLMULTI) { val |= RX_ALL_MULTICAST; hash[0] = hash[1] = ~0; } else if (if_foreach_llmaddr(ifp, awg_hash_maddr, hash) > 0) val |= HASH_MULTICAST; /* Write our unicast address */ - eaddr = IF_LLADDR(ifp); + eaddr = if_getlladdr(ifp); machi = (eaddr[5] << 8) | eaddr[4]; maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) | (eaddr[0] << 0); WR4(sc, EMAC_ADDR_HIGH(0), machi); WR4(sc, 
EMAC_ADDR_LOW(0), maclo); /* Multicast hash filters */ WR4(sc, EMAC_RX_HASH_0, hash[1]); WR4(sc, EMAC_RX_HASH_1, hash[0]); /* RX frame filter config */ WR4(sc, EMAC_RX_FRM_FLT, val); } static void awg_setup_core(struct awg_softc *sc) { uint32_t val; AWG_ASSERT_LOCKED(sc); /* Configure DMA burst length and priorities */ val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT; if (awg_rx_tx_pri) val |= BASIC_CTL_RX_TX_PRI; WR4(sc, EMAC_BASIC_CTL_1, val); } static void awg_enable_mac(struct awg_softc *sc, bool enable) { uint32_t tx, rx; AWG_ASSERT_LOCKED(sc); tx = RD4(sc, EMAC_TX_CTL_0); rx = RD4(sc, EMAC_RX_CTL_0); if (enable) { tx |= TX_EN; rx |= RX_EN | CHECK_CRC; } else { tx &= ~TX_EN; rx &= ~(RX_EN | CHECK_CRC); } WR4(sc, EMAC_TX_CTL_0, tx); WR4(sc, EMAC_RX_CTL_0, rx); } static void awg_get_eaddr(device_t dev, uint8_t *eaddr) { struct awg_softc *sc; uint32_t maclo, machi, rnd; u_char rootkey[16]; uint32_t rootkey_size; sc = device_get_softc(dev); machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff; maclo = RD4(sc, EMAC_ADDR_LOW(0)); rootkey_size = sizeof(rootkey); if (maclo == 0xffffffff && machi == 0xffff) { /* MAC address in hardware is invalid, create one */ if (aw_sid_get_fuse(AW_SID_FUSE_ROOTKEY, rootkey, &rootkey_size) == 0 && (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] | rootkey[15]) != 0) { /* MAC address is derived from the root key in SID */ maclo = (rootkey[13] << 24) | (rootkey[12] << 16) | (rootkey[3] << 8) | 0x02; machi = (rootkey[15] << 8) | rootkey[14]; } else { /* Create one */ rnd = arc4random(); maclo = 0x00f2 | (rnd & 0xffff0000); machi = rnd & 0xffff; } } eaddr[0] = maclo & 0xff; eaddr[1] = (maclo >> 8) & 0xff; eaddr[2] = (maclo >> 16) & 0xff; eaddr[3] = (maclo >> 24) & 0xff; eaddr[4] = machi & 0xff; eaddr[5] = (machi >> 8) & 0xff; } /* * DMA functions */ static void awg_enable_dma_intr(struct awg_softc *sc) { /* Enable interrupts */ WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN); } static void awg_disable_dma_intr(struct awg_softc *sc) { /* Disable interrupts */ WR4(sc, EMAC_INT_EN, 0); } static void awg_init_dma(struct awg_softc *sc) { uint32_t val; AWG_ASSERT_LOCKED(sc); /* Enable interrupts */ #ifdef DEVICE_POLLING if ((if_getcapenable(sc->ifp) & IFCAP_POLLING) == 0) awg_enable_dma_intr(sc); else awg_disable_dma_intr(sc); #else awg_enable_dma_intr(sc); #endif /* Enable transmit DMA */ val = RD4(sc, EMAC_TX_CTL_1); WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME); /* Enable receive DMA */ val = RD4(sc, EMAC_RX_CTL_1); WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD); } static void awg_stop_dma(struct awg_softc *sc) { uint32_t val; AWG_ASSERT_LOCKED(sc); /* Stop transmit DMA and flush data in the TX FIFO */ val = RD4(sc, EMAC_TX_CTL_1); val &= ~TX_DMA_EN; val |= FLUSH_TX_FIFO; WR4(sc, EMAC_TX_CTL_1, val); /* Disable interrupts */ awg_disable_dma_intr(sc); /* Disable transmit DMA */ val = RD4(sc, EMAC_TX_CTL_1); WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN); /* Disable receive DMA */ val = RD4(sc, EMAC_RX_CTL_1); WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN); } static int awg_encap(struct awg_softc *sc, struct mbuf **mp) { bus_dmamap_t map; bus_dma_segment_t segs[TX_MAX_SEGS]; int error, nsegs, cur, first, last, i; u_int csum_flags; uint32_t flags, status; struct mbuf *m; cur = first = sc->tx.cur; map = sc->tx.buf_map[first].map; m = *mp; error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS); if (m == NULL) { device_printf(sc->dev, "awg_encap: m_collapse 
failed\n"); m_freem(*mp); *mp = NULL; return (ENOMEM); } *mp = m; error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*mp); *mp = NULL; } } if (error != 0) { device_printf(sc->dev, "awg_encap: bus_dmamap_load_mbuf_sg failed\n"); return (error); } if (nsegs == 0) { m_freem(*mp); *mp = NULL; return (EIO); } if (sc->tx.queued + nsegs > TX_DESC_COUNT) { bus_dmamap_unload(sc->tx.buf_tag, map); return (ENOBUFS); } bus_dmamap_sync(sc->tx.buf_tag, map, BUS_DMASYNC_PREWRITE); flags = TX_FIR_DESC; status = 0; if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) { if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) csum_flags = TX_CHECKSUM_CTL_FULL; else csum_flags = TX_CHECKSUM_CTL_IP; flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT); } for (i = 0; i < nsegs; i++) { sc->tx.segs++; if (i == nsegs - 1) { flags |= TX_LAST_DESC; /* * Can only request TX completion * interrupt on last descriptor. */ if (sc->tx.segs >= awg_tx_interval) { sc->tx.segs = 0; flags |= TX_INT_CTL; } } sc->tx.desc_ring[cur].addr = htole32((uint32_t)segs[i].ds_addr); sc->tx.desc_ring[cur].size = htole32(flags | segs[i].ds_len); sc->tx.desc_ring[cur].status = htole32(status); flags &= ~TX_FIR_DESC; /* * Setting of the valid bit in the first descriptor is * deferred until the whole chain is fully set up. */ status = TX_DESC_CTL; ++sc->tx.queued; cur = TX_NEXT(cur); } sc->tx.cur = cur; /* Store mapping and mbuf in the last segment */ last = TX_SKIP(cur, TX_DESC_COUNT - 1); sc->tx.buf_map[first].map = sc->tx.buf_map[last].map; sc->tx.buf_map[last].map = map; sc->tx.buf_map[last].mbuf = m; /* * The whole mbuf chain has been DMA mapped, * fix the first descriptor. */ sc->tx.desc_ring[first].status = htole32(TX_DESC_CTL); return (0); } static void awg_clean_txbuf(struct awg_softc *sc, int index) { struct awg_bufmap *bmap; --sc->tx.queued; bmap = &sc->tx.buf_map[index]; if (bmap->mbuf != NULL) { bus_dmamap_sync(sc->tx.buf_tag, bmap->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx.buf_tag, bmap->map); m_freem(bmap->mbuf); bmap->mbuf = NULL; } } static void awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr) { uint32_t status, size; status = RX_DESC_CTL; size = MCLBYTES - 1; sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr); sc->rx.desc_ring[index].size = htole32(size); sc->rx.desc_ring[index].status = htole32(status); } static void awg_reuse_rxdesc(struct awg_softc *sc, int index) { sc->rx.desc_ring[index].status = htole32(RX_DESC_CTL); } static int awg_newbuf_rx(struct awg_softc *sc, int index) { struct mbuf *m; bus_dma_segment_t seg; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, sc->rx.buf_spare_map, m, &seg, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } if (sc->rx.buf_map[index].mbuf != NULL) { bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map); } map = sc->rx.buf_map[index].map; sc->rx.buf_map[index].map = sc->rx.buf_spare_map; sc->rx.buf_spare_map = map; bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, BUS_DMASYNC_PREREAD); sc->rx.buf_map[index].mbuf = m; awg_setup_rxdesc(sc, index, seg.ds_addr); return (0); } static void awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs[0].ds_addr; } static 
int awg_setup_dma(device_t dev) { struct awg_softc *sc; int error, i; sc = device_get_softc(dev); /* Setup TX ring */ error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ DESC_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TX_DESC_SIZE, 1, /* maxsize, nsegs */ TX_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->tx.desc_tag); if (error != 0) { device_printf(dev, "cannot create TX descriptor ring tag\n"); return (error); } error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map); if (error != 0) { device_printf(dev, "cannot allocate TX descriptor ring\n"); return (error); } error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map, sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb, &sc->tx.desc_ring_paddr, 0); if (error != 0) { device_printf(dev, "cannot load TX descriptor ring\n"); return (error); } for (i = 0; i < TX_DESC_COUNT; i++) sc->tx.desc_ring[i].next = htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i))); error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, TX_MAX_SEGS, /* maxsize, nsegs */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->tx.buf_tag); if (error != 0) { device_printf(dev, "cannot create TX buffer tag\n"); return (error); } sc->tx.queued = 0; for (i = 0; i < TX_DESC_COUNT; i++) { error = bus_dmamap_create(sc->tx.buf_tag, 0, &sc->tx.buf_map[i].map); if (error != 0) { device_printf(dev, "cannot create TX buffer map\n"); return (error); } } /* Setup RX ring */ error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ DESC_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ RX_DESC_SIZE, 1, /* maxsize, nsegs */ RX_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rx.desc_tag); if (error != 0) { device_printf(dev, "cannot create RX descriptor ring tag\n"); return (error); } error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map); if (error != 0) { device_printf(dev, "cannot allocate RX descriptor ring\n"); return (error); } error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map, sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb, &sc->rx.desc_ring_paddr, 0); if (error != 0) { device_printf(dev, "cannot load RX descriptor ring\n"); return (error); } error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize, nsegs */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rx.buf_tag); if (error != 0) { device_printf(dev, "cannot create RX buffer tag\n"); return (error); } error = bus_dmamap_create(sc->rx.buf_tag, 0, &sc->rx.buf_spare_map); if (error != 0) { device_printf(dev, "cannot create RX buffer spare map\n"); return (error); } for (i = 0; i < RX_DESC_COUNT; i++) { sc->rx.desc_ring[i].next = htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(i))); error = bus_dmamap_create(sc->rx.buf_tag, 0, &sc->rx.buf_map[i].map); if (error != 0) { 
device_printf(dev, "cannot create RX buffer map\n"); return (error); } sc->rx.buf_map[i].mbuf = NULL; error = awg_newbuf_rx(sc, i); if (error != 0) { device_printf(dev, "cannot create RX buffer\n"); return (error); } } bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE); /* Write transmit and receive descriptor base address registers */ WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr); WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr); return (0); } static void awg_dma_start_tx(struct awg_softc *sc) { uint32_t val; AWG_ASSERT_LOCKED(sc); /* Start and run TX DMA */ val = RD4(sc, EMAC_TX_CTL_1); WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START); } /* * if_ functions */ static void awg_start_locked(struct awg_softc *sc) { struct mbuf *m; if_t ifp; int cnt, err; AWG_ASSERT_LOCKED(sc); if (!sc->link) return; ifp = sc->ifp; if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; for (cnt = 0; ; cnt++) { m = if_dequeue(ifp); if (m == NULL) break; err = awg_encap(sc, &m); if (err != 0) { if (err == ENOBUFS) if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); if (m != NULL) if_sendq_prepend(ifp, m); break; } if_bpfmtap(ifp, m); } if (cnt != 0) { bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); awg_dma_start_tx(sc); } } static void awg_start(if_t ifp) { struct awg_softc *sc; sc = if_getsoftc(ifp); AWG_LOCK(sc); awg_start_locked(sc); AWG_UNLOCK(sc); } static void awg_init_locked(struct awg_softc *sc) { struct mii_data *mii; if_t ifp; mii = device_get_softc(sc->miibus); ifp = sc->ifp; AWG_ASSERT_LOCKED(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; awg_setup_rxfilter(sc); awg_setup_core(sc); awg_enable_mac(sc, true); awg_init_dma(sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); mii_mediachg(mii); callout_reset(&sc->stat_ch, hz, awg_tick, sc); } static void awg_init(void *softc) { struct awg_softc *sc; sc = softc; AWG_LOCK(sc); awg_init_locked(sc); AWG_UNLOCK(sc); } static void awg_stop(struct awg_softc *sc) { if_t ifp; uint32_t val; int i; AWG_ASSERT_LOCKED(sc); ifp = sc->ifp; callout_stop(&sc->stat_ch); awg_stop_dma(sc); awg_enable_mac(sc, false); sc->link = 0; /* Finish handling transmitted buffers */ awg_txeof(sc); /* Release any untransmitted buffers. 
*/ for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) { val = le32toh(sc->tx.desc_ring[i].status); if ((val & TX_DESC_CTL) != 0) break; awg_clean_txbuf(sc, i); } sc->tx.next = i; for (; sc->tx.queued > 0; i = TX_NEXT(i)) { sc->tx.desc_ring[i].status = 0; awg_clean_txbuf(sc, i); } sc->tx.cur = sc->tx.next; bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Setup RX buffers for reuse */ bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = sc->rx.cur; ; i = RX_NEXT(i)) { val = le32toh(sc->rx.desc_ring[i].status); if ((val & RX_DESC_CTL) != 0) break; awg_reuse_rxdesc(sc, i); } sc->rx.cur = i; bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } static int awg_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct awg_softc *sc; struct mii_data *mii; struct ifreq *ifr; int flags, mask, error; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus); ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFFLAGS: AWG_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = if_getflags(ifp) ^ sc->if_flags; if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) awg_setup_rxfilter(sc); } else awg_init_locked(sc); } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) awg_stop(sc); } sc->if_flags = if_getflags(ifp); AWG_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { AWG_LOCK(sc); awg_setup_rxfilter(sc); AWG_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { error = ether_poll_register(awg_poll, ifp); if (error != 0) break; AWG_LOCK(sc); awg_disable_dma_intr(sc); if_setcapenablebit(ifp, IFCAP_POLLING, 0); AWG_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); AWG_LOCK(sc); awg_enable_dma_intr(sc); if_setcapenablebit(ifp, 0, IFCAP_POLLING); AWG_UNLOCK(sc); } } #endif if (mask & IFCAP_VLAN_MTU) if_togglecapenable(ifp, IFCAP_VLAN_MTU); if (mask & IFCAP_RXCSUM) if_togglecapenable(ifp, IFCAP_RXCSUM); if (mask & IFCAP_TXCSUM) if_togglecapenable(ifp, IFCAP_TXCSUM); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0); else if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } /* * Interrupts functions */ static int awg_rxintr(struct awg_softc *sc) { if_t ifp; struct mbuf *m, *mh, *mt; int error, index, len, cnt, npkt; uint32_t status; ifp = sc->ifp; mh = mt = NULL; cnt = 0; npkt = 0; bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (index = sc->rx.cur; ; index = RX_NEXT(index)) { status = le32toh(sc->rx.desc_ring[index].status); if ((status & RX_DESC_CTL) != 0) break; len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT; if (len == 0) { if ((status & (RX_NO_ENOUGH_BUF_ERR | RX_OVERFLOW_ERR)) != 0) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); awg_reuse_rxdesc(sc, index); continue; } m = sc->rx.buf_map[index].mbuf; error = awg_newbuf_rx(sc, index); if (error != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); awg_reuse_rxdesc(sc, index); continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; 
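/*
 * The block below finishes RX processing for one frame: when
 * IFCAP_RXCSUM is enabled and the descriptor status has RX_FRM_TYPE
 * set, a clear RX_HEADER_ERR bit is reported as CSUM_IP_VALID and a
 * clear RX_PAYLOAD_ERR bit as CSUM_DATA_VALID | CSUM_PSEUDO_HDR with
 * csum_data forced to 0xffff. Frames are then chained through
 * m_nextpkt and handed to if_input() in batches of at most
 * awg_rx_batch, with the softc lock dropped around the call because
 * the stack may re-enter the driver (e.g. to send a reply).
 */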
m->m_len = len; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 && (status & RX_FRM_TYPE) != 0) { m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; if ((status & RX_HEADER_ERR) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((status & RX_PAYLOAD_ERR) == 0) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } m->m_nextpkt = NULL; if (mh == NULL) mh = m; else mt->m_nextpkt = m; mt = m; ++cnt; ++npkt; if (cnt == awg_rx_batch) { AWG_UNLOCK(sc); if_input(ifp, mh); AWG_LOCK(sc); mh = mt = NULL; cnt = 0; } } if (index != sc->rx.cur) { bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (mh != NULL) { AWG_UNLOCK(sc); if_input(ifp, mh); AWG_LOCK(sc); } sc->rx.cur = index; return (npkt); } static void awg_txeof(struct awg_softc *sc) { struct emac_desc *desc; uint32_t status, size; if_t ifp; int i, prog; AWG_ASSERT_LOCKED(sc); bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); ifp = sc->ifp; prog = 0; for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) { desc = &sc->tx.desc_ring[i]; status = le32toh(desc->status); if ((status & TX_DESC_CTL) != 0) break; size = le32toh(desc->size); if (size & TX_LAST_DESC) { if ((status & (TX_HEADER_ERR | TX_PAYLOAD_ERR)) != 0) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } prog++; awg_clean_txbuf(sc, i); } if (prog > 0) { sc->tx.next = i; if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } } static void awg_intr(void *arg) { struct awg_softc *sc; uint32_t val; sc = arg; AWG_LOCK(sc); val = RD4(sc, EMAC_INT_STA); WR4(sc, EMAC_INT_STA, val); if (val & RX_INT) awg_rxintr(sc); if (val & TX_INT) awg_txeof(sc); if (val & (TX_INT | TX_BUF_UA_INT)) { if (!if_sendq_empty(sc->ifp)) awg_start_locked(sc); } AWG_UNLOCK(sc); } #ifdef DEVICE_POLLING static int awg_poll(if_t ifp, enum poll_cmd cmd, int count) { struct awg_softc *sc; uint32_t val; int rx_npkts; sc = if_getsoftc(ifp); rx_npkts = 0; AWG_LOCK(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { AWG_UNLOCK(sc); return (0); } rx_npkts = awg_rxintr(sc); awg_txeof(sc); if (!if_sendq_empty(ifp)) awg_start_locked(sc); if (cmd == POLL_AND_CHECK_STATUS) { val = RD4(sc, EMAC_INT_STA); if (val != 0) WR4(sc, EMAC_INT_STA, val); } AWG_UNLOCK(sc); return (rx_npkts); } #endif /* * syscon functions */ static uint32_t syscon_read_emac_clk_reg(device_t dev) { struct awg_softc *sc; sc = device_get_softc(dev); if (sc->syscon != NULL) return (SYSCON_READ_4(sc->syscon, EMAC_CLK_REG)); else if (sc->res[_RES_SYSCON] != NULL) return (bus_read_4(sc->res[_RES_SYSCON], 0)); return (0); } static void syscon_write_emac_clk_reg(device_t dev, uint32_t val) { struct awg_softc *sc; sc = device_get_softc(dev); if (sc->syscon != NULL) SYSCON_WRITE_4(sc->syscon, EMAC_CLK_REG, val); else if (sc->res[_RES_SYSCON] != NULL) bus_write_4(sc->res[_RES_SYSCON], 0, val); } /* * PHY functions */ static phandle_t awg_get_phy_node(device_t dev) { phandle_t node; pcell_t phy_handle; node = ofw_bus_get_node(dev); if (OF_getencprop(node, "phy-handle", (void *)&phy_handle, sizeof(phy_handle)) <= 0) return (0); return (OF_node_from_xref(phy_handle)); } static bool awg_has_internal_phy(device_t dev) { phandle_t node, phy_node; node = ofw_bus_get_node(dev); /* Legacy binding */ if (OF_hasprop(node, "allwinner,use-internal-phy")) return (true); phy_node = awg_get_phy_node(dev); return (phy_node != 0 && ofw_bus_node_is_compatible(OF_parent(phy_node), 
"allwinner,sun8i-h3-mdio-internal") != 0); } static int awg_parse_delay(device_t dev, uint32_t *tx_delay, uint32_t *rx_delay) { phandle_t node; uint32_t delay; if (tx_delay == NULL || rx_delay == NULL) return (EINVAL); *tx_delay = *rx_delay = 0; node = ofw_bus_get_node(dev); if (OF_getencprop(node, "tx-delay", &delay, sizeof(delay)) >= 0) *tx_delay = delay; else if (OF_getencprop(node, "allwinner,tx-delay-ps", &delay, sizeof(delay)) >= 0) { if ((delay % 100) != 0) { device_printf(dev, "tx-delay-ps is not a multiple of 100\n"); return (EDOM); } *tx_delay = delay / 100; } if (*tx_delay > 7) { device_printf(dev, "tx-delay out of range\n"); return (ERANGE); } if (OF_getencprop(node, "rx-delay", &delay, sizeof(delay)) >= 0) *rx_delay = delay; else if (OF_getencprop(node, "allwinner,rx-delay-ps", &delay, sizeof(delay)) >= 0) { if ((delay % 100) != 0) { device_printf(dev, "rx-delay-ps is not within documented domain\n"); return (EDOM); } *rx_delay = delay / 100; } if (*rx_delay > 31) { device_printf(dev, "rx-delay out of range\n"); return (ERANGE); } return (0); } static int awg_setup_phy(device_t dev) { struct awg_softc *sc; clk_t clk_tx, clk_tx_parent; const char *tx_parent_name; char *phy_type; phandle_t node; uint32_t reg, tx_delay, rx_delay; int error; bool use_syscon; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); use_syscon = false; if (OF_getprop_alloc(node, "phy-mode", (void **)&phy_type) == 0) return (0); if (sc->syscon != NULL || sc->res[_RES_SYSCON] != NULL) use_syscon = true; if (bootverbose) device_printf(dev, "PHY type: %s, conf mode: %s\n", phy_type, use_syscon ? "reg" : "clk"); if (use_syscon) { /* * Abstract away writing to syscon for devices like the pine64. * For the pine64, we get dtb from U-Boot and it still uses the * legacy setup of specifying syscon register in emac node * rather than as its own node and using an xref in emac. * These abstractions can go away once U-Boot dts is up-to-date. */ reg = syscon_read_emac_clk_reg(dev); reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN); if (strncmp(phy_type, "rgmii", 5) == 0) reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII; else if (strcmp(phy_type, "rmii") == 0) reg |= EMAC_CLK_RMII_EN; else reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII; /* * Fail attach if we fail to parse either of the delay * parameters. If we don't have the proper delay to write to * syscon, then awg likely won't function properly anyways. * Lack of delay is not an error! */ error = awg_parse_delay(dev, &tx_delay, &rx_delay); if (error != 0) goto fail; /* Default to 0 and we'll increase it if we need to. 
*/ reg &= ~(EMAC_CLK_ETXDC | EMAC_CLK_ERXDC); if (tx_delay > 0) reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT); if (rx_delay > 0) reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT); if (sc->type == EMAC_H3) { if (awg_has_internal_phy(dev)) { reg |= EMAC_CLK_EPHY_SELECT; reg &= ~EMAC_CLK_EPHY_SHUTDOWN; if (OF_hasprop(node, "allwinner,leds-active-low")) reg |= EMAC_CLK_EPHY_LED_POL; else reg &= ~EMAC_CLK_EPHY_LED_POL; /* Set internal PHY addr to 1 */ reg &= ~EMAC_CLK_EPHY_ADDR; reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT); } else { reg &= ~EMAC_CLK_EPHY_SELECT; } } if (bootverbose) device_printf(dev, "EMAC clock: 0x%08x\n", reg); syscon_write_emac_clk_reg(dev, reg); } else { if (strncmp(phy_type, "rgmii", 5) == 0) tx_parent_name = "emac_int_tx"; else tx_parent_name = "mii_phy_tx"; /* Get the TX clock */ error = clk_get_by_ofw_name(dev, 0, "tx", &clk_tx); if (error != 0) { device_printf(dev, "cannot get tx clock\n"); goto fail; } /* Find the desired parent clock based on phy-mode property */ error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent); if (error != 0) { device_printf(dev, "cannot get clock '%s'\n", tx_parent_name); goto fail; } /* Set TX clock parent */ error = clk_set_parent_by_clk(clk_tx, clk_tx_parent); if (error != 0) { device_printf(dev, "cannot set tx clock parent\n"); goto fail; } /* Enable TX clock */ error = clk_enable(clk_tx); if (error != 0) { device_printf(dev, "cannot enable tx clock\n"); goto fail; } } error = 0; fail: OF_prop_free(phy_type); return (error); } static int awg_setup_extres(device_t dev) { struct awg_softc *sc; phandle_t node, phy_node; hwreset_t rst_ahb, rst_ephy; clk_t clk_ahb, clk_ephy; regulator_t reg; uint64_t freq; int error, div; sc = device_get_softc(dev); rst_ahb = rst_ephy = NULL; clk_ahb = clk_ephy = NULL; reg = NULL; node = ofw_bus_get_node(dev); phy_node = awg_get_phy_node(dev); if (phy_node == 0 && OF_hasprop(node, "phy-handle")) { error = ENXIO; device_printf(dev, "cannot get phy handle\n"); goto fail; } /* Get AHB clock and reset resources */ error = hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst_ahb); if (error != 0) error = hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb); if (error != 0) { device_printf(dev, "cannot get ahb reset\n"); goto fail; } if (hwreset_get_by_ofw_name(dev, 0, "ephy", &rst_ephy) != 0) if (phy_node == 0 || hwreset_get_by_ofw_idx(dev, phy_node, 0, &rst_ephy) != 0) rst_ephy = NULL; error = clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk_ahb); if (error != 0) error = clk_get_by_ofw_name(dev, 0, "ahb", &clk_ahb); if (error != 0) { device_printf(dev, "cannot get ahb clock\n"); goto fail; } if (clk_get_by_ofw_name(dev, 0, "ephy", &clk_ephy) != 0) if (phy_node == 0 || clk_get_by_ofw_index(dev, phy_node, 0, &clk_ephy) != 0) clk_ephy = NULL; if (OF_hasprop(node, "syscon") && syscon_get_by_ofw_property(dev, node, "syscon", &sc->syscon) != 0) { device_printf(dev, "cannot get syscon driver handle\n"); goto fail; } /* Configure PHY for MII or RGMII mode */ if (awg_setup_phy(dev) != 0) goto fail; /* Enable clocks */ error = clk_enable(clk_ahb); if (error != 0) { device_printf(dev, "cannot enable ahb clock\n"); goto fail; } if (clk_ephy != NULL) { error = clk_enable(clk_ephy); if (error != 0) { device_printf(dev, "cannot enable ephy clock\n"); goto fail; } } /* De-assert reset */ error = hwreset_deassert(rst_ahb); if (error != 0) { device_printf(dev, "cannot de-assert ahb reset\n"); goto fail; } if (rst_ephy != NULL) { /* * The ephy reset is left de-asserted by U-Boot. 
Assert it * here to make sure that we're in a known good state going * into the PHY reset. */ hwreset_assert(rst_ephy); error = hwreset_deassert(rst_ephy); if (error != 0) { device_printf(dev, "cannot de-assert ephy reset\n"); goto fail; } } /* Enable PHY regulator if applicable */ if (regulator_get_by_ofw_property(dev, 0, "phy-supply", &reg) == 0) { error = regulator_enable(reg); if (error != 0) { device_printf(dev, "cannot enable PHY regulator\n"); goto fail; } } /* Determine MDC clock divide ratio based on AHB clock */ error = clk_get_freq(clk_ahb, &freq); if (error != 0) { device_printf(dev, "cannot get AHB clock frequency\n"); goto fail; } div = freq / MDIO_FREQ; if (div <= 16) sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16; else if (div <= 32) sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32; else if (div <= 64) sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64; else if (div <= 128) sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128; else { device_printf(dev, "cannot determine MDC clock divide ratio\n"); error = ENXIO; goto fail; } if (bootverbose) device_printf(dev, "AHB frequency %ju Hz, MDC div: 0x%x\n", (uintmax_t)freq, sc->mdc_div_ratio_m); return (0); fail: if (reg != NULL) regulator_release(reg); if (clk_ephy != NULL) clk_release(clk_ephy); if (clk_ahb != NULL) clk_release(clk_ahb); if (rst_ephy != NULL) hwreset_release(rst_ephy); if (rst_ahb != NULL) hwreset_release(rst_ahb); return (error); } #ifdef AWG_DEBUG static void awg_dump_regs(device_t dev) { static const struct { const char *name; u_int reg; } regs[] = { { "BASIC_CTL_0", EMAC_BASIC_CTL_0 }, { "BASIC_CTL_1", EMAC_BASIC_CTL_1 }, { "INT_STA", EMAC_INT_STA }, { "INT_EN", EMAC_INT_EN }, { "TX_CTL_0", EMAC_TX_CTL_0 }, { "TX_CTL_1", EMAC_TX_CTL_1 }, { "TX_FLOW_CTL", EMAC_TX_FLOW_CTL }, { "TX_DMA_LIST", EMAC_TX_DMA_LIST }, { "RX_CTL_0", EMAC_RX_CTL_0 }, { "RX_CTL_1", EMAC_RX_CTL_1 }, { "RX_DMA_LIST", EMAC_RX_DMA_LIST }, { "RX_FRM_FLT", EMAC_RX_FRM_FLT }, { "RX_HASH_0", EMAC_RX_HASH_0 }, { "RX_HASH_1", EMAC_RX_HASH_1 }, { "MII_CMD", EMAC_MII_CMD }, { "ADDR_HIGH0", EMAC_ADDR_HIGH(0) }, { "ADDR_LOW0", EMAC_ADDR_LOW(0) }, { "TX_DMA_STA", EMAC_TX_DMA_STA }, { "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC }, { "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF }, { "RX_DMA_STA", EMAC_RX_DMA_STA }, { "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC }, { "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF }, { "RGMII_STA", EMAC_RGMII_STA }, }; struct awg_softc *sc; unsigned int n; sc = device_get_softc(dev); for (n = 0; n < nitems(regs); n++) device_printf(dev, " %-20s %08x\n", regs[n].name, RD4(sc, regs[n].reg)); } #endif #define GPIO_ACTIVE_LOW 1 static int awg_phy_reset(device_t dev) { pcell_t gpio_prop[4], delay_prop[3]; phandle_t node, gpio_node; device_t gpio; uint32_t pin, flags; uint32_t pin_value; node = ofw_bus_get_node(dev); if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop, sizeof(gpio_prop)) <= 0) return (0); if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop, sizeof(delay_prop)) <= 0) return (ENXIO); gpio_node = OF_node_from_xref(gpio_prop[0]); if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) return (ENXIO); if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1, gpio_prop + 1, &pin, &flags) != 0) return (ENXIO); pin_value = GPIO_PIN_LOW; if (OF_hasprop(node, "allwinner,reset-active-low")) pin_value = GPIO_PIN_HIGH; if (flags & GPIO_ACTIVE_LOW) pin_value = !pin_value; GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT); GPIO_PIN_SET(gpio, pin, pin_value); DELAY(delay_prop[0]); GPIO_PIN_SET(gpio, pin, !pin_value); DELAY(delay_prop[1]); GPIO_PIN_SET(gpio, pin, 
pin_value); DELAY(delay_prop[2]); return (0); } static int awg_reset(device_t dev) { struct awg_softc *sc; int retry; sc = device_get_softc(dev); /* Reset PHY if necessary */ if (awg_phy_reset(dev) != 0) { device_printf(dev, "failed to reset PHY\n"); return (ENXIO); } /* Soft reset all registers and logic */ WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST); /* Wait for soft reset bit to self-clear */ for (retry = SOFT_RST_RETRY; retry > 0; retry--) { if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0) break; DELAY(10); } if (retry == 0) { device_printf(dev, "soft reset timed out\n"); #ifdef AWG_DEBUG awg_dump_regs(dev); #endif return (ETIMEDOUT); } return (0); } /* * Stats */ static void awg_tick(void *softc) { struct awg_softc *sc; struct mii_data *mii; if_t ifp; int link; sc = softc; ifp = sc->ifp; mii = device_get_softc(sc->miibus); AWG_ASSERT_LOCKED(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; link = sc->link; mii_tick(mii); if (sc->link && !link) awg_start_locked(sc); callout_reset(&sc->stat_ch, hz, awg_tick, sc); } /* * Probe/attach functions */ static int awg_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner Gigabit Ethernet"); return (BUS_PROBE_DEFAULT); } static int awg_attach(device_t dev) { uint8_t eaddr[ETHER_ADDR_LEN]; struct awg_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->stat_ch, &sc->mtx, 0); /* Setup clocks and regulators */ error = awg_setup_extres(dev); if (error != 0) return (error); /* Read MAC address before resetting the chip */ awg_get_eaddr(dev, eaddr); /* Soft reset EMAC core */ error = awg_reset(dev); if (error != 0) return (error); /* Setup DMA descriptors */ error = awg_setup_dma(dev); if (error != 0) return (error); /* Install interrupt handler */ error = bus_setup_intr(dev, sc->res[_RES_IRQ], INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih); if (error != 0) { device_printf(dev, "cannot setup interrupt handler\n"); return (error); } /* Setup ethernet interface */ sc->ifp = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp, sc); if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setstartfn(sc->ifp, awg_start); if_setioctlfn(sc->ifp, awg_ioctl); if_setinitfn(sc->ifp, awg_init); if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1); if_setsendqready(sc->ifp); if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP); if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM); if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp)); #ifdef DEVICE_POLLING if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0); #endif /* Attach MII driver */ error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change, awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(dev, "cannot attach PHY\n"); return (error); } /* Attach ethernet interface */ ether_ifattach(sc->ifp, eaddr); return (0); } static device_method_t awg_methods[] = { /* Device interface */ DEVMETHOD(device_probe, awg_probe), DEVMETHOD(device_attach, awg_attach), /* MII interface */ DEVMETHOD(miibus_readreg, awg_miibus_readreg), 
DEVMETHOD(miibus_writereg, awg_miibus_writereg), DEVMETHOD(miibus_statchg, awg_miibus_statchg), DEVMETHOD_END }; static driver_t awg_driver = { "awg", awg_methods, sizeof(struct awg_softc), }; DRIVER_MODULE(awg, simplebus, awg_driver, 0, 0); DRIVER_MODULE(miibus, awg, miibus_driver, 0, 0); MODULE_DEPEND(awg, ether, 1, 1, 1); MODULE_DEPEND(awg, miibus, 1, 1, 1); MODULE_DEPEND(awg, aw_sid, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/ath/if_ath_ioctl.c b/sys/dev/ath/if_ath_ioctl.c index f58742d6e5fb..cec769d615d7 100644 --- a/sys/dev/ath/if_ath_ioctl.c +++ b/sys/dev/ath/if_ath_ioctl.c @@ -1,307 +1,307 @@ /*- * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" /* * This is needed for register operations which are performed * by the driver - eg, calls to ath_hal_gettsf32(). * * It's also required for any AH_DEBUG checks in here, eg the * module dependencies. */ #include "opt_ah.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for mp_ncpus */ #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include /* * ioctl() related pieces. * * Some subsystems (eg spectral, dfs) have their own ioctl method which * we call. */ /* * Fetch the rate control statistics for the given node. 
*/ static int ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs) { struct ath_node *an; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; int error = 0; /* Perform a lookup on the given node */ ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr); if (ni == NULL) { error = EINVAL; goto bad; } /* Lock the ath_node */ an = ATH_NODE(ni); ATH_NODE_LOCK(an); /* Fetch the rate control stats for this node */ error = ath_rate_fetch_node_stats(sc, an, rs); /* No matter what happens here, just drop through */ /* Unlock the ath_node */ ATH_NODE_UNLOCK(an); /* Unref the node */ ieee80211_node_decref(ni); bad: return (error); } #ifdef ATH_DIAGAPI /* * Diagnostic interface to the HAL. This is used by various * tools to do things like retrieve register contents for * debugging. The mechanism is intentionally opaque so that * it can change frequently w/o concern for compatibility. */ static int ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) { struct ath_hal *ah = sc->sc_ah; u_int id = ad->ad_id & ATH_DIAG_ID; void *indata = NULL; void *outdata = NULL; u_int32_t insize = ad->ad_in_size; u_int32_t outsize = ad->ad_out_size; int error = 0; if (ad->ad_id & ATH_DIAG_IN) { /* * Copy in data. */ indata = malloc(insize, M_TEMP, M_NOWAIT); if (indata == NULL) { error = ENOMEM; goto bad; } error = copyin(ad->ad_in_data, indata, insize); if (error) goto bad; } if (ad->ad_id & ATH_DIAG_DYN) { /* * Allocate a buffer for the results (otherwise the HAL * returns a pointer to a buffer where we can read the * results). Note that we depend on the HAL leaving this * pointer for us to use below in reclaiming the buffer; * may want to be more defensive. */ outdata = malloc(outsize, M_TEMP, M_NOWAIT | M_ZERO); if (outdata == NULL) { error = ENOMEM; goto bad; } } ATH_LOCK(sc); if (id != HAL_DIAG_REGS) ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { if (outsize < ad->ad_out_size) ad->ad_out_size = outsize; if (outdata != NULL) error = copyout(outdata, ad->ad_out_data, ad->ad_out_size); } else { error = EINVAL; } ATH_LOCK(sc); if (id != HAL_DIAG_REGS) ath_power_restore_power_state(sc); ATH_UNLOCK(sc); bad: if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) free(indata, M_TEMP); if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) free(outdata, M_TEMP); return error; } #endif /* ATH_DIAGAPI */ int ath_ioctl(struct ieee80211com *ic, u_long cmd, void *data) { struct ifreq *ifr = data; struct ath_softc *sc = ic->ic_softc; switch (cmd) { case SIOCGATHSTATS: { struct ieee80211vap *vap; - struct ifnet *ifp; + if_t ifp; const HAL_RATE_TABLE *rt; /* NB: embed these numbers to get a consistent view */ sc->sc_stats.ast_tx_packets = 0; sc->sc_stats.ast_rx_packets = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { ifp = vap->iv_ifp; sc->sc_stats.ast_tx_packets += if_get_counter(ifp, IFCOUNTER_OPACKETS); sc->sc_stats.ast_rx_packets += if_get_counter(ifp, IFCOUNTER_IPACKETS); } sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); #ifdef IEEE80211_SUPPORT_TDMA sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); #endif rt = sc->sc_currates; sc->sc_stats.ast_tx_rate = rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; return copyout(&sc->sc_stats, 
ifr_data_get_ptr(ifr), sizeof (sc->sc_stats)); } case SIOCGATHAGSTATS: return copyout(&sc->sc_aggr_stats, ifr_data_get_ptr(ifr), sizeof (sc->sc_aggr_stats)); case SIOCZATHSTATS: { int error; error = priv_check(curthread, PRIV_DRIVER); if (error == 0) { memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); memset(&sc->sc_aggr_stats, 0, sizeof(sc->sc_aggr_stats)); memset(&sc->sc_intr_stats, 0, sizeof(sc->sc_intr_stats)); } return (error); } #ifdef ATH_DIAGAPI case SIOCGATHDIAG: return (ath_ioctl_diag(sc, data)); case SIOCGATHPHYERR: return (ath_ioctl_phyerr(sc, data)); #endif case SIOCGATHSPECTRAL: return (ath_ioctl_spectral(sc, data)); case SIOCGATHNODERATESTATS: return (ath_ioctl_ratestats(sc, data)); case SIOCGATHBTCOEX: return (ath_btcoex_ioctl(sc, data)); default: /* * This signals the net80211 layer that we didn't handle this * ioctl. */ return (ENOTTY); } } diff --git a/sys/dev/ath/if_ath_led.c b/sys/dev/ath/if_ath_led.c index 8b4ac821f2be..e8da930f7217 100644 --- a/sys/dev/ath/if_ath_led.c +++ b/sys/dev/ath/if_ath_led.c @@ -1,200 +1,201 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" /* * This is needed for register operations which are performed * by the driver - eg, calls to ath_hal_gettsf32(). */ #include "opt_ah.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for mp_ncpus */ #include #include #include #include #include #include #include #include +#include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #include #include /* * Software LED driver routines. */ /* * XXX TODO: move the LED sysctls here. 
*/ /* * Configure the hardware for software and LED blinking. * The user may choose to configure part of each, depending upon the * NIC being used. * * This requires the configuration to be set before this function * is called. */ void ath_led_config(struct ath_softc *sc) { ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); /* Software LED blinking - GPIO controlled LED */ if (sc->sc_softled) { ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin, HAL_GPIO_OUTPUT_MUX_AS_OUTPUT); ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); } /* Hardware LED blinking - MAC controlled LED */ if (sc->sc_hardled) { /* * Only enable each LED if required. * * Some NICs only have one LED connected; others may * have GPIO1/GPIO2 connected to other hardware. */ if (sc->sc_led_pwr_pin > 0) ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_led_pwr_pin, HAL_GPIO_OUTPUT_MUX_MAC_POWER_LED); if (sc->sc_led_net_pin > 0) ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_led_net_pin, HAL_GPIO_OUTPUT_MUX_MAC_NETWORK_LED); } ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } static void ath_led_done(void *arg) { struct ath_softc *sc = arg; sc->sc_blinking = 0; } /* * Turn the LED off: flip the pin and then set a timer so no * update will happen for the specified duration. */ static void ath_led_off(void *arg) { struct ath_softc *sc = arg; ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc); } /* * Blink the LED according to the specified on/off times. */ static void ath_led_blink(struct ath_softc *sc, int on, int off) { DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off); ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon); sc->sc_blinking = 1; sc->sc_ledoff = off; callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc); } void ath_led_event(struct ath_softc *sc, int rix) { sc->sc_ledevent = ticks; /* time of last event */ if (sc->sc_blinking) /* don't interrupt active blink */ return; ath_led_blink(sc, sc->sc_hwmap[rix].ledon, sc->sc_hwmap[rix].ledoff); } diff --git a/sys/dev/ath/if_ath_misc.h b/sys/dev/ath/if_ath_misc.h index b108c29bab4b..c19e028e64bd 100644 --- a/sys/dev/ath/if_ath_misc.h +++ b/sys/dev/ath/if_ath_misc.h @@ -1,154 +1,154 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #ifndef __IF_ATH_MISC_H__ #define __IF_ATH_MISC_H__ /* * This is where definitions for "public things" in if_ath.c * will go for the time being. * * Anything in here should eventually be moved out of if_ath.c * and into something else. */ extern int ath_rxbuf; extern int ath_txbuf; extern int ath_txbuf_mgmt; extern int ath_tx_findrix(const struct ath_softc *sc, uint8_t rate); extern struct ath_buf * ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype); extern struct ath_buf * _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype); extern struct ath_buf * ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf); /* XXX change this to NULL the buffer pointer? */ extern void ath_freebuf(struct ath_softc *sc, struct ath_buf *bf); extern void ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf); extern void ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf); extern int ath_reset(struct ath_softc *, ATH_RESET_TYPE, HAL_RESET_TYPE ah_reset_type); extern void ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail); extern void ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, int rc_framelen, int nframes, int nbad); extern int ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs); extern void ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status); extern void ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq); extern void ath_txqmove(struct ath_txq *dst, struct ath_txq *src); extern void ath_mode_init(struct ath_softc *sc); extern void ath_setdefantenna(struct ath_softc *sc, u_int antenna); extern void ath_setslottime(struct ath_softc *sc); extern void ath_legacy_attach_comp_func(struct ath_softc *sc); extern void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq); extern void ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type); extern void ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf); extern int ath_stoptxdma(struct ath_softc *sc); extern void ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni, int enable); /* * This is only here so that the RX proc function can call it. * It's very likely that the "start TX after RX" call should be * done via something in if_ath.c, moving "rx tasklet" into * if_ath.c and do the ath_start() call there. Once that's done, * we can kill this. */ -extern void ath_start(struct ifnet *ifp); +extern void ath_start(if_t ifp); extern void ath_start_task(void *arg, int npending); extern void ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq); /* * Power state tracking. 
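/*
 * The ath_start() change above is part of the tree-wide conversion this
 * commit performs: direct 'struct ifnet' member access is replaced by
 * opaque if_t handles plus accessor calls (if_getmtu() and friends).  A
 * minimal standalone sketch of the pattern follows; 'nic'/'nic_getmtu'
 * are illustrative names, not the real ifnet API.  Consumers see only a
 * forward-declared handle, so the structure layout can change without
 * recompiling them.
 */
#include <stdio.h>

struct nic;                              /* opaque to consumers */
typedef struct nic *nic_t;

struct nic { unsigned mtu; };            /* layout private to one .c file */

static unsigned nic_getmtu(nic_t n) { return n->mtu; }
static void nic_setmtu(nic_t n, unsigned mtu) { n->mtu = mtu; }

int main(void)
{
	struct nic n;
	nic_t h = &n;

	nic_setmtu(h, 1500);
	printf("mtu %u\n", nic_getmtu(h));   /* analogous to if_getmtu(ifp) */
	return 0;
}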
*/ extern void _ath_power_setpower(struct ath_softc *sc, int power_state, int selfgen, const char *file, int line); extern void _ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line); extern void _ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line); extern void _ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line); #define ath_power_setpower(sc, ps, sg) _ath_power_setpower(sc, ps, sg, \ __FILE__, __LINE__) #define ath_power_setselfgen(sc, ps) _ath_power_set_selfgen(sc, ps, \ __FILE__, __LINE__) #define ath_power_set_power_state(sc, ps) \ _ath_power_set_power_state(sc, ps, __FILE__, __LINE__) #define ath_power_restore_power_state(sc) \ _ath_power_restore_power_state(sc, __FILE__, __LINE__) /* * Kick the frame TX task. */ static inline void ath_tx_kick(struct ath_softc *sc) { /* XXX NULL for now */ } /* * Kick the software TX queue task. */ static inline void ath_tx_swq_kick(struct ath_softc *sc) { taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); } #endif diff --git a/sys/dev/ath/if_ath_tdma.c b/sys/dev/ath/if_ath_tdma.c index f0900435c6d6..420b4a2b6d68 100644 --- a/sys/dev/ath/if_ath_tdma.c +++ b/sys/dev/ath/if_ath_tdma.c @@ -1,690 +1,690 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" /* * This is needed for register operations which are performed * by the driver - eg, calls to ath_hal_gettsf32(). * * It's also required for any AH_DEBUG checks in here, eg the * module dependencies. 
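/*
 * The ath_power_* macros above use a common debugging idiom: the public
 * name is a macro that appends __FILE__/__LINE__, so the underscore-
 * prefixed implementation can log the call site of every power state
 * change.  A standalone sketch of the idiom (trace_set/_trace_set are
 * illustrative names):
 */
#include <stdio.h>

static void _trace_set(int v, const char *file, int line)
{
	printf("%s:%d set %d\n", file, line, v);   /* records the caller */
}

#define trace_set(v)	_trace_set((v), __FILE__, __LINE__)

int main(void)
{
	trace_set(42);		/* logs this exact file and line */
	return 0;
}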
*/ #include "opt_ah.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for mp_ncpus */ #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #include #include #include #include #include #include #include #include #include #ifdef ATH_TX99_DIAG #include #endif #ifdef ATH_DEBUG_ALQ #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval); static void ath_tdma_bintvalsetup(struct ath_softc *sc, const struct ieee80211_tdma_state *tdma); #endif /* IEEE80211_SUPPORT_TDMA */ #ifdef IEEE80211_SUPPORT_TDMA static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval) { struct ath_hal *ah = sc->sc_ah; HAL_BEACON_TIMERS bt; bt.bt_intval = bintval | HAL_BEACON_ENA; bt.bt_nexttbtt = nexttbtt; bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep; bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep; bt.bt_nextatim = nexttbtt+1; /* Enables TBTT, DBA, SWBA timers by default */ bt.bt_flags = 0; #if 0 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, "%s: intval=%d (0x%08x) nexttbtt=%u (0x%08x), nextdba=%u (0x%08x), nextswba=%u (0x%08x),nextatim=%u (0x%08x)\n", __func__, bt.bt_intval, bt.bt_intval, bt.bt_nexttbtt, bt.bt_nexttbtt, bt.bt_nextdba, bt.bt_nextdba, bt.bt_nextswba, bt.bt_nextswba, bt.bt_nextatim, bt.bt_nextatim); #endif #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_TDMA_TIMER_SET)) { struct if_ath_alq_tdma_timer_set t; t.bt_intval = htobe32(bt.bt_intval); t.bt_nexttbtt = htobe32(bt.bt_nexttbtt); t.bt_nextdba = htobe32(bt.bt_nextdba); t.bt_nextswba = htobe32(bt.bt_nextswba); t.bt_nextatim = htobe32(bt.bt_nextatim); t.bt_flags = htobe32(bt.bt_flags); t.sc_tdmadbaprep = htobe32(sc->sc_tdmadbaprep); t.sc_tdmaswbaprep = htobe32(sc->sc_tdmaswbaprep); if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_TIMER_SET, sizeof(t), (char *) &t); } #endif DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, "%s: nexttbtt=%u (0x%08x), nexttbtt tsf=%lld (0x%08llx)\n", __func__, bt.bt_nexttbtt, bt.bt_nexttbtt, (long long) ( ((u_int64_t) (bt.bt_nexttbtt)) << 10), (long long) ( ((u_int64_t) (bt.bt_nexttbtt)) << 10)); ath_hal_beaconsettimers(ah, &bt); } /* * Calculate the beacon interval. This is periodic in the * superframe for the bss. We assume each station is configured * identically wrt transmit rate so the guard time we calculate * above will be the same on all stations. Note we need to * factor in the xmit time because the hardware will schedule * a frame for transmit if the start of the frame is within * the burst time. When we get hardware that properly kills * frames in the PCU we can reduce/eliminate the guard time. * * Roundup to 1024 is so we have 1 TU buffer in the guard time * to deal with the granularity of the nexttbtt timer. 11n MAC's * with 1us timer granularity should allow us to reduce/eliminate * this. */ static void ath_tdma_bintvalsetup(struct ath_softc *sc, const struct ieee80211_tdma_state *tdma) { /* copy from vap state (XXX check all vaps have same value?) 
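/*
 * In ath_tdma_settimers() above, nexttbtt is in TU but bt_nextdba and
 * bt_nextswba are shifted left by 3 before the prep lead time is
 * subtracted, i.e. the DBA/SWBA counters appear to run in eighths of a
 * TU here.  A standalone check of that arithmetic; the values are
 * invented for illustration:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nexttbtt = 1000;	/* TU */
	uint32_t dbaprep = 80;		/* lead time, in 1/8 TU units */
	uint32_t nextdba = (nexttbtt << 3) - dbaprep;	/* 1/8 TU units */

	/* 1000 TU == 8000 eighths; DBA fires 80 eighths (10 TU) early. */
	printf("nextdba = %u (== %u TU)\n", nextdba, nextdba / 8);
	return 0;
}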
*/ sc->sc_tdmaslotlen = tdma->tdma_slotlen; sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) * tdma->tdma_slotcnt, 1024); sc->sc_tdmabintval >>= 10; /* TSF -> TU */ if (sc->sc_tdmabintval & 1) sc->sc_tdmabintval++; if (tdma->tdma_slot == 0) { /* * Only slot 0 beacons; other slots respond. */ sc->sc_imask |= HAL_INT_SWBA; sc->sc_tdmaswba = 0; /* beacon immediately */ } else { /* XXX all vaps must be slot 0 or slot !0 */ sc->sc_imask &= ~HAL_INT_SWBA; } } /* * Max 802.11 overhead. This assumes no 4-address frames and * the encapsulation done by ieee80211_encap (llc). We also * include potential crypto overhead. */ #define IEEE80211_MAXOVERHEAD \ (sizeof(struct ieee80211_qosframe) \ + sizeof(struct llc) \ + IEEE80211_ADDR_LEN \ + IEEE80211_WEP_IVLEN \ + IEEE80211_WEP_KIDLEN \ + IEEE80211_WEP_CRCLEN \ + IEEE80211_WEP_MICLEN \ + IEEE80211_CRC_LEN) /* * Setup initially for tdma operation. Start the beacon * timers and enable SWBA if we are slot 0. Otherwise * we wait for slot 0 to arrive so we can sync up before * starting to transmit. */ void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap) { struct ath_hal *ah = sc->sc_ah; struct ieee80211com *ic = &sc->sc_ic; const struct ieee80211_txparam *tp; const struct ieee80211_tdma_state *tdma = NULL; int rix; if (vap == NULL) { vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ if (vap == NULL) { device_printf(sc->sc_dev, "%s: no vaps?\n", __func__); return; } } /* XXX should take a locked ref to iv_bss */ tp = vap->iv_bss->ni_txparms; /* * Calculate the guard time for each slot. This is the * time to send a maximal-size frame according to the * fixed/lowest transmit rate. Note that the interface * mtu does not include the 802.11 overhead so we must * tack that on (ath_hal_computetxtime includes the * preamble and plcp in its calculation). */ tdma = vap->iv_tdma; if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rix = ath_tx_findrix(sc, tp->ucastrate); else rix = ath_tx_findrix(sc, tp->mcastrate); /* * If the chip supports enforcing TxOP on transmission, * we can just delete the guard window. It isn't at all required. 
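/*
 * Worked example of the interval math in ath_tdma_bintvalsetup() above:
 * the superframe length in TSF (microseconds) is rounded up to a whole
 * TU (1024 usec) to cover the nexttbtt timer granularity, converted
 * TSF->TU by '>> 10', then forced even.  The sample numbers are
 * invented for illustration:
 */
#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned slotlen = 10000;	/* usec per slot (illustrative) */
	unsigned guard = 1500;		/* usec of guard time (illustrative) */
	unsigned slotcnt = 2;

	unsigned bintval = ROUNDUP((slotlen + guard) * slotcnt, 1024);
	bintval >>= 10;			/* TSF (usec) -> TU */
	if (bintval & 1)		/* keep the interval even */
		bintval++;

	/* 23000 usec rounds up to 23552, giving 23 TU, bumped to 24. */
	printf("beacon interval = %u TU\n", bintval);
	return 0;
}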
*/ if (sc->sc_hasenforcetxop) { sc->sc_tdmaguard = 0; } else { /* XXX short preamble assumed */ /* XXX non-11n rate assumed */ sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates, - vap->iv_ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE, + if_getmtu(vap->iv_ifp) + IEEE80211_MAXOVERHEAD, rix, AH_TRUE, AH_TRUE); } ath_hal_intrset(ah, 0); ath_beaconq_config(sc); /* setup h/w beacon q */ if (sc->sc_setcca) ath_hal_setcca(ah, AH_FALSE); /* disable CCA */ ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */ ath_tdma_settimers(sc, sc->sc_tdmabintval, sc->sc_tdmabintval | HAL_BEACON_RESET_TSF); sc->sc_syncbeacon = 0; sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER; sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER; ath_hal_intrset(ah, sc->sc_imask); DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u " "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__, tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt, tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval, sc->sc_tdmadbaprep); #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_TDMA_TIMER_CONFIG)) { struct if_ath_alq_tdma_timer_config t; t.tdma_slot = htobe32(tdma->tdma_slot); t.tdma_slotlen = htobe32(tdma->tdma_slotlen); t.tdma_slotcnt = htobe32(tdma->tdma_slotcnt); t.tdma_bintval = htobe32(tdma->tdma_bintval); t.tdma_guard = htobe32(sc->sc_tdmaguard); t.tdma_scbintval = htobe32(sc->sc_tdmabintval); t.tdma_dbaprep = htobe32(sc->sc_tdmadbaprep); if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_TIMER_CONFIG, sizeof(t), (char *) &t); } #endif /* ATH_DEBUG_ALQ */ } /* * Update tdma operation. Called from the 802.11 layer * when a beacon is received from the TDMA station operating * in the slot immediately preceding us in the bss. Use * the rx timestamp for the beacon frame to update our * beacon timers so we follow their schedule. Note that * by using the rx timestamp we implicitly include the * propagation delay in our schedule. * * XXX TODO: since the changes for the AR5416 and later chips * involved changing the TSF/TU calculations, we need to make * sure that various calculations wrap consistently. * * A lot of the problems stemmed from the calculations wrapping * at 65,535 TU. Since a lot of the math is still being done in * TU, please audit it to ensure that when the TU values programmed * into the timers wrap at (2^31)-1 TSF, all the various terms * wrap consistently. */ void ath_tdma_update(struct ieee80211_node *ni, const struct ieee80211_tdma_param *tdma, int changed) { #define TSF_TO_TU(_h,_l) \ ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) #define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; const HAL_RATE_TABLE *rt = sc->sc_currates; u_int64_t tsf, rstamp, nextslot, nexttbtt, nexttbtt_full; u_int32_t txtime, nextslottu; int32_t tudelta, tsfdelta; const struct ath_rx_status *rs; int rix; sc->sc_stats.ast_tdma_update++; /* * Check for and adopt configuration changes. */ if (changed != 0) { const struct ieee80211_tdma_state *ts = vap->iv_tdma; ath_tdma_bintvalsetup(sc, ts); if (changed & TDMA_UPDATE_SLOTLEN) ath_wme_update(ic); DPRINTF(sc, ATH_DEBUG_TDMA, "%s: adopt slot %u slotcnt %u slotlen %u us " "bintval %u TU\n", __func__, ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen, sc->sc_tdmabintval); /* XXX right? 
*/ ath_hal_intrset(ah, sc->sc_imask); /* NB: beacon timers programmed below */ } /* extend rx timestamp to 64 bits */ rs = sc->sc_lastrs; tsf = ath_hal_gettsf64(ah); rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); /* * The rx timestamp is set by the hardware on completing * reception (at the point where the rx descriptor is DMA'd * to the host). To find the start of our next slot we * must adjust this time by the time required to send * the packet just received. */ rix = rt->rateCodeToIndex[rs->rs_rate]; /* * To calculate the packet duration for legacy rates, we * only need the rix and preamble. * * For 11n non-aggregate frames, we also need the channel * width and short/long guard interval. * * For 11n aggregate frames, the required hacks are a little * more subtle. You need to figure out the frame duration * for each frame, including the delimiters. However, when * a frame isn't received successfully, we won't hear it * (unless you enable reception of CRC errored frames), so * your duration calculation is going to be off. * * However, we can assume that the beacon frames won't be * transmitted as aggregate frames, so we should be okay. * Just add a check to ensure that we aren't handed something * bad. * * For ath_hal_pkt_txtime() - for 11n rates, shortPreamble is * actually short guard interval. For legacy rates, * it's short preamble. */ txtime = ath_hal_pkt_txtime(ah, rt, rs->rs_datalen, rix, !! (rs->rs_flags & HAL_RX_2040), (rix & 0x80) ? (! (rs->rs_flags & HAL_RX_GI)) : rt->info[rix].shortPreamble, AH_TRUE); /* NB: << 9 is to cvt to TU and /2 */ nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9); /* * For 802.11n chips: nextslottu needs to be the full TSF space, * not just 0..65535 TU. */ nextslottu = TSF_TO_TU(nextslot>>32, nextslot); /* * Retrieve the hardware NextTBTT in usecs * and calculate the difference between what the * other station thinks and what we have programmed. This * lets us figure out how to adjust our timers to match. The * adjustments are done by pulling the TSF forward and possibly * rewriting the beacon timers. */ /* * The logic here assumes the nexttbtt counter is in TSF * but the pre-11n NICs are in TU. The HAL shifts them * to TSF but there are two important differences: * * + The TU->TSF values have 0's for the low 10 bits, and * + The counter wraps at TU_TO_TSF(HAL_BEACON_PERIOD + 1) for * the pre-11n NICs, but not for the 11n NICs. * * So for now, just make sure the nexttbtt value we use wraps * the same way; once nexttbtt exceeds this value, tsfdelta * ends up becoming very negative and all of the adjustments * get very messed up. */ /* * We need to track the full nexttbtt rather than having it * truncated at HAL_BEACON_PERIOD, as programming the * nexttbtt (and related) registers for the 11n chips is * actually going to take the full 32 bit space, rather than * just 0..65535 TU.
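/*
 * The TSF_TO_TU()/TU_TO_TSF() macros used throughout this function take
 * the 64-bit TSF (microseconds) split across two 32-bit halves; one TU
 * is 1024 usec, so the conversion is a 10-bit shift.  A standalone
 * round-trip check of those exact macro definitions (the sample TSF
 * value is arbitrary):
 */
#include <stdint.h>
#include <stdio.h>

#define TSF_TO_TU(_h, _l) \
	((((uint32_t)(_h)) << 22) | (((uint32_t)(_l)) >> 10))
#define TU_TO_TSF(_tu)	(((uint64_t)(_tu)) << 10)

int main(void)
{
	uint64_t tsf = 123456789012ULL;		/* arbitrary 64-bit TSF */
	uint32_t tu = TSF_TO_TU(tsf >> 32, (uint32_t)tsf);

	/* The round trip truncates to whole TU (multiples of 1024 usec). */
	printf("tsf %llu usec -> %u TU -> %llu usec\n",
	    (unsigned long long)tsf, tu,
	    (unsigned long long)TU_TO_TSF(tu));
	return 0;
}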
nexttbtt_full = ath_hal_getnexttbtt(ah); nexttbtt = nexttbtt_full % (TU_TO_TSF(HAL_BEACON_PERIOD + 1)); tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt); DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, "rs->rstamp %llu rstamp %llu tsf %llu txtime %d, nextslot %llu, " "nextslottu %d, nextslottume %d\n", (unsigned long long) rs->rs_tstamp, (unsigned long long) rstamp, (unsigned long long) tsf, txtime, (unsigned long long) nextslot, nextslottu, TSF_TO_TU(nextslot >> 32, nextslot)); DPRINTF(sc, ATH_DEBUG_TDMA, " beacon tstamp: %llu (0x%016llx)\n", (unsigned long long) le64toh(ni->ni_tstamp.tsf), (unsigned long long) le64toh(ni->ni_tstamp.tsf)); DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, "nexttbtt %llu (0x%08llx) tsfdelta %d avg +%d/-%d\n", (unsigned long long) nexttbtt, (long long) nexttbtt, tsfdelta, TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam)); if (tsfdelta < 0) { TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta); tsfdelta = -tsfdelta % 1024; nextslottu++; } else if (tsfdelta > 0) { TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta); TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); tsfdelta = 1024 - (tsfdelta % 1024); nextslottu++; } else { TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); } tudelta = nextslottu - TSF_TO_TU(nexttbtt_full >> 32, nexttbtt_full); #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_TDMA_BEACON_STATE)) { struct if_ath_alq_tdma_beacon_state t; t.rx_tsf = htobe64(rstamp); t.beacon_tsf = htobe64(le64toh(ni->ni_tstamp.tsf)); t.tsf64 = htobe64(tsf); t.nextslot_tsf = htobe64(nextslot); t.nextslot_tu = htobe32(nextslottu); t.txtime = htobe32(txtime); if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_BEACON_STATE, sizeof(t), (char *) &t); } if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_TDMA_SLOT_CALC)) { struct if_ath_alq_tdma_slot_calc t; t.nexttbtt = htobe64(nexttbtt_full); t.next_slot = htobe64(nextslot); t.tsfdelta = htobe32(tsfdelta); t.avg_plus = htobe32(TDMA_AVG(sc->sc_avgtsfdeltap)); t.avg_minus = htobe32(TDMA_AVG(sc->sc_avgtsfdeltam)); if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_SLOT_CALC, sizeof(t), (char *) &t); } #endif /* * Copy sender's timestamp into tdma ie so they can * calculate roundtrip time. We submit a beacon frame * below after any timer adjustment. The frame goes out * at the next TBTT so the sender can calculate the * roundtrip by inspecting the tdma ie in our beacon frame. * * NB: This tstamp is subtly preserved when * IEEE80211_BEACON_TDMA is marked (e.g. when the * slot position changes) because ieee80211_add_tdma * skips over the data. */ memcpy(vap->iv_bcn_off.bo_tdma + __offsetof(struct ieee80211_tdma_param, tdma_tstamp), &ni->ni_tstamp.data, 8); #if 0 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n", (unsigned long long) tsf, (unsigned long long) nextslot, (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta); #endif /* * Adjust the beacon timers only when pulling them forward * or when going back by less than the beacon interval. * Negative jumps larger than the beacon interval seem to * cause the timers to stop and generally cause instability. * This basically filters out jumps due to missed beacons.
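/*
 * The tsfdelta handling above splits a signed timing error into a
 * whole-TU part (applied by bumping nextslottu) and a sub-TU remainder
 * (applied later by sliding the TSF forward).  A standalone sketch of
 * just that split, with invented deltas; note both signs round the slot
 * boundary up to the next TU:
 */
#include <stdio.h>

static void split_delta(int tsfdelta, int *push_usec, int *bump_tu)
{
	if (tsfdelta < 0) {
		*push_usec = -tsfdelta % 1024;	/* pull TSF forward */
		*bump_tu = 1;
	} else if (tsfdelta > 0) {
		*push_usec = 1024 - (tsfdelta % 1024);
		*bump_tu = 1;
	} else {
		*push_usec = 0;			/* already aligned */
		*bump_tu = 0;
	}
}

int main(void)
{
	int push, bump;

	split_delta(-1500, &push, &bump);
	printf("delta -1500: push %d usec, bump %d TU\n", push, bump);
	split_delta(300, &push, &bump);
	printf("delta +300: push %d usec, bump %d TU\n", push, bump);
	return 0;
}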
if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) { DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, "%s: calling ath_tdma_settimers; nextslottu=%d, bintval=%d\n", __func__, nextslottu, sc->sc_tdmabintval); ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval); sc->sc_stats.ast_tdma_timers++; } if (tsfdelta > 0) { uint64_t tsf; /* XXX should just teach ath_hal_adjusttsf() to do this */ tsf = ath_hal_gettsf64(ah); ath_hal_settsf64(ah, tsf + tsfdelta); DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, "%s: calling ath_hal_adjusttsf: TSF=%llu, tsfdelta=%d\n", __func__, (unsigned long long) tsf, tsfdelta); #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_TDMA_TSF_ADJUST)) { struct if_ath_alq_tdma_tsf_adjust t; t.tsfdelta = htobe32(tsfdelta); t.tsf64_old = htobe64(tsf); t.tsf64_new = htobe64(tsf + tsfdelta); if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_TSF_ADJUST, sizeof(t), (char *) &t); } #endif /* ATH_DEBUG_ALQ */ sc->sc_stats.ast_tdma_tsf++; } ath_tdma_beacon_send(sc, vap); /* prepare response */ #undef TU_TO_TSF #undef TSF_TO_TU } /* * Transmit a beacon frame at SWBA. Dynamic updates * to the frame contents are done as needed. */ void ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf; int otherant; /* * Check if the previous beacon has gone out. If * not, don't try to post another, skip this period * and wait for the next. Missed beacons indicate * a problem and should not occur. If we miss too * many consecutive beacons, reset the device. */ if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { sc->sc_bmisscount++; DPRINTF(sc, ATH_DEBUG_BEACON, "%s: missed %u consecutive beacons\n", __func__, sc->sc_bmisscount); if (sc->sc_bmisscount >= ath_bstuck_threshold) taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); return; } if (sc->sc_bmisscount != 0) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: resume beacon xmit after %u misses\n", __func__, sc->sc_bmisscount); sc->sc_bmisscount = 0; } /* * Check recent per-antenna transmit statistics and flip * the default antenna if noticeably more frames went out * on the non-default antenna. * XXX assumes 2 antennae */ if (!sc->sc_diversity) { otherant = sc->sc_defant & 1 ? 2 : 1; if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) ath_setdefantenna(sc, otherant); sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; } bf = ath_beacon_generate(sc, vap); /* XXX We don't do cabq traffic, but just for completeness .. */ ATH_TXQ_LOCK(sc->sc_cabq); ath_beacon_cabq_start(sc); ATH_TXQ_UNLOCK(sc->sc_cabq); if (bf != NULL) { /* * Stop any current dma and put the new frame on the queue. * This should never fail since we check above that no frames * are still pending on the queue. */ if ((! sc->sc_isedma) && (! ath_hal_stoptxdma(ah, sc->sc_bhalq))) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: beacon queue %u did not stop?\n", __func__, sc->sc_bhalq); /* NB: the HAL still stops DMA, so proceed */ } ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); ath_hal_txstart(ah, sc->sc_bhalq); sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */ /* * Record local TSF for our last send for use * in arbitrating slot collisions. */ /* XXX should take a locked ref to iv_bss */ vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah); } } #endif /* IEEE80211_SUPPORT_TDMA */ diff --git a/sys/dev/bnxt/bnxt_mgmt.c b/sys/dev/bnxt/bnxt_mgmt.c index d54d7a2a2fe1..7185fa2f5c0a 100644 --- a/sys/dev/bnxt/bnxt_mgmt.c +++ b/sys/dev/bnxt/bnxt_mgmt.c @@ -1,373 +1,373 @@ /* * Broadcom NetXtreme-C/E network driver.
* * Copyright (c) 2022 Broadcom, All Rights Reserved. * The term Broadcom refers to Broadcom Limited and/or its subsidiaries * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include "bnxt_mgmt.h" #include "bnxt.h" #include "bnxt_hwrm.h" #include #include #include #include /* Function prototypes */ static d_open_t bnxt_mgmt_open; static d_close_t bnxt_mgmt_close; static d_ioctl_t bnxt_mgmt_ioctl; /* Character device entry points */ static struct cdevsw bnxt_mgmt_cdevsw = { .d_version = D_VERSION, .d_open = bnxt_mgmt_open, .d_close = bnxt_mgmt_close, .d_ioctl = bnxt_mgmt_ioctl, .d_name = "bnxt_mgmt", }; /* Global vars */ static struct cdev *bnxt_mgmt_dev; struct mtx mgmt_lock; MALLOC_DEFINE(M_BNXT, "bnxt_mgmt_buffer", "buffer for bnxt_mgmt module"); /* * This function is called by the kld[un]load(2) system calls to * determine what actions to take when a module is loaded or unloaded. 
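/*
 * Several diagnostics in the functions below are assembled from adjacent
 * string literals split across source lines.  C concatenates such
 * literals with no separator, so a missing trailing space silently runs
 * words together ("Failed to copydata from user").  A standalone
 * demonstration of the pitfall and the corrected form:
 */
#include <stdio.h>

int main(void)
{
	printf("Failed to copy" "data from user\n");	/* "copydata" - wrong */
	printf("Failed to copy " "data from user\n");	/* correct */
	return 0;
}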
*/ static int bnxt_mgmt_loader(struct module *m, int what, void *arg) { int error = 0; switch (what) { case MOD_LOAD: error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &bnxt_mgmt_dev, &bnxt_mgmt_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bnxt_mgmt"); if (error != 0) { printf("%s: %s:%s:%d Failed to create the " "bnxt_mgmt device node\n", DRIVER_NAME, __FILE__, __FUNCTION__, __LINE__); return (error); } mtx_init(&mgmt_lock, "BNXT MGMT Lock", NULL, MTX_DEF); break; case MOD_UNLOAD: mtx_destroy(&mgmt_lock); destroy_dev(bnxt_mgmt_dev); break; default: error = EOPNOTSUPP; break; } return (error); } static int bnxt_mgmt_process_hwrm(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { struct bnxt_softc *softc = NULL; struct bnxt_mgmt_req mgmt_req = {}; struct bnxt_mgmt_fw_msg msg_temp, *msg, *msg2 = NULL; struct iflib_dma_info dma_data = {}; void *user_ptr, *req, *resp; int ret = 0; uint16_t num_ind = 0; memcpy(&user_ptr, data, sizeof(user_ptr)); if (copyin(user_ptr, &mgmt_req, sizeof(struct bnxt_mgmt_req))) { printf("%s: %s:%d Failed to copy data from user\n", DRIVER_NAME, __FUNCTION__, __LINE__); return -EFAULT; } softc = bnxt_find_dev(mgmt_req.hdr.domain, mgmt_req.hdr.bus, mgmt_req.hdr.devfn, NULL); if (!softc) { printf("%s: %s:%d unable to find softc reference\n", DRIVER_NAME, __FUNCTION__, __LINE__); return -ENODEV; } if (copyin((void*)mgmt_req.req.hreq, &msg_temp, sizeof(msg_temp))) { device_printf(softc->dev, "%s:%d Failed to copy data from user\n", __FUNCTION__, __LINE__); return -EFAULT; } if (msg_temp.len_req > BNXT_MGMT_MAX_HWRM_REQ_LENGTH || msg_temp.len_resp > BNXT_MGMT_MAX_HWRM_RESP_LENGTH) { device_printf(softc->dev, "%s:%d Invalid length\n", __FUNCTION__, __LINE__); return -EINVAL; } if (msg_temp.num_dma_indications > 1) { device_printf(softc->dev, "%s:%d Max num_dma_indications " "supported is 1\n", __FUNCTION__, __LINE__); return -EINVAL; } req = malloc(msg_temp.len_req, M_BNXT, M_WAITOK | M_ZERO); if(!req) { device_printf(softc->dev, "%s:%d Memory allocation failed", __FUNCTION__, __LINE__); return -ENOMEM; } resp = malloc(msg_temp.len_resp, M_BNXT, M_WAITOK | M_ZERO); if(!resp) { device_printf(softc->dev, "%s:%d Memory allocation failed", __FUNCTION__, __LINE__); ret = -ENOMEM; goto end; } if (copyin((void *)msg_temp.usr_req, req, msg_temp.len_req)) { device_printf(softc->dev, "%s:%d Failed to copy data from user\n", __FUNCTION__, __LINE__); ret = -EFAULT; goto end; } msg = &msg_temp; num_ind = msg_temp.num_dma_indications; if (num_ind) { int size; void *dma_ptr; uint64_t *dmap; size = sizeof(struct bnxt_mgmt_fw_msg) + (num_ind * sizeof(struct dma_info)); msg2 = malloc(size, M_BNXT, M_WAITOK | M_ZERO); if(!msg2) { device_printf(softc->dev, "%s:%d Memory allocation failed", __FUNCTION__, __LINE__); ret = -ENOMEM; goto end; } if (copyin((void *)mgmt_req.req.hreq, msg2, size)) { device_printf(softc->dev, "%s:%d Failed to copy " "data from user\n", __FUNCTION__, __LINE__); ret = -EFAULT; goto end; } msg = msg2; ret = iflib_dma_alloc(softc->ctx, msg->dma[0].length, &dma_data, BUS_DMA_NOWAIT); if (ret) { device_printf(softc->dev, "%s:%d iflib_dma_alloc " "failed with ret = 0x%x\n", __FUNCTION__, __LINE__, ret); ret = -ENOMEM; goto end; } if (!(msg->dma[0].read_or_write)) { if (copyin((void *)msg->dma[0].data, dma_data.idi_vaddr, msg->dma[0].length)) { device_printf(softc->dev, "%s:%d Failed to copy " "data from user\n", __FUNCTION__, __LINE__); ret = -EFAULT; goto end; } } dma_ptr = (void *) ((uint64_t) req + msg->dma[0].offset); dmap = dma_ptr; *dmap =
htole64(dma_data.idi_paddr); } ret = bnxt_hwrm_passthrough(softc, req, msg->len_req, resp, msg->len_resp, msg->timeout); if(ret) goto end; if (num_ind) { if ((msg->dma[0].read_or_write)) { if (copyout(dma_data.idi_vaddr, (void *)msg->dma[0].data, msg->dma[0].length)) { device_printf(softc->dev, "%s:%d Failed to copy data" "to user\n", __FUNCTION__, __LINE__); ret = -EFAULT; goto end; } } } if (copyout(resp, (void *) msg->usr_resp, msg->len_resp)) { device_printf(softc->dev, "%s:%d Failed to copy response to user\n", __FUNCTION__, __LINE__); ret = -EFAULT; goto end; } end: if (req) free(req, M_BNXT); if (resp) free(resp, M_BNXT); if (msg2) free(msg2, M_BNXT); if (dma_data.idi_paddr) iflib_dma_free(&dma_data); return ret; } static int bnxt_mgmt_get_dev_info(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { struct bnxt_softc *softc = NULL; struct bnxt_dev_info dev_info; void *user_ptr; uint32_t dev_sn_lo, dev_sn_hi; int dev_sn_offset = 0; char dsn[16]; uint16_t lnk; int capreg; memcpy(&user_ptr, data, sizeof(user_ptr)); if (copyin(user_ptr, &dev_info, sizeof(dev_info))) { printf("%s: %s:%d Failed to copy data from user\n", DRIVER_NAME, __FUNCTION__, __LINE__); return -EFAULT; } softc = bnxt_find_dev(0, 0, 0, dev_info.nic_info.dev_name); if (!softc) { printf("%s: %s:%d unable to find softc reference\n", DRIVER_NAME, __FUNCTION__, __LINE__); return -ENODEV; } strncpy(dev_info.nic_info.driver_version, bnxt_driver_version, 64); strncpy(dev_info.nic_info.driver_name, device_get_name(softc->dev), 64); dev_info.pci_info.domain_no = softc->domain; dev_info.pci_info.bus_no = softc->bus; dev_info.pci_info.device_no = softc->slot; dev_info.pci_info.function_no = softc->function; dev_info.pci_info.vendor_id = pci_get_vendor(softc->dev); dev_info.pci_info.device_id = pci_get_device(softc->dev); dev_info.pci_info.sub_system_vendor_id = pci_get_subvendor(softc->dev); dev_info.pci_info.sub_system_device_id = pci_get_subdevice(softc->dev); dev_info.pci_info.revision = pci_read_config(softc->dev, PCIR_REVID, 1); dev_info.pci_info.chip_rev_id = (dev_info.pci_info.device_id << 16); dev_info.pci_info.chip_rev_id |= dev_info.pci_info.revision; if (pci_find_extcap(softc->dev, PCIZ_SERNUM, &dev_sn_offset)) { device_printf(softc->dev, "%s:%d device serial number is not found" "or not supported\n", __FUNCTION__, __LINE__); } else { dev_sn_lo = pci_read_config(softc->dev, dev_sn_offset + 4, 4); dev_sn_hi = pci_read_config(softc->dev, dev_sn_offset + 8, 4); snprintf(dsn, sizeof(dsn), "%02x%02x%02x%02x%02x%02x%02x%02x", (dev_sn_lo & 0x000000FF), (dev_sn_lo >> 8) & 0x0000FF, (dev_sn_lo >> 16) & 0x00FF, (dev_sn_lo >> 24 ) & 0xFF, (dev_sn_hi & 0x000000FF), (dev_sn_hi >> 8) & 0x0000FF, (dev_sn_hi >> 16) & 0x00FF, (dev_sn_hi >> 24 ) & 0xFF); strncpy(dev_info.nic_info.device_serial_number, dsn, sizeof(dsn)); } if_t ifp = iflib_get_ifp(softc->ctx); - dev_info.nic_info.mtu = ifp->if_mtu; + dev_info.nic_info.mtu = if_getmtu(ifp); memcpy(dev_info.nic_info.mac, softc->func.mac_addr, ETHER_ADDR_LEN); if (pci_find_cap(softc->dev, PCIY_EXPRESS, &capreg)) { device_printf(softc->dev, "%s:%d pci link capability is not found" "or not supported\n", __FUNCTION__, __LINE__); } else { lnk = pci_read_config(softc->dev, capreg + PCIER_LINK_STA, 2); dev_info.nic_info.pci_link_speed = (lnk & PCIEM_LINK_STA_SPEED); dev_info.nic_info.pci_link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4; } if (copyout(&dev_info, user_ptr, sizeof(dev_info))) { device_printf(softc->dev, "%s:%d Failed to copy data to user\n", __FUNCTION__, 
__LINE__); return -EFAULT; } return 0; } /* * IOCTL entry point. */ static int bnxt_mgmt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { int ret = 0; switch(cmd) { case BNXT_MGMT_OPCODE_GET_DEV_INFO: ret = bnxt_mgmt_get_dev_info(dev, cmd, data, flag, td); break; case BNXT_MGMT_OPCODE_PASSTHROUGH_HWRM: mtx_lock(&mgmt_lock); ret = bnxt_mgmt_process_hwrm(dev, cmd, data, flag, td); mtx_unlock(&mgmt_lock); break; default: printf("%s: Unknown command 0x%lx\n", DRIVER_NAME, cmd); ret = -EINVAL; break; } return ret; } static int bnxt_mgmt_close(struct cdev *dev, int flags, int devtype, struct thread *td) { return (0); } static int bnxt_mgmt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return (0); } DEV_MODULE(bnxt_mgmt, bnxt_mgmt_loader, NULL); diff --git a/sys/dev/bnxt/if_bnxt.c b/sys/dev/bnxt/if_bnxt.c index d6cb4d613c96..6f45c0963d1d 100644 --- a/sys/dev/bnxt/if_bnxt.c +++ b/sys/dev/bnxt/if_bnxt.c @@ -1,3401 +1,3401 @@ /*- * Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2016 Broadcom, All Rights Reserved. * The term Broadcom refers to Broadcom Limited and/or its subsidiaries * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
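/*
 * bnxt_mgmt_ioctl() above serializes only the HWRM passthrough path
 * with a mutex while leaving the cheap query command lock-free.  A
 * userspace sketch of that dispatch shape; the command numbers and the
 * pthread mutex are illustrative stand-ins for the cdev ioctl path:
 */
#include <pthread.h>
#include <stdio.h>

#define CMD_GET_INFO	1
#define CMD_PASSTHRU	2

static pthread_mutex_t passthru_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_ioctl(unsigned long cmd)
{
	int ret = 0;

	switch (cmd) {
	case CMD_GET_INFO:		/* read-mostly, no lock needed */
		printf("get info\n");
		break;
	case CMD_PASSTHRU:		/* firmware channel: one at a time */
		pthread_mutex_lock(&passthru_lock);
		printf("passthrough\n");
		pthread_mutex_unlock(&passthru_lock);
		break;
	default:
		ret = -1;		/* unknown command */
		break;
	}
	return ret;
}

int main(void)
{
	do_ioctl(CMD_GET_INFO);
	do_ioctl(CMD_PASSTHRU);
	return do_ioctl(99) == -1 ? 0 : 1;
}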
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include "ifdi_if.h" #include "bnxt.h" #include "bnxt_hwrm.h" #include "bnxt_ioctl.h" #include "bnxt_sysctl.h" #include "hsi_struct_def.h" #include "bnxt_mgmt.h" /* * PCI Device ID Table */ static pci_vendor_info_t bnxt_vendor_info_array[] = { PVID(BROADCOM_VENDOR_ID, BCM57301, "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57302, "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57304, "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57311, "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57312, "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57314, "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57402, "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR, "Broadcom BCM57402 NetXtreme-E Partition"), PVID(BROADCOM_VENDOR_ID, BCM57404, "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR, "Broadcom BCM57404 NetXtreme-E Partition"), PVID(BROADCOM_VENDOR_ID, BCM57406, "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR, "Broadcom BCM57406 NetXtreme-E Partition"), PVID(BROADCOM_VENDOR_ID, BCM57407, "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR, "Broadcom BCM57407 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57407_SFP, "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57412, "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1, "Broadcom BCM57412 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2, "Broadcom BCM57412 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57414, "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1, "Broadcom BCM57414 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2, "Broadcom BCM57414 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57416, "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1, "Broadcom BCM57416 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2, "Broadcom BCM57416 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57416_SFP, "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57417, "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1, "Broadcom BCM57417 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2, "Broadcom BCM57417 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57417_SFP, "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57454, "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM58700, "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57508, "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57504, "Broadcom BCM57504 
NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57502, "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1, "Broadcom NetXtreme-C Ethernet Virtual Function"), PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2, "Broadcom NetXtreme-C Ethernet Virtual Function"), PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3, "Broadcom NetXtreme-C Ethernet Virtual Function"), PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1, "Broadcom NetXtreme-E Ethernet Virtual Function"), PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2, "Broadcom NetXtreme-E Ethernet Virtual Function"), PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3, "Broadcom NetXtreme-E Ethernet Virtual Function"), /* required last entry */ PVID_END }; /* * Function prototypes */ SLIST_HEAD(softc_list, bnxt_softc_list) pf_list; int bnxt_num_pfs = 0; static void *bnxt_register(device_t dev); /* Soft queue setup and teardown */ static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets); static void bnxt_queues_free(if_ctx_t ctx); /* Device setup and teardown */ static int bnxt_attach_pre(if_ctx_t ctx); static int bnxt_attach_post(if_ctx_t ctx); static int bnxt_detach(if_ctx_t ctx); /* Device configuration */ static void bnxt_init(if_ctx_t ctx); static void bnxt_stop(if_ctx_t ctx); static void bnxt_multi_set(if_ctx_t ctx); static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu); static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr); static int bnxt_media_change(if_ctx_t ctx); static int bnxt_promisc_set(if_ctx_t ctx, int flags); static uint64_t bnxt_get_counter(if_ctx_t, ift_counter); static void bnxt_update_admin_status(if_ctx_t ctx); static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid); /* Interrupt enable / disable */ static void bnxt_intr_enable(if_ctx_t ctx); static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid); static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid); static void bnxt_disable_intr(if_ctx_t ctx); static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix); /* vlan support */ static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag); static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag); /* ioctl */ static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data); static int bnxt_shutdown(if_ctx_t ctx); static int bnxt_suspend(if_ctx_t ctx); static int bnxt_resume(if_ctx_t ctx); /* Internal support functions */ static int bnxt_probe_phy(struct bnxt_softc *softc); static void bnxt_add_media_types(struct bnxt_softc *softc); static int bnxt_pci_mapping(struct bnxt_softc *softc); static void bnxt_pci_mapping_free(struct bnxt_softc *softc); static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state); static int bnxt_handle_def_cp(void *arg); static int bnxt_handle_isr(void *arg); static void bnxt_clear_ids(struct bnxt_softc *softc); static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr); static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr); static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr); static void bnxt_def_cp_task(void *context); static void bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl); static uint8_t get_phy_type(struct bnxt_softc *softc); static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link); static void bnxt_get_wol_settings(struct bnxt_softc *softc); static int bnxt_wol_config(if_ctx_t ctx); /* * 
Device Interface Declaration */ static device_method_t bnxt_methods[] = { /* Device interface */ DEVMETHOD(device_register, bnxt_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD_END }; static driver_t bnxt_driver = { "bnxt", bnxt_methods, sizeof(struct bnxt_softc), }; DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0); MODULE_DEPEND(bnxt, pci, 1, 1, 1); MODULE_DEPEND(bnxt, ether, 1, 1, 1); MODULE_DEPEND(bnxt, iflib, 1, 1, 1); IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array); static device_method_t bnxt_iflib_methods[] = { DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, bnxt_queues_free), DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre), DEVMETHOD(ifdi_attach_post, bnxt_attach_post), DEVMETHOD(ifdi_detach, bnxt_detach), DEVMETHOD(ifdi_init, bnxt_init), DEVMETHOD(ifdi_stop, bnxt_stop), DEVMETHOD(ifdi_multi_set, bnxt_multi_set), DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set), DEVMETHOD(ifdi_media_status, bnxt_media_status), DEVMETHOD(ifdi_media_change, bnxt_media_change), DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set), DEVMETHOD(ifdi_get_counter, bnxt_get_counter), DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status), DEVMETHOD(ifdi_timer, bnxt_if_timer), DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable), DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable), DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr), DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign), DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register), DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister), DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl), DEVMETHOD(ifdi_suspend, bnxt_suspend), DEVMETHOD(ifdi_shutdown, bnxt_shutdown), DEVMETHOD(ifdi_resume, bnxt_resume), DEVMETHOD_END }; static driver_t bnxt_iflib_driver = { "bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc) }; /* * iflib shared context */ #define BNXT_DRIVER_VERSION "2.20.0.1" char bnxt_driver_version[] = BNXT_DRIVER_VERSION; extern struct if_txrx bnxt_txrx; static struct if_shared_ctx bnxt_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_driver = &bnxt_iflib_driver, .isc_nfl = 2, // Number of Free Lists .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header), // Only use a single segment to avoid page size constraints .isc_rx_nsegments = 1, .isc_ntxqs = 3, .isc_nrxqs = 3, .isc_nrxd_min = {16, 16, 16}, .isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8, PAGE_SIZE / sizeof(struct rx_prod_pkt_bd), PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)}, .isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD}, .isc_ntxd_min = {16, 16, 16}, .isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2, PAGE_SIZE / sizeof(struct tx_bd_short), PAGE_SIZE / sizeof(struct cmpl_base) * 2}, .isc_ntxd_max = {BNXT_MAX_TXD, 
BNXT_MAX_TXD, BNXT_MAX_TXD}, .isc_admin_intrcnt = 1, .isc_vendor_info = bnxt_vendor_info_array, .isc_driver_version = bnxt_driver_version, }; /* * Device Methods */ static void * bnxt_register(device_t dev) { return (&bnxt_sctx_init); } static void bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets) { if (softc->nq_rings) return; softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets, M_DEVBUF, M_NOWAIT | M_ZERO); } static void bnxt_nq_free(struct bnxt_softc *softc) { if (softc->nq_rings) free(softc->nq_rings, M_DEVBUF); softc->nq_rings = NULL; } /* * Device Dependent Configuration Functions */ /* Soft queue setup and teardown */ static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct bnxt_softc *softc; int i; int rc; softc = iflib_get_softc(ctx); if (BNXT_CHIP_P5(softc)) { bnxt_nq_alloc(softc, ntxqsets); if (!softc->nq_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate NQ rings\n"); rc = ENOMEM; goto nq_alloc_fail; } } softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->tx_cp_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate TX completion rings\n"); rc = ENOMEM; goto cp_alloc_fail; } softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->tx_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate TX rings\n"); rc = ENOMEM; goto ring_alloc_fail; } for (i=0; i < ntxqsets; i++) { rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats), &softc->tx_stats[i], 0); if (rc) goto dma_alloc_fail; bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map, BUS_DMASYNC_PREREAD); } for (i = 0; i < ntxqsets; i++) { /* Set up the completion ring */ softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->tx_cp_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->tx_cp_rings[i].ring.softc = softc; softc->tx_cp_rings[i].ring.idx = i; softc->tx_cp_rings[i].ring.id = (softc->scctx->isc_nrxqsets * 2) + 1 + i; softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ? DB_PF_OFFSET_P5: softc->tx_cp_rings[i].ring.id * 0x80; softc->tx_cp_rings[i].ring.ring_size = softc->scctx->isc_ntxd[0]; softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs]; softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs]; /* Set up the TX ring */ softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->tx_rings[i].softc = softc; softc->tx_rings[i].idx = i; softc->tx_rings[i].id = (softc->scctx->isc_nrxqsets * 2) + 1 + i; softc->tx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ? DB_PF_OFFSET_P5 : softc->tx_rings[i].id * 0x80; softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1]; softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1]; softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1]; bnxt_create_tx_sysctls(softc, i); if (BNXT_CHIP_P5(softc)) { /* Set up the Notification ring (NQ) */ softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->nq_rings[i].ring.softc = softc; softc->nq_rings[i].ring.idx = i; softc->nq_rings[i].ring.id = i; softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ? 
DB_PF_OFFSET_P5 : softc->nq_rings[i].ring.id * 0x80; softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2]; softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2]; softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2]; } } softc->ntxqsets = ntxqsets; return rc; dma_alloc_fail: for (i = i - 1; i >= 0; i--) iflib_dma_free(&softc->tx_stats[i]); free(softc->tx_rings, M_DEVBUF); ring_alloc_fail: free(softc->tx_cp_rings, M_DEVBUF); cp_alloc_fail: bnxt_nq_free(softc); nq_alloc_fail: return rc; } static void bnxt_queues_free(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); int i; // Free TX queues for (i = 0; i < softc->ntxqsets; i++) iflib_dma_free(&softc->tx_stats[i]); free(softc->tx_rings, M_DEVBUF); softc->tx_rings = NULL; free(softc->tx_cp_rings, M_DEVBUF); softc->tx_cp_rings = NULL; softc->ntxqsets = 0; // Free RX queues for (i = 0; i < softc->nrxqsets; i++) iflib_dma_free(&softc->rx_stats[i]); iflib_dma_free(&softc->hw_tx_port_stats); iflib_dma_free(&softc->hw_rx_port_stats); free(softc->grp_info, M_DEVBUF); free(softc->ag_rings, M_DEVBUF); free(softc->rx_rings, M_DEVBUF); free(softc->rx_cp_rings, M_DEVBUF); bnxt_nq_free(softc); } static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct bnxt_softc *softc; int i; int rc; softc = iflib_get_softc(ctx); softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->rx_cp_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate RX completion rings\n"); rc = ENOMEM; goto cp_alloc_fail; } softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->rx_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate RX rings\n"); rc = ENOMEM; goto ring_alloc_fail; } softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->ag_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate aggregation rings\n"); rc = ENOMEM; goto ag_alloc_fail; } softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->grp_info) { device_printf(iflib_get_dev(ctx), "unable to allocate ring groups\n"); rc = ENOMEM; goto grp_alloc_fail; } for (i=0; i < nrxqsets; i++) { rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats), &softc->rx_stats[i], 0); if (rc) goto hw_stats_alloc_fail; bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map, BUS_DMASYNC_PREREAD); } /* * Additional 512 bytes for future expansion, to prevent corruption when * loaded with newer firmware that adds counters. * This can be deleted once no further counters are added.
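/*
 * bnxt_tx_queues_alloc() and bnxt_rx_queues_alloc() above unwind
 * partial allocations through a chain of labels entered in reverse
 * order of allocation.  A standalone sketch of that idiom (a_fail and
 * b_fail are illustrative label names; the driver keeps its rings on
 * success, whereas this demo frees them so it does not leak):
 */
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a = NULL, *b = NULL;
	int rc = 0;

	a = malloc(16);
	if (a == NULL) { rc = -1; goto a_fail; }
	b = malloc(16);
	if (b == NULL) { rc = -1; goto b_fail; }

	printf("both allocated\n");
	free(b);
	free(a);
	return 0;

b_fail:			/* unwind in reverse order of allocation */
	free(a);
a_fail:
	return rc;
}

int main(void)
{
	return setup();
}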
*/ #define BNXT_PORT_STAT_PADDING 512 rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING, &softc->hw_rx_port_stats, 0); if (rc) goto hw_port_rx_stats_alloc_fail; bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag, softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD); rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING, &softc->hw_tx_port_stats, 0); if (rc) goto hw_port_tx_stats_alloc_fail; bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag, softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD); softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr; softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr; for (i = 0; i < nrxqsets; i++) { /* Allocate the completion ring */ softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->rx_cp_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->rx_cp_rings[i].ring.softc = softc; softc->rx_cp_rings[i].ring.idx = i; softc->rx_cp_rings[i].ring.id = i + 1; softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ? DB_PF_OFFSET_P5 : softc->rx_cp_rings[i].ring.id * 0x80; /* * If this ring overflows, RX stops working. */ softc->rx_cp_rings[i].ring.ring_size = softc->scctx->isc_nrxd[0]; softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs]; softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs]; /* Allocate the RX ring */ softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->rx_rings[i].softc = softc; softc->rx_rings[i].idx = i; softc->rx_rings[i].id = i + 1; softc->rx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ? DB_PF_OFFSET_P5 : softc->rx_rings[i].id * 0x80; softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1]; softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1]; softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1]; /* Allocate the TPA start buffer */ softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) * (RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT), M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->rx_rings[i].tpa_start == NULL) { rc = -ENOMEM; device_printf(softc->dev, "Unable to allocate space for TPA\n"); goto tpa_alloc_fail; } /* Allocate the AG ring */ softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->ag_rings[i].softc = softc; softc->ag_rings[i].idx = i; softc->ag_rings[i].id = nrxqsets + i + 1; softc->ag_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
DB_PF_OFFSET_P5 : softc->ag_rings[i].id * 0x80; softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2]; softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2]; softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2]; /* Allocate the ring group */ softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE; softc->grp_info[i].stats_ctx = softc->rx_cp_rings[i].stats_ctx_id; softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id; softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id; softc->grp_info[i].cp_ring_id = softc->rx_cp_rings[i].ring.phys_id; bnxt_create_rx_sysctls(softc, i); } /* * When SR-IOV is enabled, avoid each VF sending PORT_QSTATS * HWRM every sec with which firmware timeouts can happen */ if (BNXT_PF(softc)) bnxt_create_port_stats_sysctls(softc); /* And finally, the VNIC */ softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.filter_id = -1; softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN; softc->vnic_info.mc_list_count = 0; softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT; rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN, &softc->vnic_info.mc_list, 0); if (rc) goto mc_list_alloc_fail; /* The VNIC RSS Hash Key */ rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE, &softc->vnic_info.rss_hash_key_tbl, 0); if (rc) goto rss_hash_alloc_fail; bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag, softc->vnic_info.rss_hash_key_tbl.idi_map, BUS_DMASYNC_PREWRITE); memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr, softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE); /* Allocate the RSS tables */ rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t), &softc->vnic_info.rss_grp_tbl, 0); if (rc) goto rss_grp_alloc_fail; bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag, softc->vnic_info.rss_grp_tbl.idi_map, BUS_DMASYNC_PREWRITE); memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff, softc->vnic_info.rss_grp_tbl.idi_size); softc->nrxqsets = nrxqsets; return rc; rss_grp_alloc_fail: iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl); rss_hash_alloc_fail: iflib_dma_free(&softc->vnic_info.mc_list); tpa_alloc_fail: mc_list_alloc_fail: for (i = i - 1; i >= 0; i--) free(softc->rx_rings[i].tpa_start, M_DEVBUF); iflib_dma_free(&softc->hw_tx_port_stats); hw_port_tx_stats_alloc_fail: iflib_dma_free(&softc->hw_rx_port_stats); hw_port_rx_stats_alloc_fail: for (i = i - 1; i >= 0; i--) iflib_dma_free(&softc->rx_stats[i]); hw_stats_alloc_fail: free(softc->grp_info, M_DEVBUF); grp_alloc_fail: free(softc->ag_rings, M_DEVBUF); ag_alloc_fail: free(softc->rx_rings, M_DEVBUF); ring_alloc_fail: free(softc->rx_cp_rings, M_DEVBUF); cp_alloc_fail: return rc; } static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc) { if (softc->hwrm_short_cmd_req_addr.idi_vaddr) iflib_dma_free(&softc->hwrm_short_cmd_req_addr); softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL; } static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc) { int rc; rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len, &softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT); return rc; } static void bnxt_free_ring(struct bnxt_softc *bp, struct bnxt_ring_mem_info *rmem) { int i; for (i = 0; i < rmem->nr_pages; i++) { if (!rmem->pg_arr[i].idi_vaddr) continue; iflib_dma_free(&rmem->pg_arr[i]); rmem->pg_arr[i].idi_vaddr = NULL; } if (rmem->pg_tbl.idi_vaddr) { 
iflib_dma_free(&rmem->pg_tbl); rmem->pg_tbl.idi_vaddr = NULL; } if (rmem->vmem_size && *rmem->vmem) { free(*rmem->vmem, M_DEVBUF); *rmem->vmem = NULL; } } static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem) { uint64_t valid_bit = 0; int i; int rc; if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) valid_bit = PTU_PTE_VALID; if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) { size_t pg_tbl_size = rmem->nr_pages * 8; if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) pg_tbl_size = rmem->page_size; rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0); if (rc) return -ENOMEM; } for (i = 0; i < rmem->nr_pages; i++) { uint64_t extra_bits = valid_bit; uint64_t *ptr; rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0); if (rc) return -ENOMEM; if (rmem->init_val) memset(rmem->pg_arr[i].idi_vaddr, rmem->init_val, rmem->page_size); if (rmem->nr_pages > 1 || rmem->depth > 0) { if (i == rmem->nr_pages - 2 && (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) extra_bits |= PTU_PTE_NEXT_TO_LAST; else if (i == rmem->nr_pages - 1 && (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) extra_bits |= PTU_PTE_LAST; ptr = (void *) rmem->pg_tbl.idi_vaddr; ptr[i] = htole64(rmem->pg_arr[i].idi_paddr | extra_bits); } } if (rmem->vmem_size) { *rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO); if (!(*rmem->vmem)) return -ENOMEM; } return 0; } #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \ (HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP | \ HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \ HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ | \ HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \ HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc, struct bnxt_ctx_pg_info *ctx_pg) { struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; rmem->page_size = BNXT_PAGE_SIZE; rmem->pg_arr = ctx_pg->ctx_arr; rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; if (rmem->depth >= 1) rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; return bnxt_alloc_ring(softc, rmem); } static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc, struct bnxt_ctx_pg_info *ctx_pg, uint32_t mem_size, uint8_t depth, bool use_init_val) { struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; int rc; if (!mem_size) return 0; ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { ctx_pg->nr_pages = 0; return -EINVAL; } if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { int nr_tbls, i; rmem->depth = 2; ctx_pg->ctx_pg_tbl = malloc(MAX_CTX_PAGES * sizeof(ctx_pg), M_DEVBUF, M_NOWAIT | M_ZERO); if (!ctx_pg->ctx_pg_tbl) return -ENOMEM; nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); rmem->nr_pages = nr_tbls; rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg); if (rc) return rc; for (i = 0; i < nr_tbls; i++) { struct bnxt_ctx_pg_info *pg_tbl; pg_tbl = malloc(sizeof(*pg_tbl), M_DEVBUF, M_NOWAIT | M_ZERO); if (!pg_tbl) return -ENOMEM; ctx_pg->ctx_pg_tbl[i] = pg_tbl; rmem = &pg_tbl->ring_mem; memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info)); rmem->depth = 1; rmem->nr_pages = MAX_CTX_PAGES; if (use_init_val) rmem->init_val = softc->ctx_mem->ctx_kind_initializer; if (i == (nr_tbls - 1)) { int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; if (rem) rmem->nr_pages = rem; } rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl); if (rc) break; } } else { rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); if (rmem->nr_pages > 1 || depth) rmem->depth = 1; if (use_init_val) 
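		/*
		 * Fill pattern for this context type, as reported by the
		 * firmware backing-store qcaps response.
		 */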
rmem->init_val = softc->ctx_mem->ctx_kind_initializer; rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg); } return rc; } static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc, struct bnxt_ctx_pg_info *ctx_pg) { struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || ctx_pg->ctx_pg_tbl) { int i, nr_tbls = rmem->nr_pages; for (i = 0; i < nr_tbls; i++) { struct bnxt_ctx_pg_info *pg_tbl; struct bnxt_ring_mem_info *rmem2; pg_tbl = ctx_pg->ctx_pg_tbl[i]; if (!pg_tbl) continue; rmem2 = &pg_tbl->ring_mem; bnxt_free_ring(softc, rmem2); ctx_pg->ctx_arr[i].idi_vaddr = NULL; free(pg_tbl , M_DEVBUF); ctx_pg->ctx_pg_tbl[i] = NULL; } free(ctx_pg->ctx_pg_tbl , M_DEVBUF); ctx_pg->ctx_pg_tbl = NULL; } bnxt_free_ring(softc, rmem); ctx_pg->nr_pages = 0; } static void bnxt_free_ctx_mem(struct bnxt_softc *softc) { struct bnxt_ctx_mem_info *ctx = softc->ctx_mem; int i; if (!ctx) return; if (ctx->tqm_mem[0]) { for (i = 0; i < softc->max_q + 1; i++) { if (!ctx->tqm_mem[i]) continue; bnxt_free_ctx_pg_tbls(softc, ctx->tqm_mem[i]); } free(ctx->tqm_mem[0] , M_DEVBUF); ctx->tqm_mem[0] = NULL; } bnxt_free_ctx_pg_tbls(softc, &ctx->tim_mem); bnxt_free_ctx_pg_tbls(softc, &ctx->mrav_mem); bnxt_free_ctx_pg_tbls(softc, &ctx->stat_mem); bnxt_free_ctx_pg_tbls(softc, &ctx->vnic_mem); bnxt_free_ctx_pg_tbls(softc, &ctx->cq_mem); bnxt_free_ctx_pg_tbls(softc, &ctx->srq_mem); bnxt_free_ctx_pg_tbls(softc, &ctx->qp_mem); ctx->flags &= ~BNXT_CTX_FLAG_INITED; free(softc->ctx_mem, M_DEVBUF); softc->ctx_mem = NULL; } static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc) { struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; uint32_t mem_size, ena, entries; int i, rc; if (!BNXT_CHIP_P5(softc)) return 0; rc = bnxt_hwrm_func_backing_store_qcaps(softc); if (rc) { device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n", rc); return rc; } ctx = softc->ctx_mem; if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) return 0; ctx_pg = &ctx->qp_mem; ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + (1024 * 64); /* FIXME: Enable 64K QPs */ mem_size = ctx->qp_entry_size * ctx_pg->entries; rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true); if (rc) return rc; ctx_pg = &ctx->srq_mem; /* FIXME: Temporarily enable 8K RoCE SRQs */ ctx_pg->entries = ctx->srq_max_l2_entries + (1024 * 8); mem_size = ctx->srq_entry_size * ctx_pg->entries; rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true); if (rc) return rc; ctx_pg = &ctx->cq_mem; /* FIXME: Temporarily enable 64K RoCE CQ */ ctx_pg->entries = ctx->cq_max_l2_entries + (1024 * 64 * 2); mem_size = ctx->cq_entry_size * ctx_pg->entries; rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true); if (rc) return rc; ctx_pg = &ctx->vnic_mem; ctx_pg->entries = ctx->vnic_max_vnic_entries + ctx->vnic_max_ring_table_entries; mem_size = ctx->vnic_entry_size * ctx_pg->entries; rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true); if (rc) return rc; ctx_pg = &ctx->stat_mem; ctx_pg->entries = ctx->stat_max_entries; mem_size = ctx->stat_entry_size * ctx_pg->entries; rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true); if (rc) return rc; ctx_pg = &ctx->mrav_mem; /* FIXME: Temporarily enable 256K RoCE MRs */ ctx_pg->entries = 1024 * 256; mem_size = ctx->mrav_entry_size * ctx_pg->entries; rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true); if (rc) return rc; ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV; ctx_pg = &ctx->tim_mem; /* Firmware needs number of TIM entries 
equal to
	 * number of Total QP contexts enabled, including
	 * L2 QPs.
	 */
	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
	    1024 * 64;
	/* FIXME: L2 driver is not able to create queue depth
	 * worth of 1M 32bit timers. Need a fix when l2-roce
	 * interface is well designed.
	 */
	mem_size = ctx->tim_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
	if (rc)
		return rc;
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;

	/* FIXME: Temporarily increase the TQM queue depth
	 * by 1K for 1K RoCE QPs.
	 */
	entries = ctx->qp_max_l2_entries + 1024 * 64;
	entries = roundup(entries, ctx->tqm_entries_multiple);
	entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
	    ctx->tqm_max_entries_per_ring);
	for (i = 0; i < softc->max_q + 1; i++) {
		ctx_pg = ctx->tqm_mem[i];
		ctx_pg->entries = entries;
		mem_size = ctx->tqm_entry_size * entries;
		rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
		if (rc)
			return rc;
		ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
	}
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
	rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
	if (rc)
		device_printf(softc->dev,
		    "Failed configuring context mem, rc = %d.\n", rc);
	else
		ctx->flags |= BNXT_CTX_FLAG_INITED;

	return 0;
}

/*
 * If we update the index, a write barrier is needed after the write to ensure
 * the completion ring has space before the RX/TX ring does. Since we can't
 * make the RX and AG doorbells covered by the same barrier without remapping
 * MSI-X vectors, we create the barrier over the entire doorbell bar.
 * TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
 * for a single ring group.
 *
 * A barrier of just the size of the write is used to ensure the ordering
 * remains correct and no writes are lost.
 */
static void
bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole32(RX_DOORBELL_KEY_RX | idx));
}

static void
bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole32(TX_DOORBELL_KEY_TX | idx));
}

static void
bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole32(CMPL_DOORBELL_KEY_CMPL |
	    ((cpr->cons == UINT32_MAX) ? 0 :
	    (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
	    ((enable_irq) ?
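	    /*
	     * Writing 0 re-arms the completion interrupt;
	     * CMPL_DOORBELL_MASK leaves it masked.
	     */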
0 : CMPL_DOORBELL_MASK))); bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size, BUS_SPACE_BARRIER_WRITE); } static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx) { struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr; struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar; bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8, BUS_SPACE_BARRIER_WRITE); bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell, htole64((DBR_PATH_L2 | DBR_TYPE_SRQ | idx) | ((uint64_t)ring->phys_id << DBR_XID_SFT))); } static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx) { struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr; struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar; bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8, BUS_SPACE_BARRIER_WRITE); bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell, htole64((DBR_PATH_L2 | DBR_TYPE_SQ | idx) | ((uint64_t)ring->phys_id << DBR_XID_SFT))); } static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq) { struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr; struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar; dbc_dbc_t db_msg = { 0 }; uint32_t cons = cpr->cons; if (cons == UINT32_MAX) cons = 0; else cons = RING_NEXT(&cpr->ring, cons); db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK); db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 | ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ); bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8, BUS_SPACE_BARRIER_WRITE); bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell, htole64(*(uint64_t *)&db_msg)); bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size, BUS_SPACE_BARRIER_WRITE); } static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq) { struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr; struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar; dbc_dbc_t db_msg = { 0 }; uint32_t cons = cpr->cons; db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK); db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 | ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ); bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8, BUS_SPACE_BARRIER_WRITE); bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell, htole64(*(uint64_t *)&db_msg)); bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size, BUS_SPACE_BARRIER_WRITE); } static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq) { struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr; struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar; dbc_dbc_t db_msg = { 0 }; uint32_t cons = cpr->cons; db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK); db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 | ((enable_irq) ? 
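	    /*
	     * NQ_ARM re-enables the notification queue interrupt;
	     * plain NQ only acknowledges the consumer index.
	     */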
DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ); bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8, BUS_SPACE_BARRIER_WRITE); bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell, htole64(*(uint64_t *)&db_msg)); bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size, BUS_SPACE_BARRIER_WRITE); } struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name) { struct bnxt_softc_list *sc = NULL; SLIST_FOREACH(sc, &pf_list, next) { /* get the softc reference based on device name */ if (dev_name && !strncmp(dev_name, iflib_get_ifp(sc->softc->ctx)->if_xname, BNXT_MAX_STR)) { return sc->softc; } /* get the softc reference based on domain,bus,device,function */ if (!dev_name && (domain == sc->softc->domain) && (bus == sc->softc->bus) && (dev_fn == sc->softc->dev_fn)) { return sc->softc; } } return NULL; } /* Device setup and teardown */ static int bnxt_attach_pre(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_softc_ctx_t scctx; int rc = 0; softc->ctx = ctx; softc->dev = iflib_get_dev(ctx); softc->media = iflib_get_media(ctx); softc->scctx = iflib_get_softc_ctx(ctx); softc->sctx = iflib_get_sctx(ctx); scctx = softc->scctx; /* TODO: Better way of detecting NPAR/VF is needed */ switch (pci_get_device(softc->dev)) { case BCM57402_NPAR: case BCM57404_NPAR: case BCM57406_NPAR: case BCM57407_NPAR: case BCM57412_NPAR1: case BCM57412_NPAR2: case BCM57414_NPAR1: case BCM57414_NPAR2: case BCM57416_NPAR1: case BCM57416_NPAR2: softc->flags |= BNXT_FLAG_NPAR; break; case NETXTREME_C_VF1: case NETXTREME_C_VF2: case NETXTREME_C_VF3: case NETXTREME_E_VF1: case NETXTREME_E_VF2: case NETXTREME_E_VF3: softc->flags |= BNXT_FLAG_VF; break; } #define PCI_DEVFN(device, func) ((((device) & 0x1f) << 3) | ((func) & 0x07)) softc->domain = pci_get_domain(softc->dev); softc->bus = pci_get_bus(softc->dev); softc->slot = pci_get_slot(softc->dev); softc->function = pci_get_function(softc->dev); softc->dev_fn = PCI_DEVFN(softc->slot, softc->function); if (bnxt_num_pfs == 0) SLIST_INIT(&pf_list); bnxt_num_pfs++; softc->list.softc = softc; SLIST_INSERT_HEAD(&pf_list, &softc->list, next); pci_enable_busmaster(softc->dev); if (bnxt_pci_mapping(softc)) return (ENXIO); /* HWRM setup/init */ BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev)); rc = bnxt_alloc_hwrm_dma_mem(softc); if (rc) goto dma_fail; /* Get firmware version and compare with driver */ softc->ver_info = malloc(sizeof(struct bnxt_ver_info), M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->ver_info == NULL) { rc = ENOMEM; device_printf(softc->dev, "Unable to allocate space for version info\n"); goto ver_alloc_fail; } /* Default minimum required HWRM version */ softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR; softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR; softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE; rc = bnxt_hwrm_ver_get(softc); if (rc) { device_printf(softc->dev, "attach: hwrm ver get failed\n"); goto ver_fail; } /* Now perform a function reset */ rc = bnxt_hwrm_func_reset(softc); if ((softc->flags & BNXT_FLAG_SHORT_CMD) || softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { rc = bnxt_alloc_hwrm_short_cmd_req(softc); if (rc) goto hwrm_short_cmd_alloc_fail; } if ((softc->ver_info->chip_num == BCM57508) || (softc->ver_info->chip_num == BCM57504) || (softc->ver_info->chip_num == BCM57502)) softc->flags |= BNXT_FLAG_CHIP_P5; softc->flags |= BNXT_FLAG_TPA; /* No TPA for Thor A0 */ if (BNXT_CHIP_P5(softc) && (!softc->ver_info->chip_rev) && 
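	    /* chip_rev == 0 and chip_metal == 0 identify the A0 stepping */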
	    (!softc->ver_info->chip_metal))
		softc->flags &= ~BNXT_FLAG_TPA;

	/* TBD ++ Add TPA support from Thor B1 */
	if (BNXT_CHIP_P5(softc))
		softc->flags &= ~BNXT_FLAG_TPA;

	/* Get NVRAM info */
	if (BNXT_PF(softc)) {
		softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (softc->nvm_info == NULL) {
			rc = ENOMEM;
			device_printf(softc->dev,
			    "Unable to allocate space for NVRAM info\n");
			goto nvm_alloc_fail;
		}

		rc = bnxt_hwrm_nvm_get_dev_info(softc,
		    &softc->nvm_info->mfg_id,
		    &softc->nvm_info->device_id,
		    &softc->nvm_info->sector_size,
		    &softc->nvm_info->size,
		    &softc->nvm_info->reserved_size,
		    &softc->nvm_info->available_size);
	}

	if (BNXT_CHIP_P5(softc)) {
		softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
		softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
	} else {
		softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
	}

	/* Register the driver with the FW */
	rc = bnxt_hwrm_func_drv_rgtr(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm drv rgtr failed\n");
		goto drv_rgtr_fail;
	}

	rc = bnxt_hwrm_func_rgtr_async_events(softc, NULL, 0);
	if (rc) {
		device_printf(softc->dev,
		    "attach: hwrm rgtr async evts failed\n");
		goto drv_rgtr_fail;
	}

	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm qportcfg failed\n");
		goto failed;
	}

	if (softc->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(softc);
		if (rc) {
			device_printf(softc->dev,
			    "attach: alloc_ctx_mem failed\n");
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(softc, true);
		if (!rc)
			softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(softc);
	if (rc)
		goto failed;

	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
		goto failed;
	}

	iflib_set_mac(ctx, softc->func.mac_addr);

	scctx->isc_txrx = &bnxt_txrx;
	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
	scctx->isc_capabilities = scctx->isc_capenable =
	    /* These are translated to hwassist bits */
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
	    /* These are checked by iflib */
	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
	    /* These are part of the iflib mask */
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
	    /* These likely get lost...
*/ IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU; if (bnxt_wol_supported(softc)) scctx->isc_capabilities |= IFCAP_WOL_MAGIC; bnxt_get_wol_settings(softc); if (softc->wol) scctx->isc_capenable |= IFCAP_WOL_MAGIC; /* Get the queue config */ bnxt_get_wol_settings(softc); if (BNXT_CHIP_P5(softc)) bnxt_hwrm_reserve_pf_rings(softc); rc = bnxt_hwrm_func_qcfg(softc); if (rc) { device_printf(softc->dev, "attach: hwrm func qcfg failed\n"); goto failed; } bnxt_clear_ids(softc); if (rc) goto failed; /* Now set up iflib sc */ scctx->isc_tx_nsegments = 31, scctx->isc_tx_tso_segments_max = 31; scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE; scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE; scctx->isc_vectors = softc->func.max_cp_rings; scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE; scctx->isc_txrx = &bnxt_txrx; if (scctx->isc_nrxd[0] < ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2])) device_printf(softc->dev, "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d). Driver may be unstable\n", scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]); if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2) device_printf(softc->dev, "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d). Driver may be unstable\n", scctx->isc_ntxd[0], scctx->isc_ntxd[1]); scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0]; scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) * scctx->isc_ntxd[1]; scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2]; scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0]; scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) * scctx->isc_nrxd[1]; scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) * scctx->isc_nrxd[2]; scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1, softc->fn_qcfg.alloc_completion_rings - 1); scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max, softc->fn_qcfg.alloc_rx_rings); scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max, softc->fn_qcfg.alloc_vnics); scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings, softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1); scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE; scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1; /* iflib will map and release this bar */ scctx->isc_msix_bar = pci_msix_table_bar(softc->dev); /* * Default settings for HW LRO (TPA): * Disable HW LRO by default * Can be enabled after taking care of 'packet forwarding' */ if (softc->flags & BNXT_FLAG_TPA) { softc->hw_lro.enable = 0; softc->hw_lro.is_mode_gro = 0; softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */ softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX; softc->hw_lro.min_agg_len = 512; } /* Allocate the default completion ring */ softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE; softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->def_cp_ring.ring.softc = softc; softc->def_cp_ring.ring.id = 0; softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5(softc)) ? 
DB_PF_OFFSET_P5 : softc->def_cp_ring.ring.id * 0x80; softc->def_cp_ring.ring.ring_size = PAGE_SIZE / sizeof(struct cmpl_base); rc = iflib_dma_alloc(ctx, sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size, &softc->def_cp_ring_mem, 0); softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr; softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr; iflib_config_gtask_init(ctx, &softc->def_cp_task, bnxt_def_cp_task, "dflt_cp"); rc = bnxt_init_sysctl_ctx(softc); if (rc) goto init_sysctl_failed; if (BNXT_PF(softc)) { rc = bnxt_create_nvram_sysctls(softc->nvm_info); if (rc) goto failed; } arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0); softc->vnic_info.rss_hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; rc = bnxt_create_config_sysctls_pre(softc); if (rc) goto failed; rc = bnxt_create_hw_lro_sysctls(softc); if (rc) goto failed; rc = bnxt_create_pause_fc_sysctls(softc); if (rc) goto failed; /* Initialize the vlan list */ SLIST_INIT(&softc->vnic_info.vlan_tags); softc->vnic_info.vlan_tag_list.idi_vaddr = NULL; softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF, M_WAITOK|M_ZERO); return (rc); failed: bnxt_free_sysctl_ctx(softc); init_sysctl_failed: bnxt_hwrm_func_drv_unrgtr(softc, false); drv_rgtr_fail: if (BNXT_PF(softc)) free(softc->nvm_info, M_DEVBUF); nvm_alloc_fail: bnxt_free_hwrm_short_cmd_req(softc); hwrm_short_cmd_alloc_fail: ver_fail: free(softc->ver_info, M_DEVBUF); ver_alloc_fail: bnxt_free_hwrm_dma_mem(softc); dma_fail: BNXT_HWRM_LOCK_DESTROY(softc); bnxt_pci_mapping_free(softc); pci_disable_busmaster(softc->dev); return (rc); } static int bnxt_attach_post(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); int rc; bnxt_create_config_sysctls_post(softc); /* Update link state etc... */ rc = bnxt_probe_phy(softc); if (rc) goto failed; /* Needs to be done after probing the phy */ bnxt_create_ver_sysctls(softc); bnxt_add_media_types(softc); ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO); - softc->scctx->isc_max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + + softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE); failed: return rc; } static int bnxt_detach(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_vlan_tag *tag; struct bnxt_vlan_tag *tmp; int i; SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next); bnxt_num_pfs--; bnxt_wol_config(ctx); bnxt_do_disable_intr(&softc->def_cp_ring); bnxt_free_sysctl_ctx(softc); bnxt_hwrm_func_reset(softc); bnxt_free_ctx_mem(softc); bnxt_clear_ids(softc); iflib_irq_free(ctx, &softc->def_cp_ring.irq); iflib_config_gtask_deinit(&softc->def_cp_task); /* We need to free() these here... 
*/ for (i = softc->nrxqsets-1; i>=0; i--) { if (BNXT_CHIP_P5(softc)) iflib_irq_free(ctx, &softc->nq_rings[i].irq); else iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq); } iflib_dma_free(&softc->vnic_info.mc_list); iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl); iflib_dma_free(&softc->vnic_info.rss_grp_tbl); if (softc->vnic_info.vlan_tag_list.idi_vaddr) iflib_dma_free(&softc->vnic_info.vlan_tag_list); SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp) free(tag, M_DEVBUF); iflib_dma_free(&softc->def_cp_ring_mem); for (i = 0; i < softc->nrxqsets; i++) free(softc->rx_rings[i].tpa_start, M_DEVBUF); free(softc->ver_info, M_DEVBUF); if (BNXT_PF(softc)) free(softc->nvm_info, M_DEVBUF); bnxt_hwrm_func_drv_unrgtr(softc, false); bnxt_free_hwrm_dma_mem(softc); bnxt_free_hwrm_short_cmd_req(softc); BNXT_HWRM_LOCK_DESTROY(softc); free(softc->state_bv, M_DEVBUF); pci_disable_busmaster(softc->dev); bnxt_pci_mapping_free(softc); return 0; } static void bnxt_hwrm_resource_free(struct bnxt_softc *softc) { int i, rc = 0; rc = bnxt_hwrm_ring_free(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, &softc->def_cp_ring.ring, (uint16_t)HWRM_NA_SIGNATURE); if (rc) goto fail; for (i = 0; i < softc->ntxqsets; i++) { rc = bnxt_hwrm_ring_free(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX, &softc->tx_rings[i], softc->tx_cp_rings[i].ring.phys_id); if (rc) goto fail; rc = bnxt_hwrm_ring_free(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, &softc->tx_cp_rings[i].ring, (uint16_t)HWRM_NA_SIGNATURE); if (rc) goto fail; rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]); if (rc) goto fail; } rc = bnxt_hwrm_free_filter(softc); if (rc) goto fail; rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info); if (rc) goto fail; rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id); if (rc) goto fail; for (i = 0; i < softc->nrxqsets; i++) { rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]); if (rc) goto fail; rc = bnxt_hwrm_ring_free(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG, &softc->ag_rings[i], (uint16_t)HWRM_NA_SIGNATURE); if (rc) goto fail; rc = bnxt_hwrm_ring_free(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i], softc->rx_cp_rings[i].ring.phys_id); if (rc) goto fail; rc = bnxt_hwrm_ring_free(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, &softc->rx_cp_rings[i].ring, (uint16_t)HWRM_NA_SIGNATURE); if (rc) goto fail; if (BNXT_CHIP_P5(softc)) { rc = bnxt_hwrm_ring_free(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ, &softc->nq_rings[i].ring, (uint16_t)HWRM_NA_SIGNATURE); if (rc) goto fail; } rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]); if (rc) goto fail; } fail: return; } static void bnxt_func_reset(struct bnxt_softc *softc) { if (!BNXT_CHIP_P5(softc)) { bnxt_hwrm_func_reset(softc); return; } bnxt_hwrm_resource_free(softc); return; } static void bnxt_rss_grp_tbl_init(struct bnxt_softc *softc) { uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr; int i, j; for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { if (BNXT_CHIP_P5(softc)) { rgt[i++] = htole16(softc->rx_rings[j].phys_id); rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id); } else { rgt[i] = htole16(softc->grp_info[j].grp_id); } if (++j == softc->nrxqsets) j = 0; } } /* Device configuration */ static void bnxt_init(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct ifmediareq ifmr; int i; int rc; if (!BNXT_CHIP_P5(softc)) { rc = bnxt_hwrm_func_reset(softc); if (rc) return; } else if (softc->is_dev_init) { bnxt_stop(ctx); } softc->is_dev_init = true; bnxt_clear_ids(softc); // TBD -- 
Check if it is needed for Thor as well if (BNXT_CHIP_P5(softc)) goto skip_def_cp_ring; /* Allocate the default completion ring */ softc->def_cp_ring.cons = UINT32_MAX; softc->def_cp_ring.v_bit = 1; bnxt_mark_cpr_invalid(&softc->def_cp_ring); rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, &softc->def_cp_ring.ring); if (rc) goto fail; skip_def_cp_ring: for (i = 0; i < softc->nrxqsets; i++) { /* Allocate the statistics context */ rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i], softc->rx_stats[i].idi_paddr); if (rc) goto fail; if (BNXT_CHIP_P5(softc)) { /* Allocate the NQ */ softc->nq_rings[i].cons = 0; softc->nq_rings[i].v_bit = 1; softc->nq_rings[i].last_idx = UINT32_MAX; bnxt_mark_cpr_invalid(&softc->nq_rings[i]); rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ, &softc->nq_rings[i].ring); if (rc) goto fail; softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1); } /* Allocate the completion ring */ softc->rx_cp_rings[i].cons = UINT32_MAX; softc->rx_cp_rings[i].v_bit = 1; softc->rx_cp_rings[i].last_idx = UINT32_MAX; bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]); rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, &softc->rx_cp_rings[i].ring); if (rc) goto fail; if (BNXT_CHIP_P5(softc)) softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1); /* Allocate the RX ring */ rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]); if (rc) goto fail; softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0); /* Allocate the AG ring */ rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG, &softc->ag_rings[i]); if (rc) goto fail; softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0); /* Allocate the ring group */ softc->grp_info[i].stats_ctx = softc->rx_cp_rings[i].stats_ctx_id; softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id; softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id; softc->grp_info[i].cp_ring_id = softc->rx_cp_rings[i].ring.phys_id; rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]); if (rc) goto fail; } /* And now set the default CP / NQ ring for the async */ rc = bnxt_cfg_async_cr(softc); if (rc) goto fail; /* Allocate the VNIC RSS context */ rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id); if (rc) goto fail; /* Allocate the vnic */ softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id; softc->vnic_info.mru = softc->scctx->isc_max_frame_size; rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info); if (rc) goto fail; rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info); if (rc) goto fail; rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info); if (rc) goto fail; rc = bnxt_hwrm_set_filter(softc); if (rc) goto fail; bnxt_rss_grp_tbl_init(softc); rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info, softc->vnic_info.rss_hash_type); if (rc) goto fail; rc = bnxt_hwrm_vnic_tpa_cfg(softc); if (rc) goto fail; for (i = 0; i < softc->ntxqsets; i++) { /* Allocate the statistics context */ rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i], softc->tx_stats[i].idi_paddr); if (rc) goto fail; /* Allocate the completion ring */ softc->tx_cp_rings[i].cons = UINT32_MAX; softc->tx_cp_rings[i].v_bit = 1; bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]); rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, &softc->tx_cp_rings[i].ring); if (rc) goto fail; if (BNXT_CHIP_P5(softc)) softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1); /* Allocate the TX ring */ rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX, 
&softc->tx_rings[i]); if (rc) goto fail; softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0); } bnxt_do_enable_intr(&softc->def_cp_ring); bnxt_media_status(softc->ctx, &ifmr); bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info); return; fail: bnxt_func_reset(softc); bnxt_clear_ids(softc); return; } static void bnxt_stop(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); softc->is_dev_init = false; bnxt_do_disable_intr(&softc->def_cp_ring); bnxt_func_reset(softc); bnxt_clear_ids(softc); return; } static u_int bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint8_t *mta = arg; if (cnt == BNXT_MAX_MC_ADDRS) return (1); bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); return (1); } static void bnxt_multi_set(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); uint8_t *mta; int mcnt; mta = softc->vnic_info.mc_list.idi_vaddr; bzero(mta, softc->vnic_info.mc_list.idi_size); mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta); if (mcnt > BNXT_MAX_MC_ADDRS) { softc->vnic_info.rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info); } else { softc->vnic_info.rx_mask &= ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag, softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE); softc->vnic_info.mc_list_count = mcnt; softc->vnic_info.rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST; if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info)) device_printf(softc->dev, "set_multi: rx_mask set failed\n"); } } static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct bnxt_softc *softc = iflib_get_softc(ctx); if (mtu > BNXT_MAX_MTU) return EINVAL; softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE); return 0; } static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_link_info *link_info = &softc->link_info; struct ifmedia_entry *next; uint64_t target_baudrate = bnxt_get_baudrate(link_info); int active_media = IFM_UNKNOWN; bnxt_update_link(softc, true); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (link_info->link_up) ifmr->ifm_status |= IFM_ACTIVE; else ifmr->ifm_status &= ~IFM_ACTIVE; if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; /* * Go through the list of supported media which got prepared * as part of bnxt_add_media_types() using api ifmedia_add(). 
*/ LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) { if (ifmedia_baudrate(next->ifm_media) == target_baudrate) { active_media = next->ifm_media; break; } } ifmr->ifm_active |= active_media; if (link_info->flow_ctrl.rx) ifmr->ifm_active |= IFM_ETH_RXPAUSE; if (link_info->flow_ctrl.tx) ifmr->ifm_active |= IFM_ETH_TXPAUSE; bnxt_report_link(softc); return; } static int bnxt_media_change(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); struct ifmediareq ifmr; int rc; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return EINVAL; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_100_T: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB; break; case IFM_1000_KX: case IFM_1000_T: case IFM_1000_SGMII: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB; break; case IFM_2500_KX: case IFM_2500_T: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB; break; case IFM_10G_CR1: case IFM_10G_KR: case IFM_10G_LR: case IFM_10G_SR: case IFM_10G_T: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB; break; case IFM_20G_KR2: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB; break; case IFM_25G_CR: case IFM_25G_KR: case IFM_25G_SR: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB; break; case IFM_40G_CR4: case IFM_40G_KR4: case IFM_40G_LR4: case IFM_40G_SR4: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; break; case IFM_50G_CR2: case IFM_50G_KR2: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; break; case IFM_100G_CR4: case IFM_100G_KR4: case IFM_100G_LR4: case IFM_100G_SR4: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; break; default: device_printf(softc->dev, "Unsupported media type! 
Using auto\n"); /* Fall-through */ case IFM_AUTO: // Auto softc->link_info.autoneg |= BNXT_AUTONEG_SPEED; break; } rc = bnxt_hwrm_set_link_setting(softc, true, true, true); bnxt_media_status(softc->ctx, &ifmr); return rc; } static int bnxt_promisc_set(if_ctx_t ctx, int flags) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); int rc; - if (ifp->if_flags & IFF_ALLMULTI || + if (if_getflags(ifp) & IFF_ALLMULTI || if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS) softc->vnic_info.rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; else softc->vnic_info.rx_mask &= ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; - if (ifp->if_flags & IFF_PROMISC) + if (if_getflags(ifp) & IFF_PROMISC) softc->vnic_info.rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN; else softc->vnic_info.rx_mask &= ~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS); rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info); return rc; } static uint64_t bnxt_get_counter(if_ctx_t ctx, ift_counter cnt) { if_t ifp = iflib_get_ifp(ctx); if (cnt < IFCOUNTERS) return if_get_counter_default(ifp, cnt); return 0; } static void bnxt_update_admin_status(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); /* * When SR-IOV is enabled, avoid each VF sending this HWRM * request every sec with which firmware timeouts can happen */ if (!BNXT_PF(softc)) return; bnxt_hwrm_port_qstats(softc); if (BNXT_CHIP_P5(softc)) { struct ifmediareq ifmr; if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) { bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE); bnxt_media_status(softc->ctx, &ifmr); } } return; } static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid) { struct bnxt_softc *softc = iflib_get_softc(ctx); uint64_t ticks_now = ticks; /* Schedule bnxt_update_admin_status() once per sec */ if (ticks_now - softc->admin_ticks >= hz) { softc->admin_ticks = ticks_now; iflib_admin_intr_deferred(ctx); } return; } static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr) { struct bnxt_softc *softc = cpr->ring.softc; if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE) return; if (BNXT_CHIP_P5(softc)) softc->db_ops.bnxt_db_nq(cpr, 1); else softc->db_ops.bnxt_db_rx_cq(cpr, 1); } static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr) { struct bnxt_softc *softc = cpr->ring.softc; if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE) return; if (BNXT_CHIP_P5(softc)) softc->db_ops.bnxt_db_nq(cpr, 0); else softc->db_ops.bnxt_db_rx_cq(cpr, 0); } /* Enable all interrupts */ static void bnxt_intr_enable(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); int i; bnxt_do_enable_intr(&softc->def_cp_ring); for (i = 0; i < softc->nrxqsets; i++) if (BNXT_CHIP_P5(softc)) softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1); else softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1); return; } /* Enable interrupt for a single queue */ static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct bnxt_softc *softc = iflib_get_softc(ctx); if (BNXT_CHIP_P5(softc)) softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1); else softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1); return 0; } static void bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl) { device_printf(softc->dev, "cmd sequence number %d\n", cmd_cmpl->sequence_id); return; } static void bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl) { struct bnxt_softc *softc = cpr->ring.softc; uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK; 
	switch (type) {
	case HWRM_CMPL_TYPE_HWRM_DONE:
		bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
		break;
	case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
		bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
		break;
	default:
		device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
		    __FUNCTION__, __LINE__, type);
		break;
	}
}

static void
process_nq(struct bnxt_softc *softc, uint16_t nqid)
{
	struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
	nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
	bool v_bit = cpr->v_bit;
	uint32_t cons = cpr->cons;
	uint16_t nq_type, nqe_cnt = 0;

	while (1) {
		if (!NQ_VALID(&cmp[cons], v_bit))
			goto done;

		nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;

		if (nq_type != NQ_CN_TYPE_CQ_NOTIFICATION)
			bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);

		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		nqe_cnt++;
	}
done:
	if (nqe_cnt) {
		cpr->cons = cons;
		cpr->v_bit = v_bit;
	}
}

static int
bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);

	if (BNXT_CHIP_P5(softc)) {
		process_nq(softc, qid);
		softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
	}
	softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);

	return 0;
}

/* Disable all interrupts */
static void
bnxt_disable_intr(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	int i;

	/*
	 * NOTE: These TX interrupts should never get enabled, so don't
	 * update the index
	 */
	for (i = 0; i < softc->nrxqsets; i++)
		if (BNXT_CHIP_P5(softc))
			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
		else
			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);

	return;
}

static int
bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_cp_ring *ring;
	struct if_irq *irq;
	uint16_t id;
	int rc;
	int i;
	char irq_name[16];

	if (BNXT_CHIP_P5(softc))
		goto skip_default_cp;

	rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
	    softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
	    bnxt_handle_def_cp, softc, 0, "def_cp");
	if (rc) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register default completion ring handler\n");
		return rc;
	}

skip_default_cp:
	for (i = 0; i < softc->scctx->isc_nrxqsets; i++) {
		if (BNXT_CHIP_P5(softc)) {
			irq = &softc->nq_rings[i].irq;
			id = softc->nq_rings[i].ring.id;
			ring = &softc->nq_rings[i];
		} else {
			irq = &softc->rx_cp_rings[i].irq;
			id = softc->rx_cp_rings[i].ring.id;
			ring = &softc->rx_cp_rings[i];
		}
		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
		rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
		    bnxt_handle_isr, ring, i, irq_name);
		if (rc) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to register RX completion ring handler\n");
			i--;
			goto fail;
		}
	}

	for (i = 0; i < softc->scctx->isc_ntxqsets; i++)
		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
		    "tx_cp");

	return rc;

fail:
	for (; i >= 0; i--)
		iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	return rc;
}

/*
 * We're explicitly allowing duplicates here. They will need to be
 * removed as many times as they are added.
*/ static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_vlan_tag *new_tag; new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT); if (new_tag == NULL) return; new_tag->tag = vtag; new_tag->filter_id = -1; SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next); }; static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_vlan_tag *vlan_tag; SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) { if (vlan_tag->tag == vtag) { SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag, bnxt_vlan_tag, next); free(vlan_tag, M_DEVBUF); break; } } } static int bnxt_wol_config(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); if (!softc) return -EBUSY; if (!bnxt_wol_supported(softc)) return -ENOTSUP; if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) { if (!softc->wol) { if (bnxt_hwrm_alloc_wol_fltr(softc)) return -EBUSY; softc->wol = 1; } } else { if (softc->wol) { if (bnxt_hwrm_free_wol_fltr(softc)) return -EBUSY; softc->wol = 0; } } return 0; } static int bnxt_shutdown(if_ctx_t ctx) { bnxt_wol_config(ctx); return 0; } static int bnxt_suspend(if_ctx_t ctx) { bnxt_wol_config(ctx); return 0; } static int bnxt_resume(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); bnxt_get_wol_settings(softc); return 0; } static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct ifreq *ifr = (struct ifreq *)data; struct bnxt_ioctl_header *ioh; size_t iol; int rc = ENOTSUP; struct bnxt_ioctl_data iod_storage, *iod = &iod_storage; switch (command) { case SIOCGPRIVATE_0: if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0) goto exit; ioh = ifr_buffer_get_buffer(ifr); iol = ifr_buffer_get_length(ifr); if (iol > sizeof(iod_storage)) return (EINVAL); if ((rc = copyin(ioh, iod, iol)) != 0) goto exit; switch (iod->hdr.type) { case BNXT_HWRM_NVM_FIND_DIR_ENTRY: { struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find = &iod->find; rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type, &find->ordinal, find->ext, &find->index, find->use_index, find->search_opt, &find->data_length, &find->item_length, &find->fw_ver); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, iol); } rc = 0; goto exit; } case BNXT_HWRM_NVM_READ: { struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read; struct iflib_dma_info dma_data; size_t offset; size_t remain; size_t csize; /* * Some HWRM versions can't read more than 0x8000 bytes */ rc = iflib_dma_alloc(softc->ctx, min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT); if (rc) break; for (remain = rd->length, offset = 0; remain && offset < rd->length; offset += 0x8000) { csize = min(remain, 0x8000); rc = bnxt_hwrm_nvm_read(softc, rd->index, rd->offset + offset, csize, &dma_data); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); break; } else { copyout(dma_data.idi_vaddr, rd->data + offset, csize); iod->hdr.rc = 0; } remain -= csize; } if (iod->hdr.rc == 0) copyout(iod, ioh, iol); iflib_dma_free(&dma_data); rc = 0; goto exit; } case BNXT_HWRM_FW_RESET: { struct bnxt_ioctl_hwrm_fw_reset *rst = &iod->reset; rc = bnxt_hwrm_fw_reset(softc, rst->processor, &rst->selfreset); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, iol); } rc = 0; goto exit; } case BNXT_HWRM_FW_QSTATUS: 
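	/*
	 * Each BNXT_HWRM_* case below follows the same convention: on
	 * failure only hdr.rc is copied back to userland; on success the
	 * whole request structure is copied back with hdr.rc set to 0.
	 */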
	{
		struct bnxt_ioctl_hwrm_fw_qstatus *qstat = &iod->status;

		rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
		    &qstat->selfreset);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}

		rc = 0;
		goto exit;
	}
	case BNXT_HWRM_NVM_WRITE:
	{
		struct bnxt_ioctl_hwrm_nvm_write *wr = &iod->write;

		rc = bnxt_hwrm_nvm_write(softc, wr->data, true, wr->type,
		    wr->ordinal, wr->ext, wr->attr, wr->option,
		    wr->data_length, wr->keep, &wr->item_length, &wr->index);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}

		rc = 0;
		goto exit;
	}
	case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
	{
		struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
		    &iod->erase;

		rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}

		rc = 0;
		goto exit;
	}
	case BNXT_HWRM_NVM_GET_DIR_INFO:
	{
		struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
		    &iod->dir_info;

		rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
		    &info->entry_length);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}

		rc = 0;
		goto exit;
	}
	case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
	{
		struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
		    &iod->dir_entries;
		struct iflib_dma_info dma_data;

		rc = iflib_dma_alloc(softc->ctx, get->max_size, &dma_data,
		    BUS_DMA_NOWAIT);
		if (rc)
			break;
		rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
		    &get->entry_length, &dma_data);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			copyout(dma_data.idi_vaddr, get->data,
			    get->entry_length * get->entries);
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}
		iflib_dma_free(&dma_data);

		rc = 0;
		goto exit;
	}
	case BNXT_HWRM_NVM_VERIFY_UPDATE:
	{
		struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy = &iod->verify;

		rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
		    vrfy->ordinal, vrfy->ext);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}

		rc = 0;
		goto exit;
	}
	case BNXT_HWRM_NVM_INSTALL_UPDATE:
	{
		struct bnxt_ioctl_hwrm_nvm_install_update *inst =
		    &iod->install;

		rc = bnxt_hwrm_nvm_install_update(softc, inst->install_type,
		    &inst->installed_items, &inst->result,
		    &inst->problem_item, &inst->reset_required);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}

		rc = 0;
		goto exit;
	}
	case BNXT_HWRM_NVM_MODIFY:
	{
		struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;

		rc = bnxt_hwrm_nvm_modify(softc, mod->index, mod->offset,
		    mod->data, true, mod->length);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}

		rc = 0;
		goto exit;
	}
	case BNXT_HWRM_FW_GET_TIME:
	{
		struct bnxt_ioctl_hwrm_fw_get_time *gtm = &iod->get_time;

		rc = bnxt_hwrm_fw_get_time(softc, &gtm->year, &gtm->month,
		    &gtm->day, &gtm->hour, &gtm->minute, &gtm->second,
		    &gtm->millisecond, &gtm->zone);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}

		rc = 0;
		goto exit;
	}
	case BNXT_HWRM_FW_SET_TIME:
	{
		struct bnxt_ioctl_hwrm_fw_set_time *stm = &iod->set_time;

		rc = bnxt_hwrm_fw_set_time(softc, stm->year, stm->month,
		    stm->day, stm->hour, stm->minute, stm->second,
		    stm->millisecond, stm->zone);
		if (rc) {
			iod->hdr.rc = rc;
			copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc));
		} else {
			iod->hdr.rc = 0;
			copyout(iod, ioh, iol);
		}

		rc = 0;
		goto exit;
	}
	}
	break;
	}

exit:
	return rc;
}

/*
 * Support functions
 */
static int
bnxt_probe_phy(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	int rc = 0;

	rc = bnxt_update_link(softc, false);
	if (rc) {
		device_printf(softc->dev,
		    "Probe phy can't update link (rc: %x)\n", rc);
		return (rc);
	}

	/* Initialize the ethtool setting copy with NVM settings */
	if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		link_info->autoneg |= BNXT_AUTONEG_SPEED;

	link_info->req_duplex = link_info->duplex_setting;
	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
		link_info->req_link_speed = link_info->auto_link_speed;
	else
		link_info->req_link_speed = link_info->force_link_speed;

	return (rc);
}

static void
bnxt_add_media_types(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	uint16_t supported;
	uint8_t phy_type = get_phy_type(softc);

	supported = link_info->support_speeds;

	/* Auto is always supported */
	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);

	if (softc->flags & BNXT_FLAG_NPAR)
		return;

	switch (phy_type) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_CR4);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_CR2);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_CR4);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_CR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_CR1);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_LR4);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_LR4);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_LR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_LR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_LX);
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_SR4);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_SR4);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_SR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_SR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SX);
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_KR4);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR2);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_KR4);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_KR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_20GB, IFM_20G_KR2);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
		break;
	case
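	/* Active cables: ACC is active copper, AOC is active optical */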
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE: BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_ACC); BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_AOC); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX: BNXT_IFMEDIA_ADD(supported, SPEEDS_1GBHD, IFM_1000_CX); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE: BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_T); BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_T); BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T); BNXT_IFMEDIA_ADD(supported, SPEEDS_100MB, IFM_100_T); BNXT_IFMEDIA_ADD(supported, SPEEDS_10MB, IFM_10_T); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX: BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR); BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_KX); BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY: BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SGMII); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN: /* Only Autoneg is supported for TYPE_UNKNOWN */ device_printf(softc->dev, "Unknown phy type\n"); break; default: /* Only Autoneg is supported for new phy type values */ device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type); break; } return; } static int bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable) { uint32_t flag; if (bar->res != NULL) { device_printf(softc->dev, "Bar %d already mapped\n", bar_num); return EDOOFUS; } bar->rid = PCIR_BAR(bar_num); flag = RF_ACTIVE; if (shareable) flag |= RF_SHAREABLE; if ((bar->res = bus_alloc_resource_any(softc->dev, SYS_RES_MEMORY, &bar->rid, flag)) == NULL) { device_printf(softc->dev, "PCI BAR%d mapping failure\n", bar_num); return (ENXIO); } bar->tag = rman_get_bustag(bar->res); bar->handle = rman_get_bushandle(bar->res); bar->size = rman_get_size(bar->res); return 0; } static int bnxt_pci_mapping(struct bnxt_softc *softc) { int rc; rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true); if (rc) return rc; rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false); return rc; } static void bnxt_pci_mapping_free(struct bnxt_softc *softc) { if (softc->hwrm_bar.res != NULL) bus_release_resource(softc->dev, SYS_RES_MEMORY, softc->hwrm_bar.rid, softc->hwrm_bar.res); softc->hwrm_bar.res = NULL; if (softc->doorbell_bar.res != NULL) bus_release_resource(softc->dev, SYS_RES_MEMORY, softc->doorbell_bar.rid, softc->doorbell_bar.res); softc->doorbell_bar.res = NULL; } static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state) { struct bnxt_link_info *link_info = &softc->link_info; uint8_t link_up = link_info->link_up; int rc = 0; rc = bnxt_hwrm_port_phy_qcfg(softc); if (rc) goto exit; /* TODO: need to add more logic to report VF link */ if (chng_link_state) { if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) link_info->link_up = 1; else link_info->link_up = 0; if (link_up != link_info->link_up) bnxt_report_link(softc); } else { /* always link down if not require to update link state */ link_info->link_up = 0; } exit: return rc; } void bnxt_report_link(struct bnxt_softc *softc) { struct bnxt_link_info *link_info = &softc->link_info; const char *duplex = NULL, *flow_ctrl = NULL; if (link_info->link_up == link_info->last_link_up) { if (!link_info->link_up) return; if ((link_info->duplex == link_info->last_duplex) && (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info)))) return; } if (link_info->link_up) { if 
(link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL) duplex = "full duplex"; else duplex = "half duplex"; if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx) flow_ctrl = "FC - receive & transmit"; else if (link_info->flow_ctrl.tx) flow_ctrl = "FC - transmit"; else if (link_info->flow_ctrl.rx) flow_ctrl = "FC - receive"; else flow_ctrl = "FC - none"; iflib_link_state_change(softc->ctx, LINK_STATE_UP, IF_Gbps(100)); device_printf(softc->dev, "Link is UP %s, %s - %d Mbps \n", duplex, flow_ctrl, (link_info->link_speed * 100)); } else { iflib_link_state_change(softc->ctx, LINK_STATE_DOWN, bnxt_get_baudrate(&softc->link_info)); device_printf(softc->dev, "Link is Down\n"); } link_info->last_link_up = link_info->link_up; link_info->last_duplex = link_info->duplex; link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx; link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx; link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg; /* update media types */ ifmedia_removeall(softc->media); bnxt_add_media_types(softc); ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO); } static int bnxt_handle_isr(void *arg) { struct bnxt_cp_ring *cpr = arg; struct bnxt_softc *softc = cpr->ring.softc; cpr->int_count++; /* Disable further interrupts for this queue */ if (!BNXT_CHIP_P5(softc)) softc->db_ops.bnxt_db_rx_cq(cpr, 0); return FILTER_SCHEDULE_THREAD; } static int bnxt_handle_def_cp(void *arg) { struct bnxt_softc *softc = arg; softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0); GROUPTASK_ENQUEUE(&softc->def_cp_task); return FILTER_HANDLED; } static void bnxt_clear_ids(struct bnxt_softc *softc) { int i; softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE; softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE; softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; for (i = 0; i < softc->ntxqsets; i++) { softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->tx_cp_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; if (!softc->nq_rings) continue; softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; } for (i = 0; i < softc->nrxqsets; i++) { softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->rx_cp_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE; } softc->vnic_info.filter_id = -1; softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE; memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff, softc->vnic_info.rss_grp_tbl.idi_size); } static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr) { struct cmpl_base *cmp = (void *)cpr->ring.vaddr; int i; for (i = 0; i < cpr->ring.ring_size; i++) cmp[i].info3_v = !cpr->v_bit; } static void bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl) { struct hwrm_async_event_cmpl *ae = (void *)cmpl; uint16_t async_id = le16toh(ae->event_id); struct ifmediareq ifmr; switch (async_id) { case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: if (BNXT_CHIP_P5(softc)) bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE); else bnxt_media_status(softc->ctx, &ifmr); break; case 
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR: device_printf(softc->dev, "Unhandled async completion type %u\n", async_id); break; default: device_printf(softc->dev, "Unknown async completion type %u\n", async_id); break; } } static void bnxt_def_cp_task(void *context) { if_ctx_t ctx = context; struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_cp_ring *cpr = &softc->def_cp_ring; /* Handle completions on the default completion ring */ struct cmpl_base *cmpl; uint32_t cons = cpr->cons; bool v_bit = cpr->v_bit; bool last_v_bit; uint32_t last_cons; uint16_t type; for (;;) { last_cons = cons; last_v_bit = v_bit; NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons]; if (!CMP_VALID(cmpl, v_bit)) break; type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK; switch (type) { case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: bnxt_handle_async_event(softc, cmpl); break; case CMPL_BASE_TYPE_TX_L2: case CMPL_BASE_TYPE_RX_L2: case CMPL_BASE_TYPE_RX_AGG: case CMPL_BASE_TYPE_RX_TPA_START: case CMPL_BASE_TYPE_RX_TPA_END: case CMPL_BASE_TYPE_STAT_EJECT: case CMPL_BASE_TYPE_HWRM_DONE: case CMPL_BASE_TYPE_HWRM_FWD_REQ: case CMPL_BASE_TYPE_HWRM_FWD_RESP: case CMPL_BASE_TYPE_CQ_NOTIFICATION: case CMPL_BASE_TYPE_SRQ_EVENT: case CMPL_BASE_TYPE_DBQ_EVENT: case CMPL_BASE_TYPE_QP_EVENT: case CMPL_BASE_TYPE_FUNC_EVENT: device_printf(softc->dev, "Unhandled completion type %u\n", type); break; default: device_printf(softc->dev, "Unknown completion type %u\n", type); break; } } cpr->cons = last_cons; cpr->v_bit = last_v_bit; softc->db_ops.bnxt_db_rx_cq(cpr, 1); } static uint8_t get_phy_type(struct bnxt_softc *softc) { struct bnxt_link_info *link_info = &softc->link_info; uint8_t phy_type = link_info->phy_type; uint16_t supported; if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN) return phy_type; /* Deduce the phy type from the media type and supported speeds */ supported = link_info->support_speeds; if (link_info->media_type == HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET; if (link_info->media_type == HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) { if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX; if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR; return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR; } if (link_info->media_type == HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE) return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR; return phy_type; } bool bnxt_check_hwrm_version(struct bnxt_softc *softc) { char buf[16]; sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major, softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update); if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) { device_printf(softc->dev, "WARNING: HWRM version %s is too old (older 
than %s)\n", softc->ver_info->hwrm_if_ver, buf); return false; } else if(softc->ver_info->hwrm_min_major == softc->ver_info->hwrm_if_major) { if (softc->ver_info->hwrm_min_minor > softc->ver_info->hwrm_if_minor) { device_printf(softc->dev, "WARNING: HWRM version %s is too old (older than %s)\n", softc->ver_info->hwrm_if_ver, buf); return false; } else if (softc->ver_info->hwrm_min_minor == softc->ver_info->hwrm_if_minor) { if (softc->ver_info->hwrm_min_update > softc->ver_info->hwrm_if_update) { device_printf(softc->dev, "WARNING: HWRM version %s is too old (older than %s)\n", softc->ver_info->hwrm_if_ver, buf); return false; } } } return true; } static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link) { switch (link->link_speed) { case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB: return IF_Mbps(100); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB: return IF_Gbps(1); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB: return IF_Gbps(2); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB: return IF_Mbps(2500); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB: return IF_Gbps(10); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB: return IF_Gbps(20); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB: return IF_Gbps(25); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB: return IF_Gbps(40); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB: return IF_Gbps(50); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB: return IF_Gbps(100); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB: return IF_Mbps(10); } return IF_Gbps(100); } static void bnxt_get_wol_settings(struct bnxt_softc *softc) { uint16_t wol_handle = 0; if (!bnxt_wol_supported(softc)) return; do { wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle); } while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS); } diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c index 83b2aa588006..aa417ae6b95b 100644 --- a/sys/dev/bxe/bxe.c +++ b/sys/dev/bxe/bxe.c @@ -1,19474 +1,19474 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #define BXE_DRIVER_VERSION "1.78.91" #include "bxe.h" #include "ecore_sp.h" #include "ecore_init.h" #include "ecore_init_ops.h" #include "57710_int_offsets.h" #include "57711_int_offsets.h" #include "57712_int_offsets.h" /* * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these * explicitly here for older kernels that don't include this changeset. */ #ifndef CTLTYPE_U64 #define CTLTYPE_U64 CTLTYPE_QUAD #define sysctl_handle_64 sysctl_handle_quad #endif /* * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these * here as zero(0) for older kernels that don't include this changeset * thereby masking the functionality. */ #ifndef CSUM_TCP_IPV6 #define CSUM_TCP_IPV6 0 #define CSUM_UDP_IPV6 0 #endif #define BXE_DEF_SB_ATT_IDX 0x0001 #define BXE_DEF_SB_IDX 0x0002 /* * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per * function HW initialization. */ #define FLR_WAIT_USEC 10000 /* 10 msecs */ #define FLR_WAIT_INTERVAL 50 /* usecs */ #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ struct pbf_pN_buf_regs { int pN; uint32_t init_crd; uint32_t crd; uint32_t crd_freed; }; struct pbf_pN_cmd_regs { int pN; uint32_t lines_occup; uint32_t lines_freed; }; /* * PCI Device ID Table used by bxe_probe(). */ #define BXE_DEVDESC_MAX 64 static struct bxe_device_type bxe_devs[] = { { BRCM_VENDORID, CHIP_NUM_57710, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57710 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711E, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711E 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 4x10GbE" }, { QLOGIC_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 4x10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_2_20, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 2x20GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 MF 10GbE" }, { 0, 0, 0, 0, NULL } }; MALLOC_DECLARE(M_BXE_ILT); MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer"); /* * FreeBSD device entry points. */ static int bxe_probe(device_t); static int bxe_attach(device_t); static int bxe_detach(device_t); static int bxe_shutdown(device_t); /* * FreeBSD KLD module/device interface event handler method. 
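 *
 * Each DEVMETHOD() entry below binds a bus-independent kernel interface
 * method (device_probe, device_attach, ...) to this driver's
 * implementation, and KOBJMETHOD_END terminates the table.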
*/ static device_method_t bxe_methods[] = { /* Device interface (device_if.h) */ DEVMETHOD(device_probe, bxe_probe), DEVMETHOD(device_attach, bxe_attach), DEVMETHOD(device_detach, bxe_detach), DEVMETHOD(device_shutdown, bxe_shutdown), /* Bus interface (bus_if.h) */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), KOBJMETHOD_END }; /* * FreeBSD KLD Module data declaration */ static driver_t bxe_driver = { "bxe", /* module name */ bxe_methods, /* event handler */ sizeof(struct bxe_softc) /* extra data */ }; MODULE_DEPEND(bxe, pci, 1, 1, 1); MODULE_DEPEND(bxe, ether, 1, 1, 1); DRIVER_MODULE(bxe, pci, bxe_driver, 0, 0); DEBUGNET_DEFINE(bxe); /* resources needed for unloading a previously loaded device */ #define BXE_PREV_WAIT_NEEDED 1 struct mtx bxe_prev_mtx; MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF); struct bxe_prev_list_node { LIST_ENTRY(bxe_prev_list_node) node; uint8_t bus; uint8_t slot; uint8_t path; uint8_t aer; /* XXX automatic error recovery */ uint8_t undi; }; static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list); static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ /* Tunable device values... */ SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "bxe driver parameters"); /* Debug */ unsigned long bxe_debug = 0; SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN, &bxe_debug, 0, "Debug logging mode"); /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ static int bxe_interrupt_mode = INTR_MODE_MSIX; SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN, &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode"); /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */ static int bxe_queue_count = 4; SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, &bxe_queue_count, 0, "Multi-Queue queue count"); /* max number of buffers per queue (default RX_BD_USABLE) */ static int bxe_max_rx_bufs = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN, &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue"); /* Host interrupt coalescing RX tick timer (usecs) */ static int bxe_hc_rx_ticks = 25; SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN, &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks"); /* Host interrupt coalescing TX tick timer (usecs) */ static int bxe_hc_tx_ticks = 50; SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN, &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks"); /* Maximum number of Rx packets to process at a time */ static int bxe_rx_budget = 0xffffffff; SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN, &bxe_rx_budget, 0, "Rx processing budget"); /* Maximum LRO aggregation size */ static int bxe_max_aggregation_size = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN, &bxe_max_aggregation_size, 0, "max aggregation size"); /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */ static int bxe_mrrs = -1; SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN, &bxe_mrrs, 0, "PCIe maximum read request size"); /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */ static int bxe_autogreeen = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN, &bxe_autogreeen, 0, "AutoGrEEEn support"); /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */ static int bxe_udp_rss = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN, &bxe_udp_rss, 0, "UDP RSS support"); #define STAT_NAME_LEN 32 /* no stat names below can be 
longer than this */ #define STATS_OFFSET32(stat_name) \ (offsetof(struct bxe_eth_stats, stat_name) / 4) #define Q_STATS_OFFSET32(stat_name) \ (offsetof(struct bxe_eth_q_stats, stat_name) / 4) static const struct { uint32_t offset; uint32_t size; uint32_t flags; #define STATS_FLAGS_PORT 1 #define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */ #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) char string[STAT_NAME_LEN]; } bxe_eth_stats_arr[] = { { STATS_OFFSET32(total_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_bytes" }, { STATS_OFFSET32(error_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8, STATS_FLAGS_PORT, "rx_crc_errors" }, { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8, STATS_FLAGS_PORT, "rx_align_errors" }, { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, { STATS_OFFSET32(etherstatsoverrsizepkts_hi), 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 8, STATS_FLAGS_PORT, "rx_fragments" }, { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, STATS_FLAGS_PORT, "rx_jabbers" }, { STATS_OFFSET32(no_buff_discard_hi), 8, STATS_FLAGS_BOTH, "rx_discards" }, { STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, { STATS_OFFSET32(mf_tag_discard), 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, { STATS_OFFSET32(pfc_frames_received_hi), 8, STATS_FLAGS_PORT, "pfc_frames_received" }, { STATS_OFFSET32(pfc_frames_sent_hi), 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" }, { STATS_OFFSET32(brb_truncate_hi), 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, { STATS_OFFSET32(pause_frames_received_hi), 8, STATS_FLAGS_PORT, "rx_pause_frames" }, { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, { STATS_OFFSET32(nig_timer_max), 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, { STATS_OFFSET32(total_bytes_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bytes" }, { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, STATS_FLAGS_PORT, "tx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8, STATS_FLAGS_PORT, "tx_mac_errors" }, { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_single_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8, STATS_FLAGS_PORT, "tx_deferred" }, { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 8, STATS_FLAGS_PORT, "tx_late_collisions" }, { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 
8, STATS_FLAGS_PORT, "tx_total_collisions" }, { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, { STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, { STATS_OFFSET32(pause_frames_sent_hi), 8, STATS_FLAGS_PORT, "tx_pause_frames" }, { STATS_OFFSET32(total_tpa_aggregations_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, { STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, { STATS_OFFSET32(total_tpa_bytes_hi), 8, STATS_FLAGS_FUNC, "tpa_bytes"}, { STATS_OFFSET32(eee_tx_lpi), 4, STATS_FLAGS_PORT, "eee_tx_lpi"}, { STATS_OFFSET32(rx_calls), 4, STATS_FLAGS_FUNC, "rx_calls"}, { STATS_OFFSET32(rx_pkts), 4, STATS_FLAGS_FUNC, "rx_pkts"}, { STATS_OFFSET32(rx_tpa_pkts), 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"}, { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"}, { STATS_OFFSET32(rx_bxe_service_rxsgl), 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"}, { STATS_OFFSET32(rx_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"}, { STATS_OFFSET32(rx_soft_errors), 4, STATS_FLAGS_FUNC, "rx_soft_errors"}, { STATS_OFFSET32(rx_hw_csum_errors), 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"}, { STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"}, { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"}, { STATS_OFFSET32(rx_budget_reached), 4, STATS_FLAGS_FUNC, "rx_budget_reached"}, { STATS_OFFSET32(tx_pkts), 4, STATS_FLAGS_FUNC, "tx_pkts"}, { STATS_OFFSET32(tx_soft_errors), 4, STATS_FLAGS_FUNC, "tx_soft_errors"}, { STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"}, { STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"}, { STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"}, { STATS_OFFSET32(tx_ofld_frames_lso), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"}, { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"}, { STATS_OFFSET32(tx_encap_failures), 4, STATS_FLAGS_FUNC, "tx_encap_failures"}, { STATS_OFFSET32(tx_hw_queue_full), 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"}, { STATS_OFFSET32(tx_hw_max_queue_depth), 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"}, { STATS_OFFSET32(tx_dma_mapping_failure), 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"}, { STATS_OFFSET32(tx_max_drbr_queue_depth), 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"}, { STATS_OFFSET32(tx_window_violation_std), 4, STATS_FLAGS_FUNC, "tx_window_violation_std"}, { STATS_OFFSET32(tx_window_violation_tso), 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"}, { STATS_OFFSET32(tx_chain_lost_mbuf), 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"}, { STATS_OFFSET32(tx_frames_deferred), 4, STATS_FLAGS_FUNC, "tx_frames_deferred"}, { STATS_OFFSET32(tx_queue_xoff), 4, STATS_FLAGS_FUNC, 
"tx_queue_xoff"}, { STATS_OFFSET32(mbuf_defrag_attempts), 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"}, { STATS_OFFSET32(mbuf_defrag_failures), 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"}, { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"}, { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"}, { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"}, { STATS_OFFSET32(mbuf_alloc_tx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"}, { STATS_OFFSET32(mbuf_alloc_rx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"}, { STATS_OFFSET32(mbuf_alloc_sge), 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"}, { STATS_OFFSET32(mbuf_alloc_tpa), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}, { STATS_OFFSET32(tx_queue_full_return), 4, STATS_FLAGS_FUNC, "tx_queue_full_return"}, { STATS_OFFSET32(bxe_tx_mq_sc_state_failures), 4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"}, { STATS_OFFSET32(tx_request_link_down_failures), 4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"}, { STATS_OFFSET32(bd_avail_too_less_failures), 4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"}, { STATS_OFFSET32(tx_mq_not_empty), 4, STATS_FLAGS_FUNC, "tx_mq_not_empty"}, { STATS_OFFSET32(nsegs_path1_errors), 4, STATS_FLAGS_FUNC, "nsegs_path1_errors"}, { STATS_OFFSET32(nsegs_path2_errors), 4, STATS_FLAGS_FUNC, "nsegs_path2_errors"} }; static const struct { uint32_t offset; uint32_t size; char string[STAT_NAME_LEN]; } bxe_eth_q_stats_arr[] = { { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "rx_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 8, "rx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 8, "rx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 8, "rx_bcast_packets" }, { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "rx_discards" }, { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "tx_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, "tx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, "tx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, "tx_bcast_packets" }, { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 8, "tpa_aggregations" }, { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, "tpa_aggregated_frames"}, { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "tpa_bytes"}, { Q_STATS_OFFSET32(rx_calls), 4, "rx_calls"}, { Q_STATS_OFFSET32(rx_pkts), 4, "rx_pkts"}, { Q_STATS_OFFSET32(rx_tpa_pkts), 4, "rx_tpa_pkts"}, { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, "rx_erroneous_jumbo_sge_pkts"}, { Q_STATS_OFFSET32(rx_bxe_service_rxsgl), 4, "rx_bxe_service_rxsgl"}, { Q_STATS_OFFSET32(rx_jumbo_sge_pkts), 4, "rx_jumbo_sge_pkts"}, { Q_STATS_OFFSET32(rx_soft_errors), 4, "rx_soft_errors"}, { Q_STATS_OFFSET32(rx_hw_csum_errors), 4, "rx_hw_csum_errors"}, { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, "rx_ofld_frames_csum_ip"}, { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, "rx_ofld_frames_csum_tcp_udp"}, { Q_STATS_OFFSET32(rx_budget_reached), 4, "rx_budget_reached"}, { Q_STATS_OFFSET32(tx_pkts), 4, "tx_pkts"}, { Q_STATS_OFFSET32(tx_soft_errors), 4, "tx_soft_errors"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, 
"tx_ofld_frames_csum_ip"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, "tx_ofld_frames_csum_tcp"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, "tx_ofld_frames_csum_udp"}, { Q_STATS_OFFSET32(tx_ofld_frames_lso), 4, "tx_ofld_frames_lso"}, { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, "tx_ofld_frames_lso_hdr_splits"}, { Q_STATS_OFFSET32(tx_encap_failures), 4, "tx_encap_failures"}, { Q_STATS_OFFSET32(tx_hw_queue_full), 4, "tx_hw_queue_full"}, { Q_STATS_OFFSET32(tx_hw_max_queue_depth), 4, "tx_hw_max_queue_depth"}, { Q_STATS_OFFSET32(tx_dma_mapping_failure), 4, "tx_dma_mapping_failure"}, { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 4, "tx_max_drbr_queue_depth"}, { Q_STATS_OFFSET32(tx_window_violation_std), 4, "tx_window_violation_std"}, { Q_STATS_OFFSET32(tx_window_violation_tso), 4, "tx_window_violation_tso"}, { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 4, "tx_chain_lost_mbuf"}, { Q_STATS_OFFSET32(tx_frames_deferred), 4, "tx_frames_deferred"}, { Q_STATS_OFFSET32(tx_queue_xoff), 4, "tx_queue_xoff"}, { Q_STATS_OFFSET32(mbuf_defrag_attempts), 4, "mbuf_defrag_attempts"}, { Q_STATS_OFFSET32(mbuf_defrag_failures), 4, "mbuf_defrag_failures"}, { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, "mbuf_rx_bd_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, "mbuf_rx_bd_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, "mbuf_rx_tpa_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, "mbuf_rx_tpa_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, "mbuf_rx_sge_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, "mbuf_rx_sge_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_alloc_tx), 4, "mbuf_alloc_tx"}, { Q_STATS_OFFSET32(mbuf_alloc_rx), 4, "mbuf_alloc_rx"}, { Q_STATS_OFFSET32(mbuf_alloc_sge), 4, "mbuf_alloc_sge"}, { Q_STATS_OFFSET32(mbuf_alloc_tpa), 4, "mbuf_alloc_tpa"}, { Q_STATS_OFFSET32(tx_queue_full_return), 4, "tx_queue_full_return"}, { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures), 4, "bxe_tx_mq_sc_state_failures"}, { Q_STATS_OFFSET32(tx_request_link_down_failures), 4, "tx_request_link_down_failures"}, { Q_STATS_OFFSET32(bd_avail_too_less_failures), 4, "bd_avail_too_less_failures"}, { Q_STATS_OFFSET32(tx_mq_not_empty), 4, "tx_mq_not_empty"}, { Q_STATS_OFFSET32(nsegs_path1_errors), 4, "nsegs_path1_errors"}, { Q_STATS_OFFSET32(nsegs_path2_errors), 4, "nsegs_path2_errors"} }; #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr) #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr) static void bxe_cmng_fns_init(struct bxe_softc *sc, uint8_t read_cfg, uint8_t cmng_type); static int bxe_get_cmng_fns_mode(struct bxe_softc *sc); static void storm_memset_cmng(struct bxe_softc *sc, struct cmng_init *cmng, uint8_t port); static void bxe_set_reset_global(struct bxe_softc *sc); static void bxe_set_reset_in_progress(struct bxe_softc *sc); static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine); static uint8_t bxe_clear_pf_load(struct bxe_softc *sc); static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print); static void bxe_int_disable(struct bxe_softc *sc); static int bxe_release_leader_lock(struct bxe_softc *sc); static void bxe_pf_disable(struct bxe_softc *sc); static void bxe_free_fp_buffers(struct bxe_softc *sc); static inline void bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t rx_bd_prod, uint16_t rx_cq_prod, uint16_t rx_sge_prod); static void bxe_link_report_locked(struct bxe_softc *sc); static void bxe_link_report(struct bxe_softc *sc); static void 
bxe_link_status_update(struct bxe_softc *sc); static void bxe_periodic_callout_func(void *xsc); static void bxe_periodic_start(struct bxe_softc *sc); static void bxe_periodic_stop(struct bxe_softc *sc); static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t prev_index, uint16_t index); static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue); static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index); static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp); static void bxe_task_fp(struct bxe_fastpath *fp); static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents); static int bxe_alloc_mem(struct bxe_softc *sc); static void bxe_free_mem(struct bxe_softc *sc); static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc); static void bxe_free_fw_stats_mem(struct bxe_softc *sc); static int bxe_interrupt_attach(struct bxe_softc *sc); static void bxe_interrupt_detach(struct bxe_softc *sc); static void bxe_set_rx_mode(struct bxe_softc *sc); static int bxe_init_locked(struct bxe_softc *sc); static int bxe_stop_locked(struct bxe_softc *sc); static void bxe_sp_err_timeout_task(void *arg, int pending); void bxe_parity_recover(struct bxe_softc *sc); void bxe_handle_error(struct bxe_softc *sc); static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode); static __noinline int bxe_nic_unload(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link); static void bxe_handle_sp_tq(void *context, int pending); static void bxe_handle_fp_tq(void *context, int pending); static int bxe_add_cdev(struct bxe_softc *sc); static void bxe_del_cdev(struct bxe_softc *sc); int bxe_grc_dump(struct bxe_softc *sc); static int bxe_alloc_buf_rings(struct bxe_softc *sc); static void bxe_free_buf_rings(struct bxe_softc *sc); /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ uint32_t calc_crc32(uint8_t *crc32_packet, uint32_t crc32_length, uint32_t crc32_seed, uint8_t complement) { uint32_t byte = 0; uint32_t bit = 0; uint8_t msb = 0; uint32_t temp = 0; uint32_t shft = 0; uint8_t current_byte = 0; uint32_t crc32_result = crc32_seed; const uint32_t CRC32_POLY = 0x1edc6f41; if ((crc32_packet == NULL) || (crc32_length == 0) || ((crc32_length % 8) != 0)) { return (crc32_result); } for (byte = 0; byte < crc32_length; byte = byte + 1) { current_byte = crc32_packet[byte]; for (bit = 0; bit < 8; bit = bit + 1) { /* msb = crc32_result[31]; */ msb = (uint8_t)(crc32_result >> 31); crc32_result = crc32_result << 1; /* if (msb != current_byte[bit]) */ if (msb != (0x1 & (current_byte >> bit))) { crc32_result = crc32_result ^ CRC32_POLY; /* crc32_result[0] = 1 */ crc32_result |= 1; } } } /* Last step is to: * 1. "mirror" every bit * 2. swap the 4 bytes * 3.
complement each bit */ /* Mirror */ temp = crc32_result; shft = sizeof(crc32_result) * 8 - 1; for (crc32_result >>= 1; crc32_result; crc32_result >>= 1) { temp <<= 1; temp |= crc32_result & 1; shft-- ; } /* temp[31-bit] = crc32_result[bit] */ temp <<= shft; /* Swap */ /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */ { uint32_t t0, t1, t2, t3; t0 = (0x000000ff & (temp >> 24)); t1 = (0x0000ff00 & (temp >> 8)); t2 = (0x00ff0000 & (temp << 8)); t3 = (0xff000000 & (temp << 24)); crc32_result = t0 | t1 | t2 | t3; } /* Complement */ if (complement) { crc32_result = ~crc32_result; } return (crc32_result); } int bxe_test_bit(int nr, volatile unsigned long *addr) { return ((atomic_load_acq_long(addr) & (1 << nr)) != 0); } void bxe_set_bit(unsigned int nr, volatile unsigned long *addr) { atomic_set_acq_long(addr, (1 << nr)); } void bxe_clear_bit(int nr, volatile unsigned long *addr) { atomic_clear_acq_long(addr, (1 << nr)); } int bxe_test_and_set_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_test_and_clear_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_cmpxchg(volatile int *addr, int old, int new) { int x; do { x = *addr; } while (atomic_cmpset_acq_int(addr, old, new) == 0); return (x); } /* * Get DMA memory from the OS. * * Validates that the OS has provided DMA buffers in response to a * bus_dmamap_load call and saves the physical address of those buffers. * When the callback is used the OS will return 0 for the mapping function * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any * failures back to the caller. * * Returns: * Nothing. */ static void bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bxe_dma *dma = arg; if (error) { dma->paddr = 0; dma->nseg = 0; BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error); } else { dma->paddr = segs->ds_addr; dma->nseg = nseg; } } /* * Allocate a block of memory and map it for DMA. No partial completions * allowed and release any resources acquired if we can't acquire all * resources. 
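 *
 * The allocation is the standard three-step bus_dma sequence:
 * bus_dma_tag_create() describes the constraints, bus_dmamem_alloc()
 * provides the wired buffer, and bus_dmamap_load() resolves the bus
 * address through the bxe_dma_map_addr() callback above. A hedged usage
 * sketch follows (the size and message strings are illustrative only):
 *
 *   struct bxe_dma dma;
 *   if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0)
 *       return (ENOMEM);
 *   ... use dma.vaddr (KVA) and dma.paddr (bus address) ...
 *   bxe_dma_free(sc, &dma);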
* * Returns: * 0 = Success, !0 = Failure */ int bxe_dma_alloc(struct bxe_softc *sc, bus_size_t size, struct bxe_dma *dma, const char *msg) { int rc; if (dma->size > 0) { BLOGE(sc, "dma block '%s' already has size %lu\n", msg, (unsigned long)dma->size); return (1); } memset(dma, 0, sizeof(*dma)); /* sanity */ dma->sc = sc; dma->size = size; snprintf(dma->msg, sizeof(dma->msg), "%s", msg); rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ BCM_PAGE_SIZE, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ size, /* max map size */ 1, /* num discontinuous */ size, /* max seg size */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &dma->tag); /* returned dma tag */ if (rc != 0) { BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc); memset(dma, 0, sizeof(*dma)); return (1); } rc = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &dma->map); if (rc != 0) { BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc); bus_dma_tag_destroy(dma->tag); memset(dma, 0, sizeof(*dma)); return (1); } rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, bxe_dma_map_addr, /* BLOGD in here */ dma, BUS_DMA_NOWAIT); if (rc != 0) { BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); bus_dma_tag_destroy(dma->tag); memset(dma, 0, sizeof(*dma)); return (1); } return (0); } void bxe_dma_free(struct bxe_softc *sc, struct bxe_dma *dma) { if (dma->size > 0) { DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL")); bus_dmamap_sync(dma->tag, dma->map, (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)); bus_dmamap_unload(dma->tag, dma->map); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); bus_dma_tag_destroy(dma->tag); } memset(dma, 0, sizeof(*dma)); } /* * These indirect read and write routines are used only during init. * The locking is handled by the MCP.
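 *
 * "Indirect" here means the GRC window in PCI configuration space: the
 * target register offset is written to PCICFG_GRC_ADDRESS and the data
 * then moves through PCICFG_GRC_DATA, so device registers stay reachable
 * before the memory-mapped REG_RD()/REG_WR() path is available. A hedged
 * sketch (the offset name is illustrative only):
 *
 *   uint32_t val = bxe_reg_rd_ind(sc, some_grc_reg_offset);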
*/ void bxe_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val) { pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4); pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); } uint32_t bxe_reg_rd_ind(struct bxe_softc *sc, uint32_t addr) { uint32_t val; pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4); pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); return (val); } static int bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; int cnt; /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" " resource_bit 0x%x\n", resource, resource_bit); return (-1); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); } /* validate the resource is not already taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n", resource, lock_status, resource_bit); return (-1); } /* try every 5ms for 5 seconds */ for (cnt = 0; cnt < 1000; cnt++) { REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { return (0); } DELAY(5000); } BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n", resource, resource_bit); return (-1); } static int bxe_release_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" " resource_bit 0x%x\n", resource, resource_bit); return (-1); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); } /* validate the resource is currently taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (!(lock_status & resource_bit)) { BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n", resource, lock_status, resource_bit); return (-1); } REG_WR(sc, hw_lock_control_reg, resource_bit); return (0); } static void bxe_acquire_phy_lock(struct bxe_softc *sc) { BXE_PHY_LOCK(sc); bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); } static void bxe_release_phy_lock(struct bxe_softc *sc) { bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); BXE_PHY_UNLOCK(sc); } /* * Per pf misc lock must be acquired before the per port mcp lock. Otherwise, * if two pfs from the same port were to access nvram at the same time, we * could run into a scenario such as: * pf A takes the port lock. * pf B succeeds in taking the same lock since they are from the same port. * pf A takes the per pf misc lock. Performs eeprom access. * pf A finishes. Unlocks the per pf misc lock. * pf B takes the lock and proceeds to perform its own access. * pf A unlocks the per port lock, while pf B is still working (!).
* mcp takes the per port lock and corrupts pf B's access (and/or has its own * access corrupted by pf B). */ static int bxe_acquire_nvram_lock(struct bxe_softc *sc) { int port = SC_PORT(sc); int count, i; uint32_t val = 0; /* acquire HW lock: protect against other PFs in PF Direct Assignment */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* request access to nvram interface */ REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); for (i = 0; i < count*10; i++) { val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { break; } DELAY(5); } if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { BLOGE(sc, "Cannot get access to nvram interface " "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", port, val); return (-1); } return (0); } static int bxe_release_nvram_lock(struct bxe_softc *sc) { int port = SC_PORT(sc); int count, i; uint32_t val = 0; /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* relinquish nvram interface */ REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); for (i = 0; i < count*10; i++) { val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { break; } DELAY(5); } if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { BLOGE(sc, "Cannot free access to nvram interface " "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", port, val); return (-1); } /* release HW lock: protect against other PFs in PF Direct Assignment */ bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); return (0); } static void bxe_enable_nvram_access(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); /* enable both bits, even on read */ REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN)); } static void bxe_disable_nvram_access(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); /* disable both bits, even after read */ REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN))); } static int bxe_nvram_read_dword(struct bxe_softc *sc, uint32_t offset, uint32_t *ret_val, uint32_t cmd_flags) { int count, i, rc; uint32_t val; /* build the command word */ cmd_flags |= MCPR_NVM_COMMAND_DOIT; /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* address of the NVRAM to read from */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue a read command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ *ret_val = 0; rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { val = REG_RD(sc, MCP_REG_MCPR_NVM_READ); /* we read nvram data in cpu order * but ethtool sees it as an array of bytes * converting to big-endian will do the work */ *ret_val = htobe32(val); rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram read timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } static int bxe_nvram_read(struct bxe_softc *sc, uint32_t offset, uint8_t *ret_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; int rc;
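	/*
	 * Flow summary: validate that offset and buf_size are dword-aligned
	 * and within the flash, take the NVRAM arbitration lock, enable the
	 * interface, then pull one dword at a time -- MCPR_NVM_COMMAND_FIRST
	 * on the first access and MCPR_NVM_COMMAND_LAST on the final one --
	 * before tearing access down in reverse order. A hedged usage sketch
	 * (the offset name is illustrative only):
	 *
	 *   uint32_t words[2];
	 *   if (bxe_nvram_read(sc, some_nvram_offset, (uint8_t *)words,
	 *                      sizeof(words)) != 0)
	 *       return (EIO);
	 */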
if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); /* read the first word(s) */ cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((buf_size > sizeof(uint32_t)) && (rc == 0)) { rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); /* advance to the next dword */ offset += sizeof(uint32_t); ret_buf += sizeof(uint32_t); buf_size -= sizeof(uint32_t); cmd_flags = 0; } if (rc == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write_dword(struct bxe_softc *sc, uint32_t offset, uint32_t val, uint32_t cmd_flags) { int count, i, rc; /* build the command word */ cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR); /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* write the data */ REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val); /* address of the NVRAM to write to */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue the write command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram write timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) static int bxe_nvram_write1(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t align_offset; uint32_t val; int rc; if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); align_offset = (offset & ~0x03); rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags); if (rc == 0) { val &= ~(0xff << BYTE_OFFSET(offset)); val |= (*data_buf << BYTE_OFFSET(offset)); /* nvram data is returned as an array of bytes * convert it back to cpu order */ val = be32toh(val); rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; uint32_t written_so_far; int rc; if (buf_size == 1) { return (bxe_nvram_write1(sc, offset, data_buf, buf_size)); } if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) { BLOGE(sc, "Invalid 
parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if (buf_size == 0) { return (0); /* nothing to do */ } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); written_so_far = 0; cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((written_so_far < buf_size) && (rc == 0)) { if (written_so_far == (buf_size - sizeof(uint32_t))) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if ((offset % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_FIRST; } memcpy(&val, data_buf, 4); rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags); /* advance to the next dword */ offset += sizeof(uint32_t); data_buf += sizeof(uint32_t); written_so_far += sizeof(uint32_t); cmd_flags = 0; } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } /* copy command into DMAE command memory and set DMAE command Go */ void bxe_post_dmae(struct bxe_softc *sc, struct dmae_cmd *dmae, int idx) { uint32_t cmd_offset; int i; cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx)); for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) { REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i)); } REG_WR(sc, dmae_reg_go_c[idx], 1); } uint32_t bxe_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) { return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) | DMAE_CMD_C_TYPE_ENABLE)); } uint32_t bxe_dmae_opcode_clr_src_reset(uint32_t opcode) { return (opcode & ~DMAE_CMD_SRC_RESET); } uint32_t bxe_dmae_opcode(struct bxe_softc *sc, uint8_t src_type, uint8_t dst_type, uint8_t with_comp, uint8_t comp_type) { uint32_t opcode = 0; opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) | (dst_type << DMAE_CMD_DST_SHIFT)); opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) | (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT)); opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT); #ifdef __BIG_ENDIAN opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; #else opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; #endif if (with_comp) { opcode = bxe_dmae_opcode_add_comp(opcode, comp_type); } return (opcode); } static void bxe_prep_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae, uint8_t src_type, uint8_t dst_type) { memset(dmae, 0, sizeof(struct dmae_cmd)); /* set the opcode */ dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type, TRUE, DMAE_COMP_PCI); /* fill in the completion parameters */ dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_val = DMAE_COMP_VAL; } /* issue a DMAE command over the init channel and wait for completion */ static int bxe_issue_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae) { uint32_t *wb_comp = BXE_SP(sc, wb_comp); int timeout = CHIP_REV_IS_SLOW(sc) ? 
400000 : 4000; BXE_DMAE_LOCK(sc); /* reset completion */ *wb_comp = 0; /* post the command on the channel used for initializations */ bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc)); /* wait for completion */ DELAY(5); while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { if (!timeout || (sc->recovery_state != BXE_RECOVERY_DONE && sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) { BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_TIMEOUT); } timeout--; DELAY(50); } if (*wb_comp & DMAE_PCI_ERR_FLAG) { BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_PCI_ERROR); } BXE_DMAE_UNLOCK(sc); return (0); } void bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr, uint32_t len32) { struct dmae_cmd dmae; uint32_t *data; int i, rc; DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32)); if (!sc->dmae_ready) { data = BXE_SP(sc, wb_data[0]); for (i = 0; i < len32; i++) { data[i] = (CHIP_IS_E1(sc)) ? bxe_reg_rd_ind(sc, (src_addr + (i * 4))) : REG_RD(sc, (src_addr + (i * 4))); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); /* fill in addresses and len */ dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ dmae.src_addr_hi = 0; dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data)); dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data)); dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae(struct bxe_softc *sc, bus_addr_t dma_addr, uint32_t dst_addr, uint32_t len32) { struct dmae_cmd dmae; int rc; if (!sc->dmae_ready) { DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32)); if (CHIP_IS_E1(sc)) { ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } else { ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); /* fill in addresses and len */ dmae.src_addr_lo = U64_LO(dma_addr); dmae.src_addr_hi = U64_HI(dma_addr); dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ dmae.dst_addr_hi = 0; dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, uint32_t addr, uint32_t len) { int dmae_wr_max = DMAE_LEN32_WR_MAX(sc); int offset = 0; while (len > dmae_wr_max) { bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ dmae_wr_max); offset += (dmae_wr_max * 4); len -= dmae_wr_max; } bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ len); } void bxe_set_ctx_validation(struct bxe_softc *sc, struct eth_context *cxt, uint32_t cid) { /* ustorm cxt validation */ cxt->ustorm_ag_context.cdu_usage = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); /* xcontext validation */ cxt->xstorm_ag_context.cdu_reserved = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); } static void bxe_storm_memset_hc_timeout(struct bxe_softc *sc, uint8_t port, uint8_t fw_sb_id, uint8_t sb_index, uint8_t ticks) { uint32_t addr = 
(BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); REG_WR8(sc, addr, ticks); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d ticks %d\n", port, fw_sb_id, sb_index, ticks); } static void bxe_storm_memset_hc_disable(struct bxe_softc *sc, uint8_t port, uint16_t fw_sb_id, uint8_t sb_index, uint8_t disable) { uint32_t enable_flag = (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); uint8_t flags; /* clear and set */ flags = REG_RD8(sc, addr); flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; REG_WR8(sc, addr, flags); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d disable %d\n", port, fw_sb_id, sb_index, disable); } void bxe_update_coalesce_sb_index(struct bxe_softc *sc, uint8_t fw_sb_id, uint8_t sb_index, uint8_t disable, uint16_t usec) { int port = SC_PORT(sc); uint8_t ticks = (usec / 4); /* XXX ??? */ bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks); disable = (disable) ? 1 : ((usec) ? 0 : 1); bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable); } void elink_cb_udelay(struct bxe_softc *sc, uint32_t usecs) { DELAY(usecs); } uint32_t elink_cb_reg_read(struct bxe_softc *sc, uint32_t reg_addr) { return (REG_RD(sc, reg_addr)); } void elink_cb_reg_write(struct bxe_softc *sc, uint32_t reg_addr, uint32_t val) { REG_WR(sc, reg_addr, val); } void elink_cb_reg_wb_write(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_WR_DMAE(sc, offset, wb_write, len); } void elink_cb_reg_wb_read(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_RD_DMAE(sc, offset, wb_write, len); } uint8_t elink_cb_path_id(struct bxe_softc *sc) { return (SC_PATH(sc)); } void elink_cb_event_log(struct bxe_softc *sc, const elink_log_id_t elink_log_id, ...) { /* XXX */ BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); } static int bxe_set_spio(struct bxe_softc *sc, int spio, uint32_t mode) { uint32_t spio_reg; /* Only 2 SPIOs are configurable */ if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); /* read SPIO and mask except the float bits */ spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); switch (mode) { case MISC_SPIO_OUTPUT_LOW: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); /* clear FLOAT and set CLR */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_CLR_POS); break; case MISC_SPIO_OUTPUT_HIGH: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); /* clear FLOAT and set SET */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_SET_POS); break; case MISC_SPIO_INPUT_HI_Z: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); /* set FLOAT */ spio_reg |= (spio << MISC_SPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_SPIO, spio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); return (0); } static int bxe_gpio_read(struct bxe_softc *sc, int gpio_num, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d" " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift, gpio_mask); return (-1); } /* read GPIO value */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); /* get the requested pin value */ return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; } static int bxe_gpio_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear FLOAT and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear FLOAT and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> input\n", gpio_num, gpio_shift); /* set FLOAT */ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint32_t mode) { uint32_t gpio_reg; /* any port swapping should be handled by caller */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); /* set CLR */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); /* set SET */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); /* set FLOAT */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x" " gpio_reg 0x%x\n", pins, mode, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (-1); } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_int_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, 
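/* * Illustrative note (editorial, not driver code): the SPIO/GPIO helpers in * this block all follow the same read-modify-write pattern under the shared * hardware lock, roughly: * *     bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); *     reg = REG_RD(sc, MISC_REG_GPIO);        read the current pin state *     reg &= ~(mask << FLOAT_POS);            stop floating the pin... *     reg |= (mask << SET_POS);               ...and drive the new level *     REG_WR(sc, MISC_REG_GPIO, reg); *     bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); * * The lock matters because both ports share the MISC GPIO block. */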
NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO int */ gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); switch (mode) { case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: BLOGD(sc, DBG_PHY, "Clear GPIO INT %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear SET and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); break; case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: BLOGD(sc, DBG_PHY, "Set GPIO INT %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear CLR and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } uint32_t elink_cb_gpio_read(struct bxe_softc *sc, uint16_t gpio_num, uint8_t port) { return (bxe_gpio_read(sc, gpio_num, port)); } uint8_t elink_cb_gpio_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_write(sc, gpio_num, mode, port)); } uint8_t elink_cb_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint8_t mode) /* 0=low 1=high */ { return (bxe_gpio_mult_write(sc, pins, mode)); } uint8_t elink_cb_gpio_int_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_int_write(sc, gpio_num, mode, port)); } void elink_cb_notify_link_changed(struct bxe_softc *sc) { REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + (SC_FUNC(sc) * sizeof(uint32_t))), 1); } /* send the MCP a request, block until there is a reply */ uint32_t elink_cb_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { int mb_idx = SC_FW_MB_IDX(sc); uint32_t seq; uint32_t rc = 0; uint32_t cnt = 1; uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; BXE_FWMB_LOCK(sc); seq = ++sc->fw_seq; SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); BLOGD(sc, DBG_PHY, "wrote command 0x%08x to FW MB param 0x%08x\n", (command | seq), param); /* Let the FW do its magic. Give it up to 5 seconds... */ do { DELAY(delay * 1000); rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); BLOGD(sc, DBG_PHY, "[after %d ms] read 0x%x seq 0x%x from FW MB\n", cnt*delay, rc, seq); /* is this a reply to our command? */ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { rc &= FW_MSG_CODE_MASK; } else { /* Ruh-roh! 
*/ BLOGE(sc, "FW failed to respond!\n"); // XXX bxe_fw_dump(sc); rc = 0; } BXE_FWMB_UNLOCK(sc); return (rc); } static uint32_t bxe_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { return (elink_cb_fw_command(sc, command, param)); } static void __storm_memset_dma_mapping(struct bxe_softc *sc, uint32_t addr, bus_addr_t mapping) { REG_WR(sc, addr, U64_LO(mapping)); REG_WR(sc, (addr + 4), U64_HI(mapping)); } static void storm_memset_spq_addr(struct bxe_softc *sc, bus_addr_t mapping, uint16_t abs_fid) { uint32_t addr = (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); __storm_memset_dma_mapping(sc, addr, mapping); } static void storm_memset_vf_to_pf(struct bxe_softc *sc, uint16_t abs_fid, uint16_t pf_id) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); } static void storm_memset_func_en(struct bxe_softc *sc, uint16_t abs_fid, uint8_t enable) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); } static void storm_memset_eq_data(struct bxe_softc *sc, struct event_ring_data *eq_data, uint16_t pfid) { uint32_t addr; size_t size; addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); size = sizeof(struct event_ring_data); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); } static void storm_memset_eq_prod(struct bxe_softc *sc, uint16_t eq_prod, uint16_t pfid) { uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid)); REG_WR16(sc, addr, eq_prod); } /* * Post a slowpath command. * * A slowpath command is used to propagate a configuration change through * the controller in a controlled manner, allowing each STORM processor and * other H/W blocks to phase in the change. The commands sent on the * slowpath are referred to as ramrods. Depending on the ramrod used the * completion of the ramrod will occur in different ways. Here's a * breakdown of ramrods and how they complete: * * RAMROD_CMD_ID_ETH_PORT_SETUP * Used to setup the leading connection on a port. Completes on the * Receive Completion Queue (RCQ) of that port (typically fp[0]). * * RAMROD_CMD_ID_ETH_CLIENT_SETUP * Used to setup an additional connection on a port. Completes on the * RCQ of the multi-queue/RSS connection being initialized. * * RAMROD_CMD_ID_ETH_STAT_QUERY * Used to force the storm processors to update the statistics database * in host memory. This ramrod is sent on the leading connection CID and * completes as an index increment of the CSTORM on the default status * block. * * RAMROD_CMD_ID_ETH_UPDATE * Used to update the state of the leading connection, usually to update * the RSS indirection table. Completes on the RCQ of the leading * connection. (Not currently used under FreeBSD until OS support becomes * available.) * * RAMROD_CMD_ID_ETH_HALT * Used when tearing down a connection prior to driver unload. Completes * on the RCQ of the multi-queue/RSS connection being torn down. Don't * use this on the leading connection. * * RAMROD_CMD_ID_ETH_SET_MAC * Sets the Unicast/Broadcast/Multicast used by the port. 
Completes on * the RCQ of the leading connection. * * RAMROD_CMD_ID_ETH_CFC_DEL * Used when tearing down a connection prior to driver unload. Completes * on the RCQ of the leading connection (since the current connection * has been completely removed from controller memory). * * RAMROD_CMD_ID_ETH_PORT_DEL * Used to tear down the leading connection prior to driver unload, * typically fp[0]. Completes as an index increment of the CSTORM on the * default status block. * * RAMROD_CMD_ID_ETH_FORWARD_SETUP * Used for connection offload. Completes on the RCQ of the multi-queue * RSS connection that is being offloaded. (Not currently used under * FreeBSD.) * * There can only be one command pending per function. * * Returns: * 0 = Success, !0 = Failure. */ /* must be called under the spq lock */ static inline struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) { struct eth_spe *next_spe = sc->spq_prod_bd; if (sc->spq_prod_bd == sc->spq_last_bd) { /* wrap back to the first eth_spq */ sc->spq_prod_bd = sc->spq; sc->spq_prod_idx = 0; } else { sc->spq_prod_bd++; sc->spq_prod_idx++; } return (next_spe); } /* must be called under the spq lock */ static inline void bxe_sp_prod_update(struct bxe_softc *sc) { int func = SC_FUNC(sc); /* * Make sure that BD data is updated before writing the producer. * BD data is written to the memory, the producer is read from the * memory, thus we need a full memory barrier to ensure the ordering. */ mb(); REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), sc->spq_prod_idx); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); } /** * bxe_is_contextless_ramrod - check if the current command ends on EQ * * @cmd: command to check * @cmd_type: command type */ static inline int bxe_is_contextless_ramrod(int cmd, int cmd_type) { if ((cmd_type == NONE_CONNECTION_TYPE) || (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { return (TRUE); } else { return (FALSE); } } /** * bxe_sp_post - place a single command on an SP ring * * @sc: driver handle * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) * @cid: SW CID the command is related to * @data_hi: command private data address (high 32 bits) * @data_lo: command private data address (low 32 bits) * @cmd_type: command type (e.g. NONE, ETH) * * SP data is handled as if it's always an address pair, thus data fields are * not swapped to little endian in upper functions. Instead this function swaps * data as if it's two uint32 fields. 
*/ int bxe_sp_post(struct bxe_softc *sc, int command, int cid, uint32_t data_hi, uint32_t data_lo, int cmd_type) { struct eth_spe *spe; uint16_t type; int common; common = bxe_is_contextless_ramrod(command, cmd_type); BXE_SP_LOCK(sc); if (common) { if (!atomic_load_acq_long(&sc->eq_spq_left)) { BLOGE(sc, "EQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } else { if (!atomic_load_acq_long(&sc->cq_spq_left)) { BLOGE(sc, "SPQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } spe = bxe_sp_get_next(sc); /* CID needs port number to be encoded in it */ spe->hdr.conn_and_cmd_data = htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid)); type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE; /* TBD: Check if it works for VFs */ type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) & SPE_HDR_T_FUNCTION_ID); spe->hdr.type = htole16(type); spe->data.update_data_addr.hi = htole32(data_hi); spe->data.update_data_addr.lo = htole32(data_lo); /* * It's ok if the actual decrement is issued towards the memory * somewhere between the lock and unlock. Thus no more explicit * memory barrier is needed. */ if (common) { atomic_subtract_acq_long(&sc->eq_spq_left, 1); } else { atomic_subtract_acq_long(&sc->cq_spq_left, 1); } BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); BLOGD(sc, DBG_SP, "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", sc->spq_prod_idx, (uint32_t)U64_HI(sc->spq_dma.paddr), (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), command, common, HW_CID(sc, cid), data_hi, data_lo, type, atomic_load_acq_long(&sc->cq_spq_left), atomic_load_acq_long(&sc->eq_spq_left)); bxe_sp_prod_update(sc); BXE_SP_UNLOCK(sc); return (0); } /** * bxe_debug_print_ind_table - prints the indirection table configuration. * * @sc: driver handle * @p: pointer to rss configuration */ /* * FreeBSD Device probe function. * * Compares the device found to the driver's list of supported devices and * reports back to the BSD loader whether this is the right driver for the device. * This is the driver entry function called from the "kldload" command. * * Returns: * BUS_PROBE_DEFAULT on success, positive value on failure. */ static int bxe_probe(device_t dev) { struct bxe_device_type *t; char *descbuf; uint16_t did, sdid, svid, vid; /* Find our device structure */ t = bxe_devs; /* Get the data for the device to be probed. */ vid = pci_get_vendor(dev); did = pci_get_device(dev); svid = pci_get_subvendor(dev); sdid = pci_get_subdevice(dev); /* Look through the list of known devices for a match. */ while (t->bxe_name != NULL) { if ((vid == t->bxe_vid) && (did == t->bxe_did) && ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); if (descbuf == NULL) return (ENOMEM); /* Print out the device identity. 
*/ snprintf(descbuf, BXE_DEVDESC_MAX, "%s (%c%d) BXE v:%s", t->bxe_name, (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), (pci_read_config(dev, PCIR_REVID, 4) & 0xf), BXE_DRIVER_VERSION); device_set_desc_copy(dev, descbuf); free(descbuf, M_TEMP); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void bxe_init_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), "bxe%d_core_lock", sc->unit); sx_init(&sc->core_sx, sc->core_sx_name); #else snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), "bxe%d_core_lock", sc->unit); mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); #endif snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), "bxe%d_sp_lock", sc->unit); mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), "bxe%d_dmae_lock", sc->unit); mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), "bxe%d_phy_lock", sc->unit); mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), "bxe%d_fwmb_lock", sc->unit); mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), "bxe%d_print_lock", sc->unit); mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), "bxe%d_stats_lock", sc->unit); mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), "bxe%d_mcast_lock", sc->unit); mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); } static void bxe_release_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX sx_destroy(&sc->core_sx); #else if (mtx_initialized(&sc->core_mtx)) { mtx_destroy(&sc->core_mtx); } #endif if (mtx_initialized(&sc->sp_mtx)) { mtx_destroy(&sc->sp_mtx); } if (mtx_initialized(&sc->dmae_mtx)) { mtx_destroy(&sc->dmae_mtx); } if (mtx_initialized(&sc->port.phy_mtx)) { mtx_destroy(&sc->port.phy_mtx); } if (mtx_initialized(&sc->fwmb_mtx)) { mtx_destroy(&sc->fwmb_mtx); } if (mtx_initialized(&sc->print_mtx)) { mtx_destroy(&sc->print_mtx); } if (mtx_initialized(&sc->stats_mtx)) { mtx_destroy(&sc->stats_mtx); } if (mtx_initialized(&sc->mcast_mtx)) { mtx_destroy(&sc->mcast_mtx); } } static void bxe_tx_disable(struct bxe_softc* sc) { if_t ifp = sc->ifp; /* tell the stack the driver is stopped and TX queue is full */ if (ifp != NULL) { if_setdrvflags(ifp, 0); } } static void bxe_drv_pulse(struct bxe_softc *sc) { SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, sc->fw_drv_pulse_wr_seq); } static inline uint16_t bxe_tx_avail(struct bxe_softc *sc, struct bxe_fastpath *fp) { int16_t used; uint16_t prod; uint16_t cons; prod = fp->tx_bd_prod; cons = fp->tx_bd_cons; used = SUB_S16(prod, cons); return (int16_t)(sc->tx_ring_size) - used; } static inline int bxe_tx_queue_has_work(struct bxe_fastpath *fp) { uint16_t hw_cons; mb(); /* status block fields can change */ hw_cons = le16toh(*fp->tx_cons_sb); return (hw_cons != fp->tx_pkt_cons); } static inline uint8_t bxe_has_tx_work(struct bxe_fastpath *fp) { /* expand this for multi-cos if ever supported */ return (bxe_tx_queue_has_work(fp)) ? 
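/* * Illustrative note (editorial, not driver code): bxe_tx_avail() above * depends on 16-bit modular arithmetic, so it remains correct across * producer wraparound. For example, with prod = 5 and cons = 65530, * SUB_S16() yields used = 11 (5 - 65530 mod 2^16); assuming a hypothetical * tx_ring_size of 4096, the function would report 4085 free BDs. */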
TRUE : FALSE; } static inline int bxe_has_rx_work(struct bxe_fastpath *fp) { uint16_t rx_cq_cons_sb; mb(); /* status block fields can change */ rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) rx_cq_cons_sb++; return (fp->rx_cq_cons != rx_cq_cons_sb); } static void bxe_sp_event(struct bxe_softc *sc, struct bxe_fastpath *fp, union eth_rx_cqe *rr_cqe) { int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); switch (command) { case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); drv_cmd = ECORE_Q_CMD_UPDATE; break; case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); drv_cmd = ECORE_Q_CMD_SETUP; break; case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; break; case (RAMROD_CMD_ID_ETH_HALT): BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); drv_cmd = ECORE_Q_CMD_HALT; break; case (RAMROD_CMD_ID_ETH_TERMINATE): BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid); drv_cmd = ECORE_Q_CMD_TERMINATE; break; case (RAMROD_CMD_ID_ETH_EMPTY): BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); drv_cmd = ECORE_Q_CMD_EMPTY; break; default: BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", command, fp->index); return; } if ((drv_cmd != ECORE_Q_CMD_MAX) && q_obj->complete_cmd(sc, q_obj, drv_cmd)) { /* * q_obj->complete_cmd() failure means that this was * an unexpected completion. * * In this case we don't want to increase the sc->spq_left * because apparently we haven't sent this command in the first * place. */ // bxe_panic(sc, ("Unexpected SP completion\n")); return; } atomic_add_acq_long(&sc->cq_spq_left, 1); BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", atomic_load_acq_long(&sc->cq_spq_left)); } /* * The current mbuf is part of an aggregation. Move the mbuf into the TPA * aggregation queue, put an empty mbuf back onto the receive chain, and mark * the current aggregation queue as in-progress. */ static void bxe_tpa_start(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t queue, uint16_t cons, uint16_t prod, struct eth_fast_path_rx_cqe *cqe) { struct bxe_sw_rx_bd tmp_bd; struct bxe_sw_rx_bd *rx_buf; struct eth_rx_bd *rx_bd; int max_agg_queues __diagused; struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; uint16_t index; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " "cons=%d prod=%d\n", fp->index, queue, cons, prod); max_agg_queues = MAX_AGG_QS(sc); KASSERT((queue < max_agg_queues), ("fp[%02d] invalid aggr queue (%d >= %d)!", fp->index, queue, max_agg_queues)); KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", fp->index, queue)); /* copy the existing mbuf and mapping from the TPA pool */ tmp_bd = tpa_info->bd; if (tmp_bd.m == NULL) { uint32_t *tmp; tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n", fp->index, queue, cons, prod); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); /* XXX Error handling? 
*/ return; } /* change the TPA queue to the start state */ tpa_info->state = BXE_TPA_STATE_START; tpa_info->placement_offset = cqe->placement_offset; tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); tpa_info->vlan_tag = le16toh(cqe->vlan_tag); tpa_info->len_on_bd = le16toh(cqe->len_on_bd); fp->rx_tpa_queue_used |= (1 << queue); /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. Else if a maximum Rx * buffer limit is imposed then fill in the next producer index. */ index = (sc->max_rx_bufs != RX_BD_USABLE) ? prod : cons; /* move the received mbuf and mapping to TPA pool */ tpa_info->bd = fp->rx_mbuf_chain[cons]; /* release any existing RX BD mbuf mappings */ if (cons != index) { rx_buf = &fp->rx_mbuf_chain[cons]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* * We get here when the maximum number of rx buffers is less than * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL * it out here without concern of a memory leak. */ fp->rx_mbuf_chain[cons].m = NULL; } /* update the Rx SW BD with the mbuf info from the TPA pool */ fp->rx_mbuf_chain[index] = tmp_bd; /* update the Rx BD with the empty mbuf phys address from the TPA pool */ rx_bd = &fp->rx_chain[index]; rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); } /* * When a TPA aggregation is completed, loop through the individual mbufs * of the aggregation, combining them into a single mbuf which will be sent * up the stack. Refill all freed SGEs with mbufs as we go along. */ static int bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct mbuf *m, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { struct mbuf *m_frag; uint32_t frag_len, frag_size, i; uint16_t sge_idx; int rc = 0; int j; frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", fp->index, queue, tpa_info->len_on_bd, frag_size, pages); /* make sure the aggregated frame is not too big to handle */ if (pages > 8 * PAGES_PER_SGE) { uint32_t *tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " "pkt_len=%d len_on_bd=%d frag_size=%d\n", fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), tpa_info->len_on_bd, frag_size); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); bxe_panic(sc, ("sge page count error\n")); return (EINVAL); } /* * Scan through the scatter gather list pulling individual mbufs into a * single mbuf for the host stack. */ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); /* * Firmware gives the indices of the SGE as if the ring is an array * (meaning that the "next" element will consume 2 indices). */ frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " "sge_idx=%d frag_size=%d frag_len=%d\n", fp->index, queue, i, j, sge_idx, frag_size, frag_len); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring! 
*/ return (rc); } /* update the fragment length */ m_frag->m_len = frag_len; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); fp->eth_q_stats.mbuf_alloc_sge--; /* update the TPA mbuf size and remaining fragment size */ m->m_pkthdr.len += frag_len; frag_size -= frag_len; } BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", fp->index, queue, frag_size); return (rc); } static inline void bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) { int i, j; for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; for (j = 0; j < 2; j++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); idx--; } } } static inline void bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) { /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); /* * Clear the two last indices in the page to 1. These are the indices that * correspond to the "next" element, hence will never be indicated and * should be removed from the calculations. */ bxe_clear_sge_mask_next_elems(fp); } static inline void bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t idx) { uint16_t last_max = fp->last_max_sge; if (SUB_S16(idx, last_max) > 0) { fp->last_max_sge = idx; } } static inline void bxe_update_sge_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t sge_len, union eth_sgl_or_raw_data *cqe) { uint16_t last_max, last_elem, first_elem; uint16_t delta = 0; uint16_t i; if (!sge_len) { return; } /* first mark all used pages */ for (i = 0; i < sge_len; i++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, RX_SGE(le16toh(cqe->sgl[i]))); } BLOGD(sc, DBG_LRO, "fp[%02d] fp_cqe->sgl[%d] = %d\n", fp->index, sge_len - 1, le16toh(cqe->sgl[sge_len - 1])); /* assume that the last SGE index is the biggest */ bxe_update_last_max_sge(fp, le16toh(cqe->sgl[sge_len - 1])); last_max = RX_SGE(fp->last_max_sge); last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; /* if ring is not full */ if (last_elem + 1 != first_elem) { last_elem++; } /* now update the prod */ for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { if (__predict_true(fp->sge_mask[i])) { break; } fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; delta += BIT_VEC64_ELEM_SZ; } if (delta > 0) { fp->rx_sge_prod += delta; /* clear page-end entries */ bxe_clear_sge_mask_next_elems(fp); } BLOGD(sc, DBG_LRO, "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", fp->index, fp->last_max_sge, fp->rx_sge_prod); } /* * The aggregation on the current TPA queue has completed. Pull the individual * mbuf fragments together into a single mbuf, perform all necessary checksum * calculations, and send the resulting mbuf to the stack. 
*/ static void bxe_tpa_stop(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { if_t ifp = sc->ifp; struct mbuf *m; int rc = 0; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", fp->index, queue, tpa_info->placement_offset, le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); m = tpa_info->bd.m; /* allocate a replacement before modifying existing mbuf */ rc = bxe_alloc_rx_tpa_mbuf(fp, queue); if (rc) { /* drop the frame and log an error */ fp->eth_q_stats.rx_soft_errors++; goto bxe_tpa_stop_exit; } /* we have a replacement, fixup the current mbuf */ m_adj(m, tpa_info->placement_offset); m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; /* mark the checksums valid (taken care of by the firmware) */ fp->eth_q_stats.rx_ofld_frames_csum_ip++; fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); /* aggregate all of the SGEs into a single mbuf */ rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); if (rc) { /* drop the packet and log an error */ fp->eth_q_stats.rx_soft_errors++; m_freem(m); } else { if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) { m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; m->m_flags |= M_VLANTAG; } /* assign packet to this interface */ if_setrcvif(m, ifp); /* specify what RSS queue was used for this flow */ m->m_pkthdr.flowid = fp->index; BXE_SET_FLOWID(m); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); fp->eth_q_stats.rx_tpa_pkts++; /* pass the frame to the stack */ if_input(ifp, m); } /* we passed an mbuf up the stack or dropped the frame */ fp->eth_q_stats.mbuf_alloc_tpa--; bxe_tpa_stop_exit: fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; fp->rx_tpa_queue_used &= ~(1 << queue); } static uint8_t bxe_service_rxsgl( struct bxe_fastpath *fp, uint16_t len, uint16_t lenonbd, struct mbuf *m, struct eth_fast_path_rx_cqe *cqe_fp) { struct mbuf *m_frag; uint16_t frags, frag_len; uint16_t sge_idx = 0; uint16_t j; uint8_t i, rc = 0; uint32_t frag_size; /* adjust the mbuf */ m->m_len = lenonbd; frag_size = len - lenonbd; frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE)); m_frag->m_len = frag_len; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring! 
*/ return (rc); } fp->eth_q_stats.mbuf_alloc_sge--; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); frag_size -= frag_len; } bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); return rc; } static uint8_t bxe_rxeof(struct bxe_softc *sc, struct bxe_fastpath *fp) { if_t ifp = sc->ifp; uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; int rx_pkts = 0; int rc = 0; BXE_FP_RX_LOCK(fp); /* CQ "next element" is of the size of the regular element */ hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { hw_cq_cons++; } bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; bd_prod_fw = bd_prod; sw_cq_cons = fp->rx_cq_cons; sw_cq_prod = fp->rx_cq_prod; /* * Memory barrier necessary as speculative reads of the rx * buffer can be ahead of the index in the status block */ rmb(); BLOGD(sc, DBG_RX, "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", fp->index, hw_cq_cons, sw_cq_cons); while (sw_cq_cons != hw_cq_cons) { struct bxe_sw_rx_bd *rx_buf = NULL; union eth_rx_cqe *cqe; struct eth_fast_path_rx_cqe *cqe_fp; uint8_t cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; uint16_t len, lenonbd, pad; struct mbuf *m = NULL; comp_ring_cons = RCQ(sw_cq_cons); bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); cqe = &fp->rcq_chain[comp_ring_cons]; cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; BLOGD(sc, DBG_RX, "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " "BD prod=%d cons=%d CQE type=0x%x err=0x%x " "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", fp->index, hw_cq_cons, sw_cq_cons, bd_prod, bd_cons, CQE_TYPE(cqe_fp_flags), cqe_fp_flags, cqe_fp->status_flags, le32toh(cqe_fp->rss_hash_result), le16toh(cqe_fp->vlan_tag), le16toh(cqe_fp->pkt_len_or_gro_seg_len), le16toh(cqe_fp->len_on_bd)); /* is this a slowpath msg? */ if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { bxe_sp_event(sc, fp, cqe); goto next_cqe; } rx_buf = &fp->rx_mbuf_chain[bd_cons]; if (!CQE_TYPE_FAST(cqe_fp_type)) { struct bxe_sw_tpa_info *tpa_info; uint16_t frag_size, pages; uint8_t queue; if (CQE_TYPE_START(cqe_fp_type)) { bxe_tpa_start(sc, fp, cqe_fp->queue_index, bd_cons, bd_prod, cqe_fp); m = NULL; /* packet not ready yet */ goto next_rx; } KASSERT(CQE_TYPE_STOP(cqe_fp_type), ("CQE type is not STOP! (0x%x)\n", cqe_fp_type)); queue = cqe->end_agg_cqe.queue_index; tpa_info = &fp->rx_tpa_info[queue]; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", fp->index, queue); frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - tpa_info->len_on_bd); pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; bxe_tpa_stop(sc, fp, tpa_info, queue, pages, &cqe->end_agg_cqe, comp_ring_cons); bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); goto next_cqe; } /* non TPA */ /* is this an error packet? */ if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); fp->eth_q_stats.rx_soft_errors++; goto next_rx; } len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); lenonbd = le16toh(cqe_fp->len_on_bd); pad = cqe_fp->placement_offset; m = rx_buf->m; if (__predict_false(m == NULL)) { BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", bd_cons, fp->index); goto next_rx; } /* XXX double copy if packet length under a threshold */ /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. 
Else if a maximum Rx * buffer limit is imposed then fill in the next producer index. */ rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, (sc->max_rx_bufs != RX_BD_USABLE) ? bd_prod : bd_cons); if (rc != 0) { /* we simply reuse the received mbuf and don't post it to the stack */ m = NULL; BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", fp->index, rc); fp->eth_q_stats.rx_soft_errors++; if (sc->max_rx_bufs != RX_BD_USABLE) { /* copy this consumer index to the producer index */ memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, sizeof(struct bxe_sw_rx_bd)); memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); } goto next_rx; } /* current mbuf was detached from the bd */ fp->eth_q_stats.mbuf_alloc_rx--; /* we allocated a replacement mbuf, fixup the current one */ m_adj(m, pad); m->m_pkthdr.len = m->m_len = len; if ((len > 60) && (len > lenonbd)) { fp->eth_q_stats.rx_bxe_service_rxsgl++; rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp); if (rc) break; fp->eth_q_stats.rx_jumbo_sge_pkts++; } else if (lenonbd < len) { fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++; } /* assign packet to this interface */ if_setrcvif(m, ifp); /* assume no hardware checksum has completed */ m->m_pkthdr.csum_flags = 0; /* validate checksum if offload enabled */ if (if_getcapenable(ifp) & IFCAP_RXCSUM) { /* check for a valid IP frame */ if (!(cqe->fast_path_cqe.status_flags & ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { fp->eth_q_stats.rx_hw_csum_errors++; } else { fp->eth_q_stats.rx_ofld_frames_csum_ip++; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } } /* check for a valid TCP/UDP frame */ if (!(cqe->fast_path_cqe.status_flags & ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { fp->eth_q_stats.rx_hw_csum_errors++; } else { fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; m->m_pkthdr.csum_data = 0xFFFF; m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); } } } /* if there is a VLAN tag then flag that info */ if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) { m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; m->m_flags |= M_VLANTAG; } /* specify what RSS queue was used for this flow */ m->m_pkthdr.flowid = fp->index; BXE_SET_FLOWID(m); next_rx: bd_cons = RX_BD_NEXT(bd_cons); bd_prod = RX_BD_NEXT(bd_prod); bd_prod_fw = RX_BD_NEXT(bd_prod_fw); /* pass the frame to the stack */ if (__predict_true(m != NULL)) { if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); rx_pkts++; if_input(ifp, m); } next_cqe: sw_cq_prod = RCQ_NEXT(sw_cq_prod); sw_cq_cons = RCQ_NEXT(sw_cq_cons); /* limit spinning on the queue */ if (rc != 0) break; if (rx_pkts == sc->rx_budget) { fp->eth_q_stats.rx_budget_reached++; break; } } /* while work to do */ fp->rx_bd_cons = bd_cons; fp->rx_bd_prod = bd_prod_fw; fp->rx_cq_cons = sw_cq_cons; fp->rx_cq_prod = sw_cq_prod; /* Update producers */ bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); fp->eth_q_stats.rx_pkts += rx_pkts; fp->eth_q_stats.rx_calls++; BXE_FP_RX_UNLOCK(fp); return (sw_cq_cons != hw_cq_cons); } static uint16_t bxe_free_tx_pkt(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t idx) { struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; struct eth_tx_start_bd *tx_start_bd; uint16_t bd_idx = TX_BD(tx_buf->first_bd); uint16_t new_cons; int nbd; /* unmap the mbuf from non-paged memory */ bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 
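/* * Illustrative note (editorial, not driver code): the nbd computed below * counts the BDs this packet consumed beyond its start BD. For example, a * start BD whose nbd field reads 4 with first_bd = 100 gives * new_cons = 100 + 3 = 103, which bxe_txeof() then adopts as the new * tx_bd_cons. */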
tx_start_bd = &fp->tx_chain[bd_idx].start_bd; nbd = le16toh(tx_start_bd->nbd) - 1; new_cons = (tx_buf->first_bd + nbd); /* free the mbuf */ if (__predict_true(tx_buf->m != NULL)) { m_freem(tx_buf->m); fp->eth_q_stats.mbuf_alloc_tx--; } else { fp->eth_q_stats.tx_chain_lost_mbuf++; } tx_buf->m = NULL; tx_buf->first_bd = 0; return (new_cons); } /* transmit timeout watchdog */ static int bxe_watchdog(struct bxe_softc *sc, struct bxe_fastpath *fp) { BXE_FP_TX_LOCK(fp); if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { BXE_FP_TX_UNLOCK(fp); return (0); } BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); BXE_FP_TX_UNLOCK(fp); BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); return (-1); } /* processes transmit completions */ static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp) { if_t ifp = sc->ifp; uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; uint16_t tx_bd_avail; BXE_FP_TX_LOCK_ASSERT(fp); bd_cons = fp->tx_bd_cons; hw_cons = le16toh(*fp->tx_cons_sb); sw_cons = fp->tx_pkt_cons; while (sw_cons != hw_cons) { pkt_cons = TX_BD(sw_cons); BLOGD(sc, DBG_TX, "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", fp->index, hw_cons, sw_cons, pkt_cons); bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); sw_cons++; } fp->tx_pkt_cons = sw_cons; fp->tx_bd_cons = bd_cons; BLOGD(sc, DBG_TX, "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); mb(); tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); } else { if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } if (fp->tx_pkt_prod != fp->tx_pkt_cons) { /* reset the watchdog timer if there are pending transmits */ fp->watchdog_timer = BXE_TX_TIMEOUT; return (TRUE); } else { /* clear watchdog when there are no pending transmits */ fp->watchdog_timer = 0; return (FALSE); } } static void bxe_drain_tx_queues(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i, count; /* wait until all TX fastpath tasks have completed */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; count = 1000; while (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); if (count == 0) { BLOGE(sc, "Timeout waiting for fp[%d] " "transmits to complete!\n", i); bxe_panic(sc, ("tx drain failure\n")); return; } count--; DELAY(1000); rmb(); } } return; } static int bxe_del_all_macs(struct bxe_softc *sc, struct ecore_vlan_mac_obj *mac_obj, int mac_type, uint8_t wait_for_comp) { unsigned long ramrod_flags = 0, vlan_mac_flags = 0; int rc; /* wait for completion of the requested command */ if (wait_for_comp) { bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); } /* Set the mac type of addresses we want to clear */ bxe_set_bit(mac_type, &vlan_mac_flags); rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc < 0) { BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n", rc, mac_type, wait_for_comp); } return (rc); } static int bxe_fill_accept_flags(struct bxe_softc *sc, uint32_t rx_mode, unsigned long *rx_accept_flags, unsigned long *tx_accept_flags) { /* Clear the flags first */ *rx_accept_flags = 0; *tx_accept_flags = 0; switch (rx_mode) { case BXE_RX_MODE_NONE: /* * 'drop all' supersedes any accept flags that may have been * passed to the function. 
*/ break; case BXE_RX_MODE_NORMAL: bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); break; case BXE_RX_MODE_ALLMULTI: bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); break; case BXE_RX_MODE_PROMISC: /* * According to the definition of SI mode, iface in promisc mode * should receive matched and unmatched (in resolution of port) * unicast packets. */ bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); if (IS_MF_SI(sc)) { bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); } else { bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); } break; default: BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode); return (-1); } /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ if (rx_mode != BXE_RX_MODE_NONE) { bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); } return (0); } static int bxe_set_q_rx_mode(struct bxe_softc *sc, uint8_t cl_id, unsigned long rx_mode_flags, unsigned long rx_accept_flags, unsigned long tx_accept_flags, unsigned long ramrod_flags) { struct ecore_rx_mode_ramrod_params ramrod_param; int rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* Prepare ramrod parameters */ ramrod_param.cid = 0; ramrod_param.cl_id = cl_id; ramrod_param.rx_mode_obj = &sc->rx_mode_obj; ramrod_param.func_id = SC_FUNC(sc); ramrod_param.pstate = &sc->sp_state; ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); ramrod_param.ramrod_flags = ramrod_flags; ramrod_param.rx_mode_flags = rx_mode_flags; ramrod_param.rx_accept_flags = rx_accept_flags; ramrod_param.tx_accept_flags = tx_accept_flags; rc = ecore_config_rx_mode(sc, &ramrod_param); if (rc < 0) { BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x " "rx_accept_flags 0x%x tx_accept_flags 0x%x " "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id, (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags, (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc); return (rc); } return (0); } static int bxe_set_storm_rx_mode(struct bxe_softc *sc) { unsigned long rx_mode_flags = 0, ramrod_flags = 0; unsigned long rx_accept_flags = 0, tx_accept_flags = 0; int rc; rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, &tx_accept_flags); if (rc) { return (rc); } bxe_set_bit(RAMROD_RX, &ramrod_flags); bxe_set_bit(RAMROD_TX, &ramrod_flags); /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, rx_accept_flags, tx_accept_flags, ramrod_flags)); } 
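/* * Illustrative sketch (editorial, kept under "#if 0" and not built): how the * rx-mode helpers above compose. Promiscuous mode, for example, is a matter * of filling the accept flags and posting a single rx-mode ramrod for the * leading client. The function and variable names below are hypothetical. */ #if 0
static int
bxe_example_set_promisc(struct bxe_softc *sc)
{
    unsigned long rx_accept = 0, tx_accept = 0, ramrod_flags = 0;
    int rc;

    /* translate BXE_RX_MODE_PROMISC into ECORE_ACCEPT_* bits */
    rc = bxe_fill_accept_flags(sc, BXE_RX_MODE_PROMISC, &rx_accept, &tx_accept);
    if (rc != 0) {
        return (rc);
    }

    /* apply to both directions and wait for the ramrod to complete */
    bxe_set_bit(RAMROD_RX, &ramrod_flags);
    bxe_set_bit(RAMROD_TX, &ramrod_flags);
    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, 0,
                              rx_accept, tx_accept, ramrod_flags));
}
#endif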
/* returns the "mcp load_code" according to global load_count array */ static int bxe_nic_load_no_mcp(struct bxe_softc *sc) { int path = SC_PATH(sc); int port = SC_PORT(sc); BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]++; load_count[path][1 + port]++; BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); if (load_count[path][0] == 1) { return (FW_MSG_CODE_DRV_LOAD_COMMON); } else if (load_count[path][1 + port] == 1) { return (FW_MSG_CODE_DRV_LOAD_PORT); } else { return (FW_MSG_CODE_DRV_LOAD_FUNCTION); } } /* returns the "mcp load_code" according to global load_count array */ static int bxe_nic_unload_no_mcp(struct bxe_softc *sc) { int port = SC_PORT(sc); int path = SC_PATH(sc); BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]--; load_count[path][1 + port]--; BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); if (load_count[path][0] == 0) { return (FW_MSG_CODE_DRV_UNLOAD_COMMON); } else if (load_count[path][1 + port] == 0) { return (FW_MSG_CODE_DRV_UNLOAD_PORT); } else { return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); } } /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ static uint32_t bxe_send_unload_req(struct bxe_softc *sc, int unload_mode) { uint32_t reset_code = 0; /* Select the UNLOAD request mode */ if (unload_mode == UNLOAD_NORMAL) { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; } else { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; } /* Send the request to the MCP */ if (!BXE_NOMCP(sc)) { reset_code = bxe_fw_command(sc, reset_code, 0); } else { reset_code = bxe_nic_unload_no_mcp(sc); } return (reset_code); } /* send UNLOAD_DONE command to the MCP */ static void bxe_send_unload_done(struct bxe_softc *sc, uint8_t keep_link) { uint32_t reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; /* Report UNLOAD_DONE to MCP */ if (!BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); } } static int bxe_func_wait_started(struct bxe_softc *sc) { int tout = 50; if (!sc->port.pmf) { return (0); } /* * (assumption: No Attention from MCP at this stage) * PMF probably in the middle of TX disable/enable transaction * 1. Sync ISR for default SB * 2. Sync SP queue - this guarantees us that attention handling started * 3. Wait until the TX disable/enable transaction completes * * 1+2 guarantee that if DCBX attention was scheduled it already changed * the pending bit of the transaction from STARTED-->TX_STOPPED; if we already * received completion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ /* XXX make sure default SB ISR is done */ /* need a way to synchronize an irq (intr_mtx?) */ /* XXX flush any work queues */ while (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED && tout--) { DELAY(20000); } if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { /* * Failed to complete the transaction in a "good way" * Force both transactions with CLR bit. */ struct ecore_func_state_params func_params = { NULL }; BLOGE(sc, "Unexpected function state! 
" "Forcing STARTED-->TX_STOPPED-->STARTED\n"); func_params.f_obj = &sc->func_obj; bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); /* STARTED-->TX_STOPPED */ func_params.cmd = ECORE_F_CMD_TX_STOP; ecore_func_state_change(sc, &func_params); /* TX_STOPPED-->STARTED */ func_params.cmd = ECORE_F_CMD_TX_START; return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_stop_queue(struct bxe_softc *sc, int index) { struct bxe_fastpath *fp = &sc->fp[index]; struct ecore_queue_state_params q_params = { NULL }; int rc; BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); q_params.q_obj = &sc->sp_objs[fp->index].q_obj; /* We want to wait for completion in this context */ bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); /* Stop the primary connection: */ /* ...halt the connection */ q_params.cmd = ECORE_Q_CMD_HALT; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...terminate the connection */ q_params.cmd = ECORE_Q_CMD_TERMINATE; memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...delete cfc entry */ q_params.cmd = ECORE_Q_CMD_CFC_DEL; memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; return (ecore_queue_state_change(sc, &q_params)); } /* wait for the outstanding SP commands */ static inline uint8_t bxe_wait_sp_comp(struct bxe_softc *sc, unsigned long mask) { unsigned long tmp; int tout = 5000; /* wait for 5 secs tops */ while (tout--) { mb(); if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { return (TRUE); } DELAY(1000); } mb(); tmp = atomic_load_acq_long(&sc->sp_state); if (tmp & mask) { BLOGE(sc, "Filtering completion timed out: " "sp_state 0x%lx, mask 0x%lx\n", tmp, mask); return (FALSE); } return (FALSE); } static int bxe_func_stop(struct bxe_softc *sc) { struct ecore_func_state_params func_params = { NULL }; int rc; /* prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_STOP; /* * Try to stop the function the 'good way'. If it fails (in case * of a parity error during bxe_chip_cleanup()) and we are * not in a debug mode, perform a state transaction in order to * enable further HW_RESET transaction. */ rc = ecore_func_state_change(sc, &func_params); if (rc) { BLOGE(sc, "FUNC_STOP ramrod failed. " "Running a dry transaction (%d)\n", rc); bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_reset_hw(struct bxe_softc *sc, uint32_t load_code) { struct ecore_func_state_params func_params = { NULL }; /* Prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_HW_RESET; func_params.params.hw_init.load_phase = load_code; return (ecore_func_state_change(sc, &func_params)); } static void bxe_int_disable_sync(struct bxe_softc *sc, int disable_hw) { if (disable_hw) { /* prevent the HW from sending interrupts */ bxe_int_disable(sc); } /* XXX need a way to synchronize ALL irqs (intr_mtx?) 
*/ /* make sure all ISRs are done */ /* XXX make sure sp_task is not running */ /* cancel and flush work queues */ } static void bxe_chip_cleanup(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link) { int port = SC_PORT(sc); struct ecore_mcast_ramrod_params rparam = { NULL }; uint32_t reset_code; int i, rc = 0; bxe_drain_tx_queues(sc); /* give HW time to discard old tx messages */ DELAY(1000); /* Clean all ETH MACs */ rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); if (rc < 0) { BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); } /* Clean up UC list */ rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); if (rc < 0) { BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); } /* Disable LLH */ if (!CHIP_IS_E1(sc)) { REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); } /* Set "drop all" to stop Rx */ /* * We need to take the BXE_MCAST_LOCK() here in order to prevent * a race between the completion code and this code. */ BXE_MCAST_LOCK(sc); if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); } else { bxe_set_storm_rx_mode(sc); } /* Clean up multicast configuration */ rparam.mcast_obj = &sc->mcast_obj; rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); } BXE_MCAST_UNLOCK(sc); // XXX bxe_iov_chip_cleanup(sc); /* * Send the UNLOAD_REQUEST to the MCP. This will return whether * this function should perform FUNCTION, PORT, or COMMON HW * reset. */ reset_code = bxe_send_unload_req(sc, unload_mode); /* * (assumption: No Attention from MCP at this stage) * PMF probably in the middle of TX disable/enable transaction */ rc = bxe_func_wait_started(sc); if (rc) { BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc); } /* * Close multi and leading connections * Completions for ramrods are collected in a synchronous way */ for (i = 0; i < sc->num_queues; i++) { if (bxe_stop_queue(sc, i)) { goto unload_error; } } /* * If SP settings didn't get completed so far - something * very wrong has happened. */ if (!bxe_wait_sp_comp(sc, ~0x0UL)) { BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc); } unload_error: rc = bxe_func_stop(sc); if (rc) { BLOGE(sc, "Function stop failed!(%d)\n", rc); } /* disable HW interrupts */ bxe_int_disable_sync(sc, TRUE); /* detach interrupts */ bxe_interrupt_detach(sc); /* Reset the chip */ rc = bxe_reset_hw(sc, reset_code); if (rc) { BLOGE(sc, "Hardware reset failed(%d)\n", rc); } /* Report UNLOAD_DONE to MCP */ bxe_send_unload_done(sc, keep_link); } static void bxe_disable_close_the_gate(struct bxe_softc *sc) { uint32_t val; int port = SC_PORT(sc); BLOGD(sc, DBG_LOAD, "Disabling 'close the gates'\n"); if (CHIP_IS_E1(sc)) { uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; val = REG_RD(sc, addr); val &= ~(0x300); REG_WR(sc, addr, val); } else { val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); } } /* * Cleans the objects that have internal lists without sending * ramrods. Should be run when interrupts are disabled. */ static void bxe_squeeze_objects(struct bxe_softc *sc) { unsigned long ramrod_flags = 0, vlan_mac_flags = 0; struct ecore_mcast_ramrod_params rparam = { NULL }; struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; int rc; /* Cleanup MACs' object first... 
*/ /* Wait for completion of the requested commands */ bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Perform a dry cleanup */ bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); /* Clean ETH primary MAC */ bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc != 0) { BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc); } /* Cleanup UC list */ vlan_mac_flags = 0; bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc != 0) { BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc); } /* Now clean mcast object... */ rparam.mcast_obj = &sc->mcast_obj; bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); /* Add a DEL command... */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); } /* now wait until all pending commands are cleared */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); while (rc != 0) { if (rc < 0) { BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc); return; } rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); } } /* stop the controller */ static __noinline int bxe_nic_unload(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link) { uint8_t global = FALSE; uint32_t val; int i; BXE_CORE_LOCK_ASSERT(sc); if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); for (i = 0; i < sc->num_queues; i++) { struct bxe_fastpath *fp; fp = &sc->fp[i]; fp->watchdog_timer = 0; BXE_FP_TX_LOCK(fp); BXE_FP_TX_UNLOCK(fp); } BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n"); /* mark driver as unloaded in shmem2 */ if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); } if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE && (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) { if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { /* * We can get here if the driver has been unloaded * during parity error recovery and is either waiting for a * leader to complete or for other functions to unload and * then ifconfig down has been issued. In this case we want to * unload and let other functions complete a recovery * process. */ sc->recovery_state = BXE_RECOVERY_DONE; sc->is_leader = 0; bxe_release_leader_lock(sc); mb(); BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n"); } BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x" " state = 0x%x\n", sc->recovery_state, sc->state); return (-1); } /* * Nothing to do during unload if previous bxe_nic_load() * did not complete successfully - all resources are released. */ if ((sc->state == BXE_STATE_CLOSED) || (sc->state == BXE_STATE_ERROR)) { return (0); } sc->state = BXE_STATE_CLOSING_WAITING_HALT; mb(); /* stop tx */ bxe_tx_disable(sc); sc->rx_mode = BXE_RX_MODE_NONE; /* XXX set rx mode ??? 
*/ if (IS_PF(sc) && !sc->grcdump_done) { /* set ALWAYS_ALIVE bit in shmem */ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bxe_drv_pulse(sc); bxe_stats_handle(sc, STATS_EVENT_STOP); bxe_save_statistics(sc); }
/* wait till consumers catch up with producers in all queues */ bxe_drain_tx_queues(sc);
/* if VF, indicate to PF that this function is going down (PF will delete sp * elements and clear initializations) */
if (IS_VF(sc)) { ; /* bxe_vfpf_close_vf(sc); */ } else if (unload_mode != UNLOAD_RECOVERY) { /* if this is a normal/close unload we need to clean up the chip */ if (!sc->grcdump_done) bxe_chip_cleanup(sc, unload_mode, keep_link); } else { /* Send the UNLOAD_REQUEST to the MCP */ bxe_send_unload_req(sc, unload_mode);
/* * Prevent transactions to host from the functions on the * engine that doesn't reset global blocks in case of global * attention once global blocks are reset and gates are opened * (the engine whose leader will perform the recovery * last). */
if (!CHIP_IS_E1x(sc)) { bxe_pf_disable(sc); }
/* disable HW interrupts */ bxe_int_disable_sync(sc, TRUE);
/* detach interrupts */ bxe_interrupt_detach(sc);
/* Report UNLOAD_DONE to MCP */ bxe_send_unload_done(sc, FALSE); }
/* * At this stage no more interrupts will arrive, so we may safely clean * the queueable objects here in case they failed to get cleaned so far. */
if (IS_PF(sc)) { bxe_squeeze_objects(sc); }
/* There should be no more pending SP commands at this stage */ sc->sp_state = 0; sc->port.pmf = 0; bxe_free_fp_buffers(sc); if (IS_PF(sc)) { bxe_free_mem(sc); } bxe_free_fw_stats_mem(sc); sc->state = BXE_STATE_CLOSED;
/* * Check if there are pending parity attentions. If there are - set * RECOVERY_IN_PROGRESS. */
if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { bxe_set_reset_in_progress(sc); /* Set RESET_IS_GLOBAL if needed */ if (global) { bxe_set_reset_global(sc); } }
/* * The last driver must disable a "close the gate" if there is no * parity attention or "process kill" pending. */
if (IS_PF(sc) && !bxe_clear_pf_load(sc) && bxe_reset_is_done(sc, SC_PATH(sc))) { bxe_disable_close_the_gate(sc); }
BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); bxe_link_report(sc); return (0); }
/* * Called by the OS to set various media options (i.e. link, speed, etc.) when * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". */
static int -bxe_ifmedia_update(struct ifnet *ifp) +bxe_ifmedia_update(if_t ifp) { struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp); struct ifmedia *ifm; ifm = &sc->ifmedia; /* We only support Ethernet media type. */ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { return (EINVAL); } switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; case IFM_10G_CX4: case IFM_10G_SR: case IFM_10G_T: case IFM_10G_TWINAX: default: /* We don't support changing the media type. */ BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", IFM_SUBTYPE(ifm->ifm_media)); return (EINVAL); } return (0); }
/* * Called by the OS to get the current media status (i.e. link, speed, etc.). */
static void -bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) +bxe_ifmedia_status(if_t ifp, struct ifmediareq *ifmr) { struct bxe_softc *sc = if_getsoftc(ifp);
/* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..." line if the IFM_AVALID flag is *NOT* set. So we need to set this flag unconditionally (irrespective of the administrative 'up/down' state of the interface) to ensure that the line is always displayed. */
ifmr->ifm_status = IFM_AVALID;
/* Setup the default interface info.
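 * The media type defaults to plain Ethernet; IFM_ACTIVE/IFM_FDX and the
 * negotiated subtype (sc->media) are OR'ed in below once link state is known.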
*/ ifmr->ifm_active = IFM_ETHER;
/* Report link down if the driver isn't running. */
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { ifmr->ifm_active |= IFM_NONE; BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__); BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n", __func__, sc->link_vars.link_up); return; }
if (sc->link_vars.link_up) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_FDX; } else { ifmr->ifm_active |= IFM_NONE; BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n", __func__); return; }
ifmr->ifm_active |= sc->media; return; }
static void bxe_handle_chip_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; long work = atomic_load_acq_long(&sc->chip_tq_flags); switch (work) { case CHIP_TQ_REINIT: if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { /* restart the interface */ BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } break; default: break; } }
/* * Handles any IOCTL calls from the operating system. * * Returns: * 0 = Success, >0 Failure */
static int bxe_ioctl(if_t ifp, u_long command, caddr_t data) { struct bxe_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; int mask = 0; int reinit = 0; int error = 0; int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); switch (command) { case SIOCSIFMTU: BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", ifr->ifr_mtu); if (sc->mtu == ifr->ifr_mtu) { /* nothing to change */ break; } if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", ifr->ifr_mtu, mtu_min, mtu_max); error = EINVAL; break; } atomic_store_rel_int((volatile unsigned int *)&sc->mtu, (unsigned long)ifr->ifr_mtu); /* atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), (unsigned long)ifr->ifr_mtu); XXX - Not sure why it needs to be atomic */ if_setmtu(ifp, ifr->ifr_mtu); reinit = 1; break;
case SIOCSIFFLAGS: /* toggle the interface state up or down */ BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); BXE_CORE_LOCK(sc); /* check if the interface is up */ if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* set the receive mode flags */ bxe_set_rx_mode(sc); } else if (sc->state != BXE_STATE_DISABLED) { bxe_init_locked(sc); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { bxe_periodic_stop(sc); bxe_stop_locked(sc); } } BXE_CORE_UNLOCK(sc); break;
case SIOCADDMULTI: case SIOCDELMULTI: /* add/delete multicast addresses */ BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); /* check if the interface is up */ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* set the receive mode flags */ BXE_CORE_LOCK(sc); bxe_set_rx_mode(sc); BXE_CORE_UNLOCK(sc); } break;
case SIOCSIFCAP: /* find out which capabilities have changed */ mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", mask);
/* toggle the LRO capabilities enable flag */ if (mask & IFCAP_LRO) { if_togglecapenable(ifp, IFCAP_LRO); BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", (if_getcapenable(ifp) & IFCAP_LRO) ?
"ON" : "OFF"); reinit = 1; } /* toggle the TXCSUM checksum capabilites enable flag */ if (mask & IFCAP_TXCSUM) { if_togglecapenable(ifp, IFCAP_TXCSUM); BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF"); if (if_getcapenable(ifp) & IFCAP_TXCSUM) { if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0); } else { if_clearhwassist(ifp); /* XXX */ } } /* toggle the RXCSUM checksum capabilities enable flag */ if (mask & IFCAP_RXCSUM) { if_togglecapenable(ifp, IFCAP_RXCSUM); BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF"); if (if_getcapenable(ifp) & IFCAP_RXCSUM) { if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0); } else { if_clearhwassist(ifp); /* XXX */ } } /* toggle TSO4 capabilities enabled flag */ if (mask & IFCAP_TSO4) { if_togglecapenable(ifp, IFCAP_TSO4); BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF"); } /* toggle TSO6 capabilities enabled flag */ if (mask & IFCAP_TSO6) { if_togglecapenable(ifp, IFCAP_TSO6); BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF"); } /* toggle VLAN_HWTSO capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTSO) { if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); } /* toggle VLAN_HWCSUM capabilities enabled flag */ if (mask & IFCAP_VLAN_HWCSUM) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); error = EINVAL; } /* toggle VLAN_MTU capabilities enable flag */ if (mask & IFCAP_VLAN_MTU) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWTAGGING capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTAGGING) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWFILTER capabilities enabled flag */ if (mask & IFCAP_VLAN_HWFILTER) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); error = EINVAL; } /* XXX not yet... 
* IFCAP_WOL_MAGIC */ break;
case SIOCSIFMEDIA: case SIOCGIFMEDIA: /* set/get interface media */ BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", (command & 0xff)); error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break;
default: BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", (command & 0xff)); error = ether_ioctl(ifp, command, data); break; }
if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { BLOGD(sc, DBG_LOAD | DBG_IOCTL, "Re-initializing hardware from IOCTL change\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); }
return (error); }
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents) { char *type; int i = 0; if (!(sc->debug & DBG_MBUF)) { return; } if (m == NULL) { BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); return; } while (m) { BLOGD(sc, DBG_MBUF, "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); if (m->m_flags & M_PKTHDR) { BLOGD(sc, DBG_MBUF, "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, (int)m->m_pkthdr.csum_flags, CSUM_BITS); } if (m->m_flags & M_EXT) { switch (m->m_ext.ext_type) { case EXT_CLUSTER: type = "EXT_CLUSTER"; break; case EXT_SFBUF: type = "EXT_SFBUF"; break; case EXT_JUMBOP: type = "EXT_JUMBOP"; break; case EXT_JUMBO9: type = "EXT_JUMBO9"; break; case EXT_JUMBO16: type = "EXT_JUMBO16"; break; case EXT_PACKET: type = "EXT_PACKET"; break; case EXT_MBUF: type = "EXT_MBUF"; break; case EXT_NET_DRV: type = "EXT_NET_DRV"; break; case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; case EXT_EXTREF: type = "EXT_EXTREF"; break; default: type = "UNKNOWN"; break; } BLOGD(sc, DBG_MBUF, "%02d: - m_ext: %p ext_size=%d type=%s\n", i, m->m_ext.ext_buf, m->m_ext.ext_size, type); } if (contents) { bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); } m = m->m_next; i++; } }
/* * Checks to ensure the 13 bd sliding window is >= MSS for TSO. * Check that (13 total bds - 3 bds) = 10 bd window >= MSS. * The 3 excluded bds are: 1 for the headers BD + 2 for the parse BD and * the last BD. The headers come in a separate bd in FreeBSD, so 13 - 3 = 10. * Returns: 0 if OK to send, 1 if packet needs further defragmentation */
static int bxe_chktso_window(struct bxe_softc *sc, int nsegs, bus_dma_segment_t *segs, struct mbuf *m) { uint32_t num_wnds, wnd_size, wnd_sum; int32_t frag_idx, wnd_idx; unsigned short lso_mss; wnd_sum = 0; wnd_size = 10; num_wnds = nsegs - wnd_size; lso_mss = htole16(m->m_pkthdr.tso_segsz);
/* * The total header length (Eth+IP+TCP) is in the first FreeBSD mbuf, so * calculate the first window sum of data while skipping the first * segment, assuming it is the header.
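 * Worked example (illustrative numbers): with wnd_size = 10 and an MSS of
 * 1460, ten data segments of 100 bytes each sum to 1000 < 1460, so the
 * window check fails and the caller must defragment the mbuf chain.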
*/ for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) { wnd_sum += htole16(segs[frag_idx].ds_len); } /* check the first 10 bd window size */ if (wnd_sum < lso_mss) { return (1); } /* run through the windows */ for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) { /* subtract the first mbuf->m_len of the last wndw(-header) */ wnd_sum -= htole16(segs[wnd_idx+1].ds_len); /* add the next mbuf len to the len of our new window */ wnd_sum += htole16(segs[frag_idx].ds_len); if (wnd_sum < lso_mss) { return (1); } } return (0); } static uint8_t bxe_set_pbd_csum_e2(struct bxe_fastpath *fp, struct mbuf *m, uint32_t *parsing_data) { struct ether_vlan_header *eh = NULL; struct ip *ip4 = NULL; struct ip6_hdr *ip6 = NULL; caddr_t ip = NULL; struct tcphdr *th = NULL; int e_hlen, ip_hlen, l4_off; uint16_t proto; if (m->m_pkthdr.csum_flags == CSUM_IP) { /* no L4 checksum offload needed */ return (0); } /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); proto = ntohs(eh->evl_proto); } else { e_hlen = ETHER_HDR_LEN; proto = ntohs(eh->evl_encap_proto); } switch (proto) { case ETHERTYPE_IP: /* get the IP header, if mbuf len < 20 then header in next mbuf */ ip4 = (m->m_len < sizeof(struct ip)) ? (struct ip *)m->m_next->m_data : (struct ip *)(m->m_data + e_hlen); /* ip_hl is number of 32-bit words */ ip_hlen = (ip4->ip_hl << 2); ip = (caddr_t)ip4; break; case ETHERTYPE_IPV6: /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? (struct ip6_hdr *)m->m_next->m_data : (struct ip6_hdr *)(m->m_data + e_hlen); /* XXX cannot support offload with IPv6 extensions */ ip_hlen = sizeof(struct ip6_hdr); ip = (caddr_t)ip6; break; default: /* We can't offload in this case... */ /* XXX error stat ??? */ return (0); } /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ l4_off = (e_hlen + ip_hlen); *parsing_data |= (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; th = (struct tcphdr *)(ip + ip_hlen); /* th_off is number of 32-bit words */ *parsing_data |= ((th->th_off << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); return (l4_off + (th->th_off << 2)); /* entire header length */ } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; return (l4_off + sizeof(struct udphdr)); /* entire header length */ } else { /* XXX error stat ??? 
*/ return (0); } } static uint8_t bxe_set_pbd_csum(struct bxe_fastpath *fp, struct mbuf *m, struct eth_tx_parse_bd_e1x *pbd) { struct ether_vlan_header *eh = NULL; struct ip *ip4 = NULL; struct ip6_hdr *ip6 = NULL; caddr_t ip = NULL; struct tcphdr *th = NULL; struct udphdr *uh = NULL; int e_hlen, ip_hlen; uint16_t proto; uint8_t hlen; uint16_t tmp_csum; uint32_t *tmp_uh; /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); proto = ntohs(eh->evl_proto); } else { e_hlen = ETHER_HDR_LEN; proto = ntohs(eh->evl_encap_proto); } switch (proto) { case ETHERTYPE_IP: /* get the IP header, if mbuf len < 20 then header in next mbuf */ ip4 = (m->m_len < sizeof(struct ip)) ? (struct ip *)m->m_next->m_data : (struct ip *)(m->m_data + e_hlen); /* ip_hl is number of 32-bit words */ ip_hlen = (ip4->ip_hl << 1); ip = (caddr_t)ip4; break; case ETHERTYPE_IPV6: /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? (struct ip6_hdr *)m->m_next->m_data : (struct ip6_hdr *)(m->m_data + e_hlen); /* XXX cannot support offload with IPv6 extensions */ ip_hlen = (sizeof(struct ip6_hdr) >> 1); ip = (caddr_t)ip6; break; default: /* We can't offload in this case... */ /* XXX error stat ??? */ return (0); } hlen = (e_hlen >> 1); /* note that rest of global_data is indirectly zeroed here */ if (m->m_flags & M_VLANTAG) { pbd->global_data = htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); } else { pbd->global_data = htole16(hlen); } pbd->ip_hlen_w = ip_hlen; hlen += pbd->ip_hlen_w; /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { th = (struct tcphdr *)(ip + (ip_hlen << 1)); /* th_off is number of 32-bit words */ hlen += (uint16_t)(th->th_off << 1); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { uh = (struct udphdr *)(ip + (ip_hlen << 1)); hlen += (sizeof(struct udphdr) / 2); } else { /* valid case as only CSUM_IP was set */ return (0); } pbd->total_hlen_w = htole16(hlen); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; pbd->tcp_pseudo_csum = ntohs(th->th_sum); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; /* * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP * checksums and does not know anything about the UDP header and where * the checksum field is located. It only knows about TCP. Therefore * we "lie" to the hardware for outgoing UDP packets w/ checksum * offload. Since the checksum field offset for TCP is 16 bytes and * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 * bytes less than the start of the UDP header. This allows the * hardware to write the checksum in the correct spot. But the * hardware will compute a checksum which includes the last 10 bytes * of the IP header. To correct this we tweak the stack computed * pseudo checksum by folding in the calculation of the inverse * checksum for those final 10 bytes of the IP header. This allows * the correct checksum to be computed by the hardware. 
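 * The 10-byte figure comes from the checksum field offsets: 16 bytes into
 * a TCP header versus 6 bytes into a UDP header, and 16 - 6 = 10.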
*/ /* set pointer 10 bytes before UDP header */ tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
/* calculate a pseudo header checksum over the first 10 bytes */ tmp_csum = in_pseudo(*tmp_uh, *(tmp_uh + 1), *(uint16_t *)(tmp_uh + 2)); pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); }
return (hlen * 2); /* entire header length, number of bytes */ }
static void bxe_set_pbd_lso_e2(struct mbuf *m, uint32_t *parsing_data) { *parsing_data |= ((m->m_pkthdr.tso_segsz << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS); /* XXX test for IPv6 with extension header... */ }
static void bxe_set_pbd_lso(struct mbuf *m, struct eth_tx_parse_bd_e1x *pbd) { struct ether_vlan_header *eh = NULL; struct ip *ip = NULL; struct tcphdr *th = NULL; int e_hlen;
/* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *);
/* handle VLAN encapsulation if present */ e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
/* get the IP and TCP header, with LSO entire header in first mbuf */ /* XXX assuming IPv4 */ ip = (struct ip *)(m->m_data + e_hlen); th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); pbd->tcp_send_seq = ntohl(th->th_seq); pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
#if 1
/* XXX IPv4 */ pbd->ip_id = ntohs(ip->ip_id); pbd->tcp_pseudo_csum = ntohs(in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)));
#else
/* XXX IPv6 */ pbd->tcp_pseudo_csum = ntohs(in_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htons(IPPROTO_TCP)));
#endif
pbd->global_data |= htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); }
/* * Encapsulates an mbuf cluster into the tx bd chain and makes the memory * visible to the controller. * * If an mbuf is submitted to this routine and cannot be given to the * controller (e.g. it has too many fragments) then the function may free * the mbuf and return to the caller. * * Returns: * 0 = Success, !0 = Failure * Note the side effect that an mbuf may be freed if it causes a problem.
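 * Callers run with the fastpath TX lock held (see BXE_FP_TX_LOCK_ASSERT in
 * bxe_tx_start_locked/bxe_tx_mq_start_locked), so the ring indices read and
 * advanced here are stable for the duration of the call.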
*/
static int bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head) { bus_dma_segment_t segs[32]; struct mbuf *m0; struct bxe_sw_tx_bd *tx_buf; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */ struct eth_tx_bd *tx_data_bd; struct eth_tx_bd *tx_total_pkt_size_bd; struct eth_tx_start_bd *tx_start_bd; uint16_t bd_prod, pkt_prod, total_pkt_size; uint8_t mac_type; int defragged, error, nsegs, rc, nbds, vlan_off, ovlan; struct bxe_softc *sc; uint16_t tx_bd_avail; struct ether_vlan_header *eh; uint32_t pbd_e2_parsing_data = 0; uint8_t hlen = 0; int tmp_bd; int i;
sc = fp->sc; M_ASSERTPKTHDR(*m_head); m0 = *m_head; rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0; tx_start_bd = NULL; tx_data_bd = NULL; tx_total_pkt_size_bd = NULL;
/* get the H/W pointer for packets and BDs */ pkt_prod = fp->tx_pkt_prod; bd_prod = fp->tx_bd_prod; mac_type = UNICAST_ADDRESS;
/* map the mbuf into the next open DMAable memory */ tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT);
/* mapping errors */ if (__predict_false(error != 0)) { fp->eth_q_stats.tx_dma_mapping_failure++; if (error == ENOMEM) { /* resource issue, try again later */ rc = ENOMEM; } else if (error == EFBIG) { /* possibly recoverable with defragmentation */ fp->eth_q_stats.mbuf_defrag_attempts++; m0 = m_defrag(*m_head, M_NOWAIT); if (m0 == NULL) { fp->eth_q_stats.mbuf_defrag_failures++; rc = ENOBUFS; } else { /* defrag successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { fp->eth_q_stats.tx_dma_mapping_failure++; rc = error; } } } else { /* unknown, unrecoverable mapping error */ BLOGE(sc, "Unknown TX mapping error rc=%d\n", error); bxe_dump_mbuf(sc, m0, FALSE); rc = error; } goto bxe_tx_encap_continue; }
tx_bd_avail = bxe_tx_avail(sc, fp);
/* make sure there is enough room in the send queue */ if (__predict_false(tx_bd_avail < (nsegs + 2))) { /* Recoverable, try again later. */ fp->eth_q_stats.tx_hw_queue_full++; bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); rc = ENOMEM; goto bxe_tx_encap_continue; }
/* capture the current H/W TX chain high watermark */ if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < (TX_BD_USABLE - tx_bd_avail))) { fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); }
/* make sure it fits in the packet window */ if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
/* * The mbuf may be too big for the controller to handle. If the frame * is a TSO frame we'll need to do an additional check. */
if (m0->m_pkthdr.csum_flags & CSUM_TSO) { if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) { goto bxe_tx_encap_continue; /* OK to send */ } else { fp->eth_q_stats.tx_window_violation_tso++; } } else { fp->eth_q_stats.tx_window_violation_std++; }
/* let's try to defragment this mbuf and remap it */ fp->eth_q_stats.mbuf_defrag_attempts++; bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); m0 = m_defrag(*m_head, M_NOWAIT); if (m0 == NULL) { fp->eth_q_stats.mbuf_defrag_failures++; /* Ugh, just drop the frame... :( */ rc = ENOBUFS; } else { /* defrag successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { fp->eth_q_stats.tx_dma_mapping_failure++; /* No sense in trying to defrag/copy chain, drop it.
:( */ rc = error; } else { /* if the chain is still too long then drop it */ if(m0->m_pkthdr.csum_flags & CSUM_TSO) { /* * in case TSO is enabled nsegs should be checked against * BXE_TSO_MAX_SEGMENTS */ if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) { bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); fp->eth_q_stats.nsegs_path1_errors++; rc = ENODEV; } } else { if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); fp->eth_q_stats.nsegs_path2_errors++; rc = ENODEV; } } } } } bxe_tx_encap_continue: /* Check for errors */ if (rc) { if (rc == ENOMEM) { /* recoverable try again later */ } else { fp->eth_q_stats.tx_soft_errors++; fp->eth_q_stats.mbuf_alloc_tx--; m_freem(*m_head); *m_head = NULL; } return (rc); } /* set flag according to packet type (UNICAST_ADDRESS is default) */ if (m0->m_flags & M_BCAST) { mac_type = BROADCAST_ADDRESS; } else if (m0->m_flags & M_MCAST) { mac_type = MULTICAST_ADDRESS; } /* store the mbuf into the mbuf ring */ tx_buf->m = m0; tx_buf->first_bd = fp->tx_bd_prod; tx_buf->flags = 0; /* prepare the first transmit (start) BD for the mbuf */ tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; BLOGD(sc, DBG_TX, "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); tx_start_bd->nbytes = htole16(segs[0].ds_len); total_pkt_size += tx_start_bd->nbytes; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); /* all frames have at least Start BD + Parsing BD */ nbds = nsegs + 1; tx_start_bd->nbd = htole16(nbds); if (m0->m_flags & M_VLANTAG) { tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); tx_start_bd->bd_flags.as_bitfield |= (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); } else { /* vf tx, start bd must hold the ethertype for fw to enforce it */ if (IS_VF(sc)) { /* map ethernet header to find type and header length */ eh = mtod(m0, struct ether_vlan_header *); tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); } } /* * add a parsing BD from the chain. 
The parsing BD is always added * though it is only used for TSO and chksum */ bd_prod = TX_BD_NEXT(bd_prod); if (m0->m_pkthdr.csum_flags) { if (m0->m_pkthdr.csum_flags & CSUM_IP) { fp->eth_q_stats.tx_ofld_frames_csum_ip++; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; } if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_L4_CSUM); } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_IS_UDP | ETH_TX_BD_FLAGS_L4_CSUM); } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) { tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | ETH_TX_BD_FLAGS_IS_UDP); } } if (!CHIP_IS_E1x(sc)) { pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); } SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); } else { uint16_t global_data = 0; pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); } SET_FLAG(global_data, ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); pbd_e1x->global_data |= htole16(global_data); } /* setup the parsing BD with TSO specific info */ if (m0->m_pkthdr.csum_flags & CSUM_TSO) { fp->eth_q_stats.tx_ofld_frames_lso++; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; if (__predict_false(tx_start_bd->nbytes > hlen)) { fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; /* split the first BD into header/data making the fw job easy */ nbds++; tx_start_bd->nbd = htole16(nbds); tx_start_bd->nbytes = htole16(hlen); bd_prod = TX_BD_NEXT(bd_prod); /* new transmit BD after the tx_parse_bd */ tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } BLOGD(sc, DBG_TX, "TSO split header size is %d (%x:%x) nbds %d\n", le16toh(tx_start_bd->nbytes), le32toh(tx_start_bd->addr_hi), le32toh(tx_start_bd->addr_lo), nbds); } if (!CHIP_IS_E1x(sc)) { bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); } else { bxe_set_pbd_lso(m0, pbd_e1x); } } if (pbd_e2_parsing_data) { pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); } /* prepare remaining BDs, start tx bd contains first seg/frag */ for (i = 1; i < nsegs ; i++) { bd_prod = TX_BD_NEXT(bd_prod); tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); tx_data_bd->nbytes = htole16(segs[i].ds_len); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } total_pkt_size += tx_data_bd->nbytes; } BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); if (tx_total_pkt_size_bd != NULL) { tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; } if (__predict_false(sc->debug & DBG_TX)) { tmp_bd = tx_buf->first_bd; for (i = 0; i < nbds; i++) { if (i == 0) { BLOGD(sc, DBG_TX, "TX Strt: %p bd=%d nbd=%d vlan=0x%x " "bd_flags=0x%x hdr_nbds=%d\n", tx_start_bd, tmp_bd, le16toh(tx_start_bd->nbd), 
le16toh(tx_start_bd->vlan_or_ethertype), tx_start_bd->bd_flags.as_bitfield, (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); } else if (i == 1) { if (pbd_e1x) { BLOGD(sc, DBG_TX, "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " "tcp_seq=%u total_hlen_w=%u\n", pbd_e1x, tmp_bd, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, le16toh(pbd_e1x->total_hlen_w)); } else { /* if (pbd_e2) */ BLOGD(sc, DBG_TX, "-> Parse: %p bd=%d dst=%02x:%02x:%02x " "src=%02x:%02x:%02x parsing_data=0x%x\n", pbd_e2, tmp_bd, pbd_e2->data.mac_addr.dst_hi, pbd_e2->data.mac_addr.dst_mid, pbd_e2->data.mac_addr.dst_lo, pbd_e2->data.mac_addr.src_hi, pbd_e2->data.mac_addr.src_mid, pbd_e2->data.mac_addr.src_lo, pbd_e2->parsing_data); } }
if (i != 1) { /* skip the parse bd as it doesn't hold data */ tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; BLOGD(sc, DBG_TX, "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo=0x%x\n", tx_data_bd, tmp_bd, le16toh(tx_data_bd->nbytes), le32toh(tx_data_bd->addr_hi), le32toh(tx_data_bd->addr_lo)); }
tmp_bd = TX_BD_NEXT(tmp_bd); } }
BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
/* update TX BD producer index value for next TX */ bd_prod = TX_BD_NEXT(bd_prod);
/* * If the chain of tx_bd's describing this frame is adjacent to or spans * an eth_tx_next_bd element then we need to increment the nbds value. */
if (TX_BD_IDX(bd_prod) < nbds) { nbds++; }
/* don't allow reordering of writes for nbd and packets */ mb();
fp->tx_db.data.prod += nbds;
/* producer points to the next free tx_bd at this point */ fp->tx_pkt_prod++; fp->tx_bd_prod = bd_prod;
DOORBELL(sc, fp->index, fp->tx_db.raw);
fp->eth_q_stats.tx_pkts++;
/* Prevent speculative reads from getting ahead of the status block. */ bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_READ);
/* Prevent speculative reads from getting ahead of the doorbell. */ bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 0, 0, BUS_SPACE_BARRIER_READ);
return (0); }
static void bxe_tx_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp) { struct mbuf *m = NULL; int tx_count = 0; uint16_t tx_bd_avail; BXE_FP_TX_LOCK_ASSERT(fp);
/* keep adding entries while there are frames to send */ while (!if_sendq_empty(ifp)) {
/* * check for any frames to send * dequeue can still be NULL even if queue is not empty */
m = if_dequeue(ifp); if (__predict_false(m == NULL)) { break; }
/* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++;
/* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. */
if (__predict_false(bxe_tx_encap(fp, &m))) { fp->eth_q_stats.tx_encap_failures++; if (m != NULL) { /* mark the TX queue as full and return the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); if_sendq_prepend(ifp, m); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_queue_xoff++; } /* stop looking for more work */ break; }
/* the frame was enqueued successfully */ tx_count++;
/* send a copy of the frame to any BPF listeners.
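 * The tap happens only after a successful encap, so listeners never see
 * frames that were dropped before reaching the hardware ring.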
*/ if_etherbpfmtap(ifp, m); tx_bd_avail = bxe_tx_avail(sc, fp); /* handle any completions if we're running low */ if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { break; } } } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } } /* Legacy (non-RSS) dispatch routine */ static void bxe_tx_start(if_t ifp) { struct bxe_softc *sc; struct bxe_fastpath *fp; sc = if_getsoftc(ifp); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { BLOGW(sc, "Interface not running, ignoring transmit request\n"); return; } if (!sc->link_vars.link_up) { BLOGW(sc, "Interface link is down, ignoring transmit request\n"); return; } fp = &sc->fp[0]; if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { fp->eth_q_stats.tx_queue_full_return++; return; } BXE_FP_TX_LOCK(fp); bxe_tx_start_locked(sc, ifp, fp); BXE_FP_TX_UNLOCK(fp); } static int bxe_tx_mq_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp, struct mbuf *m) { struct buf_ring *tx_br = fp->tx_br; struct mbuf *next; int depth, rc, tx_count; uint16_t tx_bd_avail; rc = tx_count = 0; BXE_FP_TX_LOCK_ASSERT(fp); if (sc->state != BXE_STATE_OPEN) { fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; return ENETDOWN; } if (!tx_br) { BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); return (EINVAL); } if (m != NULL) { rc = drbr_enqueue(ifp, tx_br, m); if (rc != 0) { fp->eth_q_stats.tx_soft_errors++; goto bxe_tx_mq_start_locked_exit; } } if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { fp->eth_q_stats.tx_request_link_down_failures++; goto bxe_tx_mq_start_locked_exit; } /* fetch the depth of the driver queue */ depth = drbr_inuse(ifp, tx_br); if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { fp->eth_q_stats.tx_max_drbr_queue_depth = depth; } /* keep adding entries while there are frames to send */ while ((next = drbr_peek(ifp, tx_br)) != NULL) { /* handle any completions if we're running low */ tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) { fp->eth_q_stats.bd_avail_too_less_failures++; m_freem(next); drbr_advance(ifp, tx_br); rc = ENOBUFS; break; } } /* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++; /* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. */ rc = bxe_tx_encap(fp, &next); if (__predict_false(rc != 0)) { fp->eth_q_stats.tx_encap_failures++; if (next != NULL) { /* mark the TX queue as full and save the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); drbr_putback(ifp, tx_br, next); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_frames_deferred++; } else drbr_advance(ifp, tx_br); /* stop looking for more work */ break; } /* the transmit frame was enqueued successfully */ tx_count++; /* send a copy of the frame to any BPF listeners */ if_etherbpfmtap(ifp, next); drbr_advance(ifp, tx_br); } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } bxe_tx_mq_start_locked_exit: /* If we didn't drain the drbr, enqueue a task in the future to do it. 
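 * The deferred task (bxe_tx_mq_start_deferred above) simply re-enters
 * bxe_tx_mq_start_locked() with a NULL mbuf, so only the backlog drains.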
*/ if (!drbr_empty(ifp, tx_br)) { fp->eth_q_stats.tx_mq_not_empty++; taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1); } return (rc); } static void bxe_tx_mq_start_deferred(void *arg, int pending) { struct bxe_fastpath *fp = (struct bxe_fastpath *)arg; struct bxe_softc *sc = fp->sc; if_t ifp = sc->ifp; BXE_FP_TX_LOCK(fp); bxe_tx_mq_start_locked(sc, ifp, fp, NULL); BXE_FP_TX_UNLOCK(fp); } /* Multiqueue (TSS) dispatch routine. */ static int -bxe_tx_mq_start(struct ifnet *ifp, +bxe_tx_mq_start(if_t ifp, struct mbuf *m) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; int fp_index, rc; fp_index = 0; /* default is the first queue */ /* check if flowid is set */ if (BXE_VALID_FLOWID(m)) fp_index = (m->m_pkthdr.flowid % sc->num_queues); fp = &sc->fp[fp_index]; if (sc->state != BXE_STATE_OPEN) { fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; return ENETDOWN; } if (BXE_FP_TX_TRYLOCK(fp)) { rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); BXE_FP_TX_UNLOCK(fp); } else { rc = drbr_enqueue(ifp, fp->tx_br, m); taskqueue_enqueue(fp->tq, &fp->tx_task); } return (rc); } static void -bxe_mq_flush(struct ifnet *ifp) +bxe_mq_flush(if_t ifp) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; struct mbuf *m; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->state != BXE_FP_STATE_IRQ) { BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", fp->index, fp->state); continue; } if (fp->tx_br != NULL) { BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); BXE_FP_TX_LOCK(fp); while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { m_freem(m); } BXE_FP_TX_UNLOCK(fp); } } if_qflush(ifp); } static uint16_t bxe_cid_ilt_lines(struct bxe_softc *sc) { if (IS_SRIOV(sc)) { return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); } return (L2_ILT_LINES(sc)); } static void bxe_ilt_set_info(struct bxe_softc *sc) { struct ilt_client_info *ilt_client; struct ecore_ilt *ilt = sc->ilt; uint16_t line = 0; ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); /* CDU */ ilt_client = &ilt->clients[ILT_CLIENT_CDU]; ilt_client->client_num = ILT_CLIENT_CDU; ilt_client->page_size = CDU_ILT_PAGE_SZ; ilt_client->flags = ILT_CLIENT_SKIP_MEM; ilt_client->start = line; line += bxe_cid_ilt_lines(sc); if (CNIC_SUPPORT(sc)) { line += CNIC_ILT_LINES; } ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[CDU]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* QM */ if (QM_INIT(sc->qm_cid_count)) { ilt_client = &ilt->clients[ILT_CLIENT_QM]; ilt_client->client_num = ILT_CLIENT_QM; ilt_client->page_size = QM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; /* 4 bytes for each cid */ line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, QM_ILT_PAGE_SZ); ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[QM]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } if (CNIC_SUPPORT(sc)) { /* SRC */ ilt_client = &ilt->clients[ILT_CLIENT_SRC]; ilt_client->client_num = ILT_CLIENT_SRC; ilt_client->page_size = SRC_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += SRC_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[SRC]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, 
ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12));
/* TM */ ilt_client = &ilt->clients[ILT_CLIENT_TM]; ilt_client->client_num = ILT_CLIENT_TM; ilt_client->page_size = TM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += TM_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[TM]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); }
KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); }
static void bxe_set_fp_rx_buf_size(struct bxe_softc *sc) { int i; uint32_t rx_buf_size; rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); for (i = 0; i < sc->num_queues; i++) { if (rx_buf_size <= MCLBYTES) { sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MCLBYTES; } else if (rx_buf_size <= MJUMPAGESIZE) { sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) { sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) { sc->fp[i].rx_buf_size = MJUMPAGESIZE; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; } else { sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; } } }
static int bxe_alloc_ilt_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt = (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); }
static int bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt->lines = (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); }
static void bxe_free_ilt_mem(struct bxe_softc *sc) { if (sc->ilt != NULL) { free(sc->ilt, M_BXE_ILT); sc->ilt = NULL; } }
static void bxe_free_ilt_lines_mem(struct bxe_softc *sc) { if (sc->ilt->lines != NULL) { free(sc->ilt->lines, M_BXE_ILT); sc->ilt->lines = NULL; } }
static void bxe_free_mem(struct bxe_softc *sc) { int i; for (i = 0; i < L2_ILT_LINES(sc); i++) { bxe_dma_free(sc, &sc->context[i].vcxt_dma); sc->context[i].vcxt = NULL; sc->context[i].size = 0; } ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); bxe_free_ilt_lines_mem(sc); }
static int bxe_alloc_mem(struct bxe_softc *sc) { int context_size; int allocated; int i;
/* * Allocate memory for CDU context: * This memory is allocated separately and not in the generic ILT * functions because CDU differs in a few aspects: * 1. There can be multiple entities allocating memory for context - * regular L2, CNIC, and SRIOV drivers. Each separately controls * its own ILT lines. * 2. Since CDU page-size is not a single 4KB page (which is the case * for the other ILT clients), to be efficient we want to support * allocation of sub-page-size in the last entry. * 3. Context pointers are used by the driver to pass to FW / update * the context (for the other ILT clients the pointers are used just to * free the memory during unload).
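 * Consequence of (2): the loop below carves context_size into
 * CDU_ILT_PAGE_SZ chunks and lets the final chunk be smaller, so the last
 * ILT entry may map a sub-page-size allocation.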
*/ context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); for (i = 0, allocated = 0; allocated < context_size; i++) { sc->context[i].size = min(CDU_ILT_PAGE_SZ, (context_size - allocated)); if (bxe_dma_alloc(sc, sc->context[i].size, &sc->context[i].vcxt_dma, "cdu context") != 0) { bxe_free_mem(sc); return (-1); } sc->context[i].vcxt = (union cdu_context *)sc->context[i].vcxt_dma.vaddr; allocated += sc->context[i].size; }
bxe_alloc_ilt_lines_mem(sc);
BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", sc->ilt, sc->ilt->start_line, sc->ilt->lines); { for (i = 0; i < 4; i++) { BLOGD(sc, DBG_LOAD, "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", i, sc->ilt->clients[i].page_size, sc->ilt->clients[i].start, sc->ilt->clients[i].end, sc->ilt->clients[i].client_num, sc->ilt->clients[i].flags); } }
if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); bxe_free_mem(sc); return (-1); }
return (0); }
static void bxe_free_rx_bd_chain(struct bxe_fastpath *fp) { int i; if (fp->rx_mbuf_tag == NULL) { return; }
/* free all mbufs and unload all maps */ for (i = 0; i < RX_BD_TOTAL; i++) { if (fp->rx_mbuf_chain[i].m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_mbuf_chain[i].m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_chain[i].m_map); } if (fp->rx_mbuf_chain[i].m != NULL) { m_freem(fp->rx_mbuf_chain[i].m); fp->rx_mbuf_chain[i].m = NULL; fp->eth_q_stats.mbuf_alloc_rx--; } } }
static void bxe_free_tpa_pool(struct bxe_fastpath *fp) { struct bxe_softc *sc; int i, max_agg_queues; sc = fp->sc; if (fp->rx_mbuf_tag == NULL) { return; } max_agg_queues = MAX_AGG_QS(sc);
/* release all mbufs and unload all DMA maps in the TPA pool */ for (i = 0; i < max_agg_queues; i++) { if (fp->rx_tpa_info[i].bd.m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_tpa_info[i].bd.m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info[i].bd.m_map); } if (fp->rx_tpa_info[i].bd.m != NULL) { m_freem(fp->rx_tpa_info[i].bd.m); fp->rx_tpa_info[i].bd.m = NULL; fp->eth_q_stats.mbuf_alloc_tpa--; } } }
static void bxe_free_sge_chain(struct bxe_fastpath *fp) { int i; if (fp->rx_sge_mbuf_tag == NULL) { return; }
/* free all mbufs and unload all maps */ for (i = 0; i < RX_SGE_TOTAL; i++) { if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { bus_dmamap_sync(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[i].m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[i].m_map); } if (fp->rx_sge_mbuf_chain[i].m != NULL) { m_freem(fp->rx_sge_mbuf_chain[i].m); fp->rx_sge_mbuf_chain[i].m = NULL; fp->eth_q_stats.mbuf_alloc_sge--; } } }
static void bxe_free_fp_buffers(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tx_br != NULL) { /* just in case bxe_mq_flush() wasn't called */ if (mtx_initialized(&fp->tx_mtx)) { struct mbuf *m; BXE_FP_TX_LOCK(fp); while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) m_freem(m); BXE_FP_TX_UNLOCK(fp); } }
/* free all RX buffers */ bxe_free_rx_bd_chain(fp); bxe_free_tpa_pool(fp); bxe_free_sge_chain(fp);
if (fp->eth_q_stats.mbuf_alloc_rx != 0) { BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_rx); } if (fp->eth_q_stats.mbuf_alloc_sge != 0) { BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_sge); } if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_tpa); } if
(fp->eth_q_stats.mbuf_alloc_tx != 0) { BLOGE(sc, "failed to release tx mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_tx); } /* XXX verify all mbufs were reclaimed */ } } static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t prev_index, uint16_t index) { struct bxe_sw_rx_bd *rx_buf; struct eth_rx_bd *rx_bd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs, rc; rc = 0; /* allocate the new RX BD mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; return (ENOBUFS); } fp->eth_q_stats.mbuf_alloc_rx++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = fp->rx_buf_size; /* map the mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; m_freem(m); fp->eth_q_stats.mbuf_alloc_rx--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); /* release any existing RX BD mbuf mappings */ if (prev_index != index) { rx_buf = &fp->rx_mbuf_chain[prev_index]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* * We only get here from bxe_rxeof() when the maximum number * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already * holds the mbuf in the prev_index so it's OK to NULL it out * here without concern of a memory leak. */ fp->rx_mbuf_chain[prev_index].m = NULL; } rx_buf = &fp->rx_mbuf_chain[index]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* save the mbuf and mapping info for a future packet */ map = (prev_index != index) ? 
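/* pick whichever map was just unloaded; it becomes the new spare below */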
fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; rx_buf->m_map = fp->rx_mbuf_spare_map; fp->rx_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_PREREAD); rx_buf->m = m; rx_bd = &fp->rx_chain[index]; rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); return (rc); } static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue) { struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs; int rc = 0; /* allocate the new TPA mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; return (ENOBUFS); } fp->eth_q_stats.mbuf_alloc_tpa++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = fp->rx_buf_size; /* map the mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; m_free(m); fp->eth_q_stats.mbuf_alloc_tpa--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); /* release any existing TPA mbuf mapping */ if (tpa_info->bd.m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); } /* save the mbuf and mapping info for the TPA mbuf */ map = tpa_info->bd.m_map; tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; fp->rx_tpa_info_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, BUS_DMASYNC_PREREAD); tpa_info->bd.m = m; tpa_info->seg = segs[0]; return (rc); } /* * Allocate an mbuf and assign it to the receive scatter gather chain. The * caller must take care to save a copy of the existing mbuf in the SG mbuf * chain. 
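 * Like the RX BD and TPA paths above, the new mbuf is DMA-loaded into a
 * spare map first and the maps are swapped only after the load succeeds,
 * so a mapping failure leaves the ring slot's existing mbuf intact.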
*/ static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index) { struct bxe_sw_rx_bd *sge_buf; struct eth_rx_sge *sge; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs; int rc = 0; /* allocate a new SGE mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; return (ENOMEM); } fp->eth_q_stats.mbuf_alloc_sge++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; /* map the SGE mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; m_freem(m); fp->eth_q_stats.mbuf_alloc_sge--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); sge_buf = &fp->rx_sge_mbuf_chain[index]; /* release any existing SGE mbuf mapping */ if (sge_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); } /* save the mbuf and mapping info for a future packet */ map = sge_buf->m_map; sge_buf->m_map = fp->rx_sge_mbuf_spare_map; fp->rx_sge_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_PREREAD); sge_buf->m = m; sge = &fp->rx_sge_chain[index]; sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); return (rc); } static __noinline int bxe_alloc_fp_buffers(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i, j, rc = 0; int ring_prod, cqe_ring_prod; int max_agg_queues; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; ring_prod = cqe_ring_prod = 0; fp->rx_bd_cons = 0; fp->rx_cq_cons = 0; /* allocate buffers for the RX BDs in RX BD chain */ for (j = 0; j < sc->max_rx_bufs; j++) { rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", i, rc); goto bxe_alloc_fp_buffers_error; } ring_prod = RX_BD_NEXT(ring_prod); cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); } fp->rx_bd_prod = ring_prod; fp->rx_cq_prod = cqe_ring_prod; fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; max_agg_queues = MAX_AGG_QS(sc); fp->tpa_enable = TRUE; /* fill the TPA pool */ for (j = 0; j < max_agg_queues; j++) { rc = bxe_alloc_rx_tpa_mbuf(fp, j); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", i, j); fp->tpa_enable = FALSE; goto bxe_alloc_fp_buffers_error; } fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; } if (fp->tpa_enable) { /* fill the RX SGE chain */ ring_prod = 0; for (j = 0; j < RX_SGE_USABLE; j++) { rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", i, ring_prod); fp->tpa_enable = FALSE; ring_prod = 0; goto bxe_alloc_fp_buffers_error; } ring_prod = RX_SGE_NEXT(ring_prod); } fp->rx_sge_prod = ring_prod; } } return (0); bxe_alloc_fp_buffers_error: /* unwind what was already allocated */ bxe_free_rx_bd_chain(fp); bxe_free_tpa_pool(fp); bxe_free_sge_chain(fp); return (ENOBUFS); } static void bxe_free_fw_stats_mem(struct bxe_softc *sc) { bxe_dma_free(sc, &sc->fw_stats_dma); sc->fw_stats_num = 0; sc->fw_stats_req_size = 0; sc->fw_stats_req = NULL; sc->fw_stats_req_mapping = 0; sc->fw_stats_data_size = 0; sc->fw_stats_data = NULL; sc->fw_stats_data_mapping = 0; } static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc) { uint8_t 
num_queue_stats; int num_groups;
/* number of queues for statistics is number of eth queues */ num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
/* * Total number of FW statistics requests = * 1 for port stats + 1 for PF stats + num of queues */
sc->fw_stats_num = (2 + num_queue_stats);
/* * Request is built from stats_query_header and an array of * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT * rules. The real number of requests is configured in the * stats_query_header. */
num_groups = ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", sc->fw_stats_num, num_groups);
sc->fw_stats_req_size = (sizeof(struct stats_query_header) + (num_groups * sizeof(struct stats_query_cmd_group)));
/* * Data for statistics requests + stats_counter. * stats_counter holds per-STORM counters that are incremented when * STORM has finished with the current request. Memory for FCoE * offloaded statistics is counted anyway, even if it will not be sent. * VF stats are not accounted for here as the data of VF stats is stored * in memory allocated by the VF, not here. */
sc->fw_stats_data_size = (sizeof(struct stats_counter) + sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + /* sizeof(struct fcoe_statistics_params) + */ (sizeof(struct per_queue_stats) * num_queue_stats));
if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), &sc->fw_stats_dma, "fw stats") != 0) { bxe_free_fw_stats_mem(sc); return (-1); }
/* set up the shortcuts */ sc->fw_stats_req = (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; sc->fw_stats_data = (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + sc->fw_stats_req_size); sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + sc->fw_stats_req_size);
BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", (uintmax_t)sc->fw_stats_req_mapping); BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", (uintmax_t)sc->fw_stats_data_mapping);
return (0); }
/* * Bits map: * 0-7 - Engine0 load counter. * 8-15 - Engine1 load counter. * 16 - Engine0 RESET_IN_PROGRESS bit. * 17 - Engine1 RESET_IN_PROGRESS bit. * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active * function on the engine * 19 - Engine1 ONE_IS_LOADED. * 20 - Chip reset flow bit. When set, a non-leader must wait for both * engines' leaders to complete (check for both RESET_IN_PROGRESS bits * and not for just the one belonging to its engine).
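 * Worked example (illustrative value): val = 0x00010203 decodes as
 * engine0 load counter = 3, engine1 load counter = 2, and engine0
 * RESET_IN_PROGRESS set (bit 16).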
*/
#define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
#define BXE_PATH0_LOAD_CNT_MASK 0x000000ff
#define BXE_PATH0_LOAD_CNT_SHIFT 0
#define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00
#define BXE_PATH1_LOAD_CNT_SHIFT 8
#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
#define BXE_GLOBAL_RESET_BIT 0x00040000
/* set the GLOBAL_RESET bit, should be run under rtnl lock */
static void bxe_set_reset_global(struct bxe_softc *sc) { uint32_t val; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); }
/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
static void bxe_clear_reset_global(struct bxe_softc *sc) { uint32_t val; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); }
/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
static uint8_t bxe_reset_is_global(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; }
/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
static void bxe_set_reset_done(struct bxe_softc *sc) { uint32_t val; uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); /* Clear the bit */ val &= ~bit; REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); }
/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
static void bxe_set_reset_in_progress(struct bxe_softc *sc) { uint32_t val; uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); /* Set the bit */ val |= bit; REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); }
/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine) { uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; /* return false if bit is set */ return (val & bit) ? FALSE : TRUE; }
/* get the load status for an engine, should be run under rtnl lock */
static uint8_t bxe_get_load_status(struct bxe_softc *sc, int engine) { uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); val = ((val & mask) >> shift); BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); return (val != 0); }
/* set pf load mark */
/* XXX needs to be under rtnl lock */
static void bxe_set_pf_load(struct bxe_softc *sc) { uint32_t val; uint32_t val1; uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = SC_PATH(sc) ?
BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); /* get the current counter value */ val1 = ((val & mask) >> shift); /* set bit of this PF */ val1 |= (1 << SC_ABS_FUNC(sc)); /* clear the old value */ val &= ~mask; /* set the new one */ val |= ((val1 << shift) & mask); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* clear pf load mark */ /* XXX needs to be under rtnl lock */ static uint8_t bxe_clear_pf_load(struct bxe_softc *sc) { uint32_t val1, val; uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); /* get the current counter value */ val1 = (val & mask) >> shift; /* clear bit of that PF */ val1 &= ~(1 << SC_ABS_FUNC(sc)); /* clear the old value */ val &= ~mask; /* set the new one */ val |= ((val1 << shift) & mask); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); return (val1 != 0); } /* send load request to mcp and analyze response */ static int bxe_nic_load_request(struct bxe_softc *sc, uint32_t *load_code) { /* init fw_seq */ sc->fw_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK); BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); /* get the current FW pulse sequence */ sc->fw_drv_pulse_wr_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & DRV_PULSE_SEQ_MASK); BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", sc->fw_drv_pulse_wr_seq); /* load request */ (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, DRV_MSG_CODE_LOAD_REQ_WITH_LFA); /* if the MCP fails to respond we must abort */ if (!(*load_code)) { BLOGE(sc, "MCP response failure!\n"); return (-1); } /* if the MCP refused then we must abort */ if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { BLOGE(sc, "MCP refused load request\n"); return (-1); } return (0); } /* * Check whether another PF has already loaded FW to the chip. In virtualized * environments a pf from another VM may have already initialized the device * including loading FW. */ static int bxe_nic_load_analyze_req(struct bxe_softc *sc, uint32_t load_code) { uint32_t my_fw, loaded_fw; /* is another pf loaded on this engine?
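 * The check below packs the driver's FW version into a single dword with
 * the major revision in bits 7:0, minor in 15:8, revision in 23:16 and
 * engineering in 31:24, then compares it against what is already resident
 * in XSEM_REG_PRAM. As a purely illustrative example (version numbers
 * hypothetical), FW 7.13.11.0 would encode as:
 *
 *   my_fw = 7 + (13 << 8) + (11 << 16) + (0 << 24) = 0x000b0d07
 *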
*/ if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { /* build my FW version dword */ my_fw = (BCM_5710_FW_MAJOR_VERSION + (BCM_5710_FW_MINOR_VERSION << 8 ) + (BCM_5710_FW_REVISION_VERSION << 16) + (BCM_5710_FW_ENGINEERING_VERSION << 24)); /* read loaded FW from chip */ loaded_fw = REG_RD(sc, XSEM_REG_PRAM); BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", loaded_fw, my_fw); /* abort nic load if version mismatch */ if (my_fw != loaded_fw) { BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n", loaded_fw, my_fw); return (-1); } } return (0); } /* mark PMF if applicable */ static void bxe_nic_load_pmf(struct bxe_softc *sc, uint32_t load_code) { uint32_t ncsi_oem_data_addr; if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { /* * Barrier here for ordering between the writing to sc->port.pmf here * and reading it from the periodic task. */ sc->port.pmf = 1; mb(); } else { sc->port.pmf = 0; } BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); /* XXX needed? */ if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); if (ncsi_oem_data_addr) { REG_WR(sc, (ncsi_oem_data_addr + offsetof(struct glob_ncsi_oem_data, driver_version)), 0); } } } } static void bxe_read_mf_cfg(struct bxe_softc *sc) { int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); int abs_func; int vn; if (BXE_NOMCP(sc)) { return; /* what should be the default value in this case? */ } /* * The formula for computing the absolute function number is... * For 2 port configuration (4 functions per port): * abs_func = 2 * vn + SC_PORT + SC_PATH * For 4 port configuration (2 functions per port): * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH */ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); if (abs_func >= E1H_FUNC_MAX) { break; } sc->devinfo.mf_info.mf_config[vn] = MFCFG_RD(sc, func_mf_config[abs_func].config); } if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); sc->flags |= BXE_MF_FUNC_DIS; } else { BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); sc->flags &= ~BXE_MF_FUNC_DIS; } } /* acquire split MCP access lock register */ static int bxe_acquire_alr(struct bxe_softc *sc) { uint32_t j, val; for (j = 0; j < 1000; j++) { val = (1UL << 31); REG_WR(sc, GRCBASE_MCP + 0x9c, val); val = REG_RD(sc, GRCBASE_MCP + 0x9c); if (val & (1L << 31)) break; DELAY(5000); } if (!(val & (1L << 31))) { BLOGE(sc, "Cannot acquire MCP access lock register\n"); return (-1); } return (0); } /* release split MCP access lock register */ static void bxe_release_alr(struct bxe_softc *sc) { REG_WR(sc, GRCBASE_MCP + 0x9c, 0); } static void bxe_fan_failure(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t ext_phy_config; /* mark the failure */ ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, ext_phy_config); /* log the failure */ BLOGW(sc, "Fan Failure has caused the driver to shut down " "the card to prevent permanent damage. 
" "Please contact OEM Support for assistance\n"); /* XXX */ #if 1 bxe_panic(sc, ("Schedule task to handle fan failure\n")); #else /* * Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); schedule_delayed_work(&sc->sp_rtnl_task, 0); #endif } /* this function is called upon a link interrupt */ static void bxe_link_attn(struct bxe_softc *sc) { uint32_t pause_enabled = 0; struct host_port_stats *pstats; int cmng_fns; struct bxe_fastpath *fp; int i; /* Make sure that we are synced with the current statistics */ bxe_stats_handle(sc, STATS_EVENT_STOP); BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags); elink_link_update(&sc->link_params, &sc->link_vars); if (sc->link_vars.link_up) { /* dropless flow control */ if (!CHIP_IS_E1(sc) && sc->dropless_fc) { pause_enabled = 0; if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { pause_enabled = 1; } REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), pause_enabled); } if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { pstats = BXE_SP(sc, port_stats); /* reset old mac stats */ memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); } if (sc->state == BXE_STATE_OPEN) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); /* Restart tx when the link comes back. */ FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; taskqueue_enqueue(fp->tq, &fp->tx_task); } } } if (sc->link_vars.link_up && sc->link_vars.line_speed) { cmng_fns = bxe_get_cmng_fns_mode(sc); if (cmng_fns != CMNG_FNS_NONE) { bxe_cmng_fns_init(sc, FALSE, cmng_fns); storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } else { /* rate shaping and fairness are disabled */ BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); } } bxe_link_report_locked(sc); if (IS_MF(sc)) { ; // XXX bxe_link_sync_notify(sc); } } static void bxe_attn_int_asserted(struct bxe_softc *sc, uint32_t asserted) { int port = SC_PORT(sc); uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : NIG_REG_MASK_INTERRUPT_PORT0; uint32_t aeu_mask; uint32_t nig_mask = 0; uint32_t reg_addr; uint32_t igu_acked; uint32_t cnt; if (sc->attn_state & asserted) { BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, aeu_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", aeu_mask, asserted); aeu_mask &= ~(asserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, aeu_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state |= asserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); if (asserted & ATTN_HARD_WIRED_MASK) { if (asserted & ATTN_NIG_FOR_FUNC) { bxe_acquire_phy_lock(sc); /* save nig interrupt mask */ nig_mask = REG_RD(sc, nig_int_mask_addr); /* If nig_mask is not set, no need to call the update function */ if (nig_mask) { REG_WR(sc, nig_int_mask_addr, 0); bxe_link_attn(sc); } /* handle unicore attn? 
*/ } if (asserted & ATTN_SW_TIMER_4_FUNC) { BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); } if (asserted & GPIO_2_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); } if (asserted & GPIO_3_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); } if (asserted & GPIO_4_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); } if (port == 0) { if (asserted & ATTN_GENERAL_ATTN_1) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); } if (asserted & ATTN_GENERAL_ATTN_2) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); } if (asserted & ATTN_GENERAL_ATTN_3) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); } } else { if (asserted & ATTN_GENERAL_ATTN_4) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); } if (asserted & ATTN_GENERAL_ATTN_5) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); } if (asserted & ATTN_GENERAL_ATTN_6) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); } } } /* hardwired */ if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); } else { reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); } BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", asserted, (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(sc, reg_addr, asserted); /* now set back the mask */ if (asserted & ATTN_NIG_FOR_FUNC) { /* * Verify that IGU ack through BAR was written before restoring * NIG mask. This loop should exit after 2-3 iterations max. */ if (sc->devinfo.int_block != INT_BLOCK_HC) { cnt = 0; do { igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && (++cnt < MAX_IGU_ATTN_ACK_TO)); if (!igu_acked) { BLOGE(sc, "Failed to verify IGU ack on time\n"); } mb(); } REG_WR(sc, nig_int_mask_addr, nig_mask); bxe_release_phy_lock(sc); } } static void bxe_print_next_block(struct bxe_softc *sc, int idx, const char *blk) { BLOGI(sc, "%s%s", idx ? 
", " : "", blk); } static int bxe_check_blocks_with_parity0(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "BRB"); break; case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PARSER"); break; case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSDM"); break; case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "SEARCHER"); break; case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TCM"); break; case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSEMI"); break; case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XPB"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity1(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { int i = 0; uint32_t cur_bit = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PBF"); break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "QM"); break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TM"); break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSDM"); break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XCM"); break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSEMI"); break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DOORBELLQ"); break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "NIG"); break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DEBUG"); break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USDM"); break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UCM"); break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USEMI"); break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UPB"); break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSDM"); break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CCM"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity2(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSEMI"); break; case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 
if (print) bxe_print_next_block(sc, par_num++, "PXP"); break; case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); break; case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CFC"); break; case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CDU"); break; case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DMAE"); break; case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "IGU"); break; case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "MISC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity3(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP ROM"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP RX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP TX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP SCPAD"); *global = TRUE; break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity4(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PGLUE_B"); break; case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "ATC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static uint8_t bxe_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print, uint32_t *sig) { int par_num = 0; if ((sig[0] & HW_PRTY_ASSERT_SET_0) || (sig[1] & HW_PRTY_ASSERT_SET_1) || (sig[2] & HW_PRTY_ASSERT_SET_2) || (sig[3] & HW_PRTY_ASSERT_SET_3) || (sig[4] & HW_PRTY_ASSERT_SET_4)) { BLOGE(sc, "Parity error: HW block parity attention:\n" "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); if (print) BLOGI(sc, "Parity errors detected in blocks: "); par_num = bxe_check_blocks_with_parity0(sc, sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); par_num = bxe_check_blocks_with_parity1(sc, sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); par_num = bxe_check_blocks_with_parity2(sc, sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); par_num = bxe_check_blocks_with_parity3(sc, sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); par_num = bxe_check_blocks_with_parity4(sc, sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); if (print) BLOGI(sc, "\n"); if( *global == TRUE ) { BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL); } return (TRUE); } return (FALSE); } static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print) { 
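    /*
     * The five parity walkers above all share one bit-scan idiom: test the
     * signal word bit by bit, report the block name for every bit that is
     * set, and clear the bit so the loop ends as soon as sig drains to
     * zero. A minimal sketch of that idiom (illustrative, stripped of the
     * per-block switch):
     *
     *     for (i = 0; sig; i++) {
     *         cur_bit = ((uint32_t)0x1 << i);
     *         if (sig & cur_bit) {
     *             par_num++;        // report the block for cur_bit here
     *             sig &= ~cur_bit;  // clear so the loop can terminate
     *         }
     *     }
     */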
struct attn_route attn = { {0} }; int port = SC_PORT(sc); if(sc->state != BXE_STATE_OPEN) return FALSE; attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); /* * Since MCP attentions can't be disabled inside the block, we need to * read AEU registers to see whether they're currently disabled */ attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) & MISC_AEU_ENABLE_MCP_PRTY_BITS) | ~MISC_AEU_ENABLE_MCP_PRTY_BITS); if (!CHIP_IS_E1x(sc)) attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); return (bxe_parity_attn(sc, global, print, attn.sig)); } static void bxe_attn_int_deasserted4(struct bxe_softc *sc, uint32_t attn) { uint32_t val; boolean_t err_flg = FALSE; if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); err_flg = TRUE; if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); } if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); BLOGE(sc, "ATC hw attention 0x%08x\n", val); err_flg = TRUE; if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); } if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { BLOGE(sc, "FATAL parity attention set4 0x%08x\n", (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); err_flg = TRUE; } if (err_flg) { BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } } static void bxe_e1h_disable(struct bxe_softc *sc) { int port = SC_PORT(sc); 
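    /*
     * NIG_REG_LLH0_FUNC_EN is a per-port register with an 8-byte stride,
     * hence the "+ port*8" addressing below (and in bxe_e1h_enable());
     * after bxe_tx_disable() quiesces transmit, writing 0 stops the LLH
     * from steering traffic to this function.
     */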
bxe_tx_disable(sc); REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); } static void bxe_e1h_enable(struct bxe_softc *sc) { int port = SC_PORT(sc); REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); // XXX bxe_tx_enable(sc); } /* * called due to MCP event (on pmf): * reread new bandwidth configuration * configure FW * notify other functions about the change */ static void bxe_config_mf_bw(struct bxe_softc *sc) { if (sc->link_vars.link_up) { bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); // XXX bxe_link_sync_notify(sc); } storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } static void bxe_set_mf_bw(struct bxe_softc *sc) { bxe_config_mf_bw(sc); bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); } static void bxe_handle_eee_event(struct bxe_softc *sc) { BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); } #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 static void bxe_drv_info_ether_stat(struct bxe_softc *sc) { struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; strlcpy(ether_stat->version, BXE_DRIVER_VERSION, ETH_STAT_INFO_VERSION_LEN); /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, ether_stat->mac_local + MAC_PAD, MAC_PAD, ETH_ALEN); ether_stat->mtu_size = sc->mtu; ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; } // XXX ether_stat->feature_flags |= ???; ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; ether_stat->txq_size = sc->tx_ring_size; ether_stat->rxq_size = sc->rx_ring_size; } static void bxe_handle_drv_info_req(struct bxe_softc *sc) { enum drv_info_opcode op_code; uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); /* if the drv_info version supported by the MFW doesn't match - send a NACK */ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> DRV_INFO_CONTROL_OP_CODE_SHIFT); memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); switch (op_code) { case ETH_STATS_OPCODE: bxe_drv_info_ether_stat(sc); break; case FCOE_STATS_OPCODE: case ISCSI_STATS_OPCODE: default: /* if the op code isn't supported - send a NACK */ bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } /* * If we got a drv_info attn from the MFW then these fields are defined in * shmem2 for sure */ SHMEM2_WR(sc, drv_info_host_addr_lo, U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); SHMEM2_WR(sc, drv_info_host_addr_hi, U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); } static void bxe_dcc_event(struct bxe_softc *sc, uint32_t dcc_event) { BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { /* * This is the only place besides the function initialization * where the sc->flags can change so it is done without any * locks */ if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); sc->flags |= BXE_MF_FUNC_DIS; bxe_e1h_disable(sc); } else { BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); sc->flags &= ~BXE_MF_FUNC_DIS; bxe_e1h_enable(sc); } dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; } if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { bxe_config_mf_bw(sc); dcc_event &= 
~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; } /* Report results to MCP */ if (dcc_event) bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); else bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); } static void bxe_pmf_update(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t val; sc->port.pmf = 1; BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); /* * We need the mb() to ensure the ordering between the writing to * sc->port.pmf here and reading it from the bxe_periodic_task(). */ mb(); /* queue a periodic task */ // XXX schedule task... // XXX bxe_dcbx_pmf_update(sc); /* enable nig attention */ val = (0xff0f | (1 << (SC_VN(sc) + 4))); if (sc->devinfo.int_block == INT_BLOCK_HC) { REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); } else if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); } bxe_stats_handle(sc, STATS_EVENT_PMF); } static int bxe_mc_assert(struct bxe_softc *sc) { char last_idx; int i, rc = 0; uint32_t row0, row1, row2, row3; /* XSTORM */ last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* TSTORM */ last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* CSTORM */ last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* USTORM */ last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, 
BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } return (rc); } static void bxe_attn_int_deasserted3(struct bxe_softc *sc, uint32_t attn) { int func = SC_FUNC(sc); uint32_t val; if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { if (attn & BXE_PMF_LINK_ASSERT(sc)) { REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bxe_read_mf_cfg(sc); sc->devinfo.mf_info.mf_config[SC_VN(sc)] = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); if (val & DRV_STATUS_DCC_EVENT_MASK) bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); if (val & DRV_STATUS_SET_MF_BW) bxe_set_mf_bw(sc); if (val & DRV_STATUS_DRV_INFO_REQ) bxe_handle_drv_info_req(sc); if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) bxe_pmf_update(sc); if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) bxe_handle_eee_event(sc); if (sc->link_vars.periodic_flags & ELINK_PERIODIC_FLAGS_LINK_EVENT) { /* sync with link */ bxe_acquire_phy_lock(sc); sc->link_vars.periodic_flags &= ~ELINK_PERIODIC_FLAGS_LINK_EVENT; bxe_release_phy_lock(sc); if (IS_MF(sc)) ; // XXX bxe_link_sync_notify(sc); bxe_link_report(sc); } /* * Always call it here: bxe_link_report() will * prevent the link indication duplication. */ bxe_link_status_update(sc); } else if (attn & BXE_MC_ASSERT_BITS) { BLOGE(sc, "MC assert!\n"); bxe_mc_assert(sc); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); bxe_int_disable(sc); BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } else if (attn & BXE_MCP_ASSERT) { BLOGE(sc, "MCP assert!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); bxe_int_disable(sc); /* avoid repetitive assert alerts */ } else { BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); } } if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); if (attn & BXE_GRC_TIMEOUT) { val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); BLOGE(sc, "GRC time-out 0x%08x\n", val); } if (attn & BXE_GRC_RSV) { val = CHIP_IS_E1(sc) ? 
0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); BLOGE(sc, "GRC reserved 0x%08x\n", val); } REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); } } static void bxe_attn_int_deasserted2(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val0, mask0, val1, mask1; uint32_t val; boolean_t err_flg = FALSE; if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); BLOGE(sc, "CFC hw attention 0x%08x\n", val); /* CFC error attention */ if (val & 0x2) { BLOGE(sc, "FATAL error from CFC\n"); err_flg = TRUE; } } if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); /* RQ_USDMDP_FIFO_OVERFLOW */ if (val & 0x18000) { BLOGE(sc, "FATAL error from PXP\n"); err_flg = TRUE; } if (!CHIP_IS_E1x(sc)) { val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); err_flg = TRUE; } } #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT if (attn & AEU_PXP2_HW_INT_BIT) { /* CQ47854 workaround: do not panic on * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR */ if (!CHIP_IS_E1x(sc)) { mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); /* * If PXP2_EOP_ERROR_BIT is the only bit set in * STS0 and STS1, clear it. * * We probably lose additional attentions between * STS0 and STS_CLR0; in this case the user will not * be notified about them */ if (val0 & mask0 & PXP2_EOP_ERROR_BIT && !(val1 & mask1)) val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); /* print the register, since no one can restore it */ BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); /* * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR * is set then notify */ if (val0 & PXP2_EOP_ERROR_BIT) { BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); err_flg = TRUE; /* * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is * set then clear the attention from the PXP2 block without * panicking */ if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && ((val1 & mask1) == 0)) attn &= ~AEU_PXP2_HW_INT_BIT; } } } if (attn & HW_INTERRUT_ASSERT_SET_2) { reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); REG_WR(sc, reg_offset, val); BLOGE(sc, "FATAL HW block attention set2 0x%x\n", (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); err_flg = TRUE; bxe_panic(sc, ("HW block attention set2\n")); } if(err_flg) { BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } } static void bxe_attn_int_deasserted1(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val; boolean_t err_flg = FALSE; if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); BLOGE(sc, "DB hw attention 0x%08x\n", val); /* DORQ discard attention */ if (val & 0x2) { BLOGE(sc, "FATAL error from DORQ\n"); err_flg = TRUE; } } if (attn & HW_INTERRUT_ASSERT_SET_1) { reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); REG_WR(sc, reg_offset, val); BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); err_flg = TRUE; bxe_panic(sc, ("HW block attention set1\n")); } if(err_flg) { BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } } static void bxe_attn_int_deasserted0(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val; reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { val = REG_RD(sc, reg_offset); val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(sc, reg_offset, val); BLOGW(sc, "SPIO5 hw attention\n"); /* Fan failure attention */ elink_hw_reset_phy(&sc->link_params); bxe_fan_failure(sc); } if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { bxe_acquire_phy_lock(sc); elink_handle_module_detect_int(&sc->link_params); bxe_release_phy_lock(sc); } if (attn & HW_INTERRUT_ASSERT_SET_0) { val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); REG_WR(sc, reg_offset, val); BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", (attn & HW_INTERRUT_ASSERT_SET_0))); } } static void bxe_attn_int_deasserted(struct bxe_softc *sc, uint32_t deasserted) { struct attn_route attn; struct attn_route *group_mask; int port = SC_PORT(sc); int index; uint32_t reg_addr; uint32_t val; uint32_t aeu_mask; uint8_t global = FALSE; /* * Need to take the HW lock because the MCP or the other port might also * try to handle this event. */ bxe_acquire_alr(sc); if (bxe_chk_parity_attn(sc, &global, TRUE)) { /* XXX * In case of parity errors don't handle attentions so that * the other functions can "see" the parity errors. */ // XXX schedule a recovery task... 
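        /*
         * Recovery is deferred: the parity condition is latched via
         * BXE_SET_ERROR_BIT() and the delayed error task scheduled just
         * below (hz/10 ticks, roughly 100ms) drives the actual reset,
         * which is the same reporting path the other attention handlers
         * in this file use for fatal errors.
         */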
/* disable HW interrupts */ bxe_int_disable(sc); BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); bxe_release_alr(sc); return; } attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); if (!CHIP_IS_E1x(sc)) { attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); } else { attn.sig[4] = 0; } BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { if (deasserted & (1 << index)) { group_mask = &sc->attn_group[index]; BLOGD(sc, DBG_INTR, "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, group_mask->sig[0], group_mask->sig[1], group_mask->sig[2], group_mask->sig[3], group_mask->sig[4]); bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); } } bxe_release_alr(sc); if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); } else { reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); } val = ~deasserted; BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", val, (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(sc, reg_addr, val); if (~sc->attn_state & deasserted) { BLOGE(sc, "IGU error\n"); } reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, reg_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", aeu_mask, deasserted); aeu_mask |= (deasserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, reg_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state &= ~deasserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); } static void bxe_attn_int(struct bxe_softc *sc) { /* read local copy of bits */ uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); uint32_t attn_state = sc->attn_state; /* look for changed bits */ uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; uint32_t deasserted = ~attn_bits & attn_ack & attn_state; BLOGD(sc, DBG_INTR, "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", attn_bits, attn_ack, asserted, deasserted); if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { BLOGE(sc, "BAD attention state\n"); } /* handle bits that were raised */ if (asserted) { bxe_attn_int_asserted(sc, asserted); } if (deasserted) { bxe_attn_int_deasserted(sc, deasserted); } } static uint16_t bxe_update_dsb_idx(struct bxe_softc *sc) { struct host_sp_status_block *def_sb = sc->def_sb; uint16_t rc = 0; mb(); /* status block is written to by the chip */ if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; rc |= BXE_DEF_SB_ATT_IDX; } if (sc->def_idx != def_sb->sp_sb.running_index) { sc->def_idx = def_sb->sp_sb.running_index; rc |= BXE_DEF_SB_IDX; } mb(); return (rc); } static inline struct ecore_queue_sp_obj * bxe_cid_to_q_obj(struct bxe_softc *sc, uint32_t cid) { BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); } static void bxe_handle_mcast_eqe(struct bxe_softc *sc) { struct ecore_mcast_ramrod_params rparam; int rc; memset(&rparam, 0, sizeof(rparam)); rparam.mcast_obj = &sc->mcast_obj; BXE_MCAST_LOCK(sc); /* clear pending state for the last command */ sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); /* if there are pending mcast commands - send them */ if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); if (rc < 0) { BLOGD(sc, DBG_SP, "ERROR: Failed to send pending mcast commands (%d)\n", rc); } } BXE_MCAST_UNLOCK(sc); } static void bxe_handle_classification_eqe(struct bxe_softc *sc, union event_ring_elem *elem) { unsigned long ramrod_flags = 0; int rc = 0; uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; struct ecore_vlan_mac_obj *vlan_mac_obj; /* always push next commands out, don't wait here */ bit_set(&ramrod_flags, RAMROD_CONT); switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { case ECORE_FILTER_MAC_PENDING: BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); vlan_mac_obj = &sc->sp_objs[cid].mac_obj; break; case ECORE_FILTER_MCAST_PENDING: BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); /* * This is only relevant for 57710 where multicast MACs are * configured as unicast MACs using the same ramrod. 
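 *
 * Note how the echo field does double duty in this function: the low bits
 * (BXE_SWCID_MASK) carry the connection ID while the bits above
 * BXE_SWCID_SHIFT carry the pending-filter type that this function's
 * switch dispatches on, i.e. (sketch):
 *
 *   cid  = echo & BXE_SWCID_MASK;
 *   type = le32toh(echo) >> BXE_SWCID_SHIFT;
 *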
*/ bxe_handle_mcast_eqe(sc); return; default: BLOGE(sc, "Unsupported classification command: %d\n", elem->message.data.eth_event.echo); return; } rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); if (rc < 0) { BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); } else if (rc > 0) { BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); } } static void bxe_handle_rx_mode_eqe(struct bxe_softc *sc, union event_ring_elem *elem) { bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); /* send the rx_mode command again if it was requested */ if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) { bxe_set_storm_rx_mode(sc); } } static void bxe_update_eq_prod(struct bxe_softc *sc, uint16_t prod) { storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); wmb(); /* keep prod updates ordered */ } static void bxe_eq_int(struct bxe_softc *sc) { uint16_t hw_cons, sw_cons, sw_prod; union event_ring_elem *elem; uint8_t echo; uint32_t cid; uint8_t opcode; int spqe_cnt = 0; struct ecore_queue_sp_obj *q_obj; struct ecore_func_sp_obj *f_obj = &sc->func_obj; struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; hw_cons = le16toh(*sc->eq_cons_sb); /* * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. * When we get to the next-page we need to adjust so the loop * condition below will be met. The next element is the size of a * regular element, hence the increment by 1. */ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { hw_cons++; } /* * This function may never run in parallel with itself for a * specific sc, so there is no need for a read memory barrier here. */ sw_cons = sc->eq_cons; sw_prod = sc->eq_prod; BLOGD(sc, DBG_SP, "EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); for (; sw_cons != hw_cons; sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { elem = &sc->eq[EQ_DESC(sw_cons)]; /* elem CID originates from FW, actually LE */ cid = SW_CID(elem->message.data.cfc_del_event.cid); opcode = elem->message.opcode; /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_STAT_QUERY: BLOGD(sc, DBG_SP, "got statistics completion event %d\n", sc->stats_comp++); /* nothing to do with stats comp */ goto next_spqe; case EVENT_RING_OPCODE_CFC_DEL: /* handle according to cid range */ /* we may want to verify here that the sc state is HALTING */ BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); q_obj = bxe_cid_to_q_obj(sc, cid); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { break; } goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: echo = elem->message.data.function_update_event.echo; if (echo == SWITCH_UPDATE) { BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_SWITCH_UPDATE)) { break; } } else { BLOGD(sc, DBG_SP, "AFEX: ramrod completed FUNCTION_UPDATE\n"); } goto next_spqe; case EVENT_RING_OPCODE_FORWARD_SETUP: q_obj = &bxe_fwd_sp_obj(sc, q_obj); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_SETUP_TX_ONLY)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_START: 
BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_STOP: BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { break; } goto next_spqe; } switch (opcode | sc->state) { case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); rss_raw->clear_pending(rss_raw); break; case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); bxe_handle_classification_eqe(sc, elem); break; case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got mcast ramrod\n"); bxe_handle_mcast_eqe(sc); break; case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); bxe_handle_rx_mode_eqe(sc, elem); break; default: /* unknown event log error and continue */ BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", elem->message.opcode, sc->state); } next_spqe: spqe_cnt++; } /* for */ mb(); atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); sc->eq_cons = sw_cons; sc->eq_prod = sw_prod; /* make sure that above mem writes were issued towards the memory */ wmb(); /* update producer */ bxe_update_eq_prod(sc, sc->eq_prod); } static void bxe_handle_sp_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; uint16_t status; BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); /* what work needs to be performed? */ status = bxe_update_dsb_idx(sc); BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); /* HW attentions */ if (status & BXE_DEF_SB_ATT_IDX) { BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); bxe_attn_int(sc); status &= ~BXE_DEF_SB_ATT_IDX; } /* SP events: STAT_QUERY and others */ if (status & BXE_DEF_SB_IDX) { /* handle EQ completions */ BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); bxe_eq_int(sc); bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, le16toh(sc->def_idx), IGU_INT_NOP, 1); status &= ~BXE_DEF_SB_IDX; } /* if status is non zero then something went wrong */ if (__predict_false(status)) { BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); } /* ack status block only if something was actually handled */ bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); /* * Must be called after the EQ processing (since eq leads to sriov * ramrod completion flows). * This flow may have been scheduled by the arrival of a ramrod * completion, or by the sriov code rescheduling itself. 
*/ // XXX bxe_iov_sp_task(sc); } static void bxe_handle_fp_tq(void *context, int pending) { struct bxe_fastpath *fp = (struct bxe_fastpath *)context; struct bxe_softc *sc = fp->sc; /* uint8_t more_tx = FALSE; */ uint8_t more_rx = FALSE; BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); /* XXX * IFF_DRV_RUNNING state can't be checked here since we process * slowpath events on a client queue during setup. Instead * we need to add a "process/continue" flag here that the driver * can use to tell the task here not to do anything. */ #if 0 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { return; } #endif /* update the fastpath index */ bxe_update_fp_sb_idx(fp); /* XXX add loop here if ever support multiple tx CoS */ /* fp->txdata[cos] */ if (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); /* more_tx = */ bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); } if (bxe_has_rx_work(fp)) { more_rx = bxe_rxeof(sc, fp); } if (more_rx /*|| more_tx*/) { /* still more work to do */ taskqueue_enqueue(fp->tq, &fp->tq_task); return; } bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } static void bxe_task_fp(struct bxe_fastpath *fp) { struct bxe_softc *sc = fp->sc; /* uint8_t more_tx = FALSE; */ uint8_t more_rx = FALSE; BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); /* update the fastpath index */ bxe_update_fp_sb_idx(fp); /* XXX add loop here if ever support multiple tx CoS */ /* fp->txdata[cos] */ if (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); /* more_tx = */ bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); } if (bxe_has_rx_work(fp)) { more_rx = bxe_rxeof(sc, fp); } if (more_rx /*|| more_tx*/) { /* still more work to do, bail out of this ISR and process later */ taskqueue_enqueue(fp->tq, &fp->tq_task); return; } /* * Here we write the fastpath index taken before doing any tx or rx work. * It is quite possible that other hw events occurred up to this point and * were already processed above. Since we are going to write an older * fastpath index, an interrupt may arrive in which we do no work. */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } /* * Legacy interrupt entry point. * * Verifies that the controller generated the interrupt and * then calls a separate routine to handle the various * interrupt causes: link, RX, and TX. 
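 *
 * As a sketch of the decode below: bxe_ack_int() returns a status word in
 * which bit 0 flags the slowpath (attention/default status block) and the
 * fastpath bit for queue i is
 *
 *   mask = 0x2 << (fp->index + CNIC_SUPPORT(sc));
 *
 * so with CNIC support disabled, queue 0 maps to bit 1, queue 1 to bit 2,
 * and so on.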
*/ static void bxe_intr_legacy(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; struct bxe_fastpath *fp; uint16_t status, mask; int i; BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); /* * 0 for ustorm, 1 for cstorm * the bits returned from ack_int() are 0-15 * bit 0 = attention status block * bit 1 = fast path status block * a mask of 0x2 or more = tx/rx event * a mask of 1 = slow path event */ status = bxe_ack_int(sc); /* the interrupt is not for us */ if (__predict_false(status == 0)) { BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); return; } BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); if (status & mask) { /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); status &= ~mask; } } if (__predict_false(status & 0x1)) { /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule the slowpath handler */ taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); status &= ~0x1; } if (__predict_false(status)) { BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); } } /* slowpath interrupt entry point */ static void bxe_intr_sp(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule the slowpath handler */ taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); } /* fastpath interrupt entry point */ static void bxe_intr_fp(void *xfp) { struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; struct bxe_softc *sc = fp->sc; BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); BLOGD(sc, DBG_INTR, "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); } /* Release all interrupts allocated by the driver. */ static void bxe_interrupt_free(struct bxe_softc *sc) { int i; switch (sc->interrupt_mode) { case INTR_MODE_INTX: BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); if (sc->intr[0].resource != NULL) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[0].rid, sc->intr[0].resource); } break; case INTR_MODE_MSI: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; case INTR_MODE_MSIX: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; default: /* nothing to do as the initial allocation failed */ break; } } /* * This function determines and allocates the appropriate * interrupt based on system capabilities and user request. * * The user may force a particular interrupt mode, specify * the number of receive queues, specify the method for * distributing received frames to receive queues, or use * the default settings which will automatically select the * best supported combination. In addition, the OS may or * may not support certain combinations of these settings. 
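 *
 * The allocation below degrades gracefully, roughly (sketch):
 *
 *   MSI-X (one slowpath vector plus one vector per queue)
 *     -> MSI  (a single vector, shared slowpath/fastpath handler)
 *       -> INTx (a single shared legacy interrupt)
 *
 * with sc->intr_count and sc->num_queues trimmed to match whichever level
 * finally succeeds.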
* This routine attempts to reconcile the settings requested * by the user with the capabilities available from the system * to select the optimal combination of features. * * Returns: * 0 = Success, !0 = Failure. */ static int bxe_interrupt_alloc(struct bxe_softc *sc) { int msix_count = 0; int msi_count = 0; int num_requested = 0; int num_allocated = 0; int rid, i, j; int rc; /* get the number of available MSI/MSI-X interrupts from the OS */ if (sc->interrupt_mode > 0) { if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { msix_count = pci_msix_count(sc->dev); } if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { msi_count = pci_msi_count(sc->dev); } BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", msi_count, msix_count); } do { /* try allocating MSI-X interrupt resources (at least 2) */ if (sc->interrupt_mode != INTR_MODE_MSIX) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || (msix_count < 2)) { sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } /* ask for the necessary number of MSI-X vectors */ num_requested = min((sc->num_queues + 1), msix_count); BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } if (num_allocated < 2) { /* possible? */ BLOGE(sc, "MSI-X allocation less than 2!\n"); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", num_requested, num_allocated); /* best effort so use the number of vectors allocated to us */ sc->intr_count = num_allocated; sc->num_queues = num_allocated - 1; rid = 1; /* initial resource identifier */ /* allocate the MSI-X vectors */ for (i = 0; i < num_allocated; i++) { sc->intr[i].rid = (rid + i); if ((sc->intr[i].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[i].rid, RF_ACTIVE)) == NULL) { BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", i, (rid + i)); for (j = (i - 1); j >= 0; j--) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[j].rid, sc->intr[j].resource); } sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); } } while (0); do { /* try allocating MSI vector resources (at least 1) */ if (sc->interrupt_mode != INTR_MODE_MSI) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || (msi_count < 1)) { sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } /* ask for a single MSI vector */ num_requested = 1; BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI alloc failed (%d)!\n", rc); sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } if (num_allocated != 1) { /* possible? 
            BLOGE(sc, "MSI allocation is not 1!\n");
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
              num_requested, num_allocated);

        /* best effort so use the number of vectors allocated to us */
        sc->intr_count = num_allocated;
        sc->num_queues = num_allocated;

        rid = 1; /* initial resource identifier */

        sc->intr[0].rid = rid;

        if ((sc->intr[0].resource =
             bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
                                    &sc->intr[0].rid, RF_ACTIVE)) == NULL) {
            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
            sc->intr_count = 0;
            sc->num_queues = 0;
            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
            pci_release_msi(sc->dev);
            break;
        }

        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
    } while (0);

    do { /* try allocating INTx vector resources */
        if (sc->interrupt_mode != INTR_MODE_INTX) {
            break;
        }

        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");

        /* only one vector for INTx */
        sc->intr_count = 1;
        sc->num_queues = 1;

        rid = 0; /* initial resource identifier */

        sc->intr[0].rid = rid;

        if ((sc->intr[0].resource =
             bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[0].rid,
                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
            sc->intr_count = 0;
            sc->num_queues = 0;
            sc->interrupt_mode = -1; /* Failed! */
            break;
        }

        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
    } while (0);

    if (sc->interrupt_mode == -1) {
        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
        rc = 1;
    } else {
        BLOGD(sc, DBG_LOAD,
              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
              sc->interrupt_mode, sc->num_queues);
        rc = 0;
    }

    return (rc);
}

static void
bxe_interrupt_detach(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    /* release interrupt resources */
    for (i = 0; i < sc->intr_count; i++) {
        if (sc->intr[i].resource && sc->intr[i].tag) {
            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
        }
    }

    /* drain every fastpath taskqueue before freeing any of them */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq) {
            taskqueue_drain(fp->tq, &fp->tq_task);
            taskqueue_drain(fp->tq, &fp->tx_task);
            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
                NULL))
                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
        }
    }

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq != NULL) {
            taskqueue_free(fp->tq);
            fp->tq = NULL;
        }
    }

    if (sc->sp_tq) {
        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
        taskqueue_free(sc->sp_tq);
        sc->sp_tq = NULL;
    }
}

/*
 * Enable interrupts and attach to the ISR.
 *
 * When using multiple MSI/MSI-X vectors the first vector
 * is used for slowpath operations while all remaining
 * vectors are used for fastpath operations. If only a
 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
 * ISR must look for both slowpath and fastpath completions.
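 *
 * For example, in MSI-X mode with num_queues = 4, vector 0 carries
 * slowpath events (bxe_intr_sp), vectors 1-4 carry fastpath events
 * for fp[0]-fp[3] (bxe_intr_fp), and each fastpath vector is bound
 * to its own CPU when more than one queue is active.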
*/ static int bxe_interrupt_attach(struct bxe_softc *sc) { struct bxe_fastpath *fp; int rc = 0; int i; snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), "bxe%d_sp_tq", sc->unit); TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT, taskqueue_thread_enqueue, &sc->sp_tq); taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ "%s", sc->sp_tq_name); for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; snprintf(fp->tq_name, sizeof(fp->tq_name), "bxe%d_fp%d_tq", sc->unit, i); NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp); fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT, taskqueue_thread_enqueue, &fp->tq); TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0, bxe_tx_mq_start_deferred, fp); taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ "%s", fp->tq_name); } /* setup interrupt handlers */ if (sc->interrupt_mode == INTR_MODE_MSIX) { BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); /* * Setup the interrupt handler. Note that we pass the driver instance * to the interrupt handler for the slowpath. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_sp, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); goto bxe_interrupt_attach_exit; } bus_describe_intr(sc->dev, sc->intr[0].resource, sc->intr[0].tag, "sp"); /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ /* initialize the fastpath vectors (note the first was used for sp) */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); /* * Setup the interrupt handler. Note that we pass the * fastpath context to the interrupt handler in this * case. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_fp, fp, &sc->intr[i + 1].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", (i + 1), rc); goto bxe_interrupt_attach_exit; } bus_describe_intr(sc->dev, sc->intr[i + 1].resource, sc->intr[i + 1].tag, "fp%02d", i); /* bind the fastpath instance to a cpu */ if (sc->num_queues > 1) { bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); } fp->state = BXE_FP_STATE_IRQ; } } else if (sc->interrupt_mode == INTR_MODE_MSI) { BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); /* * Setup the interrupt handler. Note that we pass the * driver instance to the interrupt handler which * will handle both the slowpath and fastpath. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_legacy, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); goto bxe_interrupt_attach_exit; } } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); /* * Setup the interrupt handler. Note that we pass the * driver instance to the interrupt handler which * will handle both the slowpath and fastpath. 
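 * bxe_intr_legacy() demultiplexes the status word returned by
 * bxe_ack_int() on its own in this mode, servicing fastpath queues
 * and enqueueing the slowpath task from the single shared vector.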
*/ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_legacy, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); goto bxe_interrupt_attach_exit; } } bxe_interrupt_attach_exit: return (rc); } static int bxe_init_hw_common_chip(struct bxe_softc *sc); static int bxe_init_hw_common(struct bxe_softc *sc); static int bxe_init_hw_port(struct bxe_softc *sc); static int bxe_init_hw_func(struct bxe_softc *sc); static void bxe_reset_common(struct bxe_softc *sc); static void bxe_reset_port(struct bxe_softc *sc); static void bxe_reset_func(struct bxe_softc *sc); static int bxe_gunzip_init(struct bxe_softc *sc); static void bxe_gunzip_end(struct bxe_softc *sc); static int bxe_init_firmware(struct bxe_softc *sc); static void bxe_release_firmware(struct bxe_softc *sc); static struct ecore_func_sp_drv_ops bxe_func_sp_drv = { .init_hw_cmn_chip = bxe_init_hw_common_chip, .init_hw_cmn = bxe_init_hw_common, .init_hw_port = bxe_init_hw_port, .init_hw_func = bxe_init_hw_func, .reset_hw_cmn = bxe_reset_common, .reset_hw_port = bxe_reset_port, .reset_hw_func = bxe_reset_func, .gunzip_init = bxe_gunzip_init, .gunzip_end = bxe_gunzip_end, .init_fw = bxe_init_firmware, .release_fw = bxe_release_firmware, }; static void bxe_init_func_obj(struct bxe_softc *sc) { sc->dmae_ready = 0; ecore_init_func_obj(sc, &sc->func_obj, BXE_SP(sc, func_rdata), BXE_SP_MAPPING(sc, func_rdata), BXE_SP(sc, func_afex_rdata), BXE_SP_MAPPING(sc, func_afex_rdata), &bxe_func_sp_drv); } static int bxe_init_hw(struct bxe_softc *sc, uint32_t load_code) { struct ecore_func_state_params func_params = { NULL }; int rc; /* prepare the parameters for function state transitions */ bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_HW_INIT; func_params.params.hw_init.load_phase = load_code; /* * Via a plethora of function pointers, we will eventually reach * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
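 * Which of them runs is selected by the load_phase set above:
 * FW_MSG_CODE_DRV_LOAD_COMMON (and _COMMON_CHIP) drive the common
 * block init, _PORT the per-port init and _FUNCTION the per-function
 * init, mirroring the switch in bxe_init_internal() below.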
*/ rc = ecore_func_state_change(sc, &func_params); return (rc); } static void bxe_fill(struct bxe_softc *sc, uint32_t addr, int fill, uint32_t len) { uint32_t i; if (!(len % 4) && !(addr % 4)) { for (i = 0; i < len; i += 4) { REG_WR(sc, (addr + i), fill); } } else { for (i = 0; i < len; i++) { REG_WR8(sc, (addr + i), fill); } } } /* writes FP SP data to FW - data_size in dwords */ static void bxe_wr_fp_sb_data(struct bxe_softc *sc, int fw_sb_id, uint32_t *sb_data_p, uint32_t data_size) { int index; for (index = 0; index < data_size; index++) { REG_WR(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + (sizeof(uint32_t) * index)), *(sb_data_p + index)); } } static void bxe_zero_fp_sb(struct bxe_softc *sc, int fw_sb_id) { struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; uint32_t *sb_data_p; uint32_t data_size = 0; if (!CHIP_IS_E1x(sc)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); sb_data_e2.common.state = SB_DISABLED; sb_data_e2.common.p_func.vf_valid = FALSE; sb_data_p = (uint32_t *)&sb_data_e2; data_size = (sizeof(struct hc_status_block_data_e2) / sizeof(uint32_t)); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); sb_data_e1x.common.state = SB_DISABLED; sb_data_e1x.common.p_func.vf_valid = FALSE; sb_data_p = (uint32_t *)&sb_data_e1x; data_size = (sizeof(struct hc_status_block_data_e1x) / sizeof(uint32_t)); } bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0, CSTORM_STATUS_BLOCK_SIZE); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 0, CSTORM_SYNC_BLOCK_SIZE); } static void bxe_wr_sp_sb_data(struct bxe_softc *sc, struct hc_sp_status_block_data *sp_sb_data) { int i; for (i = 0; i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); i++) { REG_WR(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + (i * sizeof(uint32_t))), *((uint32_t *)sp_sb_data + i)); } } static void bxe_zero_sp_sb(struct bxe_softc *sc) { struct hc_sp_status_block_data sp_sb_data; memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); sp_sb_data.state = SB_DISABLED; sp_sb_data.p_func.vf_valid = FALSE; bxe_wr_sp_sb_data(sc, &sp_sb_data); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 0, CSTORM_SP_STATUS_BLOCK_SIZE); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 0, CSTORM_SP_SYNC_BLOCK_SIZE); } static void bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, int igu_seg_id) { hc_sm->igu_sb_id = igu_sb_id; hc_sm->igu_seg_id = igu_seg_id; hc_sm->timer_value = 0xFF; hc_sm->time_to_expire = 0xFFFFFFFF; } static void bxe_map_sb_state_machines(struct hc_index_data *index_data) { /* zero out state machine indices */ /* rx indices */ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; /* tx indices */ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; /* map indices */ /* rx indices */ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); /* tx indices */ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= (SM_TX_ID << 
HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); } static void bxe_init_sb(struct bxe_softc *sc, bus_addr_t busaddr, int vfid, uint8_t vf_valid, int fw_sb_id, int igu_sb_id) { struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; struct hc_status_block_sm *hc_sm_p; uint32_t *sb_data_p; int igu_seg_id; int data_size; if (CHIP_INT_MODE_IS_BC(sc)) { igu_seg_id = HC_SEG_ACCESS_NORM; } else { igu_seg_id = IGU_SEG_ACCESS_NORM; } bxe_zero_fp_sb(sc, fw_sb_id); if (!CHIP_IS_E1x(sc)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); sb_data_e2.common.state = SB_ENABLED; sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); sb_data_e2.common.p_func.vf_id = vfid; sb_data_e2.common.p_func.vf_valid = vf_valid; sb_data_e2.common.p_func.vnic_id = SC_VN(sc); sb_data_e2.common.same_igu_sb_1b = TRUE; sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); hc_sm_p = sb_data_e2.common.state_machine; sb_data_p = (uint32_t *)&sb_data_e2; data_size = (sizeof(struct hc_status_block_data_e2) / sizeof(uint32_t)); bxe_map_sb_state_machines(sb_data_e2.index_data); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); sb_data_e1x.common.state = SB_ENABLED; sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); sb_data_e1x.common.p_func.vf_id = 0xff; sb_data_e1x.common.p_func.vf_valid = FALSE; sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); sb_data_e1x.common.same_igu_sb_1b = TRUE; sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); hc_sm_p = sb_data_e1x.common.state_machine; sb_data_p = (uint32_t *)&sb_data_e1x; data_size = (sizeof(struct hc_status_block_data_e1x) / sizeof(uint32_t)); bxe_map_sb_state_machines(sb_data_e1x.index_data); } bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); /* write indices to HW - PCI guarantees endianity of regpairs */ bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); } static inline uint8_t bxe_fp_qzone_id(struct bxe_fastpath *fp) { if (CHIP_IS_E1x(fp->sc)) { return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); } else { return (fp->cl_id); } } static inline uint32_t bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, struct bxe_fastpath *fp) { uint32_t offset = BAR_USTRORM_INTMEM; if (!CHIP_IS_E1x(sc)) { offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); } else { offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); } return (offset); } static void bxe_init_eth_fp(struct bxe_softc *sc, int idx) { struct bxe_fastpath *fp = &sc->fp[idx]; uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; unsigned long q_type = 0; int cos; fp->sc = sc; fp->index = idx; fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); fp->cl_id = (CHIP_IS_E1x(sc)) ? 
                 (SC_L_ID(sc) + idx) :
                 /* want client ID same as IGU SB ID for non-E1 */
                 fp->igu_sb_id;
    fp->cl_qzone_id = bxe_fp_qzone_id(fp);

    /* setup sb indices */
    if (!CHIP_IS_E1x(sc)) {
        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
    } else {
        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
    }

    /* init shortcut */
    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);

    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];

    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
     */
    for (cos = 0; cos < sc->max_cos; cos++) {
        cids[cos] = idx;
    }
    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];

    /* nothing more for a VF to do */
    if (IS_VF(sc)) {
        return;
    }

    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
                fp->fw_sb_id, fp->igu_sb_id);

    bxe_update_fp_sb_idx(fp);

    /* Configure Queue State object */
    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);

    ecore_init_queue_obj(sc,
                         &sc->sp_objs[idx].q_obj,
                         fp->cl_id,
                         cids,
                         sc->max_cos,
                         SC_FUNC(sc),
                         BXE_SP(sc, q_rdata),
                         BXE_SP_MAPPING(sc, q_rdata),
                         q_type);

    /* configure classification DBs */
    ecore_init_mac_obj(sc,
                       &sc->sp_objs[idx].mac_obj,
                       fp->cl_id,
                       idx,
                       SC_FUNC(sc),
                       BXE_SP(sc, mac_rdata),
                       BXE_SP_MAPPING(sc, mac_rdata),
                       ECORE_FILTER_MAC_PENDING,
                       &sc->sp_state,
                       ECORE_OBJ_TYPE_RX_TX,
                       &sc->macs_pool);

    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id,
          fp->igu_sb_id);
}

static inline void
bxe_update_rx_prod(struct bxe_softc    *sc,
                   struct bxe_fastpath *fp,
                   uint16_t            rx_bd_prod,
                   uint16_t            rx_cq_prod,
                   uint16_t            rx_sge_prod)
{
    struct ustorm_eth_rx_producers rx_prods = { 0 };
    uint32_t i;

    /* update producers */
    rx_prods.bd_prod  = rx_bd_prod;
    rx_prods.cqe_prod = rx_cq_prod;
    rx_prods.sge_prod = rx_sge_prod;

    /*
     * Make sure that the BD and SGE data is updated before updating the
     * producers since FW might read the BD/SGE right after the producer
     * is updated.
     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since the FW
     * assumes BDs must have buffers.
     */
    wmb();

    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
        REG_WR(sc, (fp->ustorm_rx_prods_offset + (i * 4)),
               ((uint32_t *)&rx_prods)[i]);
    }

    wmb(); /* keep prod updates ordered */

    BLOGD(sc, DBG_RX,
          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
}

static void
bxe_init_rx_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        fp->rx_bd_cons = 0;

        /*
         * Activate the BD ring...
* Warning, this will generate an interrupt (to the TSTORM) * so this can only be done after the chip is initialized */ bxe_update_rx_prod(sc, fp, fp->rx_bd_prod, fp->rx_cq_prod, fp->rx_sge_prod); if (i != 0) { continue; } if (CHIP_IS_E1(sc)) { REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), U64_LO(fp->rcq_dma.paddr)); REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), U64_HI(fp->rcq_dma.paddr)); } } } static void bxe_init_tx_ring_one(struct bxe_fastpath *fp) { SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1); fp->tx_db.data.zero_fill1 = 0; fp->tx_db.data.prod = 0; fp->tx_pkt_prod = 0; fp->tx_pkt_cons = 0; fp->tx_bd_prod = 0; fp->tx_bd_cons = 0; fp->eth_q_stats.tx_pkts = 0; } static inline void bxe_init_tx_rings(struct bxe_softc *sc) { int i; for (i = 0; i < sc->num_queues; i++) { bxe_init_tx_ring_one(&sc->fp[i]); } } static void bxe_init_def_sb(struct bxe_softc *sc) { struct host_sp_status_block *def_sb = sc->def_sb; bus_addr_t mapping = sc->def_sb_dma.paddr; int igu_sp_sb_index; int igu_seg_id; int port = SC_PORT(sc); int func = SC_FUNC(sc); int reg_offset, reg_offset_en5; uint64_t section; int index, sindex; struct hc_sp_status_block_data sp_sb_data; memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); if (CHIP_INT_MODE_IS_BC(sc)) { igu_sp_sb_index = DEF_SB_IGU_ID; igu_seg_id = HC_SEG_ACCESS_DEF; } else { igu_sp_sb_index = sc->igu_dsb_id; igu_seg_id = IGU_SEG_ACCESS_DEF; } /* attentions */ section = ((uint64_t)mapping + offsetof(struct host_sp_status_block, atten_status_block)); def_sb->atten_status_block.status_block_id = igu_sp_sb_index; sc->attn_state = 0; reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { /* take care of sig[0]..sig[4] */ for (sindex = 0; sindex < 4; sindex++) { sc->attn_group[index].sig[sindex] = REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); } if (!CHIP_IS_E1x(sc)) { /* * enable5 is separate from the rest of the registers, * and the address skip is 4 and not 16 between the * different groups */ sc->attn_group[index].sig[4] = REG_RD(sc, (reg_offset_en5 + (0x4 * index))); } else { sc->attn_group[index].sig[4] = 0; } } if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_offset = (port) ? 
HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L; REG_WR(sc, reg_offset, U64_LO(section)); REG_WR(sc, (reg_offset + 4), U64_HI(section)); } else if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); } section = ((uint64_t)mapping + offsetof(struct host_sp_status_block, sp_sb)); bxe_zero_sp_sb(sc); /* PCI guarantees endianity of regpair */ sp_sb_data.state = SB_ENABLED; sp_sb_data.host_sb_addr.lo = U64_LO(section); sp_sb_data.host_sb_addr.hi = U64_HI(section); sp_sb_data.igu_sb_id = igu_sp_sb_index; sp_sb_data.igu_seg_id = igu_seg_id; sp_sb_data.p_func.pf_id = func; sp_sb_data.p_func.vnic_id = SC_VN(sc); sp_sb_data.p_func.vf_id = 0xff; bxe_wr_sp_sb_data(sc, &sp_sb_data); bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); } static void bxe_init_sp_ring(struct bxe_softc *sc) { atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); sc->spq_prod_idx = 0; sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; sc->spq_prod_bd = sc->spq; sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); } static void bxe_init_eq_ring(struct bxe_softc *sc) { union event_ring_elem *elem; int i; for (i = 1; i <= NUM_EQ_PAGES; i++) { elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); } sc->eq_cons = 0; sc->eq_prod = NUM_EQ_DESC; sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; atomic_store_rel_long(&sc->eq_spq_left, (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), NUM_EQ_DESC) - 1)); } static void bxe_init_internal_common(struct bxe_softc *sc) { int i; /* * Zero this manually as its initialization is currently missing * in the initTool. */ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 0); } if (!CHIP_IS_E1x(sc)) { REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE); } } static void bxe_init_internal(struct bxe_softc *sc, uint32_t load_code) { switch (load_code) { case FW_MSG_CODE_DRV_LOAD_COMMON: case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: bxe_init_internal_common(sc); /* no break */ case FW_MSG_CODE_DRV_LOAD_PORT: /* nothing to do */ /* no break */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: /* internal memory per function is initialized inside bxe_pf_init */ break; default: BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code); break; } } static void storm_memset_func_cfg(struct bxe_softc *sc, struct tstorm_eth_function_common_config *tcfg, uint16_t abs_fid) { uint32_t addr; size_t size; addr = (BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); size = sizeof(struct tstorm_eth_function_common_config); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg); } static void bxe_func_init(struct bxe_softc *sc, struct bxe_func_init_params *p) { struct tstorm_eth_function_common_config tcfg = { 0 }; if (CHIP_IS_E1x(sc)) { storm_memset_func_cfg(sc, &tcfg, p->func_id); } /* Enable the function in the FW */ storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); storm_memset_func_en(sc, p->func_id, 1); /* spq */ if (p->func_flgs & FUNC_FLG_SPQ) { storm_memset_spq_addr(sc, p->spq_map, p->func_id); REG_WR(sc, (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod); } } /* * Calculates the sum of vn_min_rates. 
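 * (For example: a FUNC_MF_CFG_MIN_BW field that decodes to 25 yields
 * vn_min_rate = 25 * 100 = 2500, a hidden VN yields 0, and a visible
 * VN with the field at 0 is bumped to DEF_MIN_RATE.)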
 * It's needed for further normalizing of the min_rates.
 *
 * The results are stored in input->vnic_min_rate[]. Fairness is
 * deactivated if ETS is enabled or if all of the configured min rates
 * are 0; a non-hidden VN whose min rate is 0 is bumped to DEF_MIN_RATE.
 */
static void
bxe_calc_vn_min(struct bxe_softc       *sc,
                struct cmng_init_input *input)
{
    uint32_t vn_cfg;
    uint32_t vn_min_rate;
    int all_zero = 1;
    int vn;

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);

        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
            /* skip hidden VNs */
            vn_min_rate = 0;
        } else if (!vn_min_rate) {
            /* If min rate is zero - set it to DEF_MIN_RATE (100) */
            vn_min_rate = DEF_MIN_RATE;
        } else {
            all_zero = 0;
        }

        input->vnic_min_rate[vn] = vn_min_rate;
    }

    /* if ETS or all min rates are zeros - disable fairness */
    if (BXE_IS_ETS_ENABLED(sc)) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
    } else if (all_zero) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD,
              "Fairness disabled (all MIN values are zeroes)\n");
    } else {
        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
    }
}

static inline uint16_t
bxe_extract_max_cfg(struct bxe_softc *sc,
                    uint32_t         mf_cfg)
{
    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                        FUNC_MF_CFG_MAX_BW_SHIFT);

    if (!max_cfg) {
        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
        max_cfg = 100;
    }

    return (max_cfg);
}

static void
bxe_calc_vn_max(struct bxe_softc       *sc,
                int                    vn,
                struct cmng_init_input *input)
{
    uint16_t vn_max_rate;
    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
    uint32_t max_cfg;

    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
        vn_max_rate = 0;
    } else {
        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);

        if (IS_MF_SI(sc)) {
            /* max_cfg in percents of linkspeed */
            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
        } else { /* SD modes */
            /* max_cfg is absolute in 100Mb units */
            vn_max_rate = (max_cfg * 100);
        }
    }

    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

    input->vnic_max_rate[vn] = vn_max_rate;
}

static void
bxe_cmng_fns_init(struct bxe_softc *sc,
                  uint8_t          read_cfg,
                  uint8_t          cmng_type)
{
    struct cmng_init_input input;
    int vn;

    memset(&input, 0, sizeof(struct cmng_init_input));

    input.port_rate = sc->link_vars.line_speed;

    if (cmng_type == CMNG_FNS_MINMAX) {
        /* read mf conf from shmem */
        if (read_cfg) {
            bxe_read_mf_cfg(sc);
        }

        /* get VN min rate and enable fairness if not 0 */
        bxe_calc_vn_min(sc, &input);

        /* get VN max rate */
        if (sc->port.pmf) {
            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
                bxe_calc_vn_max(sc, vn, &input);
            }
        }

        /* always enable rate shaping and fairness */
        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

        ecore_init_cmng(&input, &sc->cmng);
        return;
    }

    /* rate shaping and fairness are disabled */
    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
}

static int
bxe_get_cmng_fns_mode(struct bxe_softc *sc)
{
    if (CHIP_REV_IS_SLOW(sc)) {
        return (CMNG_FNS_NONE);
    }

    if (IS_MF(sc)) {
        return (CMNG_FNS_MINMAX);
    }

    return (CMNG_FNS_NONE);
}

static void
storm_memset_cmng(struct bxe_softc *sc,
                  struct cmng_init *cmng,
                  uint8_t          port)
{
    int vn;
    int func;
    uint32_t addr;
    size_t size;

    addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
    size = sizeof(struct cmng_struct_per_port);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        func = func_by_vn(sc, vn);
        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct rate_shaping_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct fairness_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
    }
}

static void
bxe_pf_init(struct bxe_softc *sc)
{
    struct bxe_func_init_params func_init = { 0 };
    struct event_ring_data eq_data = { { 0 } };
    uint16_t flags;

    if (!CHIP_IS_E1x(sc)) {
        /* reset IGU PF statistics: MSIX + ATTN */
        /* PF */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
        /* ATTN */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
    }

    /* function setup flags */
    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

    /*
     * This flag is relevant for E1x only.
     * E2 doesn't have a TPA configuration in a function level.
     */
    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;

    func_init.func_flgs = flags;
    func_init.pf_id     = SC_FUNC(sc);
    func_init.func_id   = SC_FUNC(sc);
    func_init.spq_map   = sc->spq_dma.paddr;
    func_init.spq_prod  = sc->spq_prod_idx;

    bxe_func_init(sc, &func_init);

    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));

    /*
     * Congestion management values depend on the link rate.
     * There is no active link so initial link rate is set to 10Gbps.
     * When the link comes up the congestion management values are
     * re-calculated according to the actual link rate.
     */
    sc->link_vars.line_speed = SPEED_10000;
    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));

    /* Only the PMF sets the HW */
    if (sc->port.pmf) {
        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
    }

    /* init Event Queue - PCI bus guarantees correct endianness */
    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
    eq_data.producer     = sc->eq_prod;
    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
    eq_data.sb_id        = DEF_SB_ID;
    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
}

static void
bxe_hc_int_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;

    if (msix) {
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0);
        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        if (single_msix) {
            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
        }
    } else if (msi) {
        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_INT_LINE_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        if (!CHIP_IS_E1(sc)) {
            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
                  val, port, addr);
            REG_WR(sc, addr, val);
            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }
    }

    if (CHIP_IS_E1(sc)) {
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
          val, port, addr,
          ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, addr, val);

    /* ensure that HC_CONFIG is written before leading/trailing edge config */
    mb();

    if (!CHIP_IS_E1(sc)) {
        /* init leading/trailing edge */
        if (IS_MF(sc)) {
            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
            if (sc->port.pmf) {
                /* enable nig and gpio3 attention */
                val |= 0x1100;
            }
        } else {
            val = 0xffff;
        }

        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
    }

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}

static void
bxe_igu_int_enable(struct bxe_softc *sc)
{
    uint32_t val;
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;

    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    if (msix) {
        val &= ~(IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_SINGLE_ISR_EN);
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN);
        if (single_msix) {
            val |= IGU_PF_CONF_SINGLE_ISR_EN;
        }
    } else if (msi) {
        val &= ~IGU_PF_CONF_INT_LINE_EN;
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    } else {
        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
        val |= (IGU_PF_CONF_INT_LINE_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    }

    /* clean previous status - need to configure igu prior to ack */
    if ((!msix) || single_msix) {
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
        bxe_ack_int(sc);
    }

    val |= IGU_PF_CONF_FUNC_EN;

    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);

    mb();

    /* init leading/trailing edge */
    if (IS_MF(sc)) {
        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
        if (sc->port.pmf) {
            /* enable nig and gpio3 attention */
            val |= 0x1100;
        }
    } else {
        val = 0xffff;
    }

    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}

static void
bxe_int_enable(struct bxe_softc *sc)
{
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        bxe_hc_int_enable(sc);
    } else {
        bxe_igu_int_enable(sc);
    }
}

static void
bxe_hc_int_disable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);

    /*
     * In E1 we must use only PCI configuration space to disable MSI/MSIX
     * capability.
     * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
     */
    if (CHIP_IS_E1(sc)) {
        /*
         * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask
         * register to prevent the HC from sending interrupts after we
         * exit the function.
         */
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, addr, val);
    if (REG_RD(sc, addr) != val) {
        BLOGE(sc, "proper val not read from HC IGU!\n");
    }
}

static void
bxe_igu_int_disable(struct bxe_softc *sc)
{
    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
             IGU_PF_CONF_INT_LINE_EN |
             IGU_PF_CONF_ATTN_BIT_EN);

    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
        BLOGE(sc, "proper val not read from IGU!\n");
    }
}

static void
bxe_int_disable(struct bxe_softc *sc)
{
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        bxe_hc_int_disable(sc);
    } else {
        bxe_igu_int_disable(sc);
    }
}

static void
bxe_nic_init(struct bxe_softc *sc,
             int              load_code)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        bxe_init_eth_fp(sc, i);
    }

    rmb(); /* ensure status block indices were read */

    bxe_init_rx_rings(sc);
    bxe_init_tx_rings(sc);

    if (IS_VF(sc)) {
        return;
    }

    /* initialize MOD_ABS interrupts */
    elink_init_mod_abs_int(sc, &sc->link_vars, sc->devinfo.chip_id,
                           sc->devinfo.shmem_base, sc->devinfo.shmem2_base,
                           SC_PORT(sc));

    bxe_init_def_sb(sc);
    bxe_update_dsb_idx(sc);
    bxe_init_sp_ring(sc);
    bxe_init_eq_ring(sc);
    bxe_init_internal(sc, load_code);
    bxe_pf_init(sc);
    bxe_stats_init(sc);

    /* flush all before enabling interrupts */
    mb();

    bxe_int_enable(sc);

    /* check for SPIO5 */
    bxe_attn_int_deasserted0(sc,
                             REG_RD(sc,
                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
                                     SC_PORT(sc)*4)) &
                             AEU_INPUTS_ATTN_BITS_SPIO5);
}

static inline void
bxe_init_objs(struct bxe_softc *sc)
{
    /* mcast rules must be added to tx if tx switching is enabled */
    ecore_obj_type o_type =
        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
                                         ECORE_OBJ_TYPE_RX;

    /* RX_MODE controlling object */
    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);

    /* multicast configuration controlling object */
    ecore_init_mcast_obj(sc,
                         &sc->mcast_obj,
                         sc->fp[0].cl_id,
                         sc->fp[0].index,
                         SC_FUNC(sc),
                         SC_FUNC(sc),
                         BXE_SP(sc, mcast_rdata),
                         BXE_SP_MAPPING(sc, mcast_rdata),
                         ECORE_FILTER_MCAST_PENDING,
                         &sc->sp_state,
                         o_type);

    /* Setup CAM credit pools */
    ecore_init_mac_credit_pool(sc,
                               &sc->macs_pool,
                               SC_FUNC(sc),
                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                 VNICS_PER_PATH(sc));

    ecore_init_vlan_credit_pool(sc,
                                &sc->vlans_pool,
                                SC_ABS_FUNC(sc) >> 1,
                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                  VNICS_PER_PATH(sc));

    /* RSS configuration object */
    ecore_init_rss_config_obj(sc,
                              &sc->rss_conf_obj,
                              sc->fp[0].cl_id,
                              sc->fp[0].index,
                              SC_FUNC(sc),
                              SC_FUNC(sc),
                              BXE_SP(sc, rss_rdata),
                              BXE_SP_MAPPING(sc, rss_rdata),
                              ECORE_FILTER_RSS_CONF_PENDING,
                              &sc->sp_state,
                              ECORE_OBJ_TYPE_RX);
}

/*
 * Initialize the function. This must be called before sending CLIENT_SETUP
 * for the first client.
 */
static inline int
bxe_func_start(struct bxe_softc *sc)
{
    struct ecore_func_state_params func_params = { NULL };
    struct ecore_func_start_params *start_params =
        &func_params.params.start;

    /* Prepare parameters for function state transitions */
    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);

    func_params.f_obj = &sc->func_obj;
    func_params.cmd = ECORE_F_CMD_START;

    /* Function parameters */
    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
    start_params->sd_vlan_tag = OVLAN(sc);

    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
        start_params->network_cos_mode = STATIC_COS;
    } else { /* CHIP_IS_E1X */
        start_params->network_cos_mode = FW_WRR;
    }

    //start_params->gre_tunnel_mode = 0;
    //start_params->gre_tunnel_rss  = 0;

    return (ecore_func_state_change(sc, &func_params));
}

static int
bxe_set_power_state(struct bxe_softc *sc,
                    uint8_t          state)
{
    uint16_t pmcsr;

    /* If there is no power capability, silently succeed */
    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
        BLOGW(sc, "No power capability\n");
        return (0);
    }

    pmcsr = pci_read_config(sc->dev,
                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
                            2);

    switch (state) {
    case PCI_PM_D0:
        pci_write_config(sc->dev,
                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);

        if (pmcsr & PCIM_PSTAT_DMASK) {
            /* delay required during transition out of D3hot */
            DELAY(20000);
        }

        break;

    case PCI_PM_D3hot:
        /* XXX if there are other clients above don't shut down the power */

        /* don't shut down the power for emulation and FPGA */
        if (CHIP_REV_IS_SLOW(sc)) {
            return (0);
        }

        pmcsr &= ~PCIM_PSTAT_DMASK;
        pmcsr |= PCIM_PSTAT_D3;

        if (sc->wol) {
            pmcsr |= PCIM_PSTAT_PMEENABLE;
        }

        pci_write_config(sc->dev,
                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
                         pmcsr, 4);

        /*
         * No more memory access after this point until device is brought back
         * to D0 state.
         */
        break;

    default:
        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
              state, pmcsr);
        return (-1);
    }

    return (0);
}

/* return true if succeeded to acquire the lock */
static uint8_t
bxe_trylock_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);

    /* Validating that the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGD(sc, DBG_LOAD,
              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
              resource, HW_LOCK_MAX_RESOURCE_VALUE);
        return (FALSE);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
    } else {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
    }

    /* try to acquire the lock */
    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        return (TRUE);
    }

    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
          "lock_status 0x%x resource_bit 0x%x\n",
          resource, func, lock_status, resource_bit);

    return (FALSE);
}

/*
 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only 2 engines are supported.
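 * Path (engine) 0 takes HW_LOCK_RESOURCE_RECOVERY_LEADER_0 and path 1
 * takes HW_LOCK_RESOURCE_RECOVERY_LEADER_1, so each engine arbitrates
 * its own leader lock.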
*/ static int bxe_get_leader_lock_resource(struct bxe_softc *sc) { if (SC_PATH(sc)) { return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1); } else { return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0); } } /* try to acquire a leader lock for current engine */ static uint8_t bxe_trylock_leader_lock(struct bxe_softc *sc) { return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))); } static int bxe_release_leader_lock(struct bxe_softc *sc) { return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc))); } /* close gates #2, #3 and #4 */ static void bxe_set_234_gates(struct bxe_softc *sc, uint8_t close) { uint32_t val; /* gates #2 and #4a are closed/opened for "not E1" only */ if (!CHIP_IS_E1(sc)) { /* #4 */ REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close); /* #2 */ REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); } /* #3 */ if (CHIP_IS_E1x(sc)) { /* prevent interrupts from HC on both ports */ val = REG_RD(sc, HC_REG_CONFIG_1); REG_WR(sc, HC_REG_CONFIG_1, (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); val = REG_RD(sc, HC_REG_CONFIG_0); REG_WR(sc, HC_REG_CONFIG_0, (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); } else { /* Prevent incoming interrupts in IGU */ val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, (!close) ? (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); } BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); wmb(); } /* poll for pending writes bit, it should get cleared in no more than 1s */ static int bxe_er_poll_igu_vq(struct bxe_softc *sc) { uint32_t cnt = 1000; uint32_t pend_bits = 0; do { pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); if (pend_bits == 0) { break; } DELAY(1000); } while (--cnt > 0); if (cnt == 0) { BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); return (-1); } return (0); } #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ static void bxe_clp_reset_prep(struct bxe_softc *sc, uint32_t *magic_val) { /* Do some magic... */ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); *magic_val = val & SHARED_MF_CLP_MAGIC; MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); } /* restore the value of the 'magic' bit */ static void bxe_clp_reset_done(struct bxe_softc *sc, uint32_t magic_val) { /* Restore the 'magic' bit value... 
*/ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); MFCFG_WR(sc, shared_mf_config.clp_mb, (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); } /* prepare for MCP reset, takes care of CLP configurations */ static void bxe_reset_mcp_prep(struct bxe_softc *sc, uint32_t *magic_val) { uint32_t shmem; uint32_t validity_offset; /* set `magic' bit in order to save MF config */ if (!CHIP_IS_E1(sc)) { bxe_clp_reset_prep(sc, magic_val); } /* get shmem offset */ shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); validity_offset = offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); /* Clear validity map flags */ if (shmem > 0) { REG_WR(sc, shmem + validity_offset, 0); } } #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ #define MCP_ONE_TIMEOUT 100 /* 100 ms */ static void bxe_mcp_wait_one(struct bxe_softc *sc) { /* special handling for emulation and FPGA (10 times longer) */ if (CHIP_REV_IS_SLOW(sc)) { DELAY((MCP_ONE_TIMEOUT*10) * 1000); } else { DELAY((MCP_ONE_TIMEOUT) * 1000); } } /* initialize shmem_base and waits for validity signature to appear */ static int bxe_init_shmem(struct bxe_softc *sc) { int cnt = 0; uint32_t val = 0; do { sc->devinfo.shmem_base = sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); if (sc->devinfo.shmem_base) { val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); if (val & SHR_MEM_VALIDITY_MB) return (0); } bxe_mcp_wait_one(sc); } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); BLOGE(sc, "BAD MCP validity signature\n"); return (-1); } static int bxe_reset_mcp_comp(struct bxe_softc *sc, uint32_t magic_val) { int rc = bxe_init_shmem(sc); /* Restore the `magic' bit value */ if (!CHIP_IS_E1(sc)) { bxe_clp_reset_done(sc, magic_val); } return (rc); } static void bxe_pxp_prep(struct bxe_softc *sc) { if (!CHIP_IS_E1(sc)) { REG_WR(sc, PXP2_REG_RD_START_INIT, 0); REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); wmb(); } } /* * Reset the whole chip except for: * - PCIE core * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) * - IGU * - MISC (including AEU) * - GRC * - RBCN, RBCP */ static void bxe_process_kill_chip_reset(struct bxe_softc *sc, uint8_t global) { uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; uint32_t global_bits2, stay_reset2; /* * Bits that have to be set in reset_mask2 if we want to reset 'global' * (per chip) blocks. */ global_bits2 = MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; /* * Don't reset the following blocks. * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be * reset, as in 4 port device they might still be owned * by the MCP (there is only one leader per path). 
 */
    not_reset_mask1 =
        MISC_REGISTERS_RESET_REG_1_RST_HC |
        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
        MISC_REGISTERS_RESET_REG_1_RST_PXP;

    not_reset_mask2 =
        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
        MISC_REGISTERS_RESET_REG_2_RST_GRC |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
        MISC_REGISTERS_RESET_REG_2_RST_ATC |
        MISC_REGISTERS_RESET_REG_2_PGLC |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
        MISC_REGISTERS_RESET_REG_2_UMAC0 |
        MISC_REGISTERS_RESET_REG_2_UMAC1;

    /*
     * Keep the following blocks in reset:
     *  - all xxMACs are handled by the elink code.
     */
    stay_reset2 =
        MISC_REGISTERS_RESET_REG_2_XMAC |
        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

    /* Full reset masks according to the chip */
    reset_mask1 = 0xffffffff;

    if (CHIP_IS_E1(sc))
        reset_mask2 = 0xffff;
    else if (CHIP_IS_E1H(sc))
        reset_mask2 = 0x1ffff;
    else if (CHIP_IS_E2(sc))
        reset_mask2 = 0xfffff;
    else /* CHIP_IS_E3 */
        reset_mask2 = 0x3ffffff;

    /* Don't reset global blocks unless we need to */
    if (!global)
        reset_mask2 &= ~global_bits2;

    /*
     * In case of attention in the QM, we need to reset PXP
     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
     * because otherwise QM reset would release 'close the gates' shortly
     * before resetting the PXP, then the PSWRQ would send a write
     * request to PGLUE. Then when PXP is reset, PGLUE would try to
     * read the payload data from PSWWR, but PSWWR would not
     * respond. The write queue in PGLUE would get stuck and DMAE
     * commands would not return. Therefore it's important to reset
     * the second reset register (containing the
     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
     * bit).
     */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
           reset_mask2 & (~not_reset_mask2));

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           reset_mask1 & (~not_reset_mask1));

    mb();
    wmb();

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
           reset_mask2 & (~stay_reset2));

    mb();
    wmb();

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
    wmb();
}

static int
bxe_process_kill(struct bxe_softc *sc,
                 uint8_t          global)
{
    int cnt = 1000;
    uint32_t val = 0;
    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
    uint32_t tags_63_32 = 0;

    /* Empty the Tetris buffer, wait for 1s */
    do {
        sr_cnt         = REG_RD(sc, PXP2_REG_RD_SR_CNT);
        blk_cnt        = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
        pgl_exp_rom2   = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
        if (CHIP_IS_E3(sc)) {
            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
        }

        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
            ((port_is_idle_0 & 0x1) == 0x1) &&
            ((port_is_idle_1 & 0x1) == 0x1) &&
            (pgl_exp_rom2 == 0xffffffff) &&
            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
            break;

        DELAY(1000);
    } while (cnt-- > 0);

    if (cnt <= 0) {
        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
                  "are still outstanding read requests after 1s! 
" "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2); return (-1); } mb(); /* Close gates #2, #3 and #4 */ bxe_set_234_gates(sc, TRUE); /* Poll for IGU VQs for 57712 and newer chips */ if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { return (-1); } /* XXX indicate that "process kill" is in progress to MCP */ /* clear "unprepared" bit */ REG_WR(sc, MISC_REG_UNPREPARED, 0); mb(); /* Make sure all is written to the chip before the reset */ wmb(); /* * Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. */ DELAY(1000); /* Prepare to chip reset: */ /* MCP */ if (global) { bxe_reset_mcp_prep(sc, &val); } /* PXP */ bxe_pxp_prep(sc); mb(); /* reset the chip */ bxe_process_kill_chip_reset(sc, global); mb(); /* clear errors in PGB */ if (!CHIP_IS_E1(sc)) REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); /* Recover after reset: */ /* MCP */ if (global && bxe_reset_mcp_comp(sc, val)) { return (-1); } /* XXX add resetting the NO_MCP mode DB here */ /* Open the gates #2, #3 and #4 */ bxe_set_234_gates(sc, FALSE); /* XXX * IGU/AEU preparation bring back the AEU/IGU to a reset state * re-enable attentions */ return (0); } static int bxe_leader_reset(struct bxe_softc *sc) { int rc = 0; uint8_t global = bxe_reset_is_global(sc); uint32_t load_code; /* * If not going to reset MCP, load "fake" driver to reset HW while * driver is owner of the HW. */ if (!global && !BXE_NOMCP(sc)) { load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, DRV_MSG_CODE_LOAD_REQ_WITH_LFA); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; goto exit_leader_reset; } if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { BLOGE(sc, "MCP unexpected response, aborting\n"); rc = -1; goto exit_leader_reset2; } load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; goto exit_leader_reset2; } } /* try to recover after the failure */ if (bxe_process_kill(sc, global)) { BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); rc = -1; goto exit_leader_reset2; } /* * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver * state. */ bxe_set_reset_done(sc); if (global) { bxe_clear_reset_global(sc); } exit_leader_reset2: /* unload "fake driver" if it was loaded */ if (!global && !BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); } exit_leader_reset: sc->is_leader = 0; bxe_release_leader_lock(sc); mb(); return (rc); } /* * prepare INIT transition, parameters configured: * - HC configuration * - Queue's CDU context */ static void bxe_pf_q_prep_init(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_queue_init_params *init_params) { uint8_t cos; int cxt_index, cxt_offset; bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); /* HC rate */ init_params->rx.hc_rate = sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; init_params->tx.hc_rate = sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; /* FW SB ID */ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; /* CQ index among the SB indices */ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; /* set maximum number of COSs supported by this queue */ init_params->max_cos = sc->max_cos; BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", fp->index, init_params->max_cos); /* set the context pointers queue object */ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { /* XXX change index/cid here if ever support multiple tx CoS */ /* fp->txdata[cos]->cid */ cxt_index = fp->index / ILT_PAGE_CIDS; cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; } } /* set flags that are common for the Tx-only and not normal connections */ static unsigned long bxe_get_common_flags(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t zero_stats) { unsigned long flags = 0; /* PF driver will always initialize the Queue to an ACTIVE state */ bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); /* * tx only connections collect statistics (on the same index as the * parent connection). The statistics are zeroed when the parent * connection is initialized. */ bxe_set_bit(ECORE_Q_FLG_STATS, &flags); if (zero_stats) { bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); } /* * tx only connections can support tx-switching, though their * CoS-ness doesn't survive the loopback */ if (sc->flags & BXE_TX_SWITCHING) { bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); } bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); return (flags); } static unsigned long bxe_get_q_flags(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t leading) { unsigned long flags = 0; if (IS_MF_SD(sc)) { bxe_set_bit(ECORE_Q_FLG_OV, &flags); } if (if_getcapenable(sc->ifp) & IFCAP_LRO) { bxe_set_bit(ECORE_Q_FLG_TPA, &flags); bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); } if (leading) { bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); } bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); /* merge with common flags */ return (flags | bxe_get_common_flags(sc, fp, TRUE)); } static void bxe_pf_q_prep_general(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_general_setup_params *gen_init, uint8_t cos) { gen_init->stat_id = bxe_stats_id(fp); gen_init->spcl_id = fp->cl_id; gen_init->mtu = sc->mtu; gen_init->cos = cos; } static void bxe_pf_rx_q_prep(struct bxe_softc *sc, struct bxe_fastpath *fp, struct rxq_pause_params *pause, struct ecore_rxq_setup_params *rxq_init) { uint8_t max_sge = 0; uint16_t sge_sz = 0; uint16_t tpa_agg_size = 0; pause->sge_th_lo = SGE_TH_LO(sc); pause->sge_th_hi = SGE_TH_HI(sc); /* validate SGE ring has enough to cross high threshold */ if (sc->dropless_fc && (pause->sge_th_hi + FW_PREFETCH_CNT) > (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) { BLOGW(sc, "sge ring threshold limit\n"); } /* minimum max_aggregation_size is 2*MTU (two full buffers) */ tpa_agg_size = (2 * sc->mtu); if (tpa_agg_size < sc->max_aggregation_size) { tpa_agg_size = sc->max_aggregation_size; } max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; max_sge = ((max_sge + PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; sge_sz = (uint16_t)min(SGE_PAGES, 0xffff); /* pause - not for e1 */ if (!CHIP_IS_E1(sc)) { pause->bd_th_lo = BD_TH_LO(sc); pause->bd_th_hi = BD_TH_HI(sc); pause->rcq_th_lo = RCQ_TH_LO(sc); pause->rcq_th_hi = RCQ_TH_HI(sc); /* validate rings have enough 
entries to cross high thresholds */
        if (sc->dropless_fc &&
            pause->bd_th_hi + FW_PREFETCH_CNT >
            sc->rx_ring_size) {
            BLOGW(sc, "rx bd ring threshold limit\n");
        }

        if (sc->dropless_fc &&
            pause->rcq_th_hi + FW_PREFETCH_CNT >
            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
            BLOGW(sc, "rcq ring threshold limit\n");
        }

        pause->pri_map = 1;
    }

    /* rxq setup */
    rxq_init->dscr_map   = fp->rx_dma.paddr;
    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
    rxq_init->rcq_map    = fp->rcq_dma.paddr;
    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);

    /*
     * This should be a maximum number of data bytes that may be
     * placed on the BD (not including padding).
     */
    rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING);

    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
    rxq_init->tpa_agg_sz      = tpa_agg_size;
    rxq_init->sge_buf_sz      = sge_sz;
    rxq_init->max_sges_pkt    = max_sge;
    rxq_init->rss_engine_id   = SC_FUNC(sc);
    rxq_init->mcast_engine_id = SC_FUNC(sc);

    /*
     * Maximum number of simultaneous TPA aggregations for this queue.
     * For PF Clients it should be the maximum available number.
     * VF driver(s) may want to define it to a smaller value.
     */
    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);

    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
    rxq_init->fw_sb_id = fp->fw_sb_id;

    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;

    /*
     * configure silent vlan removal
     * if multi function mode is afex, then mask default vlan
     */
    if (IS_MF_AFEX(sc)) {
        rxq_init->silent_removal_value =
            sc->devinfo.mf_info.afex_def_vlan_tag;
        rxq_init->silent_removal_mask = EVL_VLID_MASK;
    }
}

static void
bxe_pf_tx_q_prep(struct bxe_softc              *sc,
                 struct bxe_fastpath           *fp,
                 struct ecore_txq_setup_params *txq_init,
                 uint8_t                       cos)
{
    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
     * fp->txdata[cos]->tx_dma.paddr;
     */
    txq_init->dscr_map     = fp->tx_dma.paddr;
    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
    txq_init->fw_sb_id     = fp->fw_sb_id;

    /*
     * set the TSS leading client id for TX classification to the
     * leading RSS client id
     */
    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
}

/*
 * This function performs 2 steps in a queue state machine:
 *   1) RESET->INIT
 *   2) INIT->SETUP
 */
static int
bxe_setup_queue(struct bxe_softc    *sc,
                struct bxe_fastpath *fp,
                uint8_t             leading)
{
    struct ecore_queue_state_params q_params = { NULL };
    struct ecore_queue_setup_params *setup_params = &q_params.params.setup;
    int rc;

    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);

    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);

    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;

    /* we want to wait for completion in this context */
    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

    /* prepare the INIT parameters */
    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);

    /* Set the command */
    q_params.cmd = ECORE_Q_CMD_INIT;

    /* Change the state to INIT */
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
        return (rc);
    }

    BLOGD(sc, DBG_LOAD, "init complete\n");

    /* now move the Queue to the SETUP state */
    memset(setup_params, 0, sizeof(*setup_params));

    /* set Queue flags */
    setup_params->flags = bxe_get_q_flags(sc, fp, leading);

    /* set general SETUP parameters */
    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
                          FIRST_TX_COS_INDEX);

    bxe_pf_rx_q_prep(sc, fp,
                     &setup_params->pause_params,
                     &setup_params->rxq_params);

    bxe_pf_tx_q_prep(sc, fp,
                     &setup_params->txq_params,
                     FIRST_TX_COS_INDEX);

    /* Set the command */
    q_params.cmd = ECORE_Q_CMD_SETUP;

    /* change the state to SETUP */
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
        return (rc);
    }

    return (rc);
}

static int
bxe_setup_leading(struct bxe_softc *sc)
{
    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
}

static int
bxe_config_rss_pf(struct bxe_softc            *sc,
                  struct ecore_rss_config_obj *rss_obj,
                  uint8_t                     config_hash)
{
    struct ecore_config_rss_params params = { NULL };
    int i;

    /*
     * Although RSS is meaningless when there is a single HW queue we
     * still need it enabled in order to have HW Rx hash generated.
     */
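
    /*
     * The flags set below select regular RSS mode and always hash
     * IPv4/IPv6 TCP; UDP hashing is added only when udp_rss_v4/v6 are
     * set on the object. When config_hash is set, the RSS key is
     * (re)generated one 32-bit word at a time from arc4random().
     */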
*/ params.rss_obj = rss_obj; bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); /* RSS configuration */ bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); if (rss_obj->udp_rss_v4) { bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); } if (rss_obj->udp_rss_v6) { bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); } /* Hash bits */ params.rss_result_mask = MULTI_MASK; memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); if (config_hash) { /* RSS keys */ for (i = 0; i < sizeof(params.rss_key) / 4; i++) { params.rss_key[i] = arc4random(); } bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); } return (ecore_config_rss(sc, &params)); } static int bxe_config_rss_eth(struct bxe_softc *sc, uint8_t config_hash) { return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); } static int bxe_init_rss_pf(struct bxe_softc *sc) { uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); int i; /* * Prepare the initial contents of the indirection table if * RSS is enabled */ for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { sc->rss_conf_obj.ind_table[i] = (sc->fp->cl_id + (i % num_eth_queues)); } if (sc->udp_rss) { sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; } /* * For 57710 and 57711 SEARCHER configuration (rss_keys) is * per-port, so if explicit configuration is needed, do it only * for a PMF. * * For 57712 and newer it's a per-function configuration. */ return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); } static int bxe_set_mac_one(struct bxe_softc *sc, uint8_t *mac, struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type, unsigned long *ramrod_flags) { struct ecore_vlan_mac_ramrod_params ramrod_param; int rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* fill in general parameters */ ramrod_param.vlan_mac_obj = obj; ramrod_param.ramrod_flags = *ramrod_flags; /* fill a user request section if needed */ if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); /* Set the command: ADD or DEL */ ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : ECORE_VLAN_MAC_DEL; } rc = ecore_config_vlan_mac(sc, &ramrod_param); if (rc == ECORE_EXISTS) { BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); /* do not treat adding same MAC as error */ rc = 0; } else if (rc < 0) { BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); } return (rc); } static int bxe_set_eth_mac(struct bxe_softc *sc, uint8_t set) { unsigned long ramrod_flags = 0; BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Eth MAC is set on RSS leading client (fp[0]) */ return (bxe_set_mac_one(sc, sc->link_params.mac_addr, &sc->sp_objs->mac_obj, set, ECORE_ETH_MAC, &ramrod_flags)); } static int bxe_get_cur_phy_idx(struct bxe_softc *sc) { uint32_t sel_phy_idx = 0; if (sc->link_params.num_phys <= 1) { return (ELINK_INT_PHY); } if (sc->link_vars.link_up) { sel_phy_idx = ELINK_EXT_PHY1; /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && (sc->link_params.phy[ELINK_EXT_PHY2].supported & ELINK_SUPPORTED_FIBRE)) sel_phy_idx = ELINK_EXT_PHY2; } else { switch (elink_phy_selection(&sc->link_params)) { case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: sel_phy_idx = ELINK_EXT_PHY1; break; case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: sel_phy_idx = ELINK_EXT_PHY2; break; } } return (sel_phy_idx); } static int bxe_get_link_cfg_idx(struct bxe_softc *sc) { uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); /* * The selected activated PHY is always after swapping (in case PHY * swapping is enabled). So when swapping is enabled, we need to reverse * the configuration */ if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { if (sel_phy_idx == ELINK_EXT_PHY1) sel_phy_idx = ELINK_EXT_PHY2; else if (sel_phy_idx == ELINK_EXT_PHY2) sel_phy_idx = ELINK_EXT_PHY1; } return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); } static void bxe_set_requested_fc(struct bxe_softc *sc) { /* * Initialize link parameters structure variables * It is recommended to turn off RX FC for jumbo frames * for better performance */ if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; } else { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; } } static void bxe_calc_fc_adv(struct bxe_softc *sc) { uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause); switch (sc->link_vars.ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | ADVERTISED_Pause); break; case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; break; default: break; } } static uint16_t bxe_get_mf_speed(struct bxe_softc *sc) { uint16_t line_speed = sc->link_vars.line_speed; if (IS_MF(sc)) { uint16_t maxCfg = bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); /* calculate the current MAX line speed limit for the MF devices */ if (IS_MF_SI(sc)) { line_speed = (line_speed * maxCfg) / 100; } else { /* SD mode */ uint16_t vn_max_rate = maxCfg * 100; if (vn_max_rate < line_speed) { line_speed = vn_max_rate; } } } return (line_speed); } static void bxe_fill_report_data(struct bxe_softc *sc, struct bxe_link_report_data *data) { uint16_t line_speed = bxe_get_mf_speed(sc); memset(data, 0, sizeof(*data)); /* fill the report data with the effective line speed */ data->line_speed = line_speed; /* Link is down */ if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); } /* Full DUPLEX */ if 
(sc->link_vars.duplex == DUPLEX_FULL) { bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); } /* Rx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); } /* Tx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); } } /* report link status to OS, should be called under phy_lock */ static void bxe_link_report_locked(struct bxe_softc *sc) { struct bxe_link_report_data cur_data; /* reread mf_cfg */ if (IS_PF(sc) && !CHIP_IS_E1(sc)) { bxe_read_mf_cfg(sc); } /* Read the current link report info */ bxe_fill_report_data(sc, &cur_data); /* Don't report link down or exactly the same link status twice */ if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &sc->last_reported_link.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags))) { return; } ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n", cur_data.link_report_flags, sc->last_reported_link.link_report_flags); sc->link_cnt++; ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt); /* report new link params and remember the state for the next time */ memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags)) { if_link_state_change(sc->ifp, LINK_STATE_DOWN); } else { const char *duplex; const char *flow; if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, &cur_data.link_report_flags)) { duplex = "full"; ELINK_DEBUG_P0(sc, "link set to full duplex\n"); } else { duplex = "half"; ELINK_DEBUG_P0(sc, "link set to half duplex\n"); } /* * Handle the FC at the end so that only these flags would be * possibly set. This way we may easily check if there is no FC * enabled. */ if (cur_data.link_report_flags) { if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - receive & transmit"; } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - receive"; } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - transmit"; } else { flow = "none"; /* possible? 
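* Reachable only if some link_report flag other than the RX/TX flow * control bits is set while both FC bits are clear; kept as a * defensive default.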
*/ } } else { flow = "none"; } if_link_state_change(sc->ifp, LINK_STATE_UP); BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", cur_data.line_speed, duplex, flow); } } static void bxe_link_report(struct bxe_softc *sc) { bxe_acquire_phy_lock(sc); bxe_link_report_locked(sc); bxe_release_phy_lock(sc); } static void bxe_link_status_update(struct bxe_softc *sc) { if (sc->state != BXE_STATE_OPEN) { return; } if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { elink_link_status_update(&sc->link_params, &sc->link_vars); } else { sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | ELINK_SUPPORTED_10baseT_Full | ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full | ELINK_SUPPORTED_1000baseT_Full | ELINK_SUPPORTED_2500baseX_Full | ELINK_SUPPORTED_10000baseT_Full | ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE | ELINK_SUPPORTED_Autoneg | ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause); sc->port.advertising[0] = sc->port.supported[0]; sc->link_params.sc = sc; sc->link_params.port = SC_PORT(sc); sc->link_params.req_duplex[0] = DUPLEX_FULL; sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; sc->link_params.req_line_speed[0] = SPEED_10000; sc->link_params.speed_cap_mask[0] = 0x7f0000; sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; if (CHIP_REV_IS_FPGA(sc)) { sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; sc->link_vars.line_speed = ELINK_SPEED_1000; sc->link_vars.link_status = (LINK_STATUS_LINK_UP | LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); } else { sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; sc->link_vars.line_speed = ELINK_SPEED_10000; sc->link_vars.link_status = (LINK_STATUS_LINK_UP | LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); } sc->link_vars.link_up = 1; sc->link_vars.duplex = DUPLEX_FULL; sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; if (IS_PF(sc)) { REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); bxe_stats_handle(sc, STATS_EVENT_LINK_UP); bxe_link_report(sc); } } if (IS_PF(sc)) { if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } else { bxe_stats_handle(sc, STATS_EVENT_STOP); } bxe_link_report(sc); } else { bxe_link_report(sc); bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } } static int bxe_initial_phy_init(struct bxe_softc *sc, int load_mode) { int rc, cfg_idx = bxe_get_link_cfg_idx(sc); uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; struct elink_params *lp = &sc->link_params; bxe_set_requested_fc(sc); if (CHIP_REV_IS_SLOW(sc)) { uint32_t bond = CHIP_BOND_ID(sc); uint32_t feat = 0; if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; } else if (bond & 0x4) { if (CHIP_IS_E3(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; } else { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; } } else if (bond & 0x8) { if (CHIP_IS_E3(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; } else { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; } } /* disable EMAC for E3 and above */ if (bond & 0x2) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; } sc->link_params.feature_config_flags |= feat; } bxe_acquire_phy_lock(sc); if (load_mode == LOAD_DIAG) { lp->loopback_mode = ELINK_LOOPBACK_XGXS; /* Prefer doing PHY loopback at 10G speed, if possible */ if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { if (lp->speed_cap_mask[cfg_idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; } else { lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; } } } if (load_mode == LOAD_LOOPBACK_EXT) { lp->loopback_mode = ELINK_LOOPBACK_EXT; } rc = 
elink_phy_init(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); bxe_calc_fc_adv(sc); if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); bxe_link_report(sc); } if (!CHIP_REV_IS_SLOW(sc)) { bxe_periodic_start(sc); } sc->link_params.req_line_speed[cfg_idx] = req_line_speed; return (rc); } static u_int bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct ecore_mcast_list_elem *mc_mac = arg; mc_mac += cnt; mc_mac->mac = (uint8_t *)LLADDR(sdl); return (1); } static int bxe_init_mcast_macs_list(struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p) { if_t ifp = sc->ifp; int mc_count; struct ecore_mcast_list_elem *mc_mac; ECORE_LIST_INIT(&p->mcast_list); p->mcast_list_len = 0; /* XXXGL: multicast count may change later */ mc_count = if_llmaddr_count(ifp); if (!mc_count) { return (0); } mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO)); if (!mc_mac) { BLOGE(sc, "Failed to allocate temp mcast list\n"); return (-1); } bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac); for (int i = 0; i < mc_count; i++) { ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list); BLOGD(sc, DBG_LOAD, "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n", mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2], mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5], mc_count); } p->mcast_list_len = mc_count; return (0); } static void bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p) { struct ecore_mcast_list_elem *mc_mac = ECORE_LIST_FIRST_ENTRY(&p->mcast_list, struct ecore_mcast_list_elem, link); if (mc_mac) { /* only a single free as all mc_macs are in the same heap array */ free(mc_mac, M_DEVBUF); } } static int bxe_set_mc_list(struct bxe_softc *sc) { struct ecore_mcast_ramrod_params rparam = { NULL }; int rc = 0; rparam.mcast_obj = &sc->mcast_obj; BXE_MCAST_LOCK(sc); /* first, clear all configured multicast MACs */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); /* Manual backport of parts of FreeBSD upstream r284470.
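* The backport releases the multicast lock on this error path so a * failed DEL cannot return with BXE_MCAST_LOCK still held.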
*/ BXE_MCAST_UNLOCK(sc); return (rc); } /* configure a new MAC list */ rc = bxe_init_mcast_macs_list(sc, &rparam); if (rc) { BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc); BXE_MCAST_UNLOCK(sc); return (rc); } /* Now add the new MACs */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); if (rc < 0) { BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); } bxe_free_mcast_macs_list(&rparam); BXE_MCAST_UNLOCK(sc); return (rc); } struct bxe_set_addr_ctx { struct bxe_softc *sc; unsigned long ramrod_flags; int rc; }; static u_int bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct bxe_set_addr_ctx *ctx = arg; struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj; int rc; if (ctx->rc < 0) return (0); rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE, ECORE_UC_LIST_MAC, &ctx->ramrod_flags); /* do not treat adding same MAC as an error */ if (rc == -EEXIST) BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); else if (rc < 0) { BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc); ctx->rc = rc; } return (1); } static int bxe_set_uc_list(struct bxe_softc *sc) { if_t ifp = sc->ifp; struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; struct bxe_set_addr_ctx ctx = { sc, 0, 0 }; int rc; /* first schedule a cleanup of old configuration */ rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); if (rc < 0) { BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); return (rc); } if_foreach_lladdr(ifp, bxe_set_addr, &ctx); if (ctx.rc < 0) return (ctx.rc); /* Execute the pending commands */ bit_set(&ctx.ramrod_flags, RAMROD_CONT); return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, ECORE_UC_LIST_MAC, &ctx.ramrod_flags)); } static void bxe_set_rx_mode(struct bxe_softc *sc) { if_t ifp = sc->ifp; uint32_t rx_mode = BXE_RX_MODE_NORMAL; if (sc->state != BXE_STATE_OPEN) { BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); return; } BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); if (if_getflags(ifp) & IFF_PROMISC) { rx_mode = BXE_RX_MODE_PROMISC; } else if ((if_getflags(ifp) & IFF_ALLMULTI) || ((if_getamcount(ifp) > BXE_MAX_MULTICAST) && CHIP_IS_E1(sc))) { rx_mode = BXE_RX_MODE_ALLMULTI; } else { if (IS_PF(sc)) { /* some multicasts */ if (bxe_set_mc_list(sc) < 0) { rx_mode = BXE_RX_MODE_ALLMULTI; } if (bxe_set_uc_list(sc) < 0) { rx_mode = BXE_RX_MODE_PROMISC; } } } sc->rx_mode = rx_mode; /* schedule the rx_mode command */ if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); return; } if (IS_PF(sc)) { bxe_set_storm_rx_mode(sc); } } /* update flags in shmem */ static void bxe_update_drv_flags(struct bxe_softc *sc, uint32_t flags, uint32_t set) { uint32_t drv_flags; if (SHMEM2_HAS(sc, drv_flags)) { bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); drv_flags = SHMEM2_RD(sc, drv_flags); if (set) { SET_FLAGS(drv_flags, flags); } else { RESET_FLAGS(drv_flags, flags); } SHMEM2_WR(sc, drv_flags, drv_flags); BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); } } /* periodic timer callout routine, only runs when the interface is up */ static void bxe_periodic_callout_func(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; int i; if (!BXE_CORE_TRYLOCK(sc)) { /* just bail and try again next time */ if ((sc->state == BXE_STATE_OPEN) &&
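/* only re-arm the callout while the interface is still up */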
(atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { /* schedule the next periodic callout */ callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } return; } if ((sc->state != BXE_STATE_OPEN) || (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); BXE_CORE_UNLOCK(sc); return; } /* Check for TX timeouts on any fastpath. */ FOR_EACH_QUEUE(sc, i) { if (bxe_watchdog(sc, &sc->fp[i]) != 0) { /* Ruh-Roh, chip was reset! */ break; } } if (!CHIP_REV_IS_SLOW(sc)) { /* * This barrier is needed to ensure the ordering between the writing * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and * the reading here. */ mb(); if (sc->port.pmf) { bxe_acquire_phy_lock(sc); elink_period_func(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); } } if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { int mb_idx = SC_FW_MB_IDX(sc); uint32_t drv_pulse; uint32_t mcp_pulse; ++sc->fw_drv_pulse_wr_seq; sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; drv_pulse = sc->fw_drv_pulse_wr_seq; bxe_drv_pulse(sc); mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & MCP_PULSE_SEQ_MASK); /* * The delta between driver pulse and mcp response should * be 1 (before mcp response) or 0 (after mcp response). */ if ((drv_pulse != mcp_pulse) && (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { /* someone lost a heartbeat... */ BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", drv_pulse, mcp_pulse); } } /* state is BXE_STATE_OPEN */ bxe_stats_handle(sc, STATS_EVENT_UPDATE); BXE_CORE_UNLOCK(sc); if ((sc->state == BXE_STATE_OPEN) && (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { /* schedule the next periodic callout */ callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } } static void bxe_periodic_start(struct bxe_softc *sc) { atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } static void bxe_periodic_stop(struct bxe_softc *sc) { atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); callout_drain(&sc->periodic_callout); } void bxe_parity_recover(struct bxe_softc *sc) { uint8_t global = FALSE; uint32_t error_recovered, error_unrecovered; if ((sc->recovery_state == BXE_RECOVERY_FAILED) && (sc->state == BXE_STATE_ERROR)) { BLOGE(sc, "RECOVERY failed, " "stack notified driver is NOT running! " "Please reboot/power cycle the system.\n"); return; } while (1) { BLOGD(sc, DBG_SP, "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n", __func__, sc, sc->state, sc->recovery_state, sc->error_status); switch(sc->recovery_state) { case BXE_RECOVERY_INIT: bxe_chk_parity_attn(sc, &global, FALSE); if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) || (sc->error_status & BXE_ERR_MCP_ASSERT) || (sc->error_status & BXE_ERR_GLOBAL)) { BXE_CORE_LOCK(sc); if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { bxe_periodic_stop(sc); } bxe_nic_unload(sc, UNLOAD_RECOVERY, false); sc->state = BXE_STATE_ERROR; sc->recovery_state = BXE_RECOVERY_FAILED; BLOGE(sc, " No Recovery tried for error 0x%x" " stack notified driver is NOT running!" " Please reboot/power cycle the system.\n", sc->error_status); BXE_CORE_UNLOCK(sc); return; } /* Try to get a LEADER_LOCK HW lock */ if (bxe_trylock_leader_lock(sc)) { bxe_set_reset_in_progress(sc); /* * Check if there is a global attention and if * there was a global attention, set the global * reset bit. 
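* The global reset bit signals every function on the chip that a * chip-wide (not merely per-path) recovery is in progress.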
*/ if (global) { bxe_set_reset_global(sc); } sc->is_leader = 1; } /* If interface has been removed - break */ if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { bxe_periodic_stop(sc); } BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_RECOVERY, false); sc->recovery_state = BXE_RECOVERY_WAIT; BXE_CORE_UNLOCK(sc); /* * Ensure "is_leader", MCP command sequence and * "recovery_state" update values are seen on other * CPUs. */ mb(); break; case BXE_RECOVERY_WAIT: if (sc->is_leader) { int other_engine = SC_PATH(sc) ? 0 : 1; bool other_load_status = bxe_get_load_status(sc, other_engine); bool load_status = bxe_get_load_status(sc, SC_PATH(sc)); global = bxe_reset_is_global(sc); /* * In case of a parity in a global block, let * the first leader that performs a * leader_reset() reset the global blocks in * order to clear global attentions. Otherwise * the gates will remain closed for that * engine. */ if (load_status || (global && other_load_status)) { /* * Wait until all other functions get * down. */ taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); return; } else { /* * If all other functions got down, * try to bring the chip back to * normal. In any case it's an exit * point for a leader. */ if (bxe_leader_reset(sc)) { BLOGE(sc, "RECOVERY failed, " "stack notified driver is NOT running!\n"); sc->recovery_state = BXE_RECOVERY_FAILED; sc->state = BXE_STATE_ERROR; mb(); return; } /* * If we are here, it means that the * leader has succeeded and doesn't * want to be a leader any more. Try * to continue as a non-leader. */ break; } } else { /* non-leader */ if (!bxe_reset_is_done(sc, SC_PATH(sc))) { /* * Try to get a LEADER_LOCK HW lock as * long as a former leader may have * been unloaded by the user or * released leadership for another * reason. */ if (bxe_trylock_leader_lock(sc)) { /* * I'm a leader now! Restart the * switch case. */ sc->is_leader = 1; break; } taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); return; } else { /* * If there was a global attention, wait * for it to be cleared.
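* The wait is implemented by re-arming sp_err_timeout_task every * hz/10 ticks rather than spinning here.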
*/ if (bxe_reset_is_global(sc)) { taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); return; } error_recovered = sc->eth_stats.recoverable_error; error_unrecovered = sc->eth_stats.unrecoverable_error; BXE_CORE_LOCK(sc); sc->recovery_state = BXE_RECOVERY_NIC_LOADING; if (bxe_nic_load(sc, LOAD_NORMAL)) { error_unrecovered++; sc->recovery_state = BXE_RECOVERY_FAILED; sc->state = BXE_STATE_ERROR; BLOGE(sc, "Recovery is NOT successful, " " state=0x%x recovery_state=0x%x error=%x\n", sc->state, sc->recovery_state, sc->error_status); sc->error_status = 0; } else { sc->recovery_state = BXE_RECOVERY_DONE; error_recovered++; BLOGI(sc, "Recovery is successful from errors %x," " state=0x%x" " recovery_state=0x%x\n", sc->error_status, sc->state, sc->recovery_state); mb(); } sc->error_status = 0; BXE_CORE_UNLOCK(sc); sc->eth_stats.recoverable_error = error_recovered; sc->eth_stats.unrecoverable_error = error_unrecovered; return; } } default: return; } } } void bxe_handle_error(struct bxe_softc * sc) { if(sc->recovery_state == BXE_RECOVERY_WAIT) { return; } if(sc->error_status) { if (sc->state == BXE_STATE_OPEN) { bxe_int_disable(sc); } if (sc->link_vars.link_up) { if_link_state_change(sc->ifp, LINK_STATE_DOWN); } sc->recovery_state = BXE_RECOVERY_INIT; BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n", sc->unit, sc->error_status, sc->recovery_state); bxe_parity_recover(sc); } } static void bxe_sp_err_timeout_task(void *arg, int pending) { struct bxe_softc *sc = (struct bxe_softc *)arg; BLOGD(sc, DBG_SP, "%s state = 0x%x rec state=0x%x error_status=%x\n", __func__, sc->state, sc->recovery_state, sc->error_status); if((sc->recovery_state == BXE_RECOVERY_FAILED) && (sc->state == BXE_STATE_ERROR)) { return; } /* take a GRC dump now if one was requested */ if ((sc->error_status) && (sc->trigger_grcdump)) { bxe_grc_dump(sc); } if (sc->recovery_state != BXE_RECOVERY_DONE) { bxe_handle_error(sc); bxe_parity_recover(sc); } else if (sc->error_status) { bxe_handle_error(sc); } return; } /* start the controller */ static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode) { uint32_t val; int load_code = 0; int i, rc = 0; BXE_CORE_LOCK_ASSERT(sc); BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); sc->state = BXE_STATE_OPENING_WAITING_LOAD; if (IS_PF(sc)) { /* must be called before memory allocation and HW init */ bxe_ilt_set_info(sc); } sc->last_reported_link_state = LINK_STATE_UNKNOWN; bxe_set_fp_rx_buf_size(sc); if (bxe_alloc_fp_buffers(sc) != 0) { BLOGE(sc, "Failed to allocate fastpath memory\n"); sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (bxe_alloc_mem(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (bxe_alloc_fw_stats_mem(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (IS_PF(sc)) { /* set pf load just before approaching the MCP */ bxe_set_pf_load(sc); /* if MCP exists send load request and analyze response */ if (!BXE_NOMCP(sc)) { /* attempt to load pf */ if (bxe_nic_load_request(sc, &load_code) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error1; } /* what did the MCP say?
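* On a refusal the driver still sends LOAD_DONE so the MCP command * sequence is released before bailing out.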
*/ if (bxe_nic_load_analyze_req(sc, load_code) != 0) { bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } } else { BLOGI(sc, "Device has no MCP!\n"); load_code = bxe_nic_load_no_mcp(sc); } /* mark PMF if applicable */ bxe_nic_load_pmf(sc, load_code); /* Init Function state controlling object */ bxe_init_func_obj(sc); /* Initialize HW */ if (bxe_init_hw(sc, load_code) != 0) { BLOGE(sc, "HW init failed\n"); bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } } /* set ALWAYS_ALIVE bit in shmem */ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bxe_drv_pulse(sc); sc->flags |= BXE_NO_PULSE; /* attach interrupts */ if (bxe_interrupt_attach(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } bxe_nic_init(sc, load_code); /* Init per-function objects */ if (IS_PF(sc)) { bxe_init_objs(sc); // XXX bxe_iov_nic_init(sc); /* set AFEX default VLAN tag to an invalid value */ sc->devinfo.mf_info.afex_def_vlan_tag = -1; // XXX bxe_nic_load_afex_dcc(sc, load_code); sc->state = BXE_STATE_OPENING_WAITING_PORT; rc = bxe_func_start(sc); if (rc) { BLOGE(sc, "Function start failed! rc = %d\n", rc); bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } /* send LOAD_DONE command to MCP */ if (!BXE_NOMCP(sc)) { load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); sc->state = BXE_STATE_ERROR; rc = ENXIO; goto bxe_nic_load_error3; } } rc = bxe_setup_leading(sc); if (rc) { BLOGE(sc, "Setup leading failed! rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); if (rc) { BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } rc = bxe_init_rss_pf(sc); if (rc) { BLOGE(sc, "PF RSS init failed\n"); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } /* XXX VF */ /* now that Clients are configured we are ready to work */ sc->state = BXE_STATE_OPEN; /* Configure a ucast MAC */ if (IS_PF(sc)) { rc = bxe_set_eth_mac(sc, TRUE); } if (rc) { BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } if (sc->port.pmf) { rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); if (rc) { sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; /* start fast path */ /* Initialize Rx filter */ bxe_set_rx_mode(sc); /* start the Tx */ switch (/* XXX load_mode */LOAD_OPEN) { case LOAD_NORMAL: case LOAD_OPEN: break; case LOAD_DIAG: case LOAD_LOOPBACK_EXT: sc->state = BXE_STATE_DIAG; break; default: break; } if (sc->port.pmf) { bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); } else { bxe_link_status_update(sc); } /* start the periodic timer callout */ bxe_periodic_start(sc); if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { /* mark the driver as loaded in shmem2 */ val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], (val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | DRV_FLAGS_CAPABILITIES_LOADED_L2)); } /* wait for all pending SP commands to complete */ if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); bxe_periodic_stop(sc);
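/* a timed-out slowpath completion is fatal: unload and return ENXIO */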
bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); return (ENXIO); } /* Tell the stack the driver is running! */ if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); return (0); bxe_nic_load_error3: if (IS_PF(sc)) { bxe_int_disable_sync(sc, 1); /* clean out queued objects */ bxe_squeeze_objects(sc); } bxe_interrupt_detach(sc); bxe_nic_load_error2: if (IS_PF(sc) && !BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); } sc->port.pmf = 0; bxe_nic_load_error1: /* clear pf_load status, as it was already set */ if (IS_PF(sc)) { bxe_clear_pf_load(sc); } bxe_nic_load_error0: bxe_free_fw_stats_mem(sc); bxe_free_fp_buffers(sc); bxe_free_mem(sc); return (rc); } static int bxe_init_locked(struct bxe_softc *sc) { int other_engine = SC_PATH(sc) ? 0 : 1; uint8_t other_load_status, load_status; uint8_t global = FALSE; int rc; BXE_CORE_LOCK_ASSERT(sc); /* check if the driver is already running */ if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); return (0); } if((sc->state == BXE_STATE_ERROR) && (sc->recovery_state == BXE_RECOVERY_FAILED)) { BLOGE(sc, "Initialization not done, " "as previous recovery failed. " "Reboot/Power-cycle the system\n" ); return (ENXIO); } bxe_set_power_state(sc, PCI_PM_D0); /* * If parity occurred during the unload, then attentions and/or * RECOVERY_IN_PROGRESS may still be set. If so we want the first function * loaded on the current engine to complete the recovery. Parity recovery * is only relevant for PF driver. */ if (IS_PF(sc)) { other_load_status = bxe_get_load_status(sc, other_engine); load_status = bxe_get_load_status(sc, SC_PATH(sc)); if (!bxe_reset_is_done(sc, SC_PATH(sc)) || bxe_chk_parity_attn(sc, &global, TRUE)) { do { /* * If there are attentions and they are in global blocks, set * the GLOBAL_RESET bit regardless of whether it will be this * function that will complete the recovery or not. */ if (global) { bxe_set_reset_global(sc); } /* * Only the first function on the current engine should try * to recover in open. In case of attentions in global blocks * only the first in the chip should try to recover. */ if ((!load_status && (!global || !other_load_status)) && bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { BLOGI(sc, "Recovered during init\n"); break; } /* recovery has failed... */ bxe_set_power_state(sc, PCI_PM_D3hot); sc->recovery_state = BXE_RECOVERY_FAILED; BLOGE(sc, "Recovery flow hasn't properly " "completed yet, try again later. " "If you still see this message after a " "few retries then power cycle is required.\n"); rc = ENXIO; goto bxe_init_locked_done; } while (0); } } sc->recovery_state = BXE_RECOVERY_DONE; rc = bxe_nic_load(sc, LOAD_OPEN); bxe_init_locked_done: if (rc) { /* Tell the stack the driver is NOT running! */ BLOGE(sc, "Initialization failed, " "stack notified driver is NOT running!\n"); if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); } return (rc); } static int bxe_stop_locked(struct bxe_softc *sc) { BXE_CORE_LOCK_ASSERT(sc); return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); } /* * Handles controller initialization when called from an unlocked routine. * ifconfig calls this function.
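* (It is installed as the if_init handler via if_setinitfn(), so e.g. * "ifconfig bxe0 up" ends up here.)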
* * Returns: * void */ static void bxe_init(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; BXE_CORE_LOCK(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } static int bxe_init_ifnet(struct bxe_softc *sc) { if_t ifp; int capabilities; /* ifconfig entrypoint for media type/status reporting */ ifmedia_init(&sc->ifmedia, IFM_IMASK, bxe_ifmedia_update, bxe_ifmedia_status); /* set the default interface values */ ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */ BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media); /* allocate the ifnet structure */ if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { BLOGE(sc, "Interface allocation failed!\n"); return (ENXIO); } if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); if_setioctlfn(ifp, bxe_ioctl); if_setstartfn(ifp, bxe_tx_start); if_setgetcounterfn(ifp, bxe_get_counter); if_settransmitfn(ifp, bxe_tx_mq_start); if_setqflushfn(ifp, bxe_mq_flush); if_setinitfn(ifp, bxe_init); if_setmtu(ifp, sc->mtu); if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)); capabilities = (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWCSUM | IFCAP_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_LRO | IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_WOL_MAGIC); if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ if_setcapenable(ifp, if_getcapabilities(ifp)); if_setbaudrate(ifp, IF_Gbps(10)); /* XXX */ if_setsendqlen(ifp, sc->tx_ring_size); if_setsendqready(ifp); /* XXX */ sc->ifp = ifp; /* attach to the Ethernet interface list */ ether_ifattach(ifp, sc->link_params.mac_addr); /* Attach driver debugnet methods. */ DEBUGNET_SET(ifp, bxe); return (0); } static void bxe_deallocate_bars(struct bxe_softc *sc) { int i; for (i = 0; i < MAX_BARS; i++) { if (sc->bar[i].resource != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->bar[i].rid, sc->bar[i].resource); BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", i, PCIR_BAR(i)); } } } static int bxe_allocate_bars(struct bxe_softc *sc) { u_int flags; int i; memset(sc->bar, 0, sizeof(sc->bar)); for (i = 0; i < MAX_BARS; i++) { /* memory resources reside at BARs 0, 2, 4 */ /* Run `pciconf -lb` to see mappings */ if ((i != 0) && (i != 2) && (i != 4)) { continue; } sc->bar[i].rid = PCIR_BAR(i); flags = RF_ACTIVE; if (i == 0) { flags |= RF_SHAREABLE; } if ((sc->bar[i].resource = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->bar[i].rid, flags)) == NULL) { return (ENXIO); /* the caller treats non-zero as failure */ } sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n", i, PCIR_BAR(i), rman_get_start(sc->bar[i].resource), rman_get_end(sc->bar[i].resource), rman_get_size(sc->bar[i].resource), (uintmax_t)sc->bar[i].kva); } return (0); } static void bxe_get_function_num(struct bxe_softc *sc) { uint32_t val = 0; /* * Read the ME register to get the function number. The ME register * holds the relative-function number and absolute-function number. The * absolute-function number appears only in E2 and above. Before that * these bits always contained zero, therefore we cannot blindly use them.
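* Only bit 0 of the absolute-function field is consumed below, as the * path (engine) identifier.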
val = REG_RD(sc, BAR_ME_REGISTER); sc->pfunc_rel = (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); sc->path_id = (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); } else { sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); } BLOGD(sc, DBG_LOAD, "Relative function %d, Absolute function %d, Path %d\n", sc->pfunc_rel, sc->pfunc_abs, sc->path_id); } static uint32_t bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) { uint32_t shmem2_size; uint32_t offset; uint32_t mf_cfg_offset_value; /* Non 57712 */ offset = (SHMEM_RD(sc, func_mb) + (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); /* 57712 plus */ if (sc->devinfo.shmem2_base != 0) { shmem2_size = SHMEM2_RD(sc, size); if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { offset = mf_cfg_offset_value; } } } return (offset); } static uint32_t bxe_pcie_capability_read(struct bxe_softc *sc, int reg, int width) { int pcie_reg; /* ensure PCIe capability is enabled */ if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { if (pcie_reg != 0) { BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); return (pci_read_config(sc->dev, (pcie_reg + reg), width)); } } BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); return (0); } static uint8_t bxe_is_pcie_pending(struct bxe_softc *sc) { return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) & PCIEM_STA_TRANSACTION_PND); } /* * Walk the PCI capabilities list for the device to find what features are * supported. These capabilities may be enabled/disabled by firmware so it's * best to walk the list rather than make assumptions. */ static void bxe_probe_pci_caps(struct bxe_softc *sc) { uint16_t link_status; int reg; /* check if PCI Power Management is enabled */ if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { if (reg != 0) { BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; } } link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2); /* handle PCIe 2.0 workarounds for 57710 */ if (CHIP_IS_E1(sc)) { /* workaround for 57710 errata E4_57710_27462 */ sc->devinfo.pcie_link_speed = (REG_RD(sc, 0x3d04) & (1 << 24)) ?
2 : 1; /* workaround for 57710 errata E4_57710_27488 */ sc->devinfo.pcie_link_width = ((link_status & PCIEM_LINK_STA_WIDTH) >> 4); if (sc->devinfo.pcie_link_speed > 1) { sc->devinfo.pcie_link_width = ((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1; } } else { sc->devinfo.pcie_link_speed = (link_status & PCIEM_LINK_STA_SPEED); sc->devinfo.pcie_link_width = ((link_status & PCIEM_LINK_STA_WIDTH) >> 4); } BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; /* check if MSI capability is enabled */ if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { if (reg != 0) { BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; } } /* check if MSI-X capability is enabled */ if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { if (reg != 0) { BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; } } } static int bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val; /* get the outer vlan if we're in switch-dependent mode */ val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); mf_info->ext_id = (uint16_t)val; mf_info->multi_vnics_mode = 1; if (!VALID_OVLAN(mf_info->ext_id)) { BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); return (1); } /* get the capabilities */ if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == FUNC_MF_CFG_PROTOCOL_ISCSI) { mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == FUNC_MF_CFG_PROTOCOL_FCOE) { mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; } else { mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; } mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; return (0); } static uint32_t bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) { uint32_t retval = 0; uint32_t val; val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { retval |= MF_PROTO_SUPPORT_ETHERNET; } if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { retval |= MF_PROTO_SUPPORT_ISCSI; } if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { retval |= MF_PROTO_SUPPORT_FCOE; } } return (retval); } static int bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val; /* * There is no outer vlan if we're in switch-independent mode. * If the mac is valid then assume multi-function. */ val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ?
2 : 4; return (0); } static int bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t e1hov_tag; uint32_t func_config; uint32_t niv_config; mf_info->multi_vnics_mode = 1; e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); mf_info->ext_id = (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> FUNC_MF_CFG_E1HOV_TAG_SHIFT); mf_info->default_vlan = (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> FUNC_MF_CFG_AFEX_VLAN_SHIFT); mf_info->niv_allowed_priorities = (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); mf_info->niv_default_cos = (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); mf_info->afex_vlan_mode = ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); mf_info->niv_mba_enabled = ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; return (0); } static int bxe_check_valid_mf_cfg(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t mf_cfg1; uint32_t mf_cfg2; uint32_t ovlan1; uint32_t ovlan2; uint8_t i, j; BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", SC_PORT(sc)); BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", mf_info->mf_config[SC_VN(sc)]); BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", mf_info->multi_vnics_mode); BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", mf_info->vnics_per_port); BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", mf_info->ext_id); BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", mf_info->min_bw[0], mf_info->min_bw[1], mf_info->min_bw[2], mf_info->min_bw[3]); BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", mf_info->max_bw[0], mf_info->max_bw[1], mf_info->max_bw[2], mf_info->max_bw[3]); BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", sc->mac_addr_str); /* various MF mode sanity checks... */ if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { BLOGE(sc, "Enumerated function %d is marked as hidden\n", SC_PORT(sc)); return (1); } if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", mf_info->vnics_per_port, mf_info->multi_vnics_mode); return (1); } if (mf_info->mf_mode == MULTI_FUNCTION_SD) { /* vnic id > 0 must have valid ovlan in switch-dependent mode */ if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", SC_VN(sc), OVLAN(sc)); return (1); } if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", mf_info->multi_vnics_mode, OVLAN(sc)); return (1); } /* * Verify all functions are in either MF or SF mode. If MF, make * sure that all non-hidden functions have a valid ovlan. If SF, * make sure that all non-hidden functions have an invalid ovlan.
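* A port mixing valid and invalid ovlans across its functions is * treated as an inconsistent MF configuration and rejected.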
*/ FOREACH_ABS_FUNC_IN_PORT(sc, i) { mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { BLOGE(sc, "mf_mode=SD function %d MF config " "mismatch, multi_vnics_mode=%d ovlan=%d\n", i, mf_info->multi_vnics_mode, ovlan1); return (1); } } /* Verify all funcs on the same port each have a different ovlan. */ FOREACH_ABS_FUNC_IN_PORT(sc, i) { mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); /* iterate from the next function on the port to the max func */ for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan1) && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan2) && (ovlan1 == ovlan2)) { BLOGE(sc, "mf_mode=SD functions %d and %d " "have the same ovlan (%d)\n", i, j, ovlan1); return (1); } } } } /* MULTI_FUNCTION_SD */ return (0); } static int bxe_get_mf_cfg_info(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val, mac_upper; uint8_t i, vnic; /* initialize mf_info defaults */ mf_info->vnics_per_port = 1; mf_info->multi_vnics_mode = FALSE; mf_info->path_has_ovlan = FALSE; mf_info->mf_mode = SINGLE_FUNCTION; if (!CHIP_IS_MF_CAP(sc)) { return (0); } if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { BLOGE(sc, "Invalid mf_cfg_base!\n"); return (1); } /* get the MF mode (switch dependent / independent / single-function) */ val = SHMEM_RD(sc, dev_info.shared_feature_config.config); switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) { case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); /* check for legal upper mac bytes */ if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) { mf_info->mf_mode = MULTI_FUNCTION_SI; } else { BLOGE(sc, "Invalid config for Switch Independent mode\n"); } break; case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: /* get outer vlan configuration */ val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { mf_info->mf_mode = MULTI_FUNCTION_SD; } else { BLOGE(sc, "Invalid config for Switch Dependent mode\n"); } break; case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ return (0); case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: /* * Mark MF mode as NIV if MCP version includes NPAR-SD support * and the MAC address is valid. */ mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); if ((SHMEM2_HAS(sc, afex_driver_support)) && (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { mf_info->mf_mode = MULTI_FUNCTION_AFEX; } else { BLOGE(sc, "Invalid config for AFEX mode\n"); } break; default: BLOGE(sc, "Unknown MF mode (0x%08x)\n", (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); return (1); } /* set path mf_mode (which could be different than function mf_mode) */ if (mf_info->mf_mode == MULTI_FUNCTION_SD) { mf_info->path_has_ovlan = TRUE; } else if (mf_info->mf_mode == SINGLE_FUNCTION) { /* * Decide on path multi vnics mode. 
If we're not in MF mode and in * 4-port mode, this is good enough to check vnic-0 of the other port * on the same path */ if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { uint8_t other_port = !(PORT_ID(sc) & 1); uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0; } } if (mf_info->mf_mode == SINGLE_FUNCTION) { /* invalid MF config */ if (SC_VN(sc) >= 1) { BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); return (1); } return (0); } /* get the MF configuration */ mf_info->mf_config[SC_VN(sc)] = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); switch(mf_info->mf_mode) { case MULTI_FUNCTION_SD: bxe_get_shmem_mf_cfg_info_sd(sc); break; case MULTI_FUNCTION_SI: bxe_get_shmem_mf_cfg_info_si(sc); break; case MULTI_FUNCTION_AFEX: bxe_get_shmem_mf_cfg_info_niv(sc); break; default: BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", mf_info->mf_mode); return (1); } /* get the congestion management parameters */ vnic = 0; FOREACH_ABS_FUNC_IN_PORT(sc, i) { /* get min/max bw */ val = MFCFG_RD(sc, func_mf_config[i].config); mf_info->min_bw[vnic] = ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); mf_info->max_bw[vnic] = ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); vnic++; } return (bxe_check_valid_mf_cfg(sc)); } static int bxe_get_shmem_info(struct bxe_softc *sc) { int port; uint32_t mac_hi, mac_lo, val; port = SC_PORT(sc); mac_hi = mac_lo = 0; sc->link_params.sc = sc; sc->link_params.port = port; /* get the hardware config info */ sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config); sc->devinfo.hw_config2 = SHMEM_RD(sc, dev_info.shared_hw_config.config2); sc->link_params.hw_led_mode = ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> SHARED_HW_CFG_LED_MODE_SHIFT); /* get the port feature config */ sc->port.config = SHMEM_RD(sc, dev_info.port_feature_config[port].config); /* get the link params */ sc->link_params.speed_cap_mask[0] = SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); sc->link_params.speed_cap_mask[1] = SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); /* get the lane config */ sc->link_params.lane_config = SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); /* get the link config */ val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); sc->port.link_config[ELINK_INT_PHY] = val; sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); sc->port.link_config[ELINK_EXT_PHY1] = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); /* get the override preemphasis flag and enable it or turn it off */ val = SHMEM_RD(sc, dev_info.shared_feature_config.config); if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; } else { sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; } /* get the initial value of the link params */ sc->link_params.multi_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); /* get external phy info */ sc->port.ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); /* get the multifunction configuration */ bxe_get_mf_cfg_info(sc); /* get the mac address */ if (IS_MF(sc)) { mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); } else { mac_hi = 
SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); } if ((mac_lo == 0) && (mac_hi == 0)) { *sc->mac_addr_str = 0; BLOGE(sc, "No Ethernet address programmed!\n"); } else { sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), "%02x:%02x:%02x:%02x:%02x:%02x", sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); } return (0); } static void bxe_get_tunable_params(struct bxe_softc *sc) { /* sanity checks */ if ((bxe_interrupt_mode != INTR_MODE_INTX) && (bxe_interrupt_mode != INTR_MODE_MSI) && (bxe_interrupt_mode != INTR_MODE_MSIX)) { BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); bxe_interrupt_mode = INTR_MODE_MSIX; } if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); bxe_queue_count = 0; } if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { if (bxe_max_rx_bufs == 0) { bxe_max_rx_bufs = RX_BD_USABLE; } else { BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); bxe_max_rx_bufs = 2048; } } if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); bxe_hc_rx_ticks = 25; } if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); bxe_hc_tx_ticks = 50; } if (bxe_max_aggregation_size == 0) { bxe_max_aggregation_size = TPA_AGG_SIZE; } if (bxe_max_aggregation_size > 0xffff) { BLOGW(sc, "invalid max_aggregation_size (%d)\n", bxe_max_aggregation_size); bxe_max_aggregation_size = TPA_AGG_SIZE; } if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); bxe_mrrs = -1; } if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); bxe_autogreeen = 0; } if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); bxe_udp_rss = 0; } /* pull in user settings */ sc->interrupt_mode = bxe_interrupt_mode; sc->max_rx_bufs = bxe_max_rx_bufs; sc->hc_rx_ticks = bxe_hc_rx_ticks; sc->hc_tx_ticks = bxe_hc_tx_ticks; sc->max_aggregation_size = bxe_max_aggregation_size; sc->mrrs = bxe_mrrs; sc->autogreeen = bxe_autogreeen; sc->udp_rss = bxe_udp_rss; if (bxe_interrupt_mode == INTR_MODE_INTX) { sc->num_queues = 1; } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ sc->num_queues = min((bxe_queue_count ? 
bxe_queue_count : mp_ncpus), MAX_RSS_CHAINS); if (sc->num_queues > mp_ncpus) { sc->num_queues = mp_ncpus; } } BLOGD(sc, DBG_LOAD, "User Config: " "debug=0x%lx " "interrupt_mode=%d " "queue_count=%d " "hc_rx_ticks=%d " "hc_tx_ticks=%d " "rx_budget=%d " "max_aggregation_size=%d " "mrrs=%d " "autogreeen=%d " "udp_rss=%d\n", bxe_debug, sc->interrupt_mode, sc->num_queues, sc->hc_rx_ticks, sc->hc_tx_ticks, bxe_rx_budget, sc->max_aggregation_size, sc->mrrs, sc->autogreeen, sc->udp_rss); } static int bxe_media_detect(struct bxe_softc *sc) { int port_type; uint32_t phy_idx = bxe_get_cur_phy_idx(sc); switch (sc->link_params.phy[phy_idx].media_type) { case ELINK_ETH_PHY_SFPP_10G_FIBER: case ELINK_ETH_PHY_XFP_FIBER: BLOGI(sc, "Found 10Gb Fiber media.\n"); sc->media = IFM_10G_SR; port_type = PORT_FIBRE; break; case ELINK_ETH_PHY_SFP_1G_FIBER: BLOGI(sc, "Found 1Gb Fiber media.\n"); sc->media = IFM_1000_SX; port_type = PORT_FIBRE; break; case ELINK_ETH_PHY_KR: case ELINK_ETH_PHY_CX4: BLOGI(sc, "Found 10GBase-CX4 media.\n"); sc->media = IFM_10G_CX4; port_type = PORT_FIBRE; break; case ELINK_ETH_PHY_DA_TWINAX: BLOGI(sc, "Found 10Gb Twinax media.\n"); sc->media = IFM_10G_TWINAX; port_type = PORT_DA; break; case ELINK_ETH_PHY_BASE_T: if (sc->link_params.speed_cap_mask[0] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { BLOGI(sc, "Found 10GBase-T media.\n"); sc->media = IFM_10G_T; port_type = PORT_TP; } else { BLOGI(sc, "Found 1000Base-T media.\n"); sc->media = IFM_1000_T; port_type = PORT_TP; } break; case ELINK_ETH_PHY_NOT_PRESENT: BLOGI(sc, "Media not present.\n"); sc->media = 0; port_type = PORT_OTHER; break; case ELINK_ETH_PHY_UNSPECIFIED: default: BLOGI(sc, "Unknown media!\n"); sc->media = 0; port_type = PORT_OTHER; break; } return port_type; } #define GET_FIELD(value, fname) \ (((value) & (fname##_MASK)) >> (fname##_SHIFT)) #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) static int bxe_get_igu_cam_info(struct bxe_softc *sc) { int pfid = SC_FUNC(sc); int igu_sb_id; uint32_t val; uint8_t fid, igu_sb_cnt = 0; sc->igu_base_sb = 0xff; if (CHIP_INT_MODE_IS_BC(sc)) { int vn = SC_VN(sc); igu_sb_cnt = sc->igu_sb_cnt; sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * FP_SB_MAX_E1x); sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn)); return (0); } /* IGU in normal mode - read CAM */ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { continue; } fid = IGU_FID(val); if ((fid & IGU_FID_ENCODE_IS_PF)) { if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { continue; } if (IGU_VEC(val) == 0) { /* default status block */ sc->igu_dsb_id = igu_sb_id; } else { if (sc->igu_base_sb == 0xff) { sc->igu_base_sb = igu_sb_id; } igu_sb_cnt++; } } } /* * Due to new PF resource allocation by MFW T7.4 and above, the number * of CAM entries may not be equal to the value advertised in PCI. The * driver should use the minimum of the two as the actual status block * count. */ sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); if (igu_sb_cnt == 0) { BLOGE(sc, "CAM configuration error\n"); return (-1); } return (0); } /* * Gather various information from the device config space, the device itself, * shmem, and the user input.
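* Note that the MSI-X capability probed here also bounds igu_sb_cnt, * so this runs before any interrupt resources are allocated.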
*/ static int bxe_get_device_info(struct bxe_softc *sc) { uint32_t val; int rc; /* Get the data for the device */ sc->devinfo.vendor_id = pci_get_vendor(sc->dev); sc->devinfo.device_id = pci_get_device(sc->dev); sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); /* get the chip revision (chip metal comes from pci config space) */ sc->devinfo.chip_id = sc->link_params.chip_id = (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); /* force 57811 according to MISC register */ if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { if (CHIP_IS_57810(sc)) { sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | (sc->devinfo.chip_id & 0x0000ffff)); } else if (CHIP_IS_57810_MF(sc)) { sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | (sc->devinfo.chip_id & 0x0000ffff)); } sc->devinfo.chip_id |= 0x1; } BLOGD(sc, DBG_LOAD, "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", sc->devinfo.chip_id, ((sc->devinfo.chip_id >> 16) & 0xffff), ((sc->devinfo.chip_id >> 12) & 0xf), ((sc->devinfo.chip_id >> 4) & 0xff), ((sc->devinfo.chip_id >> 0) & 0xf)); val = (REG_RD(sc, 0x2874) & 0x55); if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1(sc) && val) || (CHIP_IS_E1H(sc) && (val == 0x55))) { sc->flags |= BXE_ONE_PORT_FLAG; BLOGD(sc, DBG_LOAD, "single port device\n"); } /* set the doorbell size */ sc->doorbell_size = (1 << BXE_DB_SHIFT); /* determine whether the device is in 2 port or 4 port mode */ sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/ if (CHIP_IS_E2E3(sc)) { /* * Read port4mode_en_ovwr[0]: * If 1, four port mode is in port4mode_en_ovwr[1]. * If 0, four port mode is in port4mode_en[0]. */ val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); if (val & 1) { val = ((val >> 1) & 1); } else { val = REG_RD(sc, MISC_REG_PORT4MODE_EN); } sc->devinfo.chip_port_mode = (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); } /* get the function and path info for the device */ bxe_get_function_num(sc); /* get the shared memory base address */ sc->devinfo.shmem_base = sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); sc->devinfo.shmem2_base = REG_RD(sc, (SC_PATH(sc) ? 
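/*
 * The SHMEM2 base lives in a per-path generic scratch register, so the
 * conditional below selects MISC_REG_GENERIC_CR_1 for path 1 and
 * MISC_REG_GENERIC_CR_0 for path 0.
 */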
MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0));
BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
if (!sc->devinfo.shmem_base) {
/* this should ONLY prevent upcoming shmem reads */
BLOGI(sc, "MCP not active\n");
sc->flags |= BXE_NO_MCP_FLAG;
return (0);
}
/* make sure the shared memory contents are valid */
val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
return (0);
}
BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
/* get the bootcode version */
sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
snprintf(sc->devinfo.bc_ver_str, sizeof(sc->devinfo.bc_ver_str), "%d.%d.%d", ((sc->devinfo.bc_ver >> 24) & 0xff), ((sc->devinfo.bc_ver >> 16) & 0xff), ((sc->devinfo.bc_ver >> 8) & 0xff));
BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
/* get the bootcode shmem address */
sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
/* clean indirect addresses as they're not used */
pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
if (IS_PF(sc)) {
REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
if (CHIP_IS_E1x(sc)) {
REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
}
/*
 * Enable internal target-read (in case we are probed after PF
 * FLR). Must be done prior to any BAR read access. Only for
 * 57712 and up
 */
if (!CHIP_IS_E1x(sc)) {
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
}
/* get the nvram size */
val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
sc->devinfo.flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
/* get PCI capabilities */
bxe_probe_pci_caps(sc);
bxe_set_power_state(sc, PCI_PM_D0);
/* get various configuration parameters from shmem */
bxe_get_shmem_info(sc);
if (sc->devinfo.pcie_msix_cap_reg != 0) {
val = pci_read_config(sc->dev, (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), 2);
sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
} else {
sc->igu_sb_cnt = 1;
}
sc->igu_base_addr = BAR_IGU_INTMEM;
/* initialize IGU parameters */
if (CHIP_IS_E1x(sc)) {
sc->devinfo.int_block = INT_BLOCK_HC;
sc->igu_dsb_id = DEF_SB_IGU_ID;
sc->igu_base_sb = 0;
} else {
sc->devinfo.int_block = INT_BLOCK_IGU;
/* do not allow device reset during IGU info processing */
bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
int tout = 5000;
BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
tout--;
DELAY(1000);
}
if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
return (-1);
}
}
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
} else {
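/*
 * Reaching this branch means BACKWARD_COMP_EN is clear, i.e. the IGU
 * is (now) in normal mode and the CAM must be walked to discover the
 * status blocks owned by this PF; see bxe_get_igu_cam_info() below.
 */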
BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n"); } rc = bxe_get_igu_cam_info(sc); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); if (rc) { return (rc); } } /* * Get base FW non-default (fast path) status block ID. This value is * used to initialize the fw_sb_id saved on the fp/queue structure to * determine the id used by the FW. */ if (CHIP_IS_E1x(sc)) { sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); } else { /* * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of * the same queue are indicated on the same IGU SB). So we prefer * FW and IGU SBs to be the same value. */ sc->base_fw_ndsb = sc->igu_base_sb; } BLOGD(sc, DBG_LOAD, "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", sc->igu_dsb_id, sc->igu_base_sb, sc->igu_sb_cnt, sc->base_fw_ndsb); elink_phy_probe(&sc->link_params); return (0); } static void bxe_link_settings_supported(struct bxe_softc *sc, uint32_t switch_cfg) { uint32_t cfg_size = 0; uint32_t idx; uint8_t port = SC_PORT(sc); /* aggregation of supported attributes of all external phys */ sc->port.supported[0] = 0; sc->port.supported[1] = 0; switch (sc->link_params.num_phys) { case 1: sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; cfg_size = 1; break; case 2: sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; cfg_size = 1; break; case 3: if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { sc->port.supported[1] = sc->link_params.phy[ELINK_EXT_PHY1].supported; sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY2].supported; } else { sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; sc->port.supported[1] = sc->link_params.phy[ELINK_EXT_PHY2].supported; } cfg_size = 2; break; } if (!(sc->port.supported[0] || sc->port.supported[1])) { BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config), SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config2)); return; } if (CHIP_IS_E3(sc)) sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); else { switch (switch_cfg) { case ELINK_SWITCH_CFG_1G: sc->port.phy_addr = REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); break; case ELINK_SWITCH_CFG_10G: sc->port.phy_addr = REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); break; default: BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", sc->port.link_config[0]); return; } } BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); /* mask what we support according to speed_cap_mask per configuration */ for (idx = 0; idx < cfg_size; idx++) { if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full; } if (!(sc->link_params.speed_cap_mask[idx] & 
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; } } BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", sc->port.supported[0], sc->port.supported[1]); ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n", sc->port.supported[0], sc->port.supported[1]); } static void bxe_link_settings_requested(struct bxe_softc *sc) { uint32_t link_config; uint32_t idx; uint32_t cfg_size = 0; sc->port.advertising[0] = 0; sc->port.advertising[1] = 0; switch (sc->link_params.num_phys) { case 1: case 2: cfg_size = 1; break; case 3: cfg_size = 2; break; } for (idx = 0; idx < cfg_size; idx++) { sc->link_params.req_duplex[idx] = DUPLEX_FULL; link_config = sc->port.link_config[idx]; switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_AUTO: if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; sc->port.advertising[idx] |= sc->port.supported[idx]; if (sc->link_params.phy[ELINK_EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) sc->port.advertising[idx] |= (ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full); } else { /* force 10G, no AN */ sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); continue; } break; case PORT_FEATURE_LINK_SPEED_10M_FULL: if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10M_HALF: if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; sc->link_params.req_duplex[idx] = DUPLEX_HALF; sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | ADVERTISED_TP); ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n", sc->link_params.req_duplex[idx]); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_FULL: if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_HALF: if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; sc->link_params.req_duplex[idx] = DUPLEX_HALF; sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_1G: if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 
"speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_2_5G: if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10G_CX4: if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_20G: sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; break; default: BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; sc->port.advertising[idx] = sc->port.supported[idx]; break; } sc->link_params.req_flow_ctrl[idx] = (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; } else { bxe_set_requested_fc(sc); } } BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " "req_flow_ctrl=0x%x advertising=0x%x\n", sc->link_params.req_line_speed[idx], sc->link_params.req_duplex[idx], sc->link_params.req_flow_ctrl[idx], sc->port.advertising[idx]); ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d " "advertising=0x%x\n", sc->link_params.req_line_speed[idx], sc->link_params.req_duplex[idx], sc->port.advertising[idx]); } } static void bxe_get_phy_info(struct bxe_softc *sc) { uint8_t port = SC_PORT(sc); uint32_t config = sc->port.config; uint32_t eee_mode; /* shmem data already read in bxe_get_shmem_info() */ ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x " "link_config0=0x%08x\n", sc->link_params.lane_config, sc->link_params.speed_cap_mask[0], sc->port.link_config[0]); bxe_link_settings_supported(sc, sc->link_params.switch_cfg); bxe_link_settings_requested(sc); if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } /* configure link feature according to nvram value */ eee_mode = (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | ELINK_EEE_MODE_ENABLE_LPI | ELINK_EEE_MODE_OUTPUT_TIME); } else { sc->link_params.eee_mode = 0; } /* get the media type */ bxe_media_detect(sc); ELINK_DEBUG_P1(sc, "detected media type\n", sc->media); } static void bxe_get_params(struct bxe_softc *sc) { /* get user tunable params */ bxe_get_tunable_params(sc); /* select the RX and TX ring sizes */ sc->tx_ring_size = TX_BD_USABLE; sc->rx_ring_size = RX_BD_USABLE; /* 
XXX disable WoL */ sc->wol = 0; } static void bxe_set_modes_bitmap(struct bxe_softc *sc) { uint32_t flags = 0; if (CHIP_REV_IS_FPGA(sc)) { SET_FLAGS(flags, MODE_FPGA); } else if (CHIP_REV_IS_EMUL(sc)) { SET_FLAGS(flags, MODE_EMUL); } else { SET_FLAGS(flags, MODE_ASIC); } if (CHIP_IS_MODE_4_PORT(sc)) { SET_FLAGS(flags, MODE_PORT4); } else { SET_FLAGS(flags, MODE_PORT2); } if (CHIP_IS_E2(sc)) { SET_FLAGS(flags, MODE_E2); } else if (CHIP_IS_E3(sc)) { SET_FLAGS(flags, MODE_E3); if (CHIP_REV(sc) == CHIP_REV_Ax) { SET_FLAGS(flags, MODE_E3_A0); } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); } } if (IS_MF(sc)) { SET_FLAGS(flags, MODE_MF); switch (sc->devinfo.mf_info.mf_mode) { case MULTI_FUNCTION_SD: SET_FLAGS(flags, MODE_MF_SD); break; case MULTI_FUNCTION_SI: SET_FLAGS(flags, MODE_MF_SI); break; case MULTI_FUNCTION_AFEX: SET_FLAGS(flags, MODE_MF_AFEX); break; } } else { SET_FLAGS(flags, MODE_SF); } #if defined(__LITTLE_ENDIAN) SET_FLAGS(flags, MODE_LITTLE_ENDIAN); #else /* __BIG_ENDIAN */ SET_FLAGS(flags, MODE_BIG_ENDIAN); #endif INIT_MODE_FLAGS(sc) = flags; } static int bxe_alloc_hsi_mem(struct bxe_softc *sc) { struct bxe_fastpath *fp; bus_addr_t busaddr; int max_agg_queues; int max_segments; bus_size_t max_size; bus_size_t max_seg_size; char buf[32]; int rc; int i, j; /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */ /* allocate the parent bus DMA tag */ rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ BUS_SPACE_MAXSIZE_32BIT, /* max map size */ BUS_SPACE_UNRESTRICTED, /* num discontinuous */ BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &sc->parent_dma_tag); /* returned dma tag */ if (rc != 0) { BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); return (1); } /************************/ /* DEFAULT STATUS BLOCK */ /************************/ if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), &sc->def_sb_dma, "default status block") != 0) { /* XXX */ bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; /***************/ /* EVENT QUEUE */ /***************/ if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->eq_dma, "event queue") != 0) { /* XXX */ bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; /*************/ /* SLOW PATH */ /*************/ if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), &sc->sp_dma, "slow path") != 0) { /* XXX */ bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; /*******************/ /* SLOW PATH QUEUE */ /*******************/ if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->spq_dma, "slow path queue") != 0) { /* XXX */ bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; /***************************/ /* FW DECOMPRESSION BUFFER */ /***************************/ if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, "fw decompression buffer") != 0) { /* XXX 
*/ bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; if ((sc->gz_strm = malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { /* XXX */ bxe_dma_free(sc, &sc->gz_buf_dma); sc->gz_buf = NULL; bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } /*************/ /* FASTPATHS */ /*************/ /* allocate DMA memory for each fastpath structure */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->sc = sc; fp->index = i; /*******************/ /* FP STATUS BLOCK */ /*******************/ snprintf(buf, sizeof(buf), "fp %d status block", i); if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), &fp->sb_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { if (CHIP_IS_E2E3(sc)) { fp->status_block.e2_sb = (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; } else { fp->status_block.e1x_sb = (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; } } /******************/ /* FP TX BD CHAIN */ /******************/ snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), &fp->tx_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; } /* link together the tx bd chain pages */ for (j = 1; j <= TX_BD_NUM_PAGES; j++) { /* index into the tx bd chain array to last entry per page */ struct eth_tx_next_bd *tx_next_bd = &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; /* point to the next page and wrap from last page */ busaddr = (fp->tx_dma.paddr + (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); } /******************/ /* FP RX BD CHAIN */ /******************/ snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), &fp->rx_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; } /* link together the rx bd chain pages */ for (j = 1; j <= RX_BD_NUM_PAGES; j++) { /* index into the rx bd chain array to last entry per page */ struct eth_rx_bd *rx_bd = &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; /* point to the next page and wrap from last page */ busaddr = (fp->rx_dma.paddr + (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); rx_bd->addr_hi = htole32(U64_HI(busaddr)); rx_bd->addr_lo = htole32(U64_LO(busaddr)); } /*******************/ /* FP RX RCQ CHAIN */ /*******************/ snprintf(buf, sizeof(buf), "fp %d rcq chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), &fp->rcq_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; } /* link together the rcq chain pages */ for (j = 1; j <= RCQ_NUM_PAGES; j++) { /* index into the rcq chain array to last entry per page */ struct 
eth_rx_cqe_next_page *rx_cqe_next = (struct eth_rx_cqe_next_page *) &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; /* point to the next page and wrap from last page */ busaddr = (fp->rcq_dma.paddr + (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); } /*******************/ /* FP RX SGE CHAIN */ /*******************/ snprintf(buf, sizeof(buf), "fp %d sge chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), &fp->rx_sge_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; } /* link together the sge chain pages */ for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { /* index into the rcq chain array to last entry per page */ struct eth_rx_sge *rx_sge = &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; /* point to the next page and wrap from last page */ busaddr = (fp->rx_sge_dma.paddr + (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); rx_sge->addr_hi = htole32(U64_HI(busaddr)); rx_sge->addr_lo = htole32(U64_LO(busaddr)); } /***********************/ /* FP TX MBUF DMA MAPS */ /***********************/ /* set required sizes before mapping to conserve resources */ if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { max_size = BXE_TSO_MAX_SIZE; max_segments = BXE_TSO_MAX_SEGMENTS; max_seg_size = BXE_TSO_MAX_SEG_SIZE; } else { max_size = (MCLBYTES * BXE_MAX_SEGMENTS); max_segments = BXE_MAX_SEGMENTS; max_seg_size = MCLBYTES; } /* create a dma tag for the tx mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ max_size, /* max map size */ max_segments, /* num discontinuous */ max_seg_size, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->tx_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d tx mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for each of the tx mbuf clusters */ for (j = 0; j < TX_BD_TOTAL; j++) { if (bus_dmamap_create(fp->tx_mbuf_tag, BUS_DMA_NOWAIT, &fp->tx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d tx mbuf %d' (%d)\n", i, j, rc); return (1); } } /***********************/ /* FP RX MBUF DMA MAPS */ /***********************/ /* create a dma tag for the rx mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ MJUM9BYTES, /* max map size */ 1, /* num discontinuous */ MJUM9BYTES, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->rx_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d rx mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for each of the rx mbuf clusters */ for (j = 0; j < RX_BD_TOTAL; j++) { if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx mbuf %d' 
(%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx mbuf cluster */ if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx mbuf' (%d)\n", i, rc); return (1); } /***************************/ /* FP RX SGE MBUF DMA MAPS */ /***************************/ /* create a dma tag for the rx sge mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ BCM_PAGE_SIZE, /* max map size */ 1, /* num discontinuous */ BCM_PAGE_SIZE, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->rx_sge_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d rx sge mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for the rx sge mbuf clusters */ for (j = 0; j < RX_SGE_TOTAL; j++) { if (bus_dmamap_create(fp->rx_sge_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_sge_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx sge mbuf cluster */ if (bus_dmamap_create(fp->rx_sge_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_sge_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx sge mbuf' (%d)\n", i, rc); return (1); } /***************************/ /* FP RX TPA MBUF DMA MAPS */ /***************************/ /* create dma maps for the rx tpa mbuf clusters */ max_agg_queues = MAX_AGG_QS(sc); for (j = 0; j < max_agg_queues; j++) { if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_tpa_info[j].bd.m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx tpa mbuf cluster */ if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_tpa_info_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx tpa mbuf' (%d)\n", i, rc); return (1); } bxe_init_sge_ring_bit_mask(fp); } return (0); } static void bxe_free_hsi_mem(struct bxe_softc *sc) { struct bxe_fastpath *fp; int max_agg_queues; int i, j; if (sc->parent_dma_tag == NULL) { return; /* assume nothing was allocated */ } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; /*******************/ /* FP STATUS BLOCK */ /*******************/ bxe_dma_free(sc, &fp->sb_dma); memset(&fp->status_block, 0, sizeof(fp->status_block)); /******************/ /* FP TX BD CHAIN */ /******************/ bxe_dma_free(sc, &fp->tx_dma); fp->tx_chain = NULL; /******************/ /* FP RX BD CHAIN */ /******************/ bxe_dma_free(sc, &fp->rx_dma); fp->rx_chain = NULL; /*******************/ /* FP RX RCQ CHAIN */ /*******************/ bxe_dma_free(sc, &fp->rcq_dma); fp->rcq_chain = NULL; /*******************/ /* FP RX SGE CHAIN */ /*******************/ bxe_dma_free(sc, &fp->rx_sge_dma); fp->rx_sge_chain = NULL; /***********************/ /* FP TX MBUF DMA MAPS */ /***********************/ if (fp->tx_mbuf_tag != NULL) { for (j = 0; j < TX_BD_TOTAL; j++) { if 
(fp->tx_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->tx_mbuf_tag, fp->tx_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->tx_mbuf_tag, fp->tx_mbuf_chain[j].m_map); } } bus_dma_tag_destroy(fp->tx_mbuf_tag); fp->tx_mbuf_tag = NULL; } /***********************/ /* FP RX MBUF DMA MAPS */ /***********************/ if (fp->rx_mbuf_tag != NULL) { for (j = 0; j < RX_BD_TOTAL; j++) { if (fp->rx_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_chain[j].m_map); } } if (fp->rx_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); } /***************************/ /* FP RX TPA MBUF DMA MAPS */ /***************************/ max_agg_queues = MAX_AGG_QS(sc); for (j = 0; j < max_agg_queues; j++) { if (fp->rx_tpa_info[j].bd.m_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info[j].bd.m_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_tpa_info[j].bd.m_map); } } if (fp->rx_tpa_info_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map); } bus_dma_tag_destroy(fp->rx_mbuf_tag); fp->rx_mbuf_tag = NULL; } /***************************/ /* FP RX SGE MBUF DMA MAPS */ /***************************/ if (fp->rx_sge_mbuf_tag != NULL) { for (j = 0; j < RX_SGE_TOTAL; j++) { if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[j].m_map); } } if (fp->rx_sge_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map); bus_dmamap_destroy(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map); } bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); fp->rx_sge_mbuf_tag = NULL; } } /***************************/ /* FW DECOMPRESSION BUFFER */ /***************************/ bxe_dma_free(sc, &sc->gz_buf_dma); sc->gz_buf = NULL; free(sc->gz_strm, M_DEVBUF); sc->gz_strm = NULL; /*******************/ /* SLOW PATH QUEUE */ /*******************/ bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; /*************/ /* SLOW PATH */ /*************/ bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; /***************/ /* EVENT QUEUE */ /***************/ bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; /************************/ /* DEFAULT STATUS BLOCK */ /************************/ bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); sc->parent_dma_tag = NULL; } /* * Previous driver DMAE transaction may have occurred when pre-boot stage * ended and boot began. This would invalidate the addresses of the * transaction, resulting in was-error bit set in the PCI causing all * hw-to-host PCIe transactions to timeout. 
If this happened we want to clear * the interrupt which detected this from the pglueb and the was-done bit */ static void bxe_prev_interrupted_dmae(struct bxe_softc *sc) { uint32_t val; if (!CHIP_IS_E1x(sc)) { val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { BLOGD(sc, DBG_LOAD, "Clearing 'was-error' bit that was set in pglueb"); REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); } } } static int bxe_prev_mcp_done(struct bxe_softc *sc) { uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); if (!rc) { BLOGE(sc, "MCP response failure, aborting\n"); return (-1); } return (0); } static struct bxe_prev_list_node * bxe_prev_path_get_entry(struct bxe_softc *sc) { struct bxe_prev_list_node *tmp; LIST_FOREACH(tmp, &bxe_prev_list, node) { if ((sc->pcie_bus == tmp->bus) && (sc->pcie_device == tmp->slot) && (SC_PATH(sc) == tmp->path)) { return (tmp); } } return (NULL); } static uint8_t bxe_prev_is_path_marked(struct bxe_softc *sc) { struct bxe_prev_list_node *tmp; int rc = FALSE; mtx_lock(&bxe_prev_mtx); tmp = bxe_prev_path_get_entry(sc); if (tmp) { if (tmp->aer) { BLOGD(sc, DBG_LOAD, "Path %d/%d/%d was marked by AER\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } else { rc = TRUE; BLOGD(sc, DBG_LOAD, "Path %d/%d/%d was already cleaned from previous drivers\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } } mtx_unlock(&bxe_prev_mtx); return (rc); } static int bxe_prev_mark_path(struct bxe_softc *sc, uint8_t after_undi) { struct bxe_prev_list_node *tmp; mtx_lock(&bxe_prev_mtx); /* Check whether the entry for this path already exists */ tmp = bxe_prev_path_get_entry(sc); if (tmp) { if (!tmp->aer) { BLOGD(sc, DBG_LOAD, "Re-marking AER in path %d/%d/%d\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } else { BLOGD(sc, DBG_LOAD, "Removing AER indication from path %d/%d/%d\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); tmp->aer = 0; } mtx_unlock(&bxe_prev_mtx); return (0); } mtx_unlock(&bxe_prev_mtx); /* Create an entry for this path and add it */ tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, (M_NOWAIT | M_ZERO)); if (!tmp) { BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); return (-1); } tmp->bus = sc->pcie_bus; tmp->slot = sc->pcie_device; tmp->path = SC_PATH(sc); tmp->aer = 0; tmp->undi = after_undi ? 
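/*
 * 'undi' is a per-port bitmask: bit SC_PORT(sc) is set below when the
 * UNDI agent was still active on that port at unload time.
 */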
(1 << SC_PORT(sc)) : 0; mtx_lock(&bxe_prev_mtx); BLOGD(sc, DBG_LOAD, "Marked path %d/%d/%d - finished previous unload\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); mtx_unlock(&bxe_prev_mtx); return (0); } static int bxe_do_flr(struct bxe_softc *sc) { int i; /* only E2 and onwards support FLR */ if (CHIP_IS_E1x(sc)) { BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); return (-1); } /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", sc->devinfo.bc_ver); return (-1); } /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { if (i) { DELAY(((1 << (i - 1)) * 100) * 1000); } if (!bxe_is_pcie_pending(sc)) { goto clear; } } BLOGE(sc, "PCIE transaction is not cleared, " "proceeding with reset anyway\n"); clear: BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); return (0); } struct bxe_mac_vals { uint32_t xmac_addr; uint32_t xmac_val; uint32_t emac_addr; uint32_t emac_val; uint32_t umac_addr; uint32_t umac_val; uint32_t bmac_addr; uint32_t bmac_val[2]; }; static void bxe_prev_unload_close_mac(struct bxe_softc *sc, struct bxe_mac_vals *vals) { uint32_t val, base_addr, offset, mask, reset_reg; uint8_t mac_stopped = FALSE; uint8_t port = SC_PORT(sc); uint32_t wb_data[2]; /* reset addresses as they also mark which values were changed */ vals->bmac_addr = 0; vals->umac_addr = 0; vals->xmac_addr = 0; vals->emac_addr = 0; reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); if (!CHIP_IS_E3(sc)) { val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; if ((mask & reset_reg) && val) { BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL : BIGMAC_REGISTER_BMAC_CONTROL; /* * use rd/wr since we cannot use dmae. This is safe * since MCP won't access the bus due to the request * to unload, and no function on the path can be * loaded at this time. */ wb_data[0] = REG_RD(sc, base_addr + offset); wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); vals->bmac_addr = base_addr + offset; vals->bmac_val[0] = wb_data[0]; vals->bmac_val[1] = wb_data[1]; wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; REG_WR(sc, vals->bmac_addr, wb_data[0]); REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); } BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; vals->emac_val = REG_RD(sc, vals->emac_addr); REG_WR(sc, vals->emac_addr, 0); mac_stopped = TRUE; } else { if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); vals->xmac_addr = base_addr + XMAC_REG_CTRL; vals->xmac_val = REG_RD(sc, vals->xmac_addr); REG_WR(sc, vals->xmac_addr, 0); mac_stopped = TRUE; } mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; if (mask & reset_reg) { BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); base_addr = SC_PORT(sc) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; vals->umac_val = REG_RD(sc, vals->umac_addr); REG_WR(sc, vals->umac_addr, 0); mac_stopped = TRUE; } } if (mac_stopped) { DELAY(20000); } } #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) static void bxe_prev_unload_undi_inc(struct bxe_softc *sc, uint8_t port, uint8_t inc) { uint16_t rcq, bd; uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); BLOGD(sc, DBG_LOAD, "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", port, bd, rcq); } static int bxe_prev_unload_common(struct bxe_softc *sc) { uint32_t reset_reg, tmp_reg = 0, rc; uint8_t prev_undi = FALSE; struct bxe_mac_vals mac_vals; uint32_t timer_count = 1000; uint32_t prev_brb; /* * It is possible a previous function received 'common' answer, * but hasn't loaded yet, therefore creating a scenario of * multiple functions receiving 'common' on the same path. */ BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); memset(&mac_vals, 0, sizeof(mac_vals)); if (bxe_prev_is_path_marked(sc)) { return (bxe_prev_mcp_done(sc)); } reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); /* Reset should be performed after BRB is emptied */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { /* Close the MAC Rx to prevent BRB from filling up */ bxe_prev_unload_close_mac(sc, &mac_vals); /* close LLH filters towards the BRB */ elink_set_rx_filter(&sc->link_params, 0); /* * Check if the UNDI driver was previously loaded. 
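 * (UNDI is the PXE pre-boot network driver; if it was left resident,
 * its ring producers are stale and must be advanced manually via
 * bxe_prev_unload_undi_inc() while the BRB drains.)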
* UNDI driver initializes CID offset for normal bell to 0x7 */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); if (tmp_reg == 0x7) { BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); prev_undi = TRUE; /* clear the UNDI indication */ REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); /* clear possible idle check errors */ REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); } } /* wait until BRB is empty */ tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); while (timer_count) { prev_brb = tmp_reg; tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); if (!tmp_reg) { break; } BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); /* reset timer as long as BRB actually gets emptied */ if (prev_brb > tmp_reg) { timer_count = 1000; } else { timer_count--; } /* If UNDI resides in memory, manually increment it */ if (prev_undi) { bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); } DELAY(10); } if (!timer_count) { BLOGE(sc, "Failed to empty BRB\n"); } } /* No packets are in the pipeline, path is ready for reset */ bxe_reset_common(sc); if (mac_vals.xmac_addr) { REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); } if (mac_vals.umac_addr) { REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); } if (mac_vals.emac_addr) { REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); } if (mac_vals.bmac_addr) { REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); } rc = bxe_prev_mark_path(sc, prev_undi); if (rc) { bxe_prev_mcp_done(sc); return (rc); } return (bxe_prev_mcp_done(sc)); } static int bxe_prev_unload_uncommon(struct bxe_softc *sc) { int rc; BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); /* Test if previous unload process was already finished for this path */ if (bxe_prev_is_path_marked(sc)) { return (bxe_prev_mcp_done(sc)); } BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); /* * If function has FLR capabilities, and existing FW version matches * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); if (!rc) { /* fw version is good */ BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); rc = bxe_do_flr(sc); } if (!rc) { /* FLR was performed */ BLOGD(sc, DBG_LOAD, "FLR successful\n"); return (0); } BLOGD(sc, DBG_LOAD, "Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bxe_prev_mcp_done(sc); if (!rc) { rc = BXE_PREV_WAIT_NEEDED; } return (rc); } static int bxe_prev_unload(struct bxe_softc *sc) { int time_counter = 10; uint32_t fw, hw_lock_reg, hw_lock_val; uint32_t rc = 0; /* * Clear HW from errors which may have resulted from an interrupted * DMAE transaction. */ bxe_prev_interrupted_dmae(sc); /* Release previously held locks */ hw_lock_reg = (SC_FUNC(sc) <= 5) ? 
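/*
 * Register layout computed below: functions 0..5 map to
 * MISC_REG_DRIVER_CONTROL_1 + func * 8, while functions 6..7 continue
 * at MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8.
 */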
(MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
hw_lock_val = (REG_RD(sc, hw_lock_reg));
if (hw_lock_val) {
if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
}
BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
REG_WR(sc, hw_lock_reg, 0xffffffff);
} else {
BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
}
if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
}
do {
/* Lock MCP using an unload request */
fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
if (!fw) {
BLOGE(sc, "MCP response failure, aborting\n");
rc = -1;
break;
}
if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
rc = bxe_prev_unload_common(sc);
break;
}
/* non-common reply from MCP might require looping */
rc = bxe_prev_unload_uncommon(sc);
if (rc != BXE_PREV_WAIT_NEEDED) {
break;
}
DELAY(20000);
} while (--time_counter);
if (!time_counter || rc) {
BLOGE(sc, "Failed to unload previous driver!" " time_counter %d rc %d\n", time_counter, rc);
rc = -1;
}
return (rc);
}
void
bxe_dcbx_set_state(struct bxe_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled)
{
if (!CHIP_IS_E1x(sc)) {
sc->dcb_state = dcb_on;
sc->dcbx_enabled = dcbx_enabled;
} else {
sc->dcb_state = FALSE;
sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
}
BLOGD(sc, DBG_LOAD, "DCB state [%s:%s]\n", dcb_on ? "ON" : "OFF", (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? "on-chip with negotiation" : "invalid");
}
/* must be called after sriov-enable */
static int
bxe_set_qm_cid_count(struct bxe_softc *sc)
{
int cid_count = BXE_L2_MAX_CID(sc);
if (IS_SRIOV(sc)) {
cid_count += BXE_VF_CIDS;
}
if (CNIC_SUPPORT(sc)) {
cid_count += CNIC_CID_MAX;
}
return (roundup(cid_count, QM_CID_ROUND));
}
static void
bxe_init_multi_cos(struct bxe_softc *sc)
{
int pri, cos;
uint32_t pri_map = 0; /* XXX change to user config */
for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
if (cos < sc->max_cos) {
sc->prio_to_cos[pri] = cos;
} else {
BLOGW(sc, "Invalid COS %d for priority %d " "(max COS is %d), setting to 0\n", cos, pri, (sc->max_cos - 1));
sc->prio_to_cos[pri] = 0;
}
}
}
static int
bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
{
struct bxe_softc *sc;
int error, result;
result = 0;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr) {
return (error);
}
if (result == 1) {
uint32_t temp;
sc = (struct bxe_softc *)arg1;
BLOGI(sc, "... 
dumping driver state ...\n"); temp = SHMEM2_RD(sc, temperature_in_half_celsius); BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); } return (error); } static int bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; uint32_t *offset; uint64_t value = 0; int index = (int)arg2; if (index >= BXE_NUM_ETH_STATS) { BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_stats_arr[index].offset); switch (bxe_eth_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", index, bxe_eth_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static int bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats; uint32_t *offset; uint64_t value = 0; uint32_t q_stat = (uint32_t)arg2; uint32_t fp_index = ((q_stat >> 16) & 0xffff); uint32_t index = (q_stat & 0xffff); eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; if (index >= BXE_NUM_ETH_Q_STATS) { BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); switch (bxe_eth_q_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", index, bxe_eth_q_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static void bxe_force_link_reset(struct bxe_softc *sc) { bxe_acquire_phy_lock(sc); elink_link_reset(&sc->link_params, &sc->link_vars, 1); bxe_release_phy_lock(sc); } static int bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t cfg_idx = bxe_get_link_cfg_idx(sc); int rc = 0; int error; int result; error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req); if (error || !req->newptr) { return (error); } if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) { BLOGW(sc, "invalid pause param (%d) - use integers between 1 & 8\n",sc->bxe_pause_param); sc->bxe_pause_param = 8; } result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT); if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) { BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param); return -EINVAL; } if(IS_MF(sc)) return 0; sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO; if(result & ELINK_FLOW_CTRL_RX) sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX; if(result & ELINK_FLOW_CTRL_TX) sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX; if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO) sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE; if(result & 0x400) { if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) { sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO; } sc->link_params.req_fc_auto_adv = 0; if (result & ELINK_FLOW_CTRL_RX) sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX; if (result & ELINK_FLOW_CTRL_TX) sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX; if (!sc->link_params.req_fc_auto_adv) sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE; } if (IS_PF(sc)) { if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_STOP); } if 
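/*
 * When the interface is running, the new flow-control request only
 * takes effect after a forced link reset and elink_phy_init(), done
 * below; bxe_calc_fc_adv() then recomputes the advertised pause bits.
 */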
(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { bxe_force_link_reset(sc); bxe_acquire_phy_lock(sc); rc = elink_phy_init(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); bxe_calc_fc_adv(sc); } } return rc; } static void bxe_add_sysctls(struct bxe_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *queue_top, *queue; struct sysctl_oid_list *queue_top_children, *queue_children; char queue_num_buf[32]; uint32_t q_stat; int i, j; ctx = device_get_sysctl_ctx(sc->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", CTLFLAG_RD, BXE_DRIVER_VERSION, 0, "version"); snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION); snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : "Unknown")); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, "multifunction vnics per port"); snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" : "???GT/s"), sc->devinfo.pcie_link_width); sc->debug = bxe_debug; SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, "bootcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", CTLFLAG_RD, sc->fw_ver_str, 0, "firmware version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", CTLFLAG_RD, sc->mf_mode_str, 0, "multifunction mode"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", CTLFLAG_RD, sc->mac_addr_str, 0, "mac address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", CTLFLAG_RD, sc->pci_link_str, 0, "pci link status"); SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, "debug logging mode"); sc->trigger_grcdump = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump", CTLFLAG_RW, &sc->trigger_grcdump, 0, "trigger grcdump should be invoked" " before collecting grcdump"); sc->grcdump_started = 0; sc->grcdump_done = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done", CTLFLAG_RD, &sc->grcdump_done, 0, "set by driver when grcdump is done"); sc->rx_budget = bxe_rx_budget; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", CTLFLAG_RW, &sc->rx_budget, 0, "rx processing budget"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, bxe_sysctl_pauseparam, "IU", "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, bxe_sysctl_state, "IU", "dump driver state"); for (i = 0; i < BXE_NUM_ETH_STATS; i++) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, bxe_eth_stats_arr[i].string, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i, bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string); } /* add a new parent node for all queues "dev.bxe.#.queue" */ queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue"); queue_top_children = 
SYSCTL_CHILDREN(queue_top); for (i = 0; i < sc->num_queues; i++) { /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue"); queue_children = SYSCTL_CHILDREN(queue); for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { q_stat = ((i << 16) | j); SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, bxe_eth_q_stats_arr[j].string, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, q_stat, bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string); } } } static int bxe_alloc_buf_rings(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, M_NOWAIT, &fp->tx_mtx); if (fp->tx_br == NULL) return (-1); } return (0); } static void bxe_free_buf_rings(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tx_br) { buf_ring_free(fp->tx_br, M_DEVBUF); fp->tx_br = NULL; } } } static void bxe_init_fp_mutexs(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), "bxe%d_fp%d_tx_lock", sc->unit, i); mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), "bxe%d_fp%d_rx_lock", sc->unit, i); mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); } } static void bxe_destroy_fp_mutexs(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (mtx_initialized(&fp->tx_mtx)) { mtx_destroy(&fp->tx_mtx); } if (mtx_initialized(&fp->rx_mtx)) { mtx_destroy(&fp->rx_mtx); } } } /* * Device attach function. * * Allocates device resources, performs secondary chip identification, and * initializes driver instance variables. This function is called from driver * load after a successful probe. 
* * Returns: * 0 = Success, >0 = Failure */ static int bxe_attach(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting attach...\n"); sc->state = BXE_STATE_CLOSED; sc->dev = dev; sc->unit = device_get_unit(dev); BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); sc->pcie_bus = pci_get_bus(dev); sc->pcie_device = pci_get_slot(dev); sc->pcie_func = pci_get_function(dev); /* enable bus master capability */ pci_enable_busmaster(dev); /* get the BARs */ if (bxe_allocate_bars(sc) != 0) { return (ENXIO); } /* initialize the mutexes */ bxe_init_mutexes(sc); /* prepare the periodic callout */ callout_init(&sc->periodic_callout, 1); /* prepare the chip taskqueue */ sc->chip_tq_flags = CHIP_TQ_NONE; snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), "bxe%d_chip_tq", sc->unit); TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, taskqueue_thread_enqueue, &sc->chip_tq); taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ "%s", sc->chip_tq_name); TIMEOUT_TASK_INIT(taskqueue_thread, &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc); /* get device info and set params */ if (bxe_get_device_info(sc) != 0) { BLOGE(sc, "getting device info\n"); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* get final misc params */ bxe_get_params(sc); /* set the default MTU (changed via ifconfig) */ sc->mtu = ETHERMTU; bxe_set_modes_bitmap(sc); /* XXX * If in AFEX mode and the function is configured for FCoE * then bail... no L2 allowed. */ /* get phy settings from shmem and 'and' against admin settings */ bxe_get_phy_info(sc); /* initialize the FreeBSD ifnet interface */ if (bxe_init_ifnet(sc) != 0) { bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } if (bxe_add_cdev(sc) != 0) { if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate device interrupts */ if (bxe_interrupt_alloc(sc) != 0) { bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } bxe_init_fp_mutexs(sc); if (bxe_alloc_buf_rings(sc) != 0) { bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate ilt */ if (bxe_alloc_ilt_mem(sc) != 0) { bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate the host hardware/software hsi structures */ if (bxe_alloc_hsi_mem(sc) != 0) { bxe_free_ilt_mem(sc); bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* need to reset chip if UNDI was active */ if (IS_PF(sc) && !BXE_NOMCP(sc)) { /* init fw_seq */ sc->fw_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK); BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); bxe_prev_unload(sc); } 
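/*
 * Each attach error path above unwinds, in reverse order of
 * acquisition, everything obtained before the failing step (HSI
 * memory, ILT, buf rings, interrupts, cdev, ifnet, mutexes, BARs,
 * bus mastering); bxe_detach() performs broadly the same teardown.
 */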
#if 1 /* XXX */ bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); #else if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && SHMEM2_RD(sc, dcbx_lldp_params_offset) && SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); bxe_dcbx_init_params(sc); } else { bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); } #endif /* calculate qm_cid_count */ sc->qm_cid_count = bxe_set_qm_cid_count(sc); BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); sc->max_cos = 1; bxe_init_multi_cos(sc); bxe_add_sysctls(sc); return (0); } /* * Device detach function. * * Stops the controller, resets the controller, and releases resources. * * Returns: * 0 = Success, >0 = Failure */ static int bxe_detach(device_t dev) { struct bxe_softc *sc; if_t ifp; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting detach...\n"); ifp = sc->ifp; if (ifp != NULL && if_vlantrunkinuse(ifp)) { BLOGE(sc, "Cannot detach while VLANs are in use.\n"); return (EBUSY); } bxe_del_cdev(sc); /* stop the periodic callout */ bxe_periodic_stop(sc); /* stop the chip taskqueue */ atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); if (sc->chip_tq) { taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); taskqueue_free(sc->chip_tq); sc->chip_tq = NULL; taskqueue_drain_timeout(taskqueue_thread, &sc->sp_err_timeout_task); } /* stop and reset the controller if it was open */ if (sc->state != BXE_STATE_CLOSED) { BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); sc->state = BXE_STATE_DISABLED; BXE_CORE_UNLOCK(sc); } /* release the network interface */ if (ifp != NULL) { ether_ifdetach(ifp); } ifmedia_removeall(&sc->ifmedia); /* XXX do the following based on driver state... */ /* free the host hardware/software hsi structures */ bxe_free_hsi_mem(sc); /* free ilt */ bxe_free_ilt_mem(sc); bxe_free_buf_rings(sc); /* release the interrupts */ bxe_interrupt_free(sc); /* Release the mutexes */ bxe_destroy_fp_mutexs(sc); bxe_release_mutexes(sc); /* Release the PCIe BAR mapped memory */ bxe_deallocate_bars(sc); /* Release the FreeBSD interface. */ if (sc->ifp != NULL) { if_free(sc->ifp); } pci_disable_busmaster(dev); return (0); } /* * Device shutdown function. * * Stops and resets the controller. * * Returns: * 0 = Success */ static int bxe_shutdown(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); /* stop the periodic callout */ bxe_periodic_stop(sc); if (sc->state != BXE_STATE_CLOSED) { BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); BXE_CORE_UNLOCK(sc); } return (0); } void bxe_igu_ack_sb(struct bxe_softc *sc, uint8_t igu_sb_id, uint8_t segment, uint16_t index, uint8_t op, uint8_t update) { uint32_t igu_addr = sc->igu_base_addr; igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); } static void bxe_igu_clear_sb_gen(struct bxe_softc *sc, uint8_t func, uint8_t idu_sb_id, uint8_t is_pf) { uint32_t data, ctl, cnt = 100; uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; uint32_t sb_bit = 1 << (idu_sb_id%32); uint32_t func_encode = func | (is_pf ?
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; /* Not supported in BC mode */ if (CHIP_INT_MODE_IS_BC(sc)) { return; } data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | (func_encode << IGU_CTRL_REG_FID_SHIFT) | (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", data, igu_addr_data); REG_WR(sc, igu_addr_data, data); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(sc, igu_addr_ctl, ctl); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); /* wait for clean up to finish */ while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { DELAY(20000); } if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { BLOGD(sc, DBG_LOAD, "Unable to finish IGU cleanup: " "idu_sb_id %d offset %d bit %d (cnt %d)\n", idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); } } static void bxe_igu_clear_sb(struct bxe_softc *sc, uint8_t idu_sb_id) { bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); } /*******************/ /* ECORE CALLBACKS */ /*******************/ static void bxe_reset_common(struct bxe_softc *sc) { uint32_t val = 0x1400; /* reset_common */ REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); } static void bxe_common_init_phy(struct bxe_softc *sc) { uint32_t shmem_base[2]; uint32_t shmem2_base[2]; /* Avoid common init in case MFW supports LFA */ if (SHMEM2_RD(sc, size) > (uint32_t)offsetof(struct shmem2_region, lfa_host_addr[SC_PORT(sc)])) { return; } shmem_base[0] = sc->devinfo.shmem_base; shmem2_base[0] = sc->devinfo.shmem2_base; if (!CHIP_IS_E1x(sc)) { shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); } bxe_acquire_phy_lock(sc); elink_common_init_phy(sc, shmem_base, shmem2_base, sc->devinfo.chip_id, 0); bxe_release_phy_lock(sc); } static void bxe_pf_disable(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); val &= ~IGU_PF_CONF_FUNC_EN; REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); } static void bxe_init_pxp(struct bxe_softc *sc) { uint16_t devctl; int r_order, w_order; devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2); BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl); w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5); if (sc->mrrs == -1) { r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12); } else { BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); r_order = sc->mrrs; } ecore_init_pxp_arb(sc, r_order, w_order); } static uint32_t bxe_get_pretend_reg(struct bxe_softc *sc) { uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); return (base + (SC_ABS_FUNC(sc)) * stride); } /* * Called only on E1H or E2. * When pretending to be PF, the pretend value is the function number 0..7. * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID * combination. 
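 * For example, the common-phase init in bxe_init_hw_common() pretends to be * each of the other functions on its path in turn (bxe_pretend_func(sc, abs_func_id)), * clears their PF enables, and then restores its own identity with * bxe_pretend_func(sc, SC_ABS_FUNC(sc)).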
*/ static int bxe_pretend_func(struct bxe_softc *sc, uint16_t pretend_func_val) { uint32_t pretend_reg; if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { return (-1); } /* get my own pretend register */ pretend_reg = bxe_get_pretend_reg(sc); REG_WR(sc, pretend_reg, pretend_func_val); REG_RD(sc, pretend_reg); return (0); } static void bxe_iov_init_dmae(struct bxe_softc *sc) { return; } static void bxe_iov_init_dq(struct bxe_softc *sc) { return; } /* send a NIG loopback debug packet */ static void bxe_lb_pckt(struct bxe_softc *sc) { uint32_t wb_write[3]; /* Ethernet source and destination addresses */ wb_write[0] = 0x55555555; wb_write[1] = 0x55555555; wb_write[2] = 0x20; /* SOP */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); /* NON-IP protocol */ wb_write[0] = 0x09000000; wb_write[1] = 0x55555555; wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); } /* * Some of the internal memories are not directly readable from the driver. * To test them we send debug packets. */ static int bxe_int_mem_test(struct bxe_softc *sc) { int factor; int count, i; uint32_t val = 0; if (CHIP_REV_IS_FPGA(sc)) { factor = 120; } else if (CHIP_REV_IS_EMUL(sc)) { factor = 200; } else { factor = 1; } /* disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send Ethernet packet */ bxe_lb_pckt(sc); /* TODO: do I reset the NIG statistic? */ /* Wait until NIG register shows 1 packet of size 0x10 */ count = 1000 * factor; while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0x10) { break; } DELAY(10000); count--; } if (val != 0x10) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-1); } /* wait until PRS register shows 1 packet */ count = (1000 * factor); while (count) { val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val == 1) { break; } DELAY(10000); count--; } if (val != 0x1) { BLOGE(sc, "PRS timeout val=0x%x\n", val); return (-2); } /* Reset and init BRB, PRS */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); /* Disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* Write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send 10 Ethernet packets */ for (i = 0; i < 10; i++) { bxe_lb_pckt(sc); } /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ count = (1000 * factor); while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0xb0) { break; } DELAY(10000); count--; } if (val != 0xb0) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-3); } /* Wait until PRS register shows 2 packets */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 2) { BLOGE(sc, "PRS timeout val=0x%x\n", val); } /* Write 1 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); /* Wait until PRS register shows 3 packets */ DELAY(10000 * factor); /* Wait until NIG register shows 1 packet of size
0x10 */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 3) { BLOGE(sc, "PRS timeout val=0x%x\n", val); } /* clear NIG EOP FIFO */ for (i = 0; i < 11; i++) { REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); } val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); if (val != 1) { BLOGE(sc, "clear of NIG failed val=0x%x\n", val); return (-4); } /* Reset and init BRB, PRS, NIG */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); if (!CNIC_SUPPORT(sc)) { /* set NIC mode */ REG_WR(sc, PRS_REG_NIC_MODE, 1); } /* Enable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); REG_WR(sc, CFC_REG_DEBUG0, 0x0); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); return (0); } static void bxe_setup_fan_failure_detection(struct bxe_softc *sc) { int is_required; uint32_t val; int port; is_required = 0; val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & SHARED_HW_CFG_FAN_FAILURE_MASK); if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { is_required = 1; } /* * The fan failure mechanism is usually related to the PHY type since * the power consumption of the board is affected by the PHY. Currently, * a fan is required for most designs with SFX7101, BCM8727 and BCM8481. */ else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { for (port = PORT_0; port < PORT_MAX; port++) { is_required |= elink_fan_failure_det_req(sc, sc->devinfo.shmem_base, sc->devinfo.shmem2_base, port); } } BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); if (is_required == 0) { return; } /* Fan failure is indicated by SPIO 5 */ bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); /* set to active low mode */ val = REG_RD(sc, MISC_REG_SPIO_INT); val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); REG_WR(sc, MISC_REG_SPIO_INT, val); /* enable interrupt to signal the IGU */ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); val |= MISC_SPIO_SPIO5; REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); } static void bxe_enable_blocks_attention(struct bxe_softc *sc) { uint32_t val; REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); } else { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); } REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* * mask read length error interrupts in brb for parser * (parsing unit and 'checksum and crc' unit). * These errors are legal (PU reads fixed length and CAC can cause * read length error on truncated packets) */ REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); REG_WR(sc, QM_REG_QM_INT_MASK, 0); REG_WR(sc, TM_REG_TM_INT_MASK, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); if (!CHIP_IS_E1x(sc)) { val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); } REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ if (!CHIP_IS_E1x(sc)) { /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); } REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ } /** * bxe_init_hw_common - initialize the HW at the COMMON phase. * * @sc: driver handle */ static int bxe_init_hw_common(struct bxe_softc *sc) { uint8_t abs_func_id; uint32_t val; BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", SC_ABS_FUNC(sc)); /* * take the RESET lock to protect undi_unload flow from accessing * registers while we are resetting the chip */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); bxe_reset_common(sc); REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); val = 0xfffc; if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); BLOGD(sc, DBG_LOAD, "after misc block init\n"); if (!CHIP_IS_E1x(sc)) { /* * In 4-port or 2-port mode, we need to turn off master-enable for * everyone. After that we turn it back on for self. So, we disregard * multi-function, and always disable all functions on the given path, * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 */ for (abs_func_id = SC_PATH(sc); abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { if (abs_func_id == SC_ABS_FUNC(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); continue; } bxe_pretend_func(sc, abs_func_id); /* clear pf enable */ bxe_pf_disable(sc); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); } } BLOGD(sc, DBG_LOAD, "after pf disable\n"); ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); if (CHIP_IS_E1(sc)) { /* * enable HW interrupt from PXP on USDM overflow * bit 16 on INT_MASK_0 */ REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); } ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); bxe_init_pxp(sc); #ifdef __BIG_ENDIAN REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); /* make sure this value is 0 */ REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); #endif ecore_ilt_init_page_size(sc, INITOP_SET); if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); } /* let the HW do its magic... */ DELAY(100000); /* finish PXP init */ val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); if (val != 1) { BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n", val); return (-1); } val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); if (val != 1) { BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val); return (-1); } BLOGD(sc, DBG_LOAD, "after pxp init\n"); /* * Timer bug workaround for E2 only.
We need to set the entire ILT to have * entries with value "0" and valid bit on. This needs to be done by the * first PF that is loaded in a path (i.e. common phase) */ if (!CHIP_IS_E1x(sc)) { /* * In E2 there is a bug in the timers block that can cause function 6 / 7 * (i.e. vnic3) to start even if it is marked as "scan-off". * This occurs when a different function (func2,3) is being marked * as "scan-off". A real-life example: a driver is being * load-unloaded while func6,7 are down. This will cause the timer to access * the ilt, translate to a logical address and send a request to read/write. * Since the ilt for the function that is down is not valid, this will cause * a translation error which is unrecoverable. * The workaround is intended to make sure that when this happens nothing * fatal will occur. The workaround: * 1. First PF driver which loads on a path will: * a. After taking the chip out of reset, by using pretend, * it will write "0" to the following registers of * the other vnics. * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); * And for itself it will write '1' to * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable * dmae-operations (writing to pram for example.) * note: can be done for only function 6,7 but cleaner this * way. * b. Write zero+valid to the entire ILT. * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of * VNIC3 (of that port). The range allocated will be the * entire ILT. This is needed to prevent ILT range error. * 2. Any PF driver load flow: * a. ILT update with the physical addresses of the allocated * logical pages. * b. Wait 20msec. - note that this timeout is needed to make * sure there are no requests in one of the PXP internal * queues with "old" ILT addresses. * c. PF enable in the PGLC. * d. Clear the was_error of the PF in the PGLC. (could have * occurred while driver was down) * e. PF enable in the CFC (WEAK + STRONG) * f. Timers scan enable * 3. PF driver unload flow: * a. Clear the Timers scan_en. * b. Polling for scan_on=0 for that PF. * c. Clear the PF enable bit in the PXP. * d. Clear the PF enable in the CFC (WEAK + STRONG) * e. Write zero+valid to all ILT entries (The valid bit must * stay set) * f. If this is VNIC 3 of a port then also init * first_timers_ilt_entry to zero and last_timers_ilt_entry * to the last entry in the ILT. * * Notes: * Currently the PF error in the PGLC is non-recoverable. * In the future there will be a recovery routine for this error. * Currently attention is masked. * Having an MCP lock on the load/unload process does not guarantee that * there is no Timer disable during Func6/7 enable. This is because the * Timers scan is currently being cleared by the MCP on FLR. * Step 2.d can be done only for PF6/7 and the driver can also check if * there is an error before clearing it. But the flow above is simpler and * more general. * All ILT entries are written with zero+valid and not just PF6/7 * ILT entries since in the future the ILT entries allocation for * PF-s might be dynamic.
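 * In this driver, flow 2 maps onto the sequence visible in bxe_init_hw_func() * below: the ILT update (ecore_ilt_init_op()), a DELAY(20000), then * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER = 1, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, * and CFC_REG_WEAK_ENABLE_PF = 1, followed by the timers scan enable of step 2.f.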
*/ struct ilt_client_info ilt_cli; struct ecore_ilt ilt; memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); memset(&ilt, 0, sizeof(struct ecore_ilt)); /* initialize dummy TM client */ ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; /* * Step 1: set zeroes to all ilt page entries with valid bit on * Step 2: set the timers first/last ilt entry to point * to the entire range to prevent ILT range error for 3rd/4th * vnic (this code assumes existence of the vnic) * * both steps are performed by a call to ecore_ilt_client_init_op() * with dummy TM client * * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT * and its counterpart are split registers */ bxe_pretend_func(sc, (SC_PATH(sc) + 6)); ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); } REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); if (!CHIP_IS_E1x(sc)) { int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : (CHIP_REV_IS_FPGA(sc) ? 400 : 0); ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); /* let the HW do its magic... */ do { DELAY(200000); val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); } while (factor-- && (val != 1)); if (val != 1) { BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val); return (-1); } } BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); bxe_iov_init_dmae(sc); /* clean the DMAE memory */ sc->dmae_ready = 1; ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); /* QM queues pointers table */ ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); /* soft reset pulse */ REG_WR(sc, QM_REG_SOFT_RESET, 1); REG_WR(sc, QM_REG_SOFT_RESET, 0); if (CNIC_SUPPORT(sc)) ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); if (!CHIP_REV_IS_SLOW(sc)) { /* enable hw interrupt from doorbell Q */ REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); } ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); if (!CHIP_IS_E1(sc)) { REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); } if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * received in AFEX mode */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); } else { /* * Bit-map indicating which L2 hdrs may appear * after the basic Ethernet header */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ?
7 : 6); } } ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { /* reset VFC memories */ REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); DELAY(20000); } ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); /* sync semi rtc */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * sent in AFEX mode */ REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); } else { REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); } } REG_WR(sc, SRC_REG_SOFT_RST, 1); ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); if (CNIC_SUPPORT(sc)) { REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); } REG_WR(sc, SRC_REG_SOFT_RST, 0); if (sizeof(union cdu_context) != 1024) { /* we currently assume that a context is 1024 bytes */ BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", (long)sizeof(union cdu_context)); } ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); val = (4 << 24) + (0 << 12) + 1024; REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); /* enable context validation interrupt from CFC */ REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* set the thresholds to prevent CFC/CDU race */ REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); } ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); /* Reset PCIE errors for debug */ REG_WR(sc, 0x2814, 0xffffffff); REG_WR(sc, 0x3820, 0xffffffff); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); } ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); if (!CHIP_IS_E1(sc)) { /* in E3 this is done in the per-port section */ if (!CHIP_IS_E3(sc)) REG_WR(sc,
NIG_REG_LLH_MF_MODE, IS_MF(sc)); } if (CHIP_IS_E1H(sc)) { /* not applicable for E2 (and above ...) */ REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); } if (CHIP_REV_IS_SLOW(sc)) { DELAY(200000); } /* finish CFC init */ val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val); return (-1); } REG_WR(sc, CFC_REG_DEBUG0, 0); if (CHIP_IS_E1(sc)) { /* read NIG statistic to see if this is our first up since powerup */ bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); /* do internal memory self test */ if ((val == 0) && bxe_int_mem_test(sc)) { BLOGE(sc, "internal mem self test failed val=0x%x\n", val); return (-1); } } bxe_setup_fan_failure_detection(sc); /* clear PXP2 attentions */ REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); bxe_enable_blocks_attention(sc); if (!CHIP_REV_IS_SLOW(sc)) { ecore_enable_blocks_parity(sc); } if (!BXE_NOMCP(sc)) { if (CHIP_IS_E1x(sc)) { bxe_common_init_phy(sc); } } return (0); } /** * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. * * @sc: driver handle */ static int bxe_init_hw_common_chip(struct bxe_softc *sc) { int rc = bxe_init_hw_common(sc); if (rc) { BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc); return (rc); } /* In E2 2-PORT mode, the same ext phy is used for both paths */ if (!BXE_NOMCP(sc)) { bxe_common_init_phy(sc); } return (0); } static int bxe_init_hw_port(struct bxe_softc *sc) { int port = SC_PORT(sc); int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; uint32_t low, high; uint32_t val; BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); ecore_init_block(sc, BLOCK_MISC, init_phase); ecore_init_block(sc, BLOCK_PXP, init_phase); ecore_init_block(sc, BLOCK_PXP2, init_phase); /* * Timers bug workaround: the pf_master bit in pglue is disabled at the * common phase; we need to enable it here before any dmae accesses are * attempted. Therefore we manually added the enable-master to the * port phase (it also happens in the function phase) */ if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); } ecore_init_block(sc, BLOCK_ATC, init_phase); ecore_init_block(sc, BLOCK_DMAE, init_phase); ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); ecore_init_block(sc, BLOCK_QM, init_phase); ecore_init_block(sc, BLOCK_TCM, init_phase); ecore_init_block(sc, BLOCK_UCM, init_phase); ecore_init_block(sc, BLOCK_CCM, init_phase); ecore_init_block(sc, BLOCK_XCM, init_phase); /* QM cid (connection) count */ ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); if (CNIC_SUPPORT(sc)) { ecore_init_block(sc, BLOCK_TM, init_phase); REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); } ecore_init_block(sc, BLOCK_DORQ, init_phase); ecore_init_block(sc, BLOCK_BRB1, init_phase); if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { if (IS_MF(sc)) { low = (BXE_ONE_PORT(sc) ? 160 : 246); } else if (sc->mtu > 4096) { if (BXE_ONE_PORT(sc)) { low = 160; } else { val = sc->mtu; /* (24*1024 + val*4)/256 */ low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); } } else { low = (BXE_ONE_PORT(sc) ?
80 : 160); } high = (low + 56); /* 14*1024/256 */ REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); } if (CHIP_IS_MODE_4_PORT(sc)) { REG_WR(sc, SC_PORT(sc) ? BRB1_REG_MAC_GUARANTIED_1 : BRB1_REG_MAC_GUARANTIED_0, 40); } ecore_init_block(sc, BLOCK_PRS, init_phase); if (CHIP_IS_E3B0(sc)) { if (IS_MF_AFEX(sc)) { /* configure headers for AFEX mode */ REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_BASIC_PORT_1 : PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); REG_WR(sc, SC_PORT(sc) ? PRS_REG_MUST_HAVE_HDRS_PORT_1 : PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); } else { /* Ovlan exists only if we are in multi-function + * switch-dependent mode, in switch-independent mode there * are no ovlan headers */ REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_BASIC_PORT_1 : PRS_REG_HDRS_AFTER_BASIC_PORT_0, (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6)); } } ecore_init_block(sc, BLOCK_TSDM, init_phase); ecore_init_block(sc, BLOCK_CSDM, init_phase); ecore_init_block(sc, BLOCK_USDM, init_phase); ecore_init_block(sc, BLOCK_XSDM, init_phase); ecore_init_block(sc, BLOCK_TSEM, init_phase); ecore_init_block(sc, BLOCK_USEM, init_phase); ecore_init_block(sc, BLOCK_CSEM, init_phase); ecore_init_block(sc, BLOCK_XSEM, init_phase); ecore_init_block(sc, BLOCK_UPB, init_phase); ecore_init_block(sc, BLOCK_XPB, init_phase); ecore_init_block(sc, BLOCK_PBF, init_phase); if (CHIP_IS_E1x(sc)) { /* configure PBF to work without PAUSE mtu 9000 */ REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); /* update threshold */ REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); /* update init credit */ REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); /* probe changes */ REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); DELAY(50); REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); } if (CNIC_SUPPORT(sc)) { ecore_init_block(sc, BLOCK_SRC, init_phase); } ecore_init_block(sc, BLOCK_CDU, init_phase); ecore_init_block(sc, BLOCK_CFC, init_phase); if (CHIP_IS_E1(sc)) { REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } ecore_init_block(sc, BLOCK_HC, init_phase); ecore_init_block(sc, BLOCK_IGU, init_phase); ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); /* init aeu_mask_attn_func_0/1: * - SF mode: bits 3-7 are masked. only bits 0-2 are in use * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF * bits 4-7 are used for "per vn group attention" */ val = IS_MF(sc) ? 0xF7 : 0x7; /* Enable DCBX attention for all but E1 */ val |= CHIP_IS_E1(sc) ? 0 : 0x10; REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); ecore_init_block(sc, BLOCK_NIG, init_phase); if (!CHIP_IS_E1x(sc)) { /* Bit-map indicating which L2 hdrs may appear after the * basic Ethernet header */ if (IS_MF_AFEX(sc)) { REG_WR(sc, SC_PORT(sc) ? NIG_REG_P1_HDRS_AFTER_BASIC : NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); } else { REG_WR(sc, SC_PORT(sc) ? NIG_REG_P1_HDRS_AFTER_BASIC : NIG_REG_P0_HDRS_AFTER_BASIC, IS_MF_SD(sc) ? 7 : 6); } if (CHIP_IS_E3(sc)) { REG_WR(sc, SC_PORT(sc) ? NIG_REG_LLH1_MF_MODE : NIG_REG_LLH_MF_MODE, IS_MF(sc)); } } if (!CHIP_IS_E3(sc)) { REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); } if (!CHIP_IS_E1(sc)) { /* 0x2 disable mf_ov, 0x1 enable */ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, (IS_MF_SD(sc) ?
0x1 : 0x2)); if (!CHIP_IS_E1x(sc)) { val = 0; switch (sc->devinfo.mf_info.mf_mode) { case MULTI_FUNCTION_SD: val = 1; break; case MULTI_FUNCTION_SI: case MULTI_FUNCTION_AFEX: val = 2; break; } REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE : NIG_REG_LLH0_CLS_TYPE), val); } REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0); REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); } /* If SPIO5 is set to generate interrupts, enable it for this port */ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); if (val & MISC_SPIO_SPIO5) { uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); val = REG_RD(sc, reg_addr); val |= AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(sc, reg_addr, val); } return (0); } static uint32_t bxe_flr_clnup_reg_poll(struct bxe_softc *sc, uint32_t reg, uint32_t expected, uint32_t poll_count) { uint32_t cur_cnt = poll_count; uint32_t val; while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); } return (val); } static int bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, uint32_t reg, char *msg, uint32_t poll_cnt) { uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); if (val != 0) { BLOGE(sc, "%s usage count=%d\n", msg, val); return (1); } return (0); } /* Common routines with VF FLR cleanup */ static uint32_t bxe_flr_clnup_poll_count(struct bxe_softc *sc) { /* adjust polling timeout */ if (CHIP_REV_IS_EMUL(sc)) { return (FLR_POLL_CNT * 2000); } if (CHIP_REV_IS_FPGA(sc)) { return (FLR_POLL_CNT * 120); } return (FLR_POLL_CNT); } static int bxe_poll_hw_usage_counters(struct bxe_softc *sc, uint32_t poll_cnt) { /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bxe_flr_clnup_poll_hw_counter(sc, CFC_REG_NUM_LCIDS_INSIDE_PF, "CFC PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, DORQ_REG_PF_USAGE_CNT, "DQ PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), "QM PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), "Timers VNIC usage counter timed out", poll_cnt)) { return (1); } if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), "Timers NUM_SCANS usage counter timed out", poll_cnt)) { return (1); } /* Wait for DMAE PF usage counter to zero */ if (bxe_flr_clnup_poll_hw_counter(sc, dmae_reg_go_c[INIT_DMAE_C(sc)], "DMAE command register timed out", poll_cnt)) { return (1); } return (0); } #define OP_GEN_PARAM(param) \ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) #define OP_GEN_TYPE(type) \ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) #define OP_GEN_AGG_VECT(index) \ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) static int bxe_send_final_clnup(struct bxe_softc *sc, uint8_t clnup_func, uint32_t poll_cnt) { uint32_t op_gen_command = 0; uint32_t comp_addr = (BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); int ret = 0; if (REG_RD(sc, comp_addr)) { BLOGE(sc, "Cleanup complete was not 0 before sending\n"); return (1); } op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); op_gen_command |=
OP_GEN_AGG_VECT(clnup_func); op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { BLOGE(sc, "FW final cleanup did not succeed\n"); BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", (REG_RD(sc, comp_addr))); bxe_panic(sc, ("FLR cleanup failed\n")); return (1); } /* Zero completion for next FLR */ REG_WR(sc, comp_addr, 0); return (ret); } static void bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, struct pbf_pN_buf_regs *regs, uint32_t poll_count) { uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; uint32_t cur_cnt = poll_count; crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); crd = crd_start = REG_RD(sc, regs->crd); init_crd = REG_RD(sc, regs->init_crd); BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); while ((crd != init_crd) && ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < (init_crd - crd_start))) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); crd = REG_RD(sc, regs->crd); crd_freed = REG_RD(sc, regs->crd_freed); } else { BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, struct pbf_pN_cmd_regs *regs, uint32_t poll_count) { uint32_t occup, to_free, freed, freed_start; uint32_t cur_cnt = poll_count; occup = to_free = REG_RD(sc, regs->lines_occup); freed = freed_start = REG_RD(sc, regs->lines_freed); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); while (occup && ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); occup = REG_RD(sc, regs->lines_occup); freed = REG_RD(sc, regs->lines_freed); } else { BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) { struct pbf_pN_cmd_regs cmd_regs[] = { {0, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, {1, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT}, {4, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_LB_Q : PBF_REG_P4_TQ_LINES_FREED_CNT} }; struct pbf_pN_buf_regs buf_regs[] = { {0, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD, (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, {1, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD, (CHIP_IS_E3B0(sc)) ?
PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, {4, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD, (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, }; int i; /* Verify the command queues are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); } /* Verify the transmission buffers are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); } } static void bxe_hw_enable_status(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); val = REG_RD(sc, PBF_REG_DISABLE_PF); BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); } static int bxe_pf_flr_clnup(struct bxe_softc *sc) { uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); /* Re-enable PF target read access */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); /* Poll HW usage counters */ BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { return (-1); } /* Zero the igu 'trailing edge' and 'leading edge' */ /* Send the FW cleanup command */ if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { return (-1); } /* ATC cleanup */ /* Verify TX hw is flushed */ bxe_tx_hw_flushed(sc, poll_cnt); /* Wait 100ms (not adjusted according to platform) */ DELAY(100000); /* Verify no pending pci transactions */ if (bxe_is_pcie_pending(sc)) { BLOGE(sc, "PCIE Transactions still pending\n"); } /* Debug */ bxe_hw_enable_status(sc); /* * Master enable - Due to WB DMAE writes performed before this * register is re-initialized as part of the regular function init */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); return (0); } static int bxe_init_hw_func(struct bxe_softc *sc) { int port = SC_PORT(sc); int func = SC_FUNC(sc); int init_phase = PHASE_PF0 + func; struct ecore_ilt *ilt = sc->ilt; uint16_t cdu_ilt_start; uint32_t addr, val; uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; int i, main_mem_width, rc; BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); /* FLR cleanup */ if (!CHIP_IS_E1x(sc)) { rc = bxe_pf_flr_clnup(sc); if (rc) { BLOGE(sc, "FLR cleanup failed!\n"); // XXX bxe_fw_dump(sc); // XXX bxe_idle_chk(sc); return (rc); } } /* set MSI reconfigure capability */ if (sc->devinfo.int_block == INT_BLOCK_HC) { addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); val = REG_RD(sc, addr); val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; REG_WR(sc, addr, val); } ecore_init_block(sc, BLOCK_PXP, init_phase); ecore_init_block(sc, BLOCK_PXP2, init_phase); ilt = sc->ilt; cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; for (i = 0; i < L2_ILT_LINES(sc); i++) { ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; ilt->lines[cdu_ilt_start + i].page_mapping = sc->context[i].vcxt_dma.paddr; ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; } ecore_ilt_init_op(sc, INITOP_SET); /* Set NIC mode */ REG_WR(sc, PRS_REG_NIC_MODE, 1); BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); if (!CHIP_IS_E1x(sc)) { uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; /* Turn on a single ISR mode in IGU if driver is going to use * INT#x or MSI */ if (sc->interrupt_mode != INTR_MODE_MSIX) { pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; } /* * Timers workaround bug: function init part. * Need to wait 20msec after initializing ILT, * needed to make sure there are no requests in * one of the PXP internal queues with "old" ILT addresses */ DELAY(20000); /* * Master enable - Due to WB DMAE writes performed before this * register is re-initialized as part of the regular function * init */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); /* Enable the function in IGU */ REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); } sc->dmae_ready = 1; ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); ecore_init_block(sc, BLOCK_ATC, init_phase); ecore_init_block(sc, BLOCK_DMAE, init_phase); ecore_init_block(sc, BLOCK_NIG, init_phase); ecore_init_block(sc, BLOCK_SRC, init_phase); ecore_init_block(sc, BLOCK_MISC, init_phase); ecore_init_block(sc, BLOCK_TCM, init_phase); ecore_init_block(sc, BLOCK_UCM, init_phase); ecore_init_block(sc, BLOCK_CCM, init_phase); ecore_init_block(sc, BLOCK_XCM, init_phase); ecore_init_block(sc, BLOCK_TSEM, init_phase); ecore_init_block(sc, BLOCK_USEM, init_phase); ecore_init_block(sc, BLOCK_CSEM, init_phase); ecore_init_block(sc, BLOCK_XSEM, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, QM_REG_PF_EN, 1); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); } ecore_init_block(sc, BLOCK_QM, init_phase); ecore_init_block(sc, BLOCK_TM, init_phase); ecore_init_block(sc, BLOCK_DORQ, init_phase); bxe_iov_init_dq(sc); ecore_init_block(sc, BLOCK_BRB1, init_phase); ecore_init_block(sc, BLOCK_PRS, init_phase); ecore_init_block(sc, BLOCK_TSDM, init_phase); ecore_init_block(sc, BLOCK_CSDM, init_phase); ecore_init_block(sc, BLOCK_USDM, init_phase); ecore_init_block(sc, BLOCK_XSDM, init_phase); ecore_init_block(sc, BLOCK_UPB, init_phase); ecore_init_block(sc, BLOCK_XPB, init_phase); ecore_init_block(sc, BLOCK_PBF, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, PBF_REG_DISABLE_PF, 0); ecore_init_block(sc, BLOCK_CDU, init_phase); ecore_init_block(sc, BLOCK_CFC, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); if (IS_MF(sc)) { REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); } ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); /* HC init per function */ if (sc->devinfo.int_block == INT_BLOCK_HC) { if (CHIP_IS_E1H(sc)) { REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); REG_WR(sc, HC_REG_LEADING_EDGE_0 
+ port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } ecore_init_block(sc, BLOCK_HC, init_phase); } else { int num_segs, sb_idx, prod_offset; REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); } ecore_init_block(sc, BLOCK_IGU, init_phase); if (!CHIP_IS_E1x(sc)) { int dsb_idx = 0; /** * Producer memory: * E2 mode: address 0-135 match to the mapping memory; * 136 - PF0 default prod; 137 - PF1 default prod; * 138 - PF2 default prod; 139 - PF3 default prod; * 140 - PF0 attn prod; 141 - PF1 attn prod; * 142 - PF2 attn prod; 143 - PF3 attn prod; * 144-147 reserved. * * E1.5 mode - In backward compatible mode; * for non default SB; each even line in the memory * holds the U producer and each odd line holds * the C producer. The first 128 producers are for * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 * producers are for the DSB for each PF. * Each PF has five segments: (the order inside each * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; * 132-135 C prods; 136-139 X prods; 140-143 T prods; * 144-147 attn prods; */ /* non-default-status-blocks */ num_segs = CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { prod_offset = (sc->igu_base_sb + sb_idx) * num_segs; for (i = 0; i < num_segs; i++) { addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i) * 4; REG_WR(sc, addr, 0); } /* send consumer update with value 0 */ bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); } /* default-status-blocks */ num_segs = CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; if (CHIP_IS_MODE_4_PORT(sc)) dsb_idx = SC_FUNC(sc); else dsb_idx = SC_VN(sc); prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_BASE_DSB_PROD + dsb_idx : IGU_NORM_BASE_DSB_PROD + dsb_idx); /* * igu prods come in chunks of E1HVN_MAX (4); * it does not matter what the current chip mode is */ for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i)*4; REG_WR(sc, addr, 0); } /* send consumer update with 0 */ if (CHIP_INT_MODE_IS_BC(sc)) { bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, CSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, XSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, TSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 0, IGU_INT_NOP, 1); } else { bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 0, IGU_INT_NOP, 1); } bxe_igu_clear_sb(sc, sc->igu_dsb_id); /* !!!
these should become driver const once rf-tool supports split-68 const */ REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); } } /* Reset PCIE errors for debug */ REG_WR(sc, 0x2114, 0xffffffff); REG_WR(sc, 0x2120, 0xffffffff); if (CHIP_IS_E1x(sc)) { main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ main_mem_base = HC_REG_MAIN_MEMORY + SC_PORT(sc) * (main_mem_size * 4); main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; main_mem_width = 8; val = REG_RD(sc, main_mem_prty_clr); if (val) { BLOGD(sc, DBG_LOAD, "Parity errors in HC block during function init (0x%x)!\n", val); } /* Clear "false" parity errors in MSI-X table */ for (i = main_mem_base; i < main_mem_base + main_mem_size * 4; i += main_mem_width) { bxe_read_dmae(sc, i, main_mem_width / 4); bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), i, main_mem_width / 4); } /* Clear HC parity attention */ REG_RD(sc, main_mem_prty_clr); } #if 1 /* Enable STORMs SP logging */ REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); #endif elink_phy_probe(&sc->link_params); return (0); } static void bxe_link_reset(struct bxe_softc *sc) { if (!BXE_NOMCP(sc)) { bxe_acquire_phy_lock(sc); elink_lfa_reset(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); } else { if (!CHIP_REV_IS_SLOW(sc)) { BLOGW(sc, "Bootcode is missing - cannot reset link\n"); } } } static void bxe_reset_port(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t val; ELINK_DEBUG_P0(sc, "bxe_reset_port called\n"); /* reset physical Link */ bxe_link_reset(sc); REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); /* Do not rcv packets to BRB */ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); /* Do not direct rcv packets that are not for MCP to the BRB */ REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); /* Configure AEU */ REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); DELAY(100000); /* Check for BRB port occupancy */ val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); if (val) { BLOGD(sc, DBG_LOAD, "BRB1 is not empty, %d blocks are occupied\n", val); } /* TODO: Close Doorbell port? 
*/ } static void bxe_ilt_wr(struct bxe_softc *sc, uint32_t index, bus_addr_t addr) { int reg; uint32_t wb_write[2]; if (CHIP_IS_E1(sc)) { reg = PXP2_REG_RQ_ONCHIP_AT + index*8; } else { reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; } wb_write[0] = ONCHIP_ADDR1(addr); wb_write[1] = ONCHIP_ADDR2(addr); REG_WR_DMAE(sc, reg, wb_write, 2); } static void bxe_clear_func_ilt(struct bxe_softc *sc, uint32_t func) { uint32_t i, base = FUNC_ILT_BASE(func); for (i = base; i < base + ILT_PER_FUNC; i++) { bxe_ilt_wr(sc, i, 0); } } static void bxe_reset_func(struct bxe_softc *sc) { struct bxe_fastpath *fp; int port = SC_PORT(sc); int func = SC_FUNC(sc); int i; /* Disable the function in the FW */ REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); /* FP SBs */ FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), SB_DISABLED); } /* SP SB */ REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED); for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); } /* Configure IGU */ if (sc->devinfo.int_block == INT_BLOCK_HC) { REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } else { REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); } if (CNIC_LOADED(sc)) { /* Disable Timer scan */ REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); /* * Wait for at least 10ms and up to 2 seconds for the timers * scan to complete */ for (i = 0; i < 200; i++) { DELAY(10000); if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) break; } } /* Clear ILT */ bxe_clear_func_ilt(sc, func); /* * Timers workaround bug for E2: if this is vnic-3, * we need to set the entire ilt range for these timers. */ if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { struct ilt_client_info ilt_cli; /* use dummy TM client */ memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); } /* this assumes that reset_port() was called before reset_func() */ if (!CHIP_IS_E1x(sc)) { bxe_pf_disable(sc); } sc->dmae_ready = 0; } static int bxe_gunzip_init(struct bxe_softc *sc) { return (0); } static void bxe_gunzip_end(struct bxe_softc *sc) { return; } static int bxe_init_firmware(struct bxe_softc *sc) { if (CHIP_IS_E1(sc)) { ecore_init_e1_firmware(sc); sc->iro_array = e1_iro_arr; } else if (CHIP_IS_E1H(sc)) { ecore_init_e1h_firmware(sc); sc->iro_array = e1h_iro_arr; } else if (!CHIP_IS_E1x(sc)) { ecore_init_e2_firmware(sc); sc->iro_array = e2_iro_arr; } else { BLOGE(sc, "Unsupported chip revision\n"); return (-1); } return (0); } static void bxe_release_firmware(struct bxe_softc *sc) { /* Do nothing */ return; } static int ecore_gunzip(struct bxe_softc *sc, const uint8_t *zbuf, int len) { /* XXX : Implement...
*/ BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); return (FALSE); } static void ecore_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val) { bxe_reg_wr_ind(sc, addr, val); } static void ecore_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, uint32_t addr, uint32_t len) { bxe_write_dmae_phys_len(sc, phys_addr, addr, len); } void ecore_storm_memset_struct(struct bxe_softc *sc, uint32_t addr, size_t size, uint32_t *data) { uint8_t i; for (i = 0; i < size/4; i++) { REG_WR(sc, addr + (i * 4), data[i]); } } /* * character device - ioctl interface definitions */ #include "bxe_dump.h" #include "bxe_ioctl.h" #include static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); static struct cdevsw bxe_cdevsw = { .d_version = D_VERSION, .d_ioctl = bxe_eioctl, .d_name = "bxecnic", }; #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1)) #define DUMP_ALL_PRESETS 0x1FFF #define DUMP_MAX_PRESETS 13 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) #define IS_REG_IN_PRESET(presets, idx) \ ((presets & (1 << (idx-1))) == (1 << (idx-1))) static int bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) { if (CHIP_IS_E1(sc)) return dump_num_registers[0][preset-1]; else if (CHIP_IS_E1H(sc)) return dump_num_registers[1][preset-1]; else if (CHIP_IS_E2(sc)) return dump_num_registers[2][preset-1]; else if (CHIP_IS_E3A0(sc)) return dump_num_registers[3][preset-1]; else if (CHIP_IS_E3B0(sc)) return dump_num_registers[4][preset-1]; else return 0; } static int bxe_get_total_regs_len32(struct bxe_softc *sc) { uint32_t preset_idx; int regdump_len32 = 0; /* Calculate the total preset regs length */ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); } return regdump_len32; } static const uint32_t * __bxe_get_page_addr_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_vals_e2; else if (CHIP_IS_E3(sc)) return page_vals_e3; else return NULL; } static uint32_t __bxe_get_page_reg_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_MODE_VALUES_E2; else if (CHIP_IS_E3(sc)) return PAGE_MODE_VALUES_E3; else return 0; } static const uint32_t * __bxe_get_page_write_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_write_regs_e2; else if (CHIP_IS_E3(sc)) return page_write_regs_e3; else return NULL; } static uint32_t __bxe_get_page_write_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_WRITE_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_WRITE_REGS_E3; else return 0; } static const struct reg_addr * __bxe_get_page_read_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_read_regs_e2; else if (CHIP_IS_E3(sc)) return page_read_regs_e3; else return NULL; } static uint32_t __bxe_get_page_read_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_READ_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_READ_REGS_E3; else return 0; } static bool bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(reg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(reg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(reg_info->chips); else if (CHIP_IS_E3A0(sc)) return 
IS_E3A0_REG(reg_info->chips); else if (CHIP_IS_E3B0(sc)) return IS_E3B0_REG(reg_info->chips); else return 0; } static bool bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(wreg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(wreg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(wreg_info->chips); else if (CHIP_IS_E3A0(sc)) return IS_E3A0_REG(wreg_info->chips); else if (CHIP_IS_E3B0(sc)) return IS_E3B0_REG(wreg_info->chips); else return 0; } /** * bxe_read_pages_regs - read "paged" registers * * @sc device handle * @p output buffer * @preset dump preset used to filter registers * * Reads "paged" memories: memories that may only be read by first writing to a * specific address ("write address") and then reading from a specific address * ("read address"). There may be more than one write address per "page" and * more than one read address per write address. */ static void bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, k, n; /* addresses of the paged registers */ const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); /* number of paged registers */ int num_pages = __bxe_get_page_reg_num(sc); /* write addresses */ const uint32_t *write_addr = __bxe_get_page_write_ar(sc); /* number of write addresses */ int write_num = __bxe_get_page_write_num(sc); /* read addresses info */ const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); /* number of read addresses */ int read_num = __bxe_get_page_read_num(sc); uint32_t addr, size; for (i = 0; i < num_pages; i++) { for (j = 0; j < write_num; j++) { REG_WR(sc, write_addr[j], page_addr[i]); for (k = 0; k < read_num; k++) { if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { size = read_addr[k].size; for (n = 0; n < size; n++) { addr = read_addr[k].addr + n*4; *p++ = REG_RD(sc, addr); } } } } } return; } static int bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, addr; const struct wreg_addr *wreg_addr_p = NULL; if (CHIP_IS_E1(sc)) wreg_addr_p = &wreg_addr_e1; else if (CHIP_IS_E1H(sc)) wreg_addr_p = &wreg_addr_e1h; else if (CHIP_IS_E2(sc)) wreg_addr_p = &wreg_addr_e2; else if (CHIP_IS_E3A0(sc)) wreg_addr_p = &wreg_addr_e3; else if (CHIP_IS_E3B0(sc)) wreg_addr_p = &wreg_addr_e3b0; else return (-1); /* Read the idle_chk registers */ for (i = 0; i < IDLE_REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { for (j = 0; j < idle_reg_addrs[i].size; j++) *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); } } /* Read the regular registers */ for (i = 0; i < REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) && IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { for (j = 0; j < reg_addrs[i].size; j++) *p++ = REG_RD(sc, reg_addrs[i].addr + j*4); } } /* Read the CAM registers */ if (bxe_is_wreg_in_chip(sc, wreg_addr_p) && IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { for (i = 0; i < wreg_addr_p->size; i++) { *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); /* In case of wreg_addr register, read additional registers from read_regs array */ for (j = 0; j < wreg_addr_p->read_regs_count; j++) { addr = *(wreg_addr_p->read_regs); *p++ = REG_RD(sc, addr + j*4); } } } /* Paged registers are supported in E2 & E3 only */ if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { /* Read "paged" registers */ bxe_read_pages_regs(sc, p, preset); } return 0; } int bxe_grc_dump(struct bxe_softc *sc) { int rval = 0; uint32_t preset_idx; uint8_t *buf; uint32_t size; struct dump_header *d_hdr; uint32_t i;
uint32_t reg_val; uint32_t reg_addr; uint32_t cmd_offset; struct ecore_ilt *ilt = SC_ILT(sc); struct bxe_fastpath *fp; struct ilt_client_info *ilt_cli; int grc_dump_size; if (sc->grcdump_done || sc->grcdump_started) return (rval); sc->grcdump_started = 1; BLOGI(sc, "Started collecting grcdump\n"); grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); if (sc->grc_dump == NULL) { BLOGW(sc, "Unable to allocate memory for grcdump collection\n"); return (ENOMEM); } /* Disable parity attentions, as the following dump may * cause false alarms by reading registers that were never * written. We will re-enable parity attentions right after the dump. */ /* Disable parity on path 0 */ bxe_pretend_func(sc, 0); ecore_disable_blocks_parity(sc); /* Disable parity on path 1 */ bxe_pretend_func(sc, 1); ecore_disable_blocks_parity(sc); /* Return to current function */ bxe_pretend_func(sc, SC_ABS_FUNC(sc)); buf = sc->grc_dump; d_hdr = sc->grc_dump; d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; d_hdr->version = BNX2X_DUMP_VERSION; d_hdr->preset = DUMP_ALL_PRESETS; if (CHIP_IS_E1(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1; } else if (CHIP_IS_E1H(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1H; } else if (CHIP_IS_E2(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E2 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3A0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3B0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } buf += sizeof(struct dump_header); for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { /* Skip presets with IOR */ if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) || (preset_idx == 11)) continue; rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx); if (rval) break; size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); buf += size; } bxe_pretend_func(sc, 0); ecore_clear_blocks_parity(sc); ecore_enable_blocks_parity(sc); bxe_pretend_func(sc, 1); ecore_clear_blocks_parity(sc); ecore_enable_blocks_parity(sc); /* Return to current function */ bxe_pretend_func(sc, SC_ABS_FUNC(sc)); if(sc->state == BXE_STATE_OPEN) { if(sc->fw_stats_req != NULL) { BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->fw_stats_req_mapping, (uintmax_t)sc->fw_stats_data_mapping, sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size)); } if(sc->def_sb != NULL) { BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n", (void *)sc->def_sb_dma.paddr, sc->def_sb, sizeof(struct host_sp_status_block)); } if(sc->eq_dma.vaddr != NULL) { BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE); } if(sc->sp_dma.vaddr != NULL) { BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n", (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr, sizeof(struct bxe_slowpath)); } if(sc->spq_dma.vaddr != NULL) { BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE); } if(sc->gz_buf_dma.vaddr != NULL) { BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr, FW_BUF_SIZE); } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL && fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
fp->rx_sge_dma.vaddr != NULL) { BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i, (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr, sizeof(union bxe_host_hc_status_block)); BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES)); BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES)); BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i, (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr, (BCM_PAGE_SIZE * RCQ_NUM_PAGES)); BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES)); } } if(ilt != NULL ) { ilt_cli = &ilt->clients[1]; if(ilt->lines != NULL) { for (i = ilt_cli->start; i <= ilt_cli->end; i++) { BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr), ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE); } } } cmd_offset = DMAE_REG_CMD_MEM; for (i = 0; i < 224; i++) { reg_addr = (cmd_offset +(i * 4)); reg_val = REG_RD(sc, reg_addr); BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i, reg_addr, reg_val); } } BLOGI(sc, "Collection of grcdump done\n"); sc->grcdump_done = 1; return(rval); } static int bxe_add_cdev(struct bxe_softc *sc) { sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT); if (sc->eeprom == NULL) { BLOGW(sc, "Unable to alloc for eeprom size buffer\n"); return (-1); } sc->ioctl_dev = make_dev(&bxe_cdevsw, - sc->ifp->if_dunit, + if_getdunit(sc->ifp), UID_ROOT, GID_WHEEL, 0600, "%s", if_name(sc->ifp)); if (sc->ioctl_dev == NULL) { free(sc->eeprom, M_DEVBUF); sc->eeprom = NULL; return (-1); } sc->ioctl_dev->si_drv1 = sc; return (0); } static void bxe_del_cdev(struct bxe_softc *sc) { if (sc->ioctl_dev != NULL) destroy_dev(sc->ioctl_dev); if (sc->eeprom != NULL) { free(sc->eeprom, M_DEVBUF); sc->eeprom = NULL; } sc->ioctl_dev = NULL; return; } static bool bxe_is_nvram_accessible(struct bxe_softc *sc) { if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) return FALSE; return TRUE; } static int bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) { int rval = 0; if(!bxe_is_nvram_accessible(sc)) { BLOGW(sc, "Cannot access eeprom when interface is down\n"); return (-EAGAIN); } rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len); return (rval); } static int bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) { int rval = 0; if(!bxe_is_nvram_accessible(sc)) { BLOGW(sc, "Cannot access eeprom when interface is down\n"); return (-EAGAIN); } rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len); return (rval); } static int bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom) { int rval = 0; switch (eeprom->eeprom_cmd) { case BXE_EEPROM_CMD_SET_EEPROM: rval = copyin(eeprom->eeprom_data, sc->eeprom, eeprom->eeprom_data_len); if (rval) break; rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, eeprom->eeprom_data_len); break; case BXE_EEPROM_CMD_GET_EEPROM: rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, eeprom->eeprom_data_len); if (rval) { break; } rval = copyout(sc->eeprom, eeprom->eeprom_data, eeprom->eeprom_data_len); break; default: rval = EINVAL; break; } if (rval) { BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval); } return (rval); } static int 
bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p) { uint32_t ext_phy_config; int port = SC_PORT(sc); int cfg_idx = bxe_get_link_cfg_idx(sc); dev_p->supported = sc->port.supported[cfg_idx] | (sc->port.supported[cfg_idx ^ 1] & (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE)); dev_p->advertising = sc->port.advertising[cfg_idx]; if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type == ELINK_ETH_PHY_SFP_1G_FIBER) { dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full); dev_p->advertising &= ~(ADVERTISED_10000baseT_Full); } if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up && !(sc->flags & BXE_MF_FUNC_DIS)) { dev_p->duplex = sc->link_vars.duplex; if (IS_MF(sc) && !BXE_NOMCP(sc)) dev_p->speed = bxe_get_mf_speed(sc); else dev_p->speed = sc->link_vars.line_speed; } else { dev_p->duplex = DUPLEX_UNKNOWN; dev_p->speed = SPEED_UNKNOWN; } dev_p->port = bxe_media_detect(sc); ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) dev_p->phy_address = sc->port.phy_addr; else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config); else dev_p->phy_address = 0; if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) dev_p->autoneg = AUTONEG_ENABLE; else dev_p->autoneg = AUTONEG_DISABLE; return 0; } static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct bxe_softc *sc; int rval = 0; bxe_grcdump_t *dump = NULL; int grc_dump_size; bxe_drvinfo_t *drv_infop = NULL; bxe_dev_setting_t *dev_p; bxe_dev_setting_t dev_set; bxe_get_regs_t *reg_p; bxe_reg_rdw_t *reg_rdw_p; bxe_pcicfg_rdw_t *cfg_rdw_p; bxe_perm_mac_addr_t *mac_addr_p; if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL) return ENXIO; dump = (bxe_grcdump_t *)data; switch(cmd) { case BXE_GRC_DUMP_SIZE: dump->pci_func = sc->pcie_func; dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); break; case BXE_GRC_DUMP: grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) || (dump->grcdump_size < grc_dump_size)) { rval = EINVAL; break; } if((sc->trigger_grcdump) && (!sc->grcdump_done) && (!sc->grcdump_started)) { rval = bxe_grc_dump(sc); } if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) && (sc->grc_dump != NULL)) { dump->grcdump_dwords = grc_dump_size >> 2; rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); free(sc->grc_dump, M_DEVBUF); sc->grc_dump = NULL; sc->grcdump_started = 0; sc->grcdump_done = 0; } break; case BXE_DRV_INFO: drv_infop = (bxe_drvinfo_t *)data; snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe"); snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s", BXE_DRIVER_VERSION); snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s", sc->devinfo.bc_ver_str); snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH, "%s", sc->fw_ver_str); drv_infop->eeprom_dump_len = sc->devinfo.flash_size; drv_infop->reg_dump_len = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d", sc->pcie_bus, sc->pcie_device, sc->pcie_func); break; case BXE_DEV_SETTING: dev_p =
(bxe_dev_setting_t *)data; bxe_get_settings(sc, &dev_set); dev_p->supported = dev_set.supported; dev_p->advertising = dev_set.advertising; dev_p->speed = dev_set.speed; dev_p->duplex = dev_set.duplex; dev_p->port = dev_set.port; dev_p->phy_address = dev_set.phy_address; dev_p->autoneg = dev_set.autoneg; break; case BXE_GET_REGS: reg_p = (bxe_get_regs_t *)data; grc_dump_size = reg_p->reg_buf_len; if((!sc->grcdump_done) && (!sc->grcdump_started)) { bxe_grc_dump(sc); } if((sc->grcdump_done) && (sc->grcdump_started) && (sc->grc_dump != NULL)) { rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size); free(sc->grc_dump, M_DEVBUF); sc->grc_dump = NULL; sc->grcdump_started = 0; sc->grcdump_done = 0; } break; case BXE_RDW_REG: reg_rdw_p = (bxe_reg_rdw_t *)data; if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) && (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id); if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) && (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val); break; case BXE_RDW_PCICFG: cfg_rdw_p = (bxe_pcicfg_rdw_t *)data; if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) { cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_width); } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) { pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val, cfg_rdw_p->cfg_width); } else { BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n"); } break; case BXE_MAC_ADDR: mac_addr_p = (bxe_perm_mac_addr_t *)data; snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s", sc->mac_addr_str); break; case BXE_EEPROM: rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data); break; default: break; } return (rval); } #ifdef DEBUGNET static void -bxe_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize) +bxe_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize) { struct bxe_softc *sc; sc = if_getsoftc(ifp); BXE_CORE_LOCK(sc); *nrxr = sc->num_queues; *ncl = DEBUGNET_MAX_IN_FLIGHT; *clsize = sc->fp[0].mbuf_alloc_size; BXE_CORE_UNLOCK(sc); } static void -bxe_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused) +bxe_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused) { } static int -bxe_debugnet_transmit(struct ifnet *ifp, struct mbuf *m) +bxe_debugnet_transmit(if_t ifp, struct mbuf *m) { struct bxe_softc *sc; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || !sc->link_vars.link_up) return (ENOENT); error = bxe_tx_encap(&sc->fp[0], &m); if (error != 0 && m != NULL) m_freem(m); return (error); } static int -bxe_debugnet_poll(struct ifnet *ifp, int count) +bxe_debugnet_poll(if_t ifp, int count) { struct bxe_softc *sc; int i; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || !sc->link_vars.link_up) return (ENOENT); for (i = 0; i < sc->num_queues; i++) (void)bxe_rxeof(sc, &sc->fp[i]); (void)bxe_txeof(sc, &sc->fp[0]); return (0); } #endif /* DEBUGNET */ diff --git a/sys/dev/enetc/if_enetc.c b/sys/dev/enetc/if_enetc.c index d691f7ccbf01..60d1c9f96d45 100644 --- a/sys/dev/enetc/if_enetc.c +++ b/sys/dev/enetc/if_enetc.c @@ -1,1523 +1,1523 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2021 Alstom Group. * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #include "miibus_if.h" static device_register_t enetc_register; static ifdi_attach_pre_t enetc_attach_pre; static ifdi_attach_post_t enetc_attach_post; static ifdi_detach_t enetc_detach; static ifdi_tx_queues_alloc_t enetc_tx_queues_alloc; static ifdi_rx_queues_alloc_t enetc_rx_queues_alloc; static ifdi_queues_free_t enetc_queues_free; static ifdi_init_t enetc_init; static ifdi_stop_t enetc_stop; static ifdi_msix_intr_assign_t enetc_msix_intr_assign; static ifdi_tx_queue_intr_enable_t enetc_tx_queue_intr_enable; static ifdi_rx_queue_intr_enable_t enetc_rx_queue_intr_enable; static ifdi_intr_enable_t enetc_intr_enable; static ifdi_intr_disable_t enetc_intr_disable; static int enetc_isc_txd_encap(void*, if_pkt_info_t); static void enetc_isc_txd_flush(void*, uint16_t, qidx_t); static int enetc_isc_txd_credits_update(void*, uint16_t, bool); static int enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t); static int enetc_isc_rxd_pkt_get(void*, if_rxd_info_t); static void enetc_isc_rxd_refill(void*, if_rxd_update_t); static void enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t); static void enetc_vlan_register(if_ctx_t, uint16_t); static void enetc_vlan_unregister(if_ctx_t, uint16_t); static uint64_t enetc_get_counter(if_ctx_t, ift_counter); static int enetc_promisc_set(if_ctx_t, int); static int enetc_mtu_set(if_ctx_t, uint32_t); static void enetc_setup_multicast(if_ctx_t); static void enetc_timer(if_ctx_t, uint16_t); static void enetc_update_admin_status(if_ctx_t); static miibus_readreg_t enetc_miibus_readreg; static miibus_writereg_t enetc_miibus_writereg; static miibus_linkchg_t enetc_miibus_linkchg; static miibus_statchg_t enetc_miibus_statchg; static int enetc_media_change(if_t); static void enetc_media_status(if_t, struct ifmediareq*); static int enetc_fixed_media_change(if_t); static void enetc_fixed_media_status(if_t, struct ifmediareq*); static void enetc_max_nqueues(struct enetc_softc*, int*, int*); static int enetc_setup_phy(struct enetc_softc*); static void enetc_get_hwaddr(struct enetc_softc*); static void enetc_set_hwaddr(struct enetc_softc*); static int enetc_setup_rss(struct enetc_softc*); static void enetc_init_hw(struct enetc_softc*); 
static void enetc_init_ctrl(struct enetc_softc*); static void enetc_init_tx(struct enetc_softc*); static void enetc_init_rx(struct enetc_softc*); static int enetc_ctrl_send(struct enetc_softc*, uint16_t, uint16_t, iflib_dma_info_t); static const char enetc_driver_version[] = "1.0.0"; static pci_vendor_info_t enetc_vendor_info_array[] = { PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF, "Freescale ENETC PCIe Gigabit Ethernet Controller"), PVID_END }; #define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER) static device_method_t enetc_methods[] = { DEVMETHOD(device_register, enetc_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD(miibus_readreg, enetc_miibus_readreg), DEVMETHOD(miibus_writereg, enetc_miibus_writereg), DEVMETHOD(miibus_linkchg, enetc_miibus_linkchg), DEVMETHOD(miibus_statchg, enetc_miibus_statchg), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD_END }; static driver_t enetc_driver = { "enetc", enetc_methods, sizeof(struct enetc_softc) }; DRIVER_MODULE(miibus, enetc, miibus_fdt_driver, NULL, NULL); /* Make sure miibus gets processed first. */ DRIVER_MODULE_ORDERED(enetc, pci, enetc_driver, NULL, NULL, SI_ORDER_ANY); MODULE_VERSION(enetc, 1); IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array); MODULE_DEPEND(enetc, ether, 1, 1, 1); MODULE_DEPEND(enetc, iflib, 1, 1, 1); MODULE_DEPEND(enetc, miibus, 1, 1, 1); static device_method_t enetc_iflib_methods[] = { DEVMETHOD(ifdi_attach_pre, enetc_attach_pre), DEVMETHOD(ifdi_attach_post, enetc_attach_post), DEVMETHOD(ifdi_detach, enetc_detach), DEVMETHOD(ifdi_init, enetc_init), DEVMETHOD(ifdi_stop, enetc_stop), DEVMETHOD(ifdi_tx_queues_alloc, enetc_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, enetc_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, enetc_queues_free), DEVMETHOD(ifdi_msix_intr_assign, enetc_msix_intr_assign), DEVMETHOD(ifdi_tx_queue_intr_enable, enetc_tx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, enetc_rx_queue_intr_enable), DEVMETHOD(ifdi_intr_enable, enetc_intr_enable), DEVMETHOD(ifdi_intr_disable, enetc_intr_disable), DEVMETHOD(ifdi_vlan_register, enetc_vlan_register), DEVMETHOD(ifdi_vlan_unregister, enetc_vlan_unregister), DEVMETHOD(ifdi_get_counter, enetc_get_counter), DEVMETHOD(ifdi_mtu_set, enetc_mtu_set), DEVMETHOD(ifdi_multi_set, enetc_setup_multicast), DEVMETHOD(ifdi_promisc_set, enetc_promisc_set), DEVMETHOD(ifdi_timer, enetc_timer), DEVMETHOD(ifdi_update_admin_status, enetc_update_admin_status), DEVMETHOD_END }; static driver_t enetc_iflib_driver = { "enetc", enetc_iflib_methods, sizeof(struct enetc_softc) }; static struct if_txrx enetc_txrx = { .ift_txd_encap = enetc_isc_txd_encap, .ift_txd_flush = enetc_isc_txd_flush, .ift_txd_credits_update = enetc_isc_txd_credits_update, .ift_rxd_available = enetc_isc_rxd_available, .ift_rxd_pkt_get = enetc_isc_rxd_pkt_get, .ift_rxd_refill = enetc_isc_rxd_refill,
.ift_rxd_flush = enetc_isc_rxd_flush }; static struct if_shared_ctx enetc_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = ENETC_RING_ALIGN, .isc_tx_maxsize = ENETC_MAX_FRAME_LEN, .isc_tx_maxsegsize = PAGE_SIZE, .isc_rx_maxsize = ENETC_MAX_FRAME_LEN, .isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN, .isc_rx_nsegments = ENETC_MAX_SCATTER, .isc_admin_intrcnt = 0, .isc_nfl = 1, .isc_nrxqs = 1, .isc_ntxqs = 1, .isc_vendor_info = enetc_vendor_info_array, .isc_driver_version = enetc_driver_version, .isc_driver = &enetc_iflib_driver, .isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES, .isc_ntxd_min = {ENETC_MIN_DESC}, .isc_ntxd_max = {ENETC_MAX_DESC}, .isc_ntxd_default = {ENETC_DEFAULT_DESC}, .isc_nrxd_min = {ENETC_MIN_DESC}, .isc_nrxd_max = {ENETC_MAX_DESC}, .isc_nrxd_default = {ENETC_DEFAULT_DESC} }; static void* enetc_register(device_t dev) { if (!ofw_bus_status_okay(dev)) return (NULL); return (&enetc_sctx_init); } static void enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues, int *max_rx_nqueues) { uint32_t val; val = ENETC_PORT_RD4(sc, ENETC_PCAPR0); *max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES); *max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES); } static int enetc_setup_fixed(struct enetc_softc *sc, phandle_t node) { ssize_t size; int speed; size = OF_getencprop(node, "speed", &speed, sizeof(speed)); if (size <= 0) { device_printf(sc->dev, "Device has fixed-link node without link speed specified\n"); return (ENXIO); } switch (speed) { case 10: speed = IFM_10_T; break; case 100: speed = IFM_100_TX; break; case 1000: speed = IFM_1000_T; break; case 2500: speed = IFM_2500_T; break; default: device_printf(sc->dev, "Unsupported link speed value of %d\n", speed); return (ENXIO); } speed |= IFM_ETHER; if (OF_hasprop(node, "full-duplex")) speed |= IFM_FDX; else speed |= IFM_HDX; sc->fixed_link = true; ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change, enetc_fixed_media_status); ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL); ifmedia_set(&sc->fixed_ifmedia, speed); sc->shared->isc_media = &sc->fixed_ifmedia; return (0); } static int enetc_setup_phy(struct enetc_softc *sc) { phandle_t node, fixed_link, phy_handle; struct mii_data *miid; int phy_addr, error; ssize_t size; node = ofw_bus_get_node(sc->dev); fixed_link = ofw_bus_find_child(node, "fixed-link"); if (fixed_link != 0) return (enetc_setup_fixed(sc, fixed_link)); size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle)); if (size <= 0) { device_printf(sc->dev, "Failed to acquire PHY handle from FDT.\n"); return (ENXIO); } phy_handle = OF_node_from_xref(phy_handle); size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr)); if (size <= 0) { device_printf(sc->dev, "Failed to obtain PHY address\n"); return (ENXIO); } error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx), enetc_media_change, enetc_media_status, BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(sc->dev, "mii_attach failed\n"); return (error); } miid = device_get_softc(sc->miibus); sc->shared->isc_media = &miid->mii_media; return (0); } static int enetc_attach_pre(if_ctx_t ctx) { if_softc_ctx_t scctx; struct enetc_softc *sc; int error, rid; sc = iflib_get_softc(ctx); scctx = iflib_get_softc_ctx(ctx); sc->ctx = ctx; sc->dev = iflib_get_dev(ctx); sc->shared = scctx; mtx_init(&sc->mii_lock, "enetc_mdio", NULL, MTX_DEF); pci_save_state(sc->dev); pcie_flr(sc->dev, 1000, false); pci_restore_state(sc->dev); rid = PCIR_BAR(ENETC_BAR_REGS); sc->regs = 
bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->regs == NULL) { device_printf(sc->dev, "Failed to allocate BAR %d\n", ENETC_BAR_REGS); return (ENXIO); } error = iflib_dma_alloc_align(ctx, ENETC_MIN_DESC * sizeof(struct enetc_cbd), ENETC_RING_ALIGN, &sc->ctrl_queue.dma, 0); if (error != 0) { device_printf(sc->dev, "Failed to allocate control ring\n"); goto fail; } sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr; scctx->isc_txrx = &enetc_txrx; scctx->isc_tx_nsegments = ENETC_MAX_SCATTER; enetc_max_nqueues(sc, &scctx->isc_nrxqsets_max, &scctx->isc_ntxqsets_max); if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) { device_printf(sc->dev, "The number of TX descriptors has to be a multiple of %d\n", ENETC_DESC_ALIGN); error = EINVAL; goto fail; } if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) { device_printf(sc->dev, "The number of RX descriptors has to be a multiple of %d\n", ENETC_DESC_ALIGN); error = EINVAL; goto fail; } scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd); scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd); scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd); scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd); scctx->isc_tx_csum_flags = 0; scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS; error = enetc_mtu_set(ctx, ETHERMTU); if (error != 0) goto fail; scctx->isc_msix_bar = pci_msix_table_bar(sc->dev); error = enetc_setup_phy(sc); if (error != 0) goto fail; enetc_get_hwaddr(sc); return (0); fail: enetc_detach(ctx); return (error); } static int enetc_attach_post(if_ctx_t ctx) { enetc_init_hw(iflib_get_softc(ctx)); return (0); } static int enetc_detach(if_ctx_t ctx) { struct enetc_softc *sc; int error = 0, i; sc = iflib_get_softc(ctx); for (i = 0; i < sc->rx_num_queues; i++) iflib_irq_free(ctx, &sc->rx_queues[i].irq); if (sc->miibus != NULL) device_delete_child(sc->dev, sc->miibus); if (sc->regs != NULL) error = bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->regs), sc->regs); if (sc->ctrl_queue.dma.idi_size != 0) iflib_dma_free(&sc->ctrl_queue.dma); mtx_destroy(&sc->mii_lock); return (error); } static int enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct enetc_softc *sc; struct enetc_tx_queue *queue; int i; sc = iflib_get_softc(ctx); MPASS(ntxqs == 1); sc->tx_queues = mallocarray(sc->tx_num_queues, sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->tx_queues == NULL) { device_printf(sc->dev, "Failed to allocate memory for TX queues.\n"); return (ENOMEM); } for (i = 0; i < sc->tx_num_queues; i++) { queue = &sc->tx_queues[i]; queue->sc = sc; queue->ring = (union enetc_tx_bd*)(vaddrs[i]); queue->ring_paddr = paddrs[i]; queue->cidx = 0; } return (0); } static int enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct enetc_softc *sc; struct enetc_rx_queue *queue; int i; sc = iflib_get_softc(ctx); MPASS(nrxqs == 1); sc->rx_queues = mallocarray(sc->rx_num_queues, sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->rx_queues == NULL) { device_printf(sc->dev, "Failed to allocate memory for RX queues.\n"); return (ENOMEM); } for (i = 0; i < sc->rx_num_queues; i++) { queue = &sc->rx_queues[i]; queue->sc = sc; queue->qid = i; queue->ring = (union enetc_rx_bd*)(vaddrs[i]); queue->ring_paddr = paddrs[i]; } return (0); } static void enetc_queues_free(if_ctx_t ctx) { struct enetc_softc *sc; sc = iflib_get_softc(ctx); if (sc->tx_queues != 
NULL) { free(sc->tx_queues, M_DEVBUF); sc->tx_queues = NULL; } if (sc->rx_queues != NULL) { free(sc->rx_queues, M_DEVBUF); sc->rx_queues = NULL; } } static void enetc_get_hwaddr(struct enetc_softc *sc) { struct ether_addr hwaddr; uint16_t high; uint32_t low; low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0)); high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0)); memcpy(&hwaddr.octet[0], &low, 4); memcpy(&hwaddr.octet[4], &high, 2); if (ETHER_IS_BROADCAST(hwaddr.octet) || ETHER_IS_MULTICAST(hwaddr.octet) || ETHER_IS_ZERO(hwaddr.octet)) { ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr); device_printf(sc->dev, "Failed to obtain MAC address, using a random one\n"); memcpy(&low, &hwaddr.octet[0], 4); memcpy(&high, &hwaddr.octet[4], 2); } iflib_set_mac(sc->ctx, hwaddr.octet); } static void enetc_set_hwaddr(struct enetc_softc *sc) { - struct ifnet *ifp; + if_t ifp; uint16_t high; uint32_t low; uint8_t *hwaddr; ifp = iflib_get_ifp(sc->ctx); hwaddr = (uint8_t*)if_getlladdr(ifp); low = *((uint32_t*)hwaddr); high = *((uint16_t*)(hwaddr+4)); ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low); ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high); } static int enetc_setup_rss(struct enetc_softc *sc) { struct iflib_dma_info dma; int error, i, buckets_num = 0; uint8_t *rss_table; uint32_t reg; reg = ENETC_RD4(sc, ENETC_SIPCAPR0); if (reg & ENETC_SIPCAPR0_RSS) { reg = ENETC_RD4(sc, ENETC_SIRSSCAPR); buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg); } if (buckets_num == 0) return (ENOTSUP); for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) { arc4rand((uint8_t *)®, sizeof(reg), 0); ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg); } ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues); error = iflib_dma_alloc_align(sc->ctx, buckets_num * sizeof(*rss_table), ENETC_RING_ALIGN, &dma, 0); if (error != 0) { device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n"); return (error); } rss_table = (uint8_t *)dma.idi_vaddr; for (i = 0; i < buckets_num; i++) rss_table[i] = i % sc->rx_num_queues; error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE, buckets_num * sizeof(*rss_table), &dma); if (error != 0) device_printf(sc->dev, "Failed to setup RSS table\n"); iflib_dma_free(&dma); return (error); } static int enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size, iflib_dma_info_t dma) { struct enetc_ctrl_queue *queue; struct enetc_cbd *desc; int timeout = 1000; queue = &sc->ctrl_queue; desc = &queue->ring[queue->pidx]; if (++queue->pidx == ENETC_MIN_DESC) queue->pidx = 0; desc->addr[0] = (uint32_t)dma->idi_paddr; desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32); desc->index = 0; desc->length = (uint16_t)size; desc->cmd = (uint8_t)cmd; desc->cls = (uint8_t)(cmd >> 8); desc->status_flags = 0; /* Sync command packet, */ bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE); /* and the control ring. 
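* (Both syncs must complete before the producer index write below hands * the descriptor to the hardware.)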
*/ bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE); ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx); while (--timeout != 0) { DELAY(20); if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx) break; } if (timeout == 0) return (ETIMEDOUT); bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD); return (0); } static void enetc_init_hw(struct enetc_softc *sc) { uint32_t val; int error; ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL); val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues); val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues); val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val); ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1)); ENETC_PORT_WR4(sc, ENETC_PVCLCTR, ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS); ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD); ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M); ENETC_WR4(sc, ENETC_SICAR0, ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI); ENETC_WR4(sc, ENETC_SICAR2, ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); enetc_init_ctrl(sc); error = enetc_setup_rss(sc); if (error != 0) ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN); else ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE); } static void enetc_init_ctrl(struct enetc_softc *sc) { struct enetc_ctrl_queue *queue = &sc->ctrl_queue; ENETC_WR4(sc, ENETC_SICBDRBAR0, (uint32_t)queue->dma.idi_paddr); ENETC_WR4(sc, ENETC_SICBDRBAR1, (uint32_t)(queue->dma.idi_paddr >> 32)); ENETC_WR4(sc, ENETC_SICBDRLENR, queue->dma.idi_size / sizeof(struct enetc_cbd)); queue->pidx = 0; ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx); ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx); ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN); } static void enetc_init_tx(struct enetc_softc *sc) { struct enetc_tx_queue *queue; int i; for (i = 0; i < sc->tx_num_queues; i++) { queue = &sc->tx_queues[i]; ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0, (uint32_t)queue->ring_paddr); ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1, (uint32_t)(queue->ring_paddr >> 32)); ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size); /* * Even though it is undocumented, resetting the TX ring * indices results in a TX hang. * Do the same as Linux and simply keep them unchanged * for the driver's lifetime. */ #if 0 ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0); ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0); #endif ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN); } } static void enetc_init_rx(struct enetc_softc *sc) { struct enetc_rx_queue *queue; uint32_t rx_buf_size; int i; rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx); for (i = 0; i < sc->rx_num_queues; i++) { queue = &sc->rx_queues[i]; ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0, (uint32_t)queue->ring_paddr); ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1, (uint32_t)(queue->ring_paddr >> 32)); ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size); ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size); ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0); ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0); queue->enabled = false; } } static u_int enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint64_t *bitmap = arg; uint64_t address = 0; uint8_t hash = 0; bool bit; int i, j; bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN); /* * The six-bit hash is calculated by XORing every * 6th bit of the address.
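* Concretely, hash bit i (0 <= i < 6) is the XOR of address bits * i, i+6, i+12, ..., i+42, as computed by the nested loops below.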
* It is then used as an index in a bitmap that is * written to the device. */ for (i = 0; i < 6; i++) { bit = 0; for (j = 0; j < 8; j++) bit ^= !!(address & BIT(i + j*6)); hash |= bit << i; } *bitmap |= (1ULL << hash); return (1); } static void enetc_setup_multicast(if_ctx_t ctx) { struct enetc_softc *sc; - struct ifnet *ifp; + if_t ifp; uint64_t bitmap = 0; uint8_t revid; sc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); revid = pci_get_revid(sc->dev); if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap); /* * In revid 1 of this chip, the positions of the multicast and * unicast hash filter registers are flipped. */ ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX); ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32); } static uint8_t enetc_hash_vid(uint16_t vid) { uint8_t hash = 0; bool bit; int i; for (i = 0; i < 6; i++) { bit = vid & BIT(i); bit ^= !!(vid & BIT(i + 6)); hash |= bit << i; } return (hash); } static void enetc_vlan_register(if_ctx_t ctx, uint16_t vid) { struct enetc_softc *sc; uint8_t hash; uint64_t bitmap; sc = iflib_get_softc(ctx); hash = enetc_hash_vid(vid); /* Check if hash is already present in the bitmap. */ if (++sc->vlan_bitmap[hash] != 1) return; bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0)); bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32; bitmap |= BIT(hash); ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX); ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32); } static void enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid) { struct enetc_softc *sc; uint8_t hash; uint64_t bitmap; sc = iflib_get_softc(ctx); hash = enetc_hash_vid(vid); MPASS(sc->vlan_bitmap[hash] > 0); if (--sc->vlan_bitmap[hash] != 0) return; bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0)); bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32; bitmap &= ~BIT(hash); ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX); ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32); } static void enetc_init(if_ctx_t ctx) { struct enetc_softc *sc; struct mii_data *miid; - struct ifnet *ifp; + if_t ifp; uint16_t max_frame_length; int baudrate; sc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); max_frame_length = sc->shared->isc_max_frame_size; MPASS(max_frame_length < ENETC_MAX_FRAME_LEN); /* Set max RX and TX frame lengths. */ ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length); ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length); ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length); /* Set "VLAN promiscuous" mode if filtering is disabled. */ if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0) ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1)); else ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1)); sc->rbmr = ENETC_RBMR_EN; if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) sc->rbmr |= ENETC_RBMR_VTE; /* Write MAC address to hardware. */ enetc_set_hwaddr(sc); enetc_init_tx(sc); enetc_init_rx(sc); if (sc->fixed_link) { baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media); iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate); } else { /* * Can't return an error from this function; there is not much * we can do if this fails. */ miid = device_get_softc(sc->miibus); (void)mii_mediachg(miid); } enetc_promisc_set(ctx, if_getflags(ifp)); } static void enetc_disable_txq(struct enetc_softc *sc, int qid) { qidx_t cidx, pidx; int timeout = 10000; /* this * DELAY(100) = 1s */ /* At this point iflib shouldn't be enqueuing any more frames.
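* Drain the ring below: poll the hardware consumer index (TBCIR) until it * catches up with the producer index (TBPIR), i.e. every queued frame has * gone out, or until the ~1s timeout expires.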
*/ pidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBPIR); cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR); while (pidx != cidx && --timeout != 0) { DELAY(100); cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR); } if (timeout == 0) device_printf(sc->dev, "Timeout while waiting for txq%d to stop transmitting packets\n", qid); ENETC_TXQ_WR4(sc, qid, ENETC_TBMR, 0); } static void enetc_stop(if_ctx_t ctx) { struct enetc_softc *sc; int i; sc = iflib_get_softc(ctx); for (i = 0; i < sc->rx_num_queues; i++) ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0); for (i = 0; i < sc->tx_num_queues; i++) enetc_disable_txq(sc, i); } static int enetc_msix_intr_assign(if_ctx_t ctx, int msix) { struct enetc_softc *sc; struct enetc_rx_queue *rx_queue; struct enetc_tx_queue *tx_queue; int vector = 0, i, error; char irq_name[16]; sc = iflib_get_softc(ctx); MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT); MPASS(sc->rx_num_queues == sc->tx_num_queues); for (i = 0; i < sc->rx_num_queues; i++, vector++) { rx_queue = &sc->rx_queues[i]; snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i); error = iflib_irq_alloc_generic(ctx, &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX, NULL, rx_queue, i, irq_name); if (error != 0) goto fail; ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector); ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR); ENETC_RXQ_WR4(sc, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR)); } vector = 0; for (i = 0; i < sc->tx_num_queues; i++, vector++) { tx_queue = &sc->tx_queues[i]; snprintf(irq_name, sizeof(irq_name), "txq%d", i); iflib_softirq_alloc_generic(ctx, &tx_queue->irq, IFLIB_INTR_TX, tx_queue, i, irq_name); ENETC_WR4(sc, ENETC_SIMSITRV(i), vector); } return (0); fail: for (i = 0; i < sc->rx_num_queues; i++) { rx_queue = &sc->rx_queues[i]; iflib_irq_free(ctx, &rx_queue->irq); } return (error); } static int enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct enetc_softc *sc; sc = iflib_get_softc(ctx); ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR); return (0); } static int enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct enetc_softc *sc; sc = iflib_get_softc(ctx); ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR); return (0); } static void enetc_intr_enable(if_ctx_t ctx) { struct enetc_softc *sc; int i; sc = iflib_get_softc(ctx); for (i = 0; i < sc->rx_num_queues; i++) ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE); for (i = 0; i < sc->tx_num_queues; i++) ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF); } static void enetc_intr_disable(if_ctx_t ctx) { struct enetc_softc *sc; int i; sc = iflib_get_softc(ctx); for (i = 0; i < sc->rx_num_queues; i++) ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0); for (i = 0; i < sc->tx_num_queues; i++) ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0); } static int enetc_isc_txd_encap(void *data, if_pkt_info_t ipi) { struct enetc_softc *sc = data; struct enetc_tx_queue *queue; union enetc_tx_bd *desc; bus_dma_segment_t *segs; qidx_t pidx, queue_len; qidx_t i = 0; queue = &sc->tx_queues[ipi->ipi_qsidx]; segs = ipi->ipi_segs; pidx = ipi->ipi_pidx; queue_len = sc->tx_queue_size; /* * First descriptor is special. We use it to set frame-related * information and offloads, e.g. the VLAN tag. */ desc = &queue->ring[pidx]; bzero(desc, sizeof(*desc)); desc->frm_len = ipi->ipi_len; desc->addr = segs[i].ds_addr; desc->buf_len = segs[i].ds_len; if (ipi->ipi_flags & IPI_TX_INTR) desc->flags = ENETC_TXBD_FLAGS_FI; i++; if (++pidx == queue_len) pidx = 0; if (ipi->ipi_mflags & M_VLANTAG) { /* VLAN tag is inserted in a separate descriptor.
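* The first BD is marked extended (ENETC_TXBD_FLAGS_EX) and the extension * BD that follows carries the VID and the insertion flag.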
*/ desc->flags |= ENETC_TXBD_FLAGS_EX; desc = &queue->ring[pidx]; bzero(desc, sizeof(*desc)); desc->ext.vid = ipi->ipi_vtag; desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS; if (++pidx == queue_len) pidx = 0; } /* Now add remaining descriptors. */ for (; i < ipi->ipi_nsegs; i++) { desc = &queue->ring[pidx]; bzero(desc, sizeof(*desc)); desc->addr = segs[i].ds_addr; desc->buf_len = segs[i].ds_len; if (++pidx == queue_len) pidx = 0; } desc->flags |= ENETC_TXBD_FLAGS_F; ipi->ipi_new_pidx = pidx; return (0); } static void enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx) { struct enetc_softc *sc = data; ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx); } static int enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear) { struct enetc_softc *sc = data; struct enetc_tx_queue *queue; int cidx, hw_cidx, count; queue = &sc->tx_queues[qid]; hw_cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK; cidx = queue->cidx; /* * RM states that the ring can hold at most ring_size - 1 descriptors. * Thanks to that we can assume that the ring is empty if cidx == pidx. * This requirement is guaranteed implicitly by iflib as it will only * encap a new frame if we have at least nfrags + 2 descriptors available * on the ring. This driver uses at most one additional descriptor for * VLAN tag insertion. * Also RM states that the TBCIR register is only updated once all * descriptors in the chain have been processed. */ if (cidx == hw_cidx) return (0); if (!clear) return (1); count = hw_cidx - cidx; if (count < 0) count += sc->tx_queue_size; queue->cidx = hw_cidx; return (count); } static int enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget) { struct enetc_softc *sc = data; struct enetc_rx_queue *queue; qidx_t hw_pidx, queue_len; union enetc_rx_bd *desc; int count = 0; queue = &sc->rx_queues[qid]; desc = &queue->ring[pidx]; queue_len = sc->rx_queue_size; if (desc->r.lstatus == 0) return (0); if (budget == 1) return (1); hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR); while (pidx != hw_pidx && count < budget) { desc = &queue->ring[pidx]; if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F) count++; if (++pidx == queue_len) pidx = 0; } return (count); } static int enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri) { struct enetc_softc *sc = data; struct enetc_rx_queue *queue; union enetc_rx_bd *desc; uint16_t buf_len, pkt_size = 0; qidx_t cidx, queue_len; uint32_t status; int i; cidx = ri->iri_cidx; queue = &sc->rx_queues[ri->iri_qsidx]; desc = &queue->ring[cidx]; status = desc->r.lstatus; queue_len = sc->rx_queue_size; /* * Ready bit will be set only when all descriptors * in the chain have been processed. */ if ((status & ENETC_RXBD_LSTATUS_R) == 0) return (EAGAIN); /* Pass RSS hash. */ if (status & ENETC_RXBD_FLAG_RSSV) { ri->iri_flowid = desc->r.rss_hash; ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH; } /* Pass IP checksum status. */ ri->iri_csum_flags = CSUM_IP_CHECKED; if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0) ri->iri_csum_flags |= CSUM_IP_VALID; /* Pass extracted VLAN tag.
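* The hardware strips the tag and reports it in the RX descriptor; hand * it to the stack via iri_vtag with M_VLANTAG set.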
*/ if (status & ENETC_RXBD_FLAG_VLAN) { ri->iri_vtag = desc->r.vlan_opt; ri->iri_flags = M_VLANTAG; } for (i = 0; i < ENETC_MAX_SCATTER; i++) { buf_len = desc->r.buf_len; ri->iri_frags[i].irf_idx = cidx; ri->iri_frags[i].irf_len = buf_len; pkt_size += buf_len; if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F) break; if (++cidx == queue_len) cidx = 0; desc = &queue->ring[cidx]; } ri->iri_nfrags = i + 1; ri->iri_len = pkt_size; MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F); if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK)) return (EBADMSG); return (0); } static void enetc_isc_rxd_refill(void *data, if_rxd_update_t iru) { struct enetc_softc *sc = data; struct enetc_rx_queue *queue; union enetc_rx_bd *desc; qidx_t pidx, queue_len; uint64_t *paddrs; int i, count; queue = &sc->rx_queues[iru->iru_qsidx]; paddrs = iru->iru_paddrs; pidx = iru->iru_pidx; count = iru->iru_count; queue_len = sc->rx_queue_size; for (i = 0; i < count; i++) { desc = &queue->ring[pidx]; bzero(desc, sizeof(*desc)); desc->w.addr = paddrs[i]; if (++pidx == queue_len) pidx = 0; } /* * After enabling the queue, the NIC will prefetch the first * 8 descriptors. It probably assumes that the RX ring is fully * refilled when cidx == pidx. * Enable the queue only once we have enough descriptors ready on the ring. */ if (!queue->enabled && pidx >= 8) { ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr); queue->enabled = true; } } static void enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx) { struct enetc_softc *sc = data; ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx); } static uint64_t enetc_get_counter(if_ctx_t ctx, ift_counter cnt) { struct enetc_softc *sc; - struct ifnet *ifp; + if_t ifp; sc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_IERRORS: return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR)); case IFCOUNTER_OERRORS: return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR)); default: return (if_get_counter_default(ifp, cnt)); } } static int enetc_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct enetc_softc *sc = iflib_get_softc(ctx); uint32_t max_frame_size; max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + sizeof(struct ether_vlan_header); if (max_frame_size > ENETC_MAX_FRAME_LEN) return (EINVAL); sc->shared->isc_max_frame_size = max_frame_size; return (0); } static int enetc_promisc_set(if_ctx_t ctx, int flags) { struct enetc_softc *sc; uint32_t reg = 0; sc = iflib_get_softc(ctx); if (flags & IFF_PROMISC) reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0); else if (flags & IFF_ALLMULTI) reg = ENETC_PSIPMR_SET_MP(0); ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg); return (0); } static void enetc_timer(if_ctx_t ctx, uint16_t qid) { /* * Poll PHY status. Do this only for qid 0 to save * some cycles.
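* iflib invokes this callback for each TX queue; deferring to the admin * path runs enetc_update_admin_status() (and thus mii_tick()) just once.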
*/ if (qid == 0) iflib_admin_intr_deferred(ctx); } static void enetc_update_admin_status(if_ctx_t ctx) { struct enetc_softc *sc; struct mii_data *miid; sc = iflib_get_softc(ctx); if (!sc->fixed_link) { miid = device_get_softc(sc->miibus); mii_tick(miid); } } static int enetc_miibus_readreg(device_t dev, int phy, int reg) { struct enetc_softc *sc; int val; sc = iflib_get_softc(device_get_softc(dev)); mtx_lock(&sc->mii_lock); val = enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE, phy, reg); mtx_unlock(&sc->mii_lock); return (val); } static int enetc_miibus_writereg(device_t dev, int phy, int reg, int data) { struct enetc_softc *sc; int ret; sc = iflib_get_softc(device_get_softc(dev)); mtx_lock(&sc->mii_lock); ret = enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE, phy, reg, data); mtx_unlock(&sc->mii_lock); return (ret); } static void enetc_miibus_linkchg(device_t dev) { enetc_miibus_statchg(dev); } static void enetc_miibus_statchg(device_t dev) { struct enetc_softc *sc; struct mii_data *miid; int link_state, baudrate; sc = iflib_get_softc(device_get_softc(dev)); miid = device_get_softc(sc->miibus); baudrate = ifmedia_baudrate(miid->mii_media_active); if (miid->mii_media_status & IFM_AVALID) { if (miid->mii_media_status & IFM_ACTIVE) link_state = LINK_STATE_UP; else link_state = LINK_STATE_DOWN; } else { link_state = LINK_STATE_UNKNOWN; } iflib_link_state_change(sc->ctx, link_state, baudrate); } static int enetc_media_change(if_t ifp) { struct enetc_softc *sc; struct mii_data *miid; - sc = iflib_get_softc(ifp->if_softc); + sc = iflib_get_softc(if_getsoftc(ifp)); miid = device_get_softc(sc->miibus); mii_mediachg(miid); return (0); } static void enetc_media_status(if_t ifp, struct ifmediareq* ifmr) { struct enetc_softc *sc; struct mii_data *miid; - sc = iflib_get_softc(ifp->if_softc); + sc = iflib_get_softc(if_getsoftc(ifp)); miid = device_get_softc(sc->miibus); mii_pollstat(miid); ifmr->ifm_active = miid->mii_media_active; ifmr->ifm_status = miid->mii_media_status; } static int enetc_fixed_media_change(if_t ifp) { if_printf(ifp, "Can't change media in fixed-link mode.\n"); return (0); } static void enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr) { struct enetc_softc *sc; - sc = iflib_get_softc(ifp->if_softc); + sc = iflib_get_softc(if_getsoftc(ifp)); ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media; return; } diff --git a/sys/dev/igc/if_igc.c b/sys/dev/igc/if_igc.c index a98f0a78f474..60f796010232 100644 --- a/sys/dev/igc/if_igc.c +++ b/sys/dev/igc/if_igc.c @@ -1,2933 +1,2933 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2016 Nicole Graziano * All rights reserved. * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "if_igc.h" #include #include #ifdef RSS #include #include #endif /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last entry must be all 0s * * { Vendor ID, Device ID, String } *********************************************************************/ static pci_vendor_info_t igc_vendor_info_array[] = { /* Intel(R) PRO/1000 Network Connection - igc */ PVID(0x8086, IGC_DEV_ID_I225_LM, "Intel(R) Ethernet Controller I225-LM"), PVID(0x8086, IGC_DEV_ID_I225_V, "Intel(R) Ethernet Controller I225-V"), PVID(0x8086, IGC_DEV_ID_I225_K, "Intel(R) Ethernet Controller I225-K"), PVID(0x8086, IGC_DEV_ID_I225_I, "Intel(R) Ethernet Controller I225-I"), PVID(0x8086, IGC_DEV_ID_I220_V, "Intel(R) Ethernet Controller I220-V"), PVID(0x8086, IGC_DEV_ID_I225_K2, "Intel(R) Ethernet Controller I225-K(2)"), PVID(0x8086, IGC_DEV_ID_I225_LMVP, "Intel(R) Ethernet Controller I225-LMvP(2)"), PVID(0x8086, IGC_DEV_ID_I226_K, "Intel(R) Ethernet Controller I226-K"), PVID(0x8086, IGC_DEV_ID_I226_LMVP, "Intel(R) Ethernet Controller I226-LMvP"), PVID(0x8086, IGC_DEV_ID_I225_IT, "Intel(R) Ethernet Controller I225-IT(2)"), PVID(0x8086, IGC_DEV_ID_I226_LM, "Intel(R) Ethernet Controller I226-LM"), PVID(0x8086, IGC_DEV_ID_I226_V, "Intel(R) Ethernet Controller I226-V"), PVID(0x8086, IGC_DEV_ID_I226_IT, "Intel(R) Ethernet Controller I226-IT"), PVID(0x8086, IGC_DEV_ID_I221_V, "Intel(R) Ethernet Controller I221-V"), PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM, "Intel(R) Ethernet Controller I226(blankNVM)"), PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM, "Intel(R) Ethernet Controller I225(blankNVM)"), /* required last entry */ PVID_END }; /********************************************************************* * Function prototypes *********************************************************************/ static void *igc_register(device_t dev); static int igc_if_attach_pre(if_ctx_t ctx); static int igc_if_attach_post(if_ctx_t ctx); static int igc_if_detach(if_ctx_t ctx); static int igc_if_shutdown(if_ctx_t ctx); static int igc_if_suspend(if_ctx_t ctx); static int igc_if_resume(if_ctx_t ctx); static int igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); static int igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets); static void igc_if_queues_free(if_ctx_t ctx); static uint64_t igc_if_get_counter(if_ctx_t, ift_counter); static void igc_if_init(if_ctx_t ctx); static void igc_if_stop(if_ctx_t ctx); static void igc_if_media_status(if_ctx_t, struct ifmediareq *); static int igc_if_media_change(if_ctx_t ctx); static int igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void igc_if_timer(if_ctx_t ctx, uint16_t qid); static void igc_if_watchdog_reset(if_ctx_t ctx); static bool igc_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event); static void igc_identify_hardware(if_ctx_t ctx); static int 
igc_allocate_pci_resources(if_ctx_t ctx); static void igc_free_pci_resources(if_ctx_t ctx); static void igc_reset(if_ctx_t ctx); static int igc_setup_interface(if_ctx_t ctx); static int igc_setup_msix(if_ctx_t ctx); static void igc_initialize_transmit_unit(if_ctx_t ctx); static void igc_initialize_receive_unit(if_ctx_t ctx); static void igc_if_intr_enable(if_ctx_t ctx); static void igc_if_intr_disable(if_ctx_t ctx); static int igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); static int igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); static void igc_if_multi_set(if_ctx_t ctx); static void igc_if_update_admin_status(if_ctx_t ctx); static void igc_if_debug(if_ctx_t ctx); static void igc_update_stats_counters(struct igc_adapter *); static void igc_add_hw_stats(struct igc_adapter *adapter); static int igc_if_set_promisc(if_ctx_t ctx, int flags); static void igc_setup_vlan_hw_support(if_ctx_t ctx); static int igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); static void igc_print_nvm_info(struct igc_adapter *); static int igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static int igc_get_rs(SYSCTL_HANDLER_ARGS); static void igc_print_debug_info(struct igc_adapter *); static int igc_is_valid_ether_addr(u8 *); static int igc_sysctl_int_delay(SYSCTL_HANDLER_ARGS); static void igc_add_int_delay_sysctl(struct igc_adapter *, const char *, const char *, struct igc_int_delay_info *, int, int); /* Management and WOL Support */ static void igc_get_hw_control(struct igc_adapter *); static void igc_release_hw_control(struct igc_adapter *); static void igc_get_wakeup(if_ctx_t ctx); static void igc_enable_wakeup(if_ctx_t ctx); int igc_intr(void *arg); /* MSI-X handlers */ static int igc_if_msix_intr_assign(if_ctx_t, int); static int igc_msix_link(void *); static void igc_handle_link(void *context); static int igc_set_flowcntl(SYSCTL_HANDLER_ARGS); static int igc_sysctl_eee(SYSCTL_HANDLER_ARGS); static int igc_get_regs(SYSCTL_HANDLER_ARGS); static void igc_configure_queues(struct igc_adapter *adapter); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t igc_methods[] = { /* Device interface */ DEVMETHOD(device_register, igc_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD_END }; static driver_t igc_driver = { "igc", igc_methods, sizeof(struct igc_adapter), }; DRIVER_MODULE(igc, pci, igc_driver, 0, 0); MODULE_DEPEND(igc, pci, 1, 1, 1); MODULE_DEPEND(igc, ether, 1, 1, 1); MODULE_DEPEND(igc, iflib, 1, 1, 1); IFLIB_PNP_INFO(pci, igc, igc_vendor_info_array); static device_method_t igc_if_methods[] = { DEVMETHOD(ifdi_attach_pre, igc_if_attach_pre), DEVMETHOD(ifdi_attach_post, igc_if_attach_post), DEVMETHOD(ifdi_detach, igc_if_detach), DEVMETHOD(ifdi_shutdown, igc_if_shutdown), DEVMETHOD(ifdi_suspend, igc_if_suspend), DEVMETHOD(ifdi_resume, igc_if_resume), DEVMETHOD(ifdi_init, igc_if_init), DEVMETHOD(ifdi_stop, igc_if_stop), DEVMETHOD(ifdi_msix_intr_assign, igc_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, igc_if_intr_enable), DEVMETHOD(ifdi_intr_disable, igc_if_intr_disable), DEVMETHOD(ifdi_tx_queues_alloc, igc_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, igc_if_rx_queues_alloc), 
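	/*
	 * The methods below are invoked by the iflib core rather than by
	 * the network stack directly; iflib owns the locking around them.
	 */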
DEVMETHOD(ifdi_queues_free, igc_if_queues_free), DEVMETHOD(ifdi_update_admin_status, igc_if_update_admin_status), DEVMETHOD(ifdi_multi_set, igc_if_multi_set), DEVMETHOD(ifdi_media_status, igc_if_media_status), DEVMETHOD(ifdi_media_change, igc_if_media_change), DEVMETHOD(ifdi_mtu_set, igc_if_mtu_set), DEVMETHOD(ifdi_promisc_set, igc_if_set_promisc), DEVMETHOD(ifdi_timer, igc_if_timer), DEVMETHOD(ifdi_watchdog_reset, igc_if_watchdog_reset), DEVMETHOD(ifdi_get_counter, igc_if_get_counter), DEVMETHOD(ifdi_rx_queue_intr_enable, igc_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queue_intr_enable, igc_if_tx_queue_intr_enable), DEVMETHOD(ifdi_debug, igc_if_debug), DEVMETHOD(ifdi_needs_restart, igc_if_needs_restart), DEVMETHOD_END }; static driver_t igc_if_driver = { "igc_if", igc_if_methods, sizeof(struct igc_adapter) }; /********************************************************************* * Tunable default values. *********************************************************************/ #define IGC_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000) #define IGC_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024) #define MAX_INTS_PER_SEC 8000 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256)) /* Allow common code without TSO */ #ifndef CSUM_TSO #define CSUM_TSO 0 #endif static SYSCTL_NODE(_hw, OID_AUTO, igc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "igc driver parameters"); static int igc_disable_crc_stripping = 0; SYSCTL_INT(_hw_igc, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN, &igc_disable_crc_stripping, 0, "Disable CRC Stripping"); static int igc_tx_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_TIDV_VAL); static int igc_rx_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_RDTR_VAL); SYSCTL_INT(_hw_igc, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &igc_tx_int_delay_dflt, 0, "Default transmit interrupt delay in usecs"); SYSCTL_INT(_hw_igc, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &igc_rx_int_delay_dflt, 0, "Default receive interrupt delay in usecs"); static int igc_tx_abs_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_TADV_VAL); static int igc_rx_abs_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_RADV_VAL); SYSCTL_INT(_hw_igc, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN, &igc_tx_abs_int_delay_dflt, 0, "Default transmit interrupt delay limit in usecs"); SYSCTL_INT(_hw_igc, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN, &igc_rx_abs_int_delay_dflt, 0, "Default receive interrupt delay limit in usecs"); static int igc_smart_pwr_down = false; SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &igc_smart_pwr_down, 0, "Set to true to leave smart power down enabled on newer adapters"); /* Controls whether promiscuous also shows bad packets */ static int igc_debug_sbp = true; SYSCTL_INT(_hw_igc, OID_AUTO, sbp, CTLFLAG_RDTUN, &igc_debug_sbp, 0, "Show bad packets in promiscuous mode"); /* How many packets rxeof tries to clean at a time */ static int igc_rx_process_limit = 100; SYSCTL_INT(_hw_igc, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &igc_rx_process_limit, 0, "Maximum number of received packets to process " "at a time, -1 means unlimited"); /* Energy efficient ethernet - default to OFF */ static int igc_eee_setting = 1; SYSCTL_INT(_hw_igc, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &igc_eee_setting, 0, "Enable Energy Efficient Ethernet"); /* ** Tuneable Interrupt rate */ static int igc_max_interrupt_rate = 8000; SYSCTL_INT(_hw_igc, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, &igc_max_interrupt_rate, 0, "Maximum interrupts per second"); extern struct if_txrx igc_txrx; static struct if_shared_ctx igc_sctx_init = { .isc_magic = IFLIB_MAGIC, 
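	/*
	 * Everything below parameterizes iflib's descriptor ring and DMA
	 * allocations and is consumed once at attach time.
	 */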
.isc_q_align = PAGE_SIZE, .isc_tx_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tx_maxsegsize = PAGE_SIZE, .isc_tso_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = IGC_TSO_SEG_SIZE, .isc_rx_maxsize = MAX_JUMBO_FRAME_SIZE, .isc_rx_nsegments = 1, .isc_rx_maxsegsize = MJUM9BYTES, .isc_nfl = 1, .isc_nrxqs = 1, .isc_ntxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = igc_vendor_info_array, .isc_driver_version = "1", .isc_driver = &igc_if_driver, .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM, .isc_nrxd_min = {IGC_MIN_RXD}, .isc_ntxd_min = {IGC_MIN_TXD}, .isc_nrxd_max = {IGC_MAX_RXD}, .isc_ntxd_max = {IGC_MAX_TXD}, .isc_nrxd_default = {IGC_DEFAULT_RXD}, .isc_ntxd_default = {IGC_DEFAULT_TXD}, }; /***************************************************************** * * Dump Registers * ****************************************************************/ #define IGC_REGS_LEN 739 static int igc_get_regs(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter = (struct igc_adapter *)arg1; struct igc_hw *hw = &adapter->hw; struct sbuf *sb; u32 *regs_buff; int rc; regs_buff = malloc(sizeof(u32) * IGC_REGS_LEN, M_DEVBUF, M_WAITOK); memset(regs_buff, 0, IGC_REGS_LEN * sizeof(u32)); rc = sysctl_wire_old_buffer(req, 0); MPASS(rc == 0); if (rc != 0) { free(regs_buff, M_DEVBUF); return (rc); } sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req); MPASS(sb != NULL); if (sb == NULL) { free(regs_buff, M_DEVBUF); return (ENOMEM); } /* General Registers */ regs_buff[0] = IGC_READ_REG(hw, IGC_CTRL); regs_buff[1] = IGC_READ_REG(hw, IGC_STATUS); regs_buff[2] = IGC_READ_REG(hw, IGC_CTRL_EXT); regs_buff[3] = IGC_READ_REG(hw, IGC_ICR); regs_buff[4] = IGC_READ_REG(hw, IGC_RCTL); regs_buff[5] = IGC_READ_REG(hw, IGC_RDLEN(0)); regs_buff[6] = IGC_READ_REG(hw, IGC_RDH(0)); regs_buff[7] = IGC_READ_REG(hw, IGC_RDT(0)); regs_buff[8] = IGC_READ_REG(hw, IGC_RXDCTL(0)); regs_buff[9] = IGC_READ_REG(hw, IGC_RDBAL(0)); regs_buff[10] = IGC_READ_REG(hw, IGC_RDBAH(0)); regs_buff[11] = IGC_READ_REG(hw, IGC_TCTL); regs_buff[12] = IGC_READ_REG(hw, IGC_TDBAL(0)); regs_buff[13] = IGC_READ_REG(hw, IGC_TDBAH(0)); regs_buff[14] = IGC_READ_REG(hw, IGC_TDLEN(0)); regs_buff[15] = IGC_READ_REG(hw, IGC_TDH(0)); regs_buff[16] = IGC_READ_REG(hw, IGC_TDT(0)); regs_buff[17] = IGC_READ_REG(hw, IGC_TXDCTL(0)); sbuf_printf(sb, "General Registers\n"); sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]); sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]); sbuf_printf(sb, "\tCTRL_EXT\t %08x\n\n", regs_buff[2]); sbuf_printf(sb, "Interrupt Registers\n"); sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]); sbuf_printf(sb, "RX Registers\n"); sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]); sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]); sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]); sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]); sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]); sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]); sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]); sbuf_printf(sb, "TX Registers\n"); sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]); sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]); sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]); sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]); sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]); sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]); sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]); sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]); sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]); sbuf_printf(sb,
"\tTDFHS\t %08x\n", regs_buff[20]); sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]); free(regs_buff, M_DEVBUF); #ifdef DUMP_DESCS { if_softc_ctx_t scctx = adapter->shared; struct rx_ring *rxr = &rx_que->rxr; struct tx_ring *txr = &tx_que->txr; int ntxd = scctx->isc_ntxd[0]; int nrxd = scctx->isc_nrxd[0]; int j; for (j = 0; j < nrxd; j++) { u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error); u32 length = le32toh(rxr->rx_base[j].wb.upper.length); sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length); } for (j = 0; j < min(ntxd, 256); j++) { unsigned int *ptr = (unsigned int *)&txr->tx_base[j]; sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n", j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop, buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & IGC_TXD_STAT_DD : 0); } } #endif rc = sbuf_finish(sb); sbuf_delete(sb); return(rc); } static void * igc_register(device_t dev) { return (&igc_sctx_init); } static int igc_set_num_queues(if_ctx_t ctx) { int maxqueues; maxqueues = 4; return (maxqueues); } #define IGC_CAPS \ IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \ IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_TSO4 | IFCAP_LRO | \ IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 | IFCAP_TSO6 /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure *********************************************************************/ static int igc_if_attach_pre(if_ctx_t ctx) { struct igc_adapter *adapter; if_softc_ctx_t scctx; device_t dev; struct igc_hw *hw; int error = 0; INIT_DEBUGOUT("igc_if_attach_pre: begin"); dev = iflib_get_dev(ctx); adapter = iflib_get_softc(ctx); adapter->ctx = adapter->osdep.ctx = ctx; adapter->dev = adapter->osdep.dev = dev; scctx = adapter->shared = iflib_get_softc_ctx(ctx); adapter->media = iflib_get_media(ctx); hw = &adapter->hw; adapter->tx_process_limit = scctx->isc_ntxd[0]; /* SYSCTL stuff */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_sysctl_nvm_info, "I", "NVM Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_set_flowcntl, "I", "Flow Control"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "reg_dump", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0, igc_get_regs, "A", "Dump Registers"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rs_dump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_get_rs, "I", "Dump RS indexes"); /* Determine hardware and mac info */ igc_identify_hardware(ctx); scctx->isc_tx_nsegments = IGC_MAX_SCATTER; scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = igc_set_num_queues(ctx); if (bootverbose) device_printf(dev, "attach_pre capping queues at 
%d\n", scctx->isc_ntxqsets_max); scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN); scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc); scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc); scctx->isc_txrx = &igc_txrx; scctx->isc_tx_tso_segments_max = IGC_MAX_SCATTER; scctx->isc_tx_tso_size_max = IGC_TSO_SIZE; scctx->isc_tx_tso_segsize_max = IGC_TSO_SEG_SIZE; scctx->isc_capabilities = scctx->isc_capenable = IGC_CAPS; scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_SCTP | CSUM_IP6_SCTP; /* ** Some new devices, as with ixgbe, now may ** use a different BAR, so we need to keep ** track of which is used. */ scctx->isc_msix_bar = PCIR_BAR(IGC_MSIX_BAR); if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0) scctx->isc_msix_bar += 4; /* Setup PCI resources */ if (igc_allocate_pci_resources(ctx)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* Do Shared Code initialization */ error = igc_setup_init_funcs(hw, true); if (error) { device_printf(dev, "Setup of Shared code failed, error %d\n", error); error = ENXIO; goto err_pci; } igc_setup_msix(ctx); igc_get_bus_info(hw); /* Set up some sysctls for the tunable interrupt delays */ igc_add_int_delay_sysctl(adapter, "rx_int_delay", "receive interrupt delay in usecs", &adapter->rx_int_delay, IGC_REGISTER(hw, IGC_RDTR), igc_rx_int_delay_dflt); igc_add_int_delay_sysctl(adapter, "tx_int_delay", "transmit interrupt delay in usecs", &adapter->tx_int_delay, IGC_REGISTER(hw, IGC_TIDV), igc_tx_int_delay_dflt); igc_add_int_delay_sysctl(adapter, "rx_abs_int_delay", "receive interrupt delay limit in usecs", &adapter->rx_abs_int_delay, IGC_REGISTER(hw, IGC_RADV), igc_rx_abs_int_delay_dflt); igc_add_int_delay_sysctl(adapter, "tx_abs_int_delay", "transmit interrupt delay limit in usecs", &adapter->tx_abs_int_delay, IGC_REGISTER(hw, IGC_TADV), igc_tx_abs_int_delay_dflt); igc_add_int_delay_sysctl(adapter, "itr", "interrupt delay limit in usecs/4", &adapter->tx_itr, IGC_REGISTER(hw, IGC_ITR), DEFAULT_ITR); hw->mac.autoneg = DO_AUTO_NEG; hw->phy.autoneg_wait_to_complete = false; hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; /* Copper options */ if (hw->phy.media_type == igc_media_type_copper) { hw->phy.mdix = AUTO_ALL_MODES; } /* * Set the frame limits assuming * standard ethernet sized frames. */ scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; /* Allocate multicast array memory. */ adapter->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err_late; } /* Check SOL/IDER usage */ if (igc_check_reset_block(hw)) device_printf(dev, "PHY reset is blocked" " due to SOL/IDER session.\n"); /* Sysctl for setting Energy Efficient Ethernet */ adapter->hw.dev_spec._i225.eee_disable = igc_eee_setting; SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "eee_control", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_sysctl_eee, "I", "Disable Energy Efficient Ethernet"); /* ** Start from a known state, this is ** important in reading the nvm and ** mac from that. 
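** A reset also discards any wake-up and manageability state the
** firmware may have left behind, so the checksum validation and MAC
** read below see the documented power-on defaults.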
*/ igc_reset_hw(hw); /* Make sure we have a good EEPROM before we read from it */ if (igc_validate_nvm_checksum(hw) < 0) { /* ** Some PCI-E parts fail the first check due to ** the link being in sleep state, call it again, ** if it fails a second time its a real issue. */ if (igc_validate_nvm_checksum(hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_late; } } /* Copy the permanent MAC address out of the EEPROM */ if (igc_read_mac_addr(hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_late; } if (!igc_is_valid_ether_addr(hw->mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_late; } /* * Get Wake-on-Lan and Management info for later use */ igc_get_wakeup(ctx); /* Enable only WOL MAGIC by default */ scctx->isc_capenable &= ~IFCAP_WOL; if (adapter->wol != 0) scctx->isc_capenable |= IFCAP_WOL_MAGIC; iflib_set_mac(ctx, hw->mac.addr); return (0); err_late: igc_release_hw_control(adapter); err_pci: igc_free_pci_resources(ctx); free(adapter->mta, M_DEVBUF); return (error); } static int igc_if_attach_post(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; int error = 0; /* Setup OS specific network interface */ error = igc_setup_interface(ctx); if (error != 0) { goto err_late; } igc_reset(ctx); /* Initialize statistics */ igc_update_stats_counters(adapter); hw->mac.get_link_status = true; igc_if_update_admin_status(ctx); igc_add_hw_stats(adapter); /* the driver can now take control from firmware */ igc_get_hw_control(adapter); INIT_DEBUGOUT("igc_if_attach_post: end"); return (error); err_late: igc_release_hw_control(adapter); igc_free_pci_resources(ctx); igc_if_queues_free(ctx); free(adapter->mta, M_DEVBUF); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure *********************************************************************/ static int igc_if_detach(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("igc_if_detach: begin"); igc_phy_hw_reset(&adapter->hw); igc_release_hw_control(adapter); igc_free_pci_resources(ctx); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int igc_if_shutdown(if_ctx_t ctx) { return igc_if_suspend(ctx); } /* * Suspend/resume device methods. 
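 * Suspend hands control of the MAC back to the firmware and arms any
 * configured wake-up filters; resume simply re-runs the init path.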
*/ static int igc_if_suspend(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); igc_release_hw_control(adapter); igc_enable_wakeup(ctx); return (0); } static int igc_if_resume(if_ctx_t ctx) { igc_if_init(ctx); return(0); } static int igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { int max_frame_size; struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx); IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); /* 9K Jumbo Frame size */ max_frame_size = 9234; if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { return (EINVAL); } scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; return (0); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * **********************************************************************/ static void igc_if_init(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; - struct ifnet *ifp = iflib_get_ifp(ctx); + if_t ifp = iflib_get_ifp(ctx); struct igc_tx_queue *tx_que; int i; INIT_DEBUGOUT("igc_if_init: begin"); /* Get the latest mac address, User can use a LAA */ bcopy(if_getlladdr(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ igc_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* Initialize the hardware */ igc_reset(ctx); igc_if_update_admin_status(ctx); for (i = 0, tx_que = adapter->tx_queues; i < adapter->tx_num_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; txr->tx_rs_cidx = txr->tx_rs_pidx; /* Initialize the last processed descriptor to be the end of * the ring, rather than the start, so that we avoid an * off-by-one error when calculating how many descriptors are * done in the credits_update function. */ txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1; } /* Setup VLAN support, basic and offload if available */ IGC_WRITE_REG(&adapter->hw, IGC_VET, ETHERTYPE_VLAN); /* Prepare transmit descriptors and buffers */ igc_initialize_transmit_unit(ctx); /* Setup Multicast table */ igc_if_multi_set(ctx); adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); igc_initialize_receive_unit(ctx); /* Set up VLAN support */ igc_setup_vlan_hw_support(ctx); /* Don't lose promiscuous settings */ igc_if_set_promisc(ctx, if_getflags(ifp)); igc_clear_hw_cntrs_base_generic(&adapter->hw); if (adapter->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */ igc_configure_queues(adapter); /* this clears any pending interrupts */ IGC_READ_REG(&adapter->hw, IGC_ICR); IGC_WRITE_REG(&adapter->hw, IGC_ICS, IGC_ICS_LSC); /* the driver can now take control from firmware */ igc_get_hw_control(adapter); /* Set Energy Efficient Ethernet */ igc_set_eee_i225(&adapter->hw, true, true, true); } /********************************************************************* * * Fast Legacy/MSI Combined Interrupt Service routine * *********************************************************************/ int igc_intr(void *arg) { struct igc_adapter *adapter = arg; if_ctx_t ctx = adapter->ctx; u32 reg_icr; reg_icr = IGC_READ_REG(&adapter->hw, IGC_ICR); /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. 
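 * ICR is read-to-clear, so a value of zero means no cause bit was
 * pending for this device; on a shared line the interrupt belongs to
 * someone else.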
*/ if (reg_icr == 0x0) return FILTER_STRAY; if ((reg_icr & IGC_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; /* * Only MSI-X interrupts have one-shot behavior by taking advantage * of the EIAC register. Thus, explicitly disable interrupts. This * also works around the MSI message reordering errata on certain * systems. */ IFDI_INTR_DISABLE(ctx); /* Link status change */ if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) igc_handle_link(ctx); if (reg_icr & IGC_ICR_RXO) adapter->rx_overruns++; return (FILTER_SCHEDULE_THREAD); } static int igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_rx_queue *rxq = &adapter->rx_queues[rxqid]; IGC_WRITE_REG(&adapter->hw, IGC_EIMS, rxq->eims); return (0); } static int igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_tx_queue *txq = &adapter->tx_queues[txqid]; IGC_WRITE_REG(&adapter->hw, IGC_EIMS, txq->eims); return (0); } /********************************************************************* * * MSI-X RX Interrupt Service routine * **********************************************************************/ static int igc_msix_que(void *arg) { struct igc_rx_queue *que = arg; ++que->irqs; return (FILTER_SCHEDULE_THREAD); } /********************************************************************* * * MSI-X Link Fast Interrupt Service routine * **********************************************************************/ static int igc_msix_link(void *arg) { struct igc_adapter *adapter = arg; u32 reg_icr; ++adapter->link_irq; MPASS(adapter->hw.back != NULL); reg_icr = IGC_READ_REG(&adapter->hw, IGC_ICR); if (reg_icr & IGC_ICR_RXO) adapter->rx_overruns++; if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { igc_handle_link(adapter->ctx); } IGC_WRITE_REG(&adapter->hw, IGC_IMS, IGC_IMS_LSC); IGC_WRITE_REG(&adapter->hw, IGC_EIMS, adapter->link_mask); return (FILTER_HANDLED); } static void igc_handle_link(void *context) { if_ctx_t ctx = context; struct igc_adapter *adapter = iflib_get_softc(ctx); adapter->hw.mac.get_link_status = true; iflib_admin_intr_deferred(ctx); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct igc_adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("igc_if_media_status: begin"); iflib_admin_intr_deferred(ctx); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { return; } ifmr->ifm_status |= IFM_ACTIVE; switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; case 2500: ifmr->ifm_active |= IFM_2500_T; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. 
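 * (ifconfig's "media" and "mediaopt" keywords); the requested subtype
 * is mapped onto the PHY autonegotiation advertisement mask below.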
* **********************************************************************/ static int igc_if_media_change(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); INIT_DEBUGOUT("igc_if_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); adapter->hw.mac.autoneg = DO_AUTO_NEG; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_2500_T: adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; break; case IFM_1000_T: adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.phy.autoneg_advertised = ADVERTISE_100_FULL; else adapter->hw.phy.autoneg_advertised = ADVERTISE_100_HALF; break; case IFM_10_T: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.phy.autoneg_advertised = ADVERTISE_10_FULL; else adapter->hw.phy.autoneg_advertised = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } igc_if_init(ctx); return (0); } static int igc_if_set_promisc(if_ctx_t ctx, int flags) { struct igc_adapter *adapter = iflib_get_softc(ctx); - struct ifnet *ifp = iflib_get_ifp(ctx); + if_t ifp = iflib_get_ifp(ctx); u32 reg_rctl; int mcnt = 0; reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL); reg_rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_UPE); if (flags & IFF_ALLMULTI) mcnt = MAX_NUM_MULTICAST_ADDRESSES; else mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); /* Don't disable if in MAX groups */ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) reg_rctl &= (~IGC_RCTL_MPE); IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl); if (flags & IFF_PROMISC) { reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); /* Turn this on if you want to see bad packets */ if (igc_debug_sbp) reg_rctl |= IGC_RCTL_SBP; IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl); } else if (flags & IFF_ALLMULTI) { reg_rctl |= IGC_RCTL_MPE; reg_rctl &= ~IGC_RCTL_UPE; IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl); } return (0); } static u_int igc_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx) { u8 *mta = arg; if (idx == MAX_NUM_MULTICAST_ADDRESSES) return (0); bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN); return (1); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. 
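 *
 * The list is rebuilt from scratch each time: if_foreach_llmaddr()
 * walks the interface's link-level multicast addresses and hands each
 * one to a callback, igc_copy_maddr() above.  A minimal sketch of such
 * a callback, assuming only a count is wanted (the helper name is
 * illustrative and not part of this change):
 */
#ifdef notdef
static u_int
example_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
{
	/* A nonzero return adds this address to if_foreach_llmaddr()'s total. */
	return (1);
}
#endif
/*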
* **********************************************************************/ static void igc_if_multi_set(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); - struct ifnet *ifp = iflib_get_ifp(ctx); + if_t ifp = iflib_get_ifp(ctx); u8 *mta; /* Multicast array memory */ u32 reg_rctl = 0; int mcnt = 0; IOCTL_DEBUGOUT("igc_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); mcnt = if_foreach_llmaddr(ifp, igc_copy_maddr, mta); reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL); if (if_getflags(ifp) & IFF_PROMISC) { reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); /* Turn this on if you want to see bad packets */ if (igc_debug_sbp) reg_rctl |= IGC_RCTL_SBP; } else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || if_getflags(ifp) & IFF_ALLMULTI) { reg_rctl |= IGC_RCTL_MPE; reg_rctl &= ~IGC_RCTL_UPE; } else reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE); if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) igc_update_mc_addr_list(&adapter->hw, mta, mcnt); IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl); } /********************************************************************* * Timer routine * * This routine schedules igc_if_update_admin_status() to check for * link status and to gather statistics as well as to perform some * controller-specific hardware patting. * **********************************************************************/ static void igc_if_timer(if_ctx_t ctx, uint16_t qid) { if (qid != 0) return; iflib_admin_intr_deferred(ctx); } static void igc_if_update_admin_status(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; device_t dev = iflib_get_dev(ctx); u32 link_check, thstat, ctrl; link_check = thstat = ctrl = 0; /* Get the cached link value or read phy for real */ switch (hw->phy.media_type) { case igc_media_type_copper: if (hw->mac.get_link_status == true) { /* Do the work to read phy */ igc_check_for_link(hw); link_check = !hw->mac.get_link_status; } else link_check = true; break; case igc_media_type_unknown: igc_check_for_link(hw); link_check = !hw->mac.get_link_status; /* FALLTHROUGH */ default: break; } /* Now check for a transition */ if (link_check && (adapter->link_active == 0)) { igc_get_speed_and_duplex(hw, &adapter->link_speed, &adapter->link_duplex); if (bootverbose) device_printf(dev, "Link is up %d Mbps %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? "Full Duplex" : "Half Duplex")); adapter->link_active = 1; iflib_link_state_change(ctx, LINK_STATE_UP, IF_Mbps(adapter->link_speed)); } else if (!link_check && (adapter->link_active == 1)) { adapter->link_speed = 0; adapter->link_duplex = 0; adapter->link_active = 0; iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); } igc_update_stats_counters(adapter); } static void igc_if_watchdog_reset(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); /* * Just count the event; iflib(4) will already trigger a * sufficient reset of the controller. */ adapter->watchdog_events++; } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC. 
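 * Clearing WUC afterwards ensures no stale wake-up filter stays armed
 * while the interface is stopped.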
* **********************************************************************/ static void igc_if_stop(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("igc_if_stop: begin"); igc_reset_hw(&adapter->hw); IGC_WRITE_REG(&adapter->hw, IGC_WUC, 0); } /********************************************************************* * * Determine hardware revision. * **********************************************************************/ static void igc_identify_hardware(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct igc_adapter *adapter = iflib_get_softc(ctx); /* Make sure our PCI config space has the necessary stuff set */ adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Do Shared Code Init and Setup */ if (igc_set_mac_type(&adapter->hw)) { device_printf(dev, "Setup init failure\n"); return; } } static int igc_allocate_pci_resources(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); int rid; rid = PCIR_BAR(0); adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->memory == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->memory); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; adapter->hw.back = &adapter->osdep; return (0); } /********************************************************************* * * Set up the MSI-X Interrupt handlers * **********************************************************************/ static int igc_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_rx_queue *rx_que = adapter->rx_queues; struct igc_tx_queue *tx_que = adapter->tx_queues; int error, rid, i, vector = 0, rx_vectors; char buf[16]; /* First set up ring resources */ for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) { rid = vector + 1; snprintf(buf, sizeof(buf), "rxq%d", i); error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf); if (error) { device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error); adapter->rx_num_queues = i + 1; goto fail; } rx_que->msix = vector; /* * Set the bit to enable interrupt * in IGC_IMS -- bits 20 and 21 * are for RX0 and RX1, note this has * NOTHING to do with the MSI-X vector */ rx_que->eims = 1 << vector; } rx_vectors = vector; vector = 0; for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) { snprintf(buf, sizeof(buf), "txq%d", i); tx_que = &adapter->tx_queues[i]; iflib_softirq_alloc_generic(ctx, &adapter->rx_queues[i % adapter->rx_num_queues].que_irq, IFLIB_INTR_TX, tx_que, tx_que->me, buf); tx_que->msix = (vector % adapter->rx_num_queues); /* * Set the bit to enable interrupt * in IGC_IMS -- bits 22 and 23 * are for TX0 and TX1, note this has * NOTHING to do with the MSI-X vector */ tx_que->eims = 1 << i; } /* Link interrupt */ rid = rx_vectors + 1; error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, 
igc_msix_link, adapter, 0, "aq"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register admin handler"); goto fail; } adapter->linkvec = rx_vectors; return (0); fail: iflib_irq_free(ctx, &adapter->irq); rx_que = adapter->rx_queues; for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++) iflib_irq_free(ctx, &rx_que->que_irq); return (error); } static void igc_configure_queues(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; struct igc_rx_queue *rx_que; struct igc_tx_queue *tx_que; u32 ivar = 0, newitr = 0; /* First turn on RSS capability */ IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | IGC_GPIE_PBA | IGC_GPIE_NSICR); /* Turn on MSI-X */ /* RX entries */ for (int i = 0; i < adapter->rx_num_queues; i++) { u32 index = i >> 1; ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index); rx_que = &adapter->rx_queues[i]; if (i & 1) { ivar &= 0xFF00FFFF; ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16; } else { ivar &= 0xFFFFFF00; ivar |= rx_que->msix | IGC_IVAR_VALID; } IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar); } /* TX entries */ for (int i = 0; i < adapter->tx_num_queues; i++) { u32 index = i >> 1; ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index); tx_que = &adapter->tx_queues[i]; if (i & 1) { ivar &= 0x00FFFFFF; ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24; } else { ivar &= 0xFFFF00FF; ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8; } IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar); adapter->que_mask |= tx_que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | IGC_IVAR_VALID) << 8; adapter->link_mask = 1 << adapter->linkvec; IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar); /* Set the starting interrupt rate */ if (igc_max_interrupt_rate > 0) newitr = (4000000 / igc_max_interrupt_rate) & 0x7FFC; newitr |= IGC_EITR_CNT_IGNR; for (int i = 0; i < adapter->rx_num_queues; i++) { rx_que = &adapter->rx_queues[i]; IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr); } return; } static void igc_free_pci_resources(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_rx_queue *que = adapter->rx_queues; device_t dev = iflib_get_dev(ctx); /* Release all MSI-X queue resources */ if (adapter->intr_type == IFLIB_INTR_MSIX) iflib_irq_free(ctx, &adapter->irq); for (int i = 0; i < adapter->rx_num_queues; i++, que++) { iflib_irq_free(ctx, &que->que_irq); } if (adapter->memory != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(adapter->memory), adapter->memory); adapter->memory = NULL; } if (adapter->flash != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(adapter->flash), adapter->flash); adapter->flash = NULL; } if (adapter->ioport != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, rman_get_rid(adapter->ioport), adapter->ioport); adapter->ioport = NULL; } } /* Set up MSI or MSI-X */ static int igc_setup_msix(if_ctx_t ctx) { return (0); } /********************************************************************* * * Initialize the DMA Coalescing feature * **********************************************************************/ static void igc_init_dmac(struct igc_adapter *adapter, u32 pba) { device_t dev = adapter->dev; struct igc_hw *hw = &adapter->hw; u32 dmac, reg = ~IGC_DMACR_DMAC_EN; u16 hwm; u16 max_frame_size; int status; max_frame_size = adapter->shared->isc_max_frame_size; if (adapter->dmac == 0) { /* Disabling it */ IGC_WRITE_REG(hw, IGC_DMACR, reg); return; } else device_printf(dev, "DMA Coalescing enabled\n"); /* Set starting threshold */ IGC_WRITE_REG(hw, IGC_DMCTXTH, 0); hwm = 64 * pba - 
max_frame_size / 16; if (hwm < 64 * (pba - 6)) hwm = 64 * (pba - 6); reg = IGC_READ_REG(hw, IGC_FCRTC); reg &= ~IGC_FCRTC_RTH_COAL_MASK; reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT) & IGC_FCRTC_RTH_COAL_MASK); IGC_WRITE_REG(hw, IGC_FCRTC, reg); dmac = pba - max_frame_size / 512; if (dmac < pba - 10) dmac = pba - 10; reg = IGC_READ_REG(hw, IGC_DMACR); reg &= ~IGC_DMACR_DMACTHR_MASK; reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT) & IGC_DMACR_DMACTHR_MASK); /* transition to L0x or L1 if available..*/ reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK); /* Check if status is 2.5Gb backplane connection * before configuration of watchdog timer, which is * in msec values in 12.8usec intervals * watchdog timer= msec values in 32usec intervals * for non 2.5Gb connection */ status = IGC_READ_REG(hw, IGC_STATUS); if ((status & IGC_STATUS_2P5_SKU) && (!(status & IGC_STATUS_2P5_SKU_OVER))) reg |= ((adapter->dmac * 5) >> 6); else reg |= (adapter->dmac >> 5); IGC_WRITE_REG(hw, IGC_DMACR, reg); IGC_WRITE_REG(hw, IGC_DMCRTRH, 0); /* Set the interval before transition */ reg = IGC_READ_REG(hw, IGC_DMCTLX); reg |= IGC_DMCTLX_DCFLUSH_DIS; /* ** in 2.5Gb connection, TTLX unit is 0.4 usec ** which is 0x4*2 = 0xA. But delay is still 4 usec */ status = IGC_READ_REG(hw, IGC_STATUS); if ((status & IGC_STATUS_2P5_SKU) && (!(status & IGC_STATUS_2P5_SKU_OVER))) reg |= 0xA; else reg |= 0x4; IGC_WRITE_REG(hw, IGC_DMCTLX, reg); /* free space in tx packet buffer to wake from DMA coal */ IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE - (2 * max_frame_size)) >> 6); /* make low power state decision controlled by DMA coal */ reg = IGC_READ_REG(hw, IGC_PCIEMISC); reg &= ~IGC_PCIEMISC_LX_DECISION; IGC_WRITE_REG(hw, IGC_PCIEMISC, reg); } /********************************************************************* * * Initialize the hardware to a configuration as specified by the * adapter structure. * **********************************************************************/ static void igc_reset(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; u32 rx_buffer_size; u32 pba; INIT_DEBUGOUT("igc_reset: begin"); /* Let the firmware know the OS is in control */ igc_get_hw_control(adapter); /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer * the remainder is used for the transmit buffer. */ pba = IGC_PBA_34K; INIT_DEBUGOUT1("igc_reset: pba=%dK",pba); /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. Here we use an arbitrary value of 1500 which will * restart after one full frame is pulled from the buffer. There * could be several smaller frames in the buffer and if so they will * not trigger the XON until their total number reduces the buffer * by 1500. * - The pause time is fairly large at 1000 x 512ns = 512 usec. */ rx_buffer_size = (pba & 0xffff) << 10; hw->fc.high_water = rx_buffer_size - roundup2(adapter->hw.mac.max_frame_size, 1024); /* 16-byte granularity */ hw->fc.low_water = hw->fc.high_water - 16; if (adapter->fc) /* locally set flow control value? 
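 * (a nonzero adapter->fc comes from the "fc" sysctl handler,
 * igc_set_flowcntl(); otherwise full flow control is requested below)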
*/ hw->fc.requested_mode = adapter->fc; else hw->fc.requested_mode = igc_fc_full; hw->fc.pause_time = IGC_FC_PAUSE_TIME; hw->fc.send_xon = true; /* Issue a global reset */ igc_reset_hw(hw); IGC_WRITE_REG(hw, IGC_WUC, 0); /* and a re-init */ if (igc_init_hw(hw) < 0) { device_printf(dev, "Hardware Initialization Failed\n"); return; } /* Setup DMA Coalescing */ igc_init_dmac(adapter, pba); IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN); igc_get_phy_info(hw); igc_check_for_link(hw); } /* * Initialise the RSS mapping for NICs that support multiple transmit/ * receive rings. */ #define RSSKEYLEN 10 static void igc_initialize_rss_mapping(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; int i; int queue_id; u32 reta; u32 rss_key[RSSKEYLEN], mrqc, shift = 0; /* * The redirection table controls which destination * queue each bucket redirects traffic to. * Each DWORD represents four queues, with the LSB * being the first queue in the DWORD. * * This just allocates buckets to queues using round-robin * allocation. * * NOTE: It Just Happens to line up with the default * RSS allocation method. */ /* Warning FM follows */ reta = 0; for (i = 0; i < 128; i++) { #ifdef RSS queue_id = rss_get_indirection_to_bucket(i); /* * If we have more queues than buckets, we'll * end up mapping buckets to a subset of the * queues. * * If we have more buckets than queues, we'll * end up instead assigning multiple buckets * to queues. * * Both are suboptimal, but we need to handle * the case so we don't go out of bounds * indexing arrays and such. */ queue_id = queue_id % adapter->rx_num_queues; #else queue_id = (i % adapter->rx_num_queues); #endif /* Adjust if required */ queue_id = queue_id << shift; /* * The low 8 bits are for hash value (n+0); * The next 8 bits are for hash value (n+1), etc. */ reta = reta >> 8; reta = reta | ( ((uint32_t) queue_id) << 24); if ((i & 3) == 3) { IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta); reta = 0; } } /* Now fill in hash table */ /* * MRQC: Multiple Receive Queues Command * Set queuing to RSS control, number depends on the device. */ mrqc = IGC_MRQC_ENABLE_RSS_4Q; #ifdef RSS /* XXX ew typecasting */ rss_getkey((uint8_t *) &rss_key); #else arc4rand(&rss_key, sizeof(rss_key), 0); #endif for (i = 0; i < RSSKEYLEN; i++) IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]); /* * Configure the RSS fields to hash upon. */ mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP); mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP); mrqc |=( IGC_MRQC_RSS_FIELD_IPV4_UDP | IGC_MRQC_RSS_FIELD_IPV6_UDP); mrqc |=( IGC_MRQC_RSS_FIELD_IPV6_UDP_EX | IGC_MRQC_RSS_FIELD_IPV6_TCP_EX); IGC_WRITE_REG(hw, IGC_MRQC, mrqc); } /********************************************************************* * * Setup networking device structure and register interface media. 
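 *
 * This function is also one of the places converted from 'struct ifnet *'
 * to the opaque if_t type.  A minimal sketch of the accessor pattern the
 * conversion applies throughout (the function name is illustrative only):
 */
#ifdef notdef
static void
example_if_t_accessors(if_t ifp)
{
	void *softc;

	softc = if_getsoftc(ifp);	/* was: ifp->if_softc */
	if_setsendqlen(ifp, 1024);	/* was: ifp->if_snd.ifq_maxlen = 1024 */
	(void)softc;
}
#endif
/*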
* **********************************************************************/ static int igc_setup_interface(if_ctx_t ctx) { - struct ifnet *ifp = iflib_get_ifp(ctx); + if_t ifp = iflib_get_ifp(ctx); struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; INIT_DEBUGOUT("igc_setup_interface: begin"); /* Single Queue */ if (adapter->tx_num_queues == 1) { if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1); if_setsendqready(ifp); } /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_T, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); return (0); } static int igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; int error = IGC_SUCCESS; struct igc_tx_queue *que; int i, j; MPASS(adapter->tx_num_queues > 0); MPASS(adapter->tx_num_queues == ntxqsets); /* First allocate the top level queue structs */ if (!(adapter->tx_queues = (struct igc_tx_queue *) malloc(sizeof(struct igc_tx_queue) * adapter->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); return(ENOMEM); } for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) { /* Set up some basics */ struct tx_ring *txr = &que->txr; txr->adapter = que->adapter = adapter; que->me = txr->me = i; /* Allocate report status array */ if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n"); error = ENOMEM; goto fail; } for (j = 0; j < scctx->isc_ntxd[0]; j++) txr->tx_rsq[j] = QIDX_INVALID; /* get the virtual and physical address of the hardware queues */ txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs]; txr->tx_paddr = paddrs[i*ntxqs]; } if (bootverbose) device_printf(iflib_get_dev(ctx), "allocated for %d tx_queues\n", adapter->tx_num_queues); return (0); fail: igc_if_queues_free(ctx); return (error); } static int igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct igc_adapter *adapter = iflib_get_softc(ctx); int error = IGC_SUCCESS; struct igc_rx_queue *que; int i; MPASS(adapter->rx_num_queues > 0); MPASS(adapter->rx_num_queues == nrxqsets); /* First allocate the top level queue structs */ if (!(adapter->rx_queues = (struct igc_rx_queue *) malloc(sizeof(struct igc_rx_queue) * adapter->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); error = ENOMEM; goto fail; } for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { /* Set up some basics */ struct rx_ring *rxr = &que->rxr; rxr->adapter = que->adapter = adapter; rxr->que = que; que->me = rxr->me = i; /* get the virtual and physical address of the hardware queues */ rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs]; 
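		/*
		 * iflib hands the rings over queue-major, so ring i starts at
		 * index i * nrxqs; rx_paddr below is what RDBAL/RDBAH get
		 * programmed with in igc_initialize_receive_unit().
		 */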
rxr->rx_paddr = paddrs[i*nrxqs]; } if (bootverbose) device_printf(iflib_get_dev(ctx), "allocated for %d rx_queues\n", adapter->rx_num_queues); return (0); fail: igc_if_queues_free(ctx); return (error); } static void igc_if_queues_free(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_tx_queue *tx_que = adapter->tx_queues; struct igc_rx_queue *rx_que = adapter->rx_queues; if (tx_que != NULL) { for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; if (txr->tx_rsq == NULL) break; free(txr->tx_rsq, M_DEVBUF); txr->tx_rsq = NULL; } free(adapter->tx_queues, M_DEVBUF); adapter->tx_queues = NULL; } if (rx_que != NULL) { free(adapter->rx_queues, M_DEVBUF); adapter->rx_queues = NULL; } igc_release_hw_control(adapter); if (adapter->mta != NULL) { free(adapter->mta, M_DEVBUF); } } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void igc_initialize_transmit_unit(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct igc_tx_queue *que; struct tx_ring *txr; struct igc_hw *hw = &adapter->hw; u32 tctl, txdctl = 0; INIT_DEBUGOUT("igc_initialize_transmit_unit: begin"); for (int i = 0; i < adapter->tx_num_queues; i++, txr++) { u64 bus_addr; caddr_t offp, endp; que = &adapter->tx_queues[i]; txr = &que->txr; bus_addr = txr->tx_paddr; /* Clear checksum offload context. */ offp = (caddr_t)&txr->csum_flags; endp = (caddr_t)(txr + 1); bzero(offp, endp - offp); /* Base and Len of TX Ring */ IGC_WRITE_REG(hw, IGC_TDLEN(i), scctx->isc_ntxd[0] * sizeof(struct igc_tx_desc)); IGC_WRITE_REG(hw, IGC_TDBAH(i), (u32)(bus_addr >> 32)); IGC_WRITE_REG(hw, IGC_TDBAL(i), (u32)bus_addr); /* Init the HEAD/TAIL indices */ IGC_WRITE_REG(hw, IGC_TDT(i), 0); IGC_WRITE_REG(hw, IGC_TDH(i), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", IGC_READ_REG(&adapter->hw, IGC_TDBAL(i)), IGC_READ_REG(&adapter->hw, IGC_TDLEN(i))); txdctl = 0; /* clear txdctl */ txdctl |= 0x1f; /* PTHRESH */ txdctl |= 1 << 8; /* HTHRESH */ txdctl |= 1 << 16;/* WTHRESH */ txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ txdctl |= IGC_TXDCTL_GRAN; txdctl |= 1 << 25; /* LWTHRESH */ IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl); } /* Program the Transmit Control Register */ tctl = IGC_READ_REG(&adapter->hw, IGC_TCTL); tctl &= ~IGC_TCTL_CT; tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN | (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT)); /* This write will effectively turn on the transmit unit. */ IGC_WRITE_REG(&adapter->hw, IGC_TCTL, tctl); } /********************************************************************* * * Enable receive unit. 
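 *
 * The per-queue SRRCTL register encodes the receive buffer size in
 * 1 KB units.  A minimal sketch of that encoding, assuming only the
 * BSIZEPKT field matters (the helper name is illustrative only):
 */
#ifdef notdef
static inline u32
example_srrctl_bsizepkt(u32 buf_bytes)
{
	/* BSIZEPKT is in KB units: a 4096-byte buffer encodes as 4. */
	return (buf_bytes >> IGC_SRRCTL_BSIZEPKT_SHIFT);
}
#endif
/*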
* **********************************************************************/ static void igc_initialize_receive_unit(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; - struct ifnet *ifp = iflib_get_ifp(ctx); + if_t ifp = iflib_get_ifp(ctx); struct igc_hw *hw = &adapter->hw; struct igc_rx_queue *que; int i; u32 psize, rctl, rxcsum, srrctl = 0; INIT_DEBUGOUT("igc_initialize_receive_units: begin"); /* * Make sure receives are disabled while setting * up the descriptor ring */ rctl = IGC_READ_REG(hw, IGC_RCTL); IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN); /* Setup the Receive Control Register */ rctl &= ~(3 << IGC_RCTL_MO_SHIFT); rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO | IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); /* Do not store bad packets */ rctl &= ~IGC_RCTL_SBP; /* Enable Long Packet receive */ if (if_getmtu(ifp) > ETHERMTU) rctl |= IGC_RCTL_LPE; else rctl &= ~IGC_RCTL_LPE; /* Strip the CRC */ if (!igc_disable_crc_stripping) rctl |= IGC_RCTL_SECRC; /* * Set the interrupt throttling rate. Value is calculated * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */ IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR); rxcsum = IGC_READ_REG(hw, IGC_RXCSUM); if (if_getcapenable(ifp) & IFCAP_RXCSUM) { rxcsum |= IGC_RXCSUM_CRCOFL; if (adapter->tx_num_queues > 1) rxcsum |= IGC_RXCSUM_PCSD; else rxcsum |= IGC_RXCSUM_IPPCSE; } else { if (adapter->tx_num_queues > 1) rxcsum |= IGC_RXCSUM_PCSD; else rxcsum &= ~IGC_RXCSUM_TUOFL; } IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum); if (adapter->rx_num_queues > 1) igc_initialize_rss_mapping(adapter); if (if_getmtu(ifp) > ETHERMTU) { /* Set maximum packet len */ if (adapter->rx_mbuf_sz <= 4096) { srrctl |= 4096 >> IGC_SRRCTL_BSIZEPKT_SHIFT; rctl |= IGC_RCTL_SZ_4096 | IGC_RCTL_BSEX; } else if (adapter->rx_mbuf_sz > 4096) { srrctl |= 8192 >> IGC_SRRCTL_BSIZEPKT_SHIFT; rctl |= IGC_RCTL_SZ_8192 | IGC_RCTL_BSEX; } psize = scctx->isc_max_frame_size; /* are we on a vlan? */ - if (ifp->if_vlantrunk != NULL) + if (if_vlantrunkinuse(ifp)) psize += VLAN_TAG_SIZE; IGC_WRITE_REG(&adapter->hw, IGC_RLPML, psize); } else { srrctl |= 2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT; rctl |= IGC_RCTL_SZ_2048; } /* * If TX flow control is disabled and there's >1 queue defined, * enable DROP. * * This drops frames rather than hanging the RX MAC for all queues. */ if ((adapter->rx_num_queues > 1) && (adapter->fc == igc_fc_none || adapter->fc == igc_fc_rx_pause)) { srrctl |= IGC_SRRCTL_DROP_EN; } /* Setup the Base and Length of the Rx Descriptor Rings */ for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; u64 bus_addr = rxr->rx_paddr; u32 rxdctl; #ifdef notyet /* Configure for header split? 
-- ignore for now */ rxr->hdr_split = igc_header_split; #else srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; #endif IGC_WRITE_REG(hw, IGC_RDLEN(i), scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc)); IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32)); IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr); IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl); /* Setup the Head and Tail Descriptor Pointers */ IGC_WRITE_REG(hw, IGC_RDH(i), 0); IGC_WRITE_REG(hw, IGC_RDT(i), 0); /* Enable this Queue */ rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i)); rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; rxdctl &= 0xFFF00000; rxdctl |= IGC_RX_PTHRESH; rxdctl |= IGC_RX_HTHRESH << 8; rxdctl |= IGC_RX_WTHRESH << 16; IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl); } /* Make sure VLAN Filters are off */ rctl &= ~IGC_RCTL_VFE; /* Write out the settings */ IGC_WRITE_REG(hw, IGC_RCTL, rctl); return; } static void igc_setup_vlan_hw_support(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; struct ifnet *ifp = iflib_get_ifp(ctx); u32 reg; /* igc hardware doesn't seem to implement VFTA for HWFILTER */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING && !igc_disable_crc_stripping) { reg = IGC_READ_REG(hw, IGC_CTRL); reg |= IGC_CTRL_VME; IGC_WRITE_REG(hw, IGC_CTRL, reg); } else { reg = IGC_READ_REG(hw, IGC_CTRL); reg &= ~IGC_CTRL_VME; IGC_WRITE_REG(hw, IGC_CTRL, reg); } } static void igc_if_intr_enable(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; u32 mask; if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) { mask = (adapter->que_mask | adapter->link_mask); IGC_WRITE_REG(hw, IGC_EIAC, mask); IGC_WRITE_REG(hw, IGC_EIAM, mask); IGC_WRITE_REG(hw, IGC_EIMS, mask); IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC); } else IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK); IGC_WRITE_FLUSH(hw); } static void igc_if_intr_disable(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) { IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff); IGC_WRITE_REG(hw, IGC_EIAC, 0); } IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff); IGC_WRITE_FLUSH(hw); } /* * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means * that the driver is loaded. For AMT version type f/w * this means that the network i/f is open. */ static void igc_get_hw_control(struct igc_adapter *adapter) { u32 ctrl_ext; if (adapter->vf_ifp) return; ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT); IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); } /* * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is no longer loaded. For AMT versions of the * f/w this means that the network i/f is closed. */ static void igc_release_hw_control(struct igc_adapter *adapter) { u32 ctrl_ext; ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT); IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); return; } static int igc_is_valid_ether_addr(u8 *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { return (false); } return (true); } /* ** Parse the interface capabilities with regard ** to both system management and wake-on-lan for ** later use. 
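** Only when the NVM-latched APME bit is set in WUC does the driver arm
** IGC_WUFC_LNKC (wake on link change) as its default wake-up filter.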
*/ static void igc_get_wakeup(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); u16 eeprom_data = 0, apme_mask; apme_mask = IGC_WUC_APME; eeprom_data = IGC_READ_REG(&adapter->hw, IGC_WUC); if (eeprom_data & apme_mask) adapter->wol = IGC_WUFC_LNKC; } /* * Enable PCI Wake On Lan capability */ static void igc_enable_wakeup(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); if_t ifp = iflib_get_ifp(ctx); int error = 0; u32 pmc, ctrl, rctl; u16 status; if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0) return; /* * Determine type of Wakeup: note that wol * is set with all bits on by default. */ if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0) adapter->wol &= ~IGC_WUFC_MAG; if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0) adapter->wol &= ~IGC_WUFC_EX; if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0) adapter->wol &= ~IGC_WUFC_MC; else { rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL); rctl |= IGC_RCTL_MPE; IGC_WRITE_REG(&adapter->hw, IGC_RCTL, rctl); } if (!(adapter->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC))) goto pme; /* Advertise the wakeup capability */ ctrl = IGC_READ_REG(&adapter->hw, IGC_CTRL); ctrl |= IGC_CTRL_ADVD3WUC; IGC_WRITE_REG(&adapter->hw, IGC_CTRL, ctrl); /* Enable wakeup by the MAC */ IGC_WRITE_REG(&adapter->hw, IGC_WUC, IGC_WUC_PME_EN); IGC_WRITE_REG(&adapter->hw, IGC_WUFC, adapter->wol); pme: status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2); status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if (!error && (if_getcapenable(ifp) & IFCAP_WOL)) status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2); return; } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void igc_update_stats_counters(struct igc_adapter *adapter) { u64 prev_xoffrxc = adapter->stats.xoffrxc; adapter->stats.crcerrs += IGC_READ_REG(&adapter->hw, IGC_CRCERRS); adapter->stats.mpc += IGC_READ_REG(&adapter->hw, IGC_MPC); adapter->stats.scc += IGC_READ_REG(&adapter->hw, IGC_SCC); adapter->stats.ecol += IGC_READ_REG(&adapter->hw, IGC_ECOL); adapter->stats.mcc += IGC_READ_REG(&adapter->hw, IGC_MCC); adapter->stats.latecol += IGC_READ_REG(&adapter->hw, IGC_LATECOL); adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_COLC); adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_RERC); adapter->stats.dc += IGC_READ_REG(&adapter->hw, IGC_DC); adapter->stats.rlec += IGC_READ_REG(&adapter->hw, IGC_RLEC); adapter->stats.xonrxc += IGC_READ_REG(&adapter->hw, IGC_XONRXC); adapter->stats.xontxc += IGC_READ_REG(&adapter->hw, IGC_XONTXC); adapter->stats.xoffrxc += IGC_READ_REG(&adapter->hw, IGC_XOFFRXC); /* * For watchdog management we need to know if we have been * paused during the last interval, so capture that here. 
*/ if (adapter->stats.xoffrxc != prev_xoffrxc) adapter->shared->isc_pause_frames = 1; adapter->stats.xofftxc += IGC_READ_REG(&adapter->hw, IGC_XOFFTXC); adapter->stats.fcruc += IGC_READ_REG(&adapter->hw, IGC_FCRUC); adapter->stats.prc64 += IGC_READ_REG(&adapter->hw, IGC_PRC64); adapter->stats.prc127 += IGC_READ_REG(&adapter->hw, IGC_PRC127); adapter->stats.prc255 += IGC_READ_REG(&adapter->hw, IGC_PRC255); adapter->stats.prc511 += IGC_READ_REG(&adapter->hw, IGC_PRC511); adapter->stats.prc1023 += IGC_READ_REG(&adapter->hw, IGC_PRC1023); adapter->stats.prc1522 += IGC_READ_REG(&adapter->hw, IGC_PRC1522); adapter->stats.tlpic += IGC_READ_REG(&adapter->hw, IGC_TLPIC); adapter->stats.rlpic += IGC_READ_REG(&adapter->hw, IGC_RLPIC); adapter->stats.gprc += IGC_READ_REG(&adapter->hw, IGC_GPRC); adapter->stats.bprc += IGC_READ_REG(&adapter->hw, IGC_BPRC); adapter->stats.mprc += IGC_READ_REG(&adapter->hw, IGC_MPRC); adapter->stats.gptc += IGC_READ_REG(&adapter->hw, IGC_GPTC); /* For the 64-bit byte counters the low dword must be read first. */ /* Both registers clear on the read of the high dword */ adapter->stats.gorc += IGC_READ_REG(&adapter->hw, IGC_GORCL) + ((u64)IGC_READ_REG(&adapter->hw, IGC_GORCH) << 32); adapter->stats.gotc += IGC_READ_REG(&adapter->hw, IGC_GOTCL) + ((u64)IGC_READ_REG(&adapter->hw, IGC_GOTCH) << 32); adapter->stats.rnbc += IGC_READ_REG(&adapter->hw, IGC_RNBC); adapter->stats.ruc += IGC_READ_REG(&adapter->hw, IGC_RUC); adapter->stats.rfc += IGC_READ_REG(&adapter->hw, IGC_RFC); adapter->stats.roc += IGC_READ_REG(&adapter->hw, IGC_ROC); adapter->stats.rjc += IGC_READ_REG(&adapter->hw, IGC_RJC); adapter->stats.tor += IGC_READ_REG(&adapter->hw, IGC_TORH); adapter->stats.tot += IGC_READ_REG(&adapter->hw, IGC_TOTH); adapter->stats.tpr += IGC_READ_REG(&adapter->hw, IGC_TPR); adapter->stats.tpt += IGC_READ_REG(&adapter->hw, IGC_TPT); adapter->stats.ptc64 += IGC_READ_REG(&adapter->hw, IGC_PTC64); adapter->stats.ptc127 += IGC_READ_REG(&adapter->hw, IGC_PTC127); adapter->stats.ptc255 += IGC_READ_REG(&adapter->hw, IGC_PTC255); adapter->stats.ptc511 += IGC_READ_REG(&adapter->hw, IGC_PTC511); adapter->stats.ptc1023 += IGC_READ_REG(&adapter->hw, IGC_PTC1023); adapter->stats.ptc1522 += IGC_READ_REG(&adapter->hw, IGC_PTC1522); adapter->stats.mptc += IGC_READ_REG(&adapter->hw, IGC_MPTC); adapter->stats.bptc += IGC_READ_REG(&adapter->hw, IGC_BPTC); /* Interrupt Counts */ adapter->stats.iac += IGC_READ_REG(&adapter->hw, IGC_IAC); adapter->stats.rxdmtc += IGC_READ_REG(&adapter->hw, IGC_RXDMTC); adapter->stats.algnerrc += IGC_READ_REG(&adapter->hw, IGC_ALGNERRC); adapter->stats.tncrs += IGC_READ_REG(&adapter->hw, IGC_TNCRS); adapter->stats.htdpmc += IGC_READ_REG(&adapter->hw, IGC_HTDPMC); adapter->stats.tsctc += IGC_READ_REG(&adapter->hw, IGC_TSCTC); } static uint64_t igc_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct igc_adapter *adapter = iflib_get_softc(ctx); - struct ifnet *ifp = iflib_get_ifp(ctx); + if_t ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_COLLISIONS: return (adapter->stats.colc); case IFCOUNTER_IERRORS: return (adapter->dropped_pkts + adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.mpc + adapter->stats.htdpmc); case IFCOUNTER_OERRORS: return (adapter->stats.ecol + adapter->stats.latecol + adapter->watchdog_events); default: return (if_get_counter_default(ifp, cnt)); } } /* igc_if_needs_restart - Tell iflib when the driver needs to be reinitialized * @ctx: iflib context * @event: 
event code to check * * Defaults to returning true for unknown events. * * @returns true if iflib needs to reinit the interface */ static bool igc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: return (false); default: return (true); } } /* Export a single 32-bit register via a read-only sysctl. */ static int igc_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter; u_int val; adapter = oidp->oid_arg1; val = IGC_READ_REG(&adapter->hw, oidp->oid_arg2); return (sysctl_handle_int(oidp, &val, 0, req)); } /* * Add sysctl variables, one per statistic, to the system. */ static void igc_add_hw_stats(struct igc_adapter *adapter) { device_t dev = iflib_get_dev(adapter->ctx); struct igc_tx_queue *tx_que = adapter->tx_queues; struct igc_rx_queue *rx_que = adapter->rx_queues; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct igc_hw_stats *stats = &adapter->stats; struct sysctl_oid *stat_node, *queue_node, *int_node; struct sysctl_oid_list *stat_list, *queue_list, *int_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", CTLFLAG_RD, &adapter->rx_overruns, "RX overruns"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_CTRL, igc_sysctl_reg_handler, "IU", "Device Control Register"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_RCTL, igc_sysctl_reg_handler, "IU", "Receiver Control Register"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", CTLFLAG_RD, &adapter->hw.fc.high_water, 0, "Flow Control High Watermark"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", CTLFLAG_RD, &adapter->hw.fc.low_water, 0, "Flow Control Low Watermark"); for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", CTLFLAG_RD, &txr->tx_irq, "Queue MSI-X Transmit Interrupts"); } for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 
IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU", "Receive Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", CTLFLAG_RD, &rxr->rx_irq, "Queue MSI-X Receive Interrupts"); } /* MAC stats get their own sub node */ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", CTLFLAG_RD, &stats->ecol, "Excessive collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", CTLFLAG_RD, &stats->scc, "Single collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", CTLFLAG_RD, &stats->mcc, "Multiple collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", CTLFLAG_RD, &stats->latecol, "Late collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count", CTLFLAG_RD, &stats->colc, "Collision Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", CTLFLAG_RD, &adapter->stats.symerrs, "Symbol Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", CTLFLAG_RD, &adapter->stats.sec, "Sequence Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", CTLFLAG_RD, &adapter->stats.dc, "Defer Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", CTLFLAG_RD, &adapter->stats.mpc, "Missed Packets"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", CTLFLAG_RD, &adapter->stats.rnbc, "Receive No Buffers"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", CTLFLAG_RD, &adapter->stats.ruc, "Receive Undersize"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", CTLFLAG_RD, &adapter->stats.rfc, "Fragmented Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", CTLFLAG_RD, &adapter->stats.roc, "Oversized Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber", CTLFLAG_RD, &adapter->stats.rjc, "Received Jabber"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs", CTLFLAG_RD, &adapter->stats.rxerrc, "Receive Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &adapter->stats.crcerrs, "CRC errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs", CTLFLAG_RD, &adapter->stats.algnerrc, "Alignment Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", CTLFLAG_RD, &adapter->stats.xonrxc, "XON Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", CTLFLAG_RD, &adapter->stats.xontxc, "XON Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", CTLFLAG_RD, &adapter->stats.xoffrxc, "XOFF Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", CTLFLAG_RD, &adapter->stats.xofftxc, "XOFF Transmitted"); /* Packet Reception Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", CTLFLAG_RD, &adapter->stats.tpr, "Total Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &adapter->stats.gprc, "Good Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.bprc, "Broadcast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.mprc, "Multicast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", CTLFLAG_RD, 
&adapter->stats.prc64, "64 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", CTLFLAG_RD, &adapter->stats.prc127, "65-127 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", CTLFLAG_RD, &adapter->stats.prc255, "128-255 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", CTLFLAG_RD, &adapter->stats.prc511, "256-511 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", CTLFLAG_RD, &adapter->stats.prc1023, "512-1023 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.prc1522, "1024-1522 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &adapter->stats.gorc, "Good Octets Received"); /* Packet Transmission Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &adapter->stats.gotc, "Good Octets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", CTLFLAG_RD, &adapter->stats.tpt, "Total Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &adapter->stats.gptc, "Good Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", CTLFLAG_RD, &adapter->stats.bptc, "Broadcast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", CTLFLAG_RD, &adapter->stats.mptc, "Multicast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", CTLFLAG_RD, &adapter->stats.ptc64, "64 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", CTLFLAG_RD, &adapter->stats.ptc127, "65-127 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", CTLFLAG_RD, &adapter->stats.ptc255, "128-255 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", CTLFLAG_RD, &adapter->stats.ptc511, "256-511 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", CTLFLAG_RD, &adapter->stats.ptc1023, "512-1023 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.ptc1522, "1024-1522 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd", CTLFLAG_RD, &adapter->stats.tsctc, "TSO Contexts Transmitted"); /* Interrupt Stats */ int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics"); int_list = SYSCTL_CHILDREN(int_node); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts", CTLFLAG_RD, &adapter->stats.iac, "Interrupt Assertion Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", CTLFLAG_RD, &adapter->stats.rxdmtc, "Rx Desc Min Thresh Count"); } /********************************************************************** * * This routine provides a way to dump out the adapter EEPROM, * often a useful debug/service tool. This only dumps the first * 32 words; the data that matters is within that extent. * **********************************************************************/ static int igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter = (struct igc_adapter *)arg1; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); /* * This value will cause a hex dump of the * first 32 16-bit words of the EEPROM to * the screen. 
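* Typically triggered from userland by writing 1 to the sysctl OID * this handler is attached to, for example (OID name assumed here * for illustration): sysctl dev.igc.0.nvm=1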
*/ if (result == 1) igc_print_nvm_info(adapter); return (error); } static void igc_print_nvm_info(struct igc_adapter *adapter) { u16 eeprom_data; int i, j, row = 0; /* It's a bit crude, but it gets the job done */ printf("\nInterface EEPROM Dump:\n"); printf("Offset\n0x0000 "); for (i = 0, j = 0; i < 32; i++, j++) { if (j == 8) { /* Make the offset block */ j = 0; ++row; printf("\n0x00%x0 ",row); } igc_read_nvm(&adapter->hw, i, 1, &eeprom_data); printf("%04x ", eeprom_data); } printf("\n"); } static int igc_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { struct igc_int_delay_info *info; struct igc_adapter *adapter; u32 regval; int error, usecs, ticks; info = (struct igc_int_delay_info *) arg1; usecs = info->value; error = sysctl_handle_int(oidp, &usecs, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (usecs < 0 || usecs > IGC_TICKS_TO_USECS(65535)) return (EINVAL); info->value = usecs; ticks = IGC_USECS_TO_TICKS(usecs); if (info->offset == IGC_ITR) /* units are 256ns here */ ticks *= 4; adapter = info->adapter; regval = IGC_READ_OFFSET(&adapter->hw, info->offset); regval = (regval & ~0xffff) | (ticks & 0xffff); /* Handle a few special cases. */ switch (info->offset) { case IGC_RDTR: break; case IGC_TIDV: if (ticks == 0) { adapter->txd_cmd &= ~IGC_TXD_CMD_IDE; /* Don't write 0 into the TIDV register. */ regval++; } else adapter->txd_cmd |= IGC_TXD_CMD_IDE; break; } IGC_WRITE_OFFSET(&adapter->hw, info->offset, regval); return (0); } static void igc_add_int_delay_sysctl(struct igc_adapter *adapter, const char *name, const char *description, struct igc_int_delay_info *info, int offset, int value) { info->adapter = adapter; info->offset = offset; info->value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, info, 0, igc_sysctl_int_delay, "I", description); } /* * Set flow control using sysctl: * Flow control values: * 0 - off * 1 - rx pause * 2 - tx pause * 3 - full */ static int igc_set_flowcntl(SYSCTL_HANDLER_ARGS) { int error; static int input = 3; /* default is full */ struct igc_adapter *adapter = (struct igc_adapter *) arg1; error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (input == adapter->fc) /* no change? 
*/ return (error); switch (input) { case igc_fc_rx_pause: case igc_fc_tx_pause: case igc_fc_full: case igc_fc_none: adapter->hw.fc.requested_mode = input; adapter->fc = input; break; default: /* Do nothing */ return (error); } adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; igc_force_mac_fc(&adapter->hw); return (error); } /* * Manage Energy Efficient Ethernet: * Control values: * 0/1 - enabled/disabled */ static int igc_sysctl_eee(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter = (struct igc_adapter *) arg1; int error, value; value = adapter->hw.dev_spec._i225.eee_disable; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); adapter->hw.dev_spec._i225.eee_disable = (value != 0); igc_if_init(adapter->ctx); return (0); } static int igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct igc_adapter *) arg1; igc_print_debug_info(adapter); } return (error); } static int igc_get_rs(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter = (struct igc_adapter *) arg1; int error; int result; result = 0; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr || result != 1) return (error); igc_dump_rs(adapter); return (error); } static void igc_if_debug(if_ctx_t ctx) { igc_dump_rs(iflib_get_softc(ctx)); } /* * This routine is meant to be fluid, add whatever is * needed for debugging a problem. -jfv */ static void igc_print_debug_info(struct igc_adapter *adapter) { device_t dev = iflib_get_dev(adapter->ctx); - struct ifnet *ifp = iflib_get_ifp(adapter->ctx); + if_t ifp = iflib_get_ifp(adapter->ctx); struct tx_ring *txr = &adapter->tx_queues->txr; struct rx_ring *rxr = &adapter->rx_queues->rxr; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) printf("Interface is RUNNING "); else printf("Interface is NOT RUNNING\n"); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) printf("and INACTIVE\n"); else printf("and ACTIVE\n"); for (int i = 0; i < adapter->tx_num_queues; i++, txr++) { device_printf(dev, "TX Queue %d ------\n", i); device_printf(dev, "hw tdh = %d, hw tdt = %d\n", IGC_READ_REG(&adapter->hw, IGC_TDH(i)), IGC_READ_REG(&adapter->hw, IGC_TDT(i))); } for (int j=0; j < adapter->rx_num_queues; j++, rxr++) { device_printf(dev, "RX Queue %d ------\n", j); device_printf(dev, "hw rdh = %d, hw rdt = %d\n", IGC_READ_REG(&adapter->hw, IGC_RDH(j)), IGC_READ_REG(&adapter->hw, IGC_RDT(j))); } } diff --git a/sys/dev/igc/if_igc.h b/sys/dev/igc/if_igc.h index 86a02266a81c..545005f55f4c 100644 --- a/sys/dev/igc/if_igc.h +++ b/sys/dev/igc/if_igc.h @@ -1,421 +1,421 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2016 Nicole Graziano * All rights reserved. * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_ddb.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #ifdef DDB #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "igc_api.h" #include "igc_i225.h" #include "ifdi_if.h" #ifndef _IGC_H_DEFINED_ #define _IGC_H_DEFINED_ /* Tunables */ /* * IGC_MAX_TXD: Maximum number of Transmit Descriptors * Valid Range: 128-4096 * Default Value: 1024 * This value is the number of transmit descriptors allocated by the driver. * Increasing this value allows the driver to queue more transmits. Each * descriptor is 16 bytes. * Since TDLEN should be a multiple of 128 bytes, the number of transmit * descriptors should meet the following condition. * (num_tx_desc * sizeof(struct igc_tx_desc)) % 128 == 0 */ #define IGC_MIN_TXD 128 #define IGC_MAX_TXD 4096 #define IGC_DEFAULT_TXD 1024 #define IGC_DEFAULT_MULTI_TXD 4096 /* * IGC_MAX_RXD - Maximum number of Receive Descriptors * Valid Range: 128-4096 * Default Value: 1024 * This value is the number of receive descriptors allocated by the driver. * Increasing this value allows the driver to buffer more incoming packets. * Each descriptor is 16 bytes. A receive buffer is also allocated for each * descriptor. The maximum MTU size is 16110. * Since RDLEN should be a multiple of 128 bytes, the number of receive * descriptors should meet the following condition. * (num_rx_desc * sizeof(union igc_rx_desc_extended)) % 128 == 0 */ #define IGC_MIN_RXD 128 #define IGC_MAX_RXD 4096 #define IGC_DEFAULT_RXD 1024 #define IGC_DEFAULT_MULTI_RXD 4096 /* * IGC_TIDV_VAL - Transmit Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value delays the generation of transmit interrupts in units of * 1.024 microseconds. Transmit interrupt reduction can improve CPU * efficiency if properly tuned for specific network traffic. If the * system is reporting dropped transmits, this value may be set too high * causing the driver to run out of available transmit descriptors. */ #define IGC_TIDV_VAL 64 /* * IGC_TADV_VAL - Transmit Absolute Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * transmit interrupt is generated. 
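* (IGC_TIDV_VAL is restarted each time another packet is transmitted, * so under back-to-back traffic it alone may keep postponing the * interrupt; this absolute timer is the backstop bounding the total * delay.)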
Useful only if IGC_TIDV is non-zero, * this value ensures that an interrupt is generated after the initial * packet is sent on the wire within the set amount of time. Proper tuning, * along with IGC_TIDV_VAL, may improve traffic throughput in specific * network conditions. */ #define IGC_TADV_VAL 64 /* * IGC_RDTR_VAL - Receive Interrupt Delay Timer (Packet Timer) * Valid Range: 0-65535 (0=off) * Default Value: 0 * This value delays the generation of receive interrupts in units of 1.024 * microseconds. Receive interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. Increasing this value adds * extra latency to frame reception and can end up decreasing the throughput * of TCP traffic. If the system is reporting dropped receives, this value * may be set too high, causing the driver to run out of available receive * descriptors. * * CAUTION: When setting IGC_RDTR to a value other than 0, adapters * may hang (stop transmitting) under certain network conditions. * If this occurs a WATCHDOG message is logged in the system * event log. In addition, the controller is automatically reset, * restoring the network connection. To eliminate the potential * for the hang ensure that IGC_RDTR is set to 0. */ #define IGC_RDTR_VAL 0 /* * Receive Interrupt Absolute Delay Timer * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * receive interrupt is generated. Useful only if IGC_RDTR is non-zero, * this value ensures that an interrupt is generated after the initial * packet is received within the set amount of time. Proper tuning, * along with IGC_RDTR, may improve traffic throughput in specific network * conditions. */ #define IGC_RADV_VAL 64 /* * This parameter controls whether or not autonegotiation is enabled. * 0 - Disable autonegotiation * 1 - Enable autonegotiation */ #define DO_AUTO_NEG true /* Tunables -- End */ #define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) #define AUTO_ALL_MODES 0 /* * Miscellaneous constants */ #define MAX_NUM_MULTICAST_ADDRESSES 128 #define IGC_FC_PAUSE_TIME 0x0680 #define IGC_TXPBSIZE 20408 #define IGC_PKTTYPE_MASK 0x0000FFF0 #define IGC_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coalesce Flush */ #define IGC_RX_PTHRESH 8 #define IGC_RX_HTHRESH 8 #define IGC_RX_WTHRESH 4 #define IGC_TX_PTHRESH 8 #define IGC_TX_HTHRESH 1 /* * TDBA/RDBA should be aligned on a 16 byte boundary. But TDLEN/RDLEN should be * a multiple of 128 bytes. So we align TDBA/RDBA on a 128 byte boundary. This will * also optimize cache line size effect. H/W supports up to cache line size 128. 
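* For example, the default ring of 1024 descriptors occupies * 1024 * 16 = 16384 bytes, which is a multiple of 128, so the default * ring sizes satisfy the TDLEN/RDLEN constraint.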
*/ #define IGC_DBA_ALIGN 128 #define IGC_MSIX_BAR 3 /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) #define IGC_MAX_SCATTER 40 #define IGC_VFTA_SIZE 128 #define IGC_TSO_SIZE 65535 #define IGC_TSO_SEG_SIZE 4096 /* Max dma segment size */ #define IGC_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP | \ CSUM_IP_SCTP | CSUM_IP6_UDP | CSUM_IP6_TCP | \ CSUM_IP6_SCTP) /* Offload bits in mbuf flag */ struct igc_adapter; struct igc_int_delay_info { struct igc_adapter *adapter; /* Back-pointer to the adapter struct */ int offset; /* Register offset to read/write */ int value; /* Current value in usecs */ }; /* * The transmit ring, one per tx queue */ struct tx_ring { struct igc_adapter *adapter; struct igc_tx_desc *tx_base; uint64_t tx_paddr; qidx_t *tx_rsq; uint8_t me; qidx_t tx_rs_cidx; qidx_t tx_rs_pidx; qidx_t tx_cidx_processed; /* Interrupt resources */ void *tag; struct resource *res; unsigned long tx_irq; /* Saved csum offloading context information */ int csum_flags; int csum_lhlen; int csum_iphlen; int csum_thlen; int csum_mss; int csum_pktlen; uint32_t csum_txd_upper; uint32_t csum_txd_lower; /* last field */ }; /* * The Receive ring, one per rx queue */ struct rx_ring { struct igc_adapter *adapter; struct igc_rx_queue *que; u32 me; u32 payload; union igc_rx_desc_extended *rx_base; uint64_t rx_paddr; /* Interrupt resources */ void *tag; struct resource *res; /* Soft stats */ unsigned long rx_irq; unsigned long rx_discarded; unsigned long rx_packets; unsigned long rx_bytes; }; struct igc_tx_queue { struct igc_adapter *adapter; u32 msix; u32 eims; /* This queue's EIMS bit */ u32 me; struct tx_ring txr; }; struct igc_rx_queue { struct igc_adapter *adapter; u32 me; u32 msix; u32 eims; struct rx_ring rxr; u64 irqs; struct if_irq que_irq; }; /* Our adapter structure */ struct igc_adapter { - struct ifnet *ifp; + if_t ifp; struct igc_hw hw; if_softc_ctx_t shared; if_ctx_t ctx; #define tx_num_queues shared->isc_ntxqsets #define rx_num_queues shared->isc_nrxqsets #define intr_type shared->isc_intr /* FreeBSD operating-system-specific structures. 
*/ struct igc_osdep osdep; device_t dev; struct cdev *led_dev; struct igc_tx_queue *tx_queues; struct igc_rx_queue *rx_queues; struct if_irq irq; struct resource *memory; struct resource *flash; struct resource *ioport; struct resource *res; void *tag; u32 linkvec; u32 ivars; struct ifmedia *media; int msix; int if_flags; int igc_insert_vlan_header; u32 ims; u32 flags; /* Task for FAST handling */ struct grouptask link_task; u32 txd_cmd; u32 tx_process_limit; u32 rx_process_limit; u32 rx_mbuf_sz; /* Management and WOL features */ u32 wol; /* Multicast array memory */ u8 *mta; /* Info about the interface */ u16 link_active; u16 fc; u16 link_speed; u16 link_duplex; u32 smartspeed; u32 dmac; int link_mask; u64 que_mask; struct igc_int_delay_info tx_int_delay; struct igc_int_delay_info tx_abs_int_delay; struct igc_int_delay_info rx_int_delay; struct igc_int_delay_info rx_abs_int_delay; struct igc_int_delay_info tx_itr; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long link_irq; unsigned long rx_overruns; unsigned long watchdog_events; struct igc_hw_stats stats; u16 vf_ifp; }; void igc_dump_rs(struct igc_adapter *); #define IGC_RSSRK_SIZE 4 #define IGC_RSSRK_VAL(key, i) (key[(i) * IGC_RSSRK_SIZE] | \ key[(i) * IGC_RSSRK_SIZE + 1] << 8 | \ key[(i) * IGC_RSSRK_SIZE + 2] << 16 | \ key[(i) * IGC_RSSRK_SIZE + 3] << 24) #endif /* _IGC_H_DEFINED_ */ diff --git a/sys/dev/ipw/if_ipw.c b/sys/dev/ipw/if_ipw.c index c64a0c74ea8a..2c08077a2dea 100644 --- a/sys/dev/ipw/if_ipw.c +++ b/sys/dev/ipw/if_ipw.c @@ -1,2682 +1,2682 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2004-2006 * Damien Bergamini . All rights reserved. * Copyright (c) 2006 Sam Leffler, Errno Consulting * Copyright (c) 2007 Andrew Thompson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /*- * Intel(R) PRO/Wireless 2100 MiniPCI driver * http://www.intel.com/network/connectivity/products/wireless/prowireless_mobile.htm */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define IPW_DEBUG #ifdef IPW_DEBUG #define DPRINTF(x) do { if (ipw_debug > 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (ipw_debug >= (n)) printf x; } while (0) int ipw_debug = 0; SYSCTL_INT(_debug, OID_AUTO, ipw, CTLFLAG_RW, &ipw_debug, 0, "ipw debug level"); #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif MODULE_DEPEND(ipw, pci, 1, 1, 1); MODULE_DEPEND(ipw, wlan, 1, 1, 1); MODULE_DEPEND(ipw, firmware, 1, 1, 1); struct ipw_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct ipw_ident ipw_ident_table[] = { { 0x8086, 0x1043, "Intel(R) PRO/Wireless 2100 MiniPCI" }, { 0, 0, NULL } }; static struct ieee80211vap *ipw_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void ipw_vap_delete(struct ieee80211vap *); static int ipw_dma_alloc(struct ipw_softc *); static void ipw_release(struct ipw_softc *); -static void ipw_media_status(struct ifnet *, struct ifmediareq *); +static void ipw_media_status(if_t, struct ifmediareq *); static int ipw_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t ipw_read_prom_word(struct ipw_softc *, uint8_t); static uint16_t ipw_read_chanmask(struct ipw_softc *); static void ipw_rx_cmd_intr(struct ipw_softc *, struct ipw_soft_buf *); static void ipw_rx_newstate_intr(struct ipw_softc *, struct ipw_soft_buf *); static void ipw_rx_data_intr(struct ipw_softc *, struct ipw_status *, struct ipw_soft_bd *, struct ipw_soft_buf *); static void ipw_rx_intr(struct ipw_softc *); static void ipw_release_sbd(struct ipw_softc *, struct ipw_soft_bd *); static void ipw_tx_intr(struct ipw_softc *); static void ipw_intr(void *); static void ipw_dma_map_addr(void *, bus_dma_segment_t *, int, int); static const char * ipw_cmdname(int); static int ipw_cmd(struct ipw_softc *, uint32_t, void *, uint32_t); static int ipw_tx_start(struct ipw_softc *, struct mbuf *, struct ieee80211_node *); static int ipw_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int ipw_transmit(struct ieee80211com *, struct mbuf *); static void ipw_start(struct ipw_softc *); static void ipw_watchdog(void *); static void ipw_parent(struct ieee80211com *); static void ipw_stop_master(struct ipw_softc *); static int ipw_enable(struct ipw_softc *); static int ipw_disable(struct ipw_softc *); static int ipw_reset(struct ipw_softc *); static int ipw_load_ucode(struct ipw_softc *, const char *, int); static int ipw_load_firmware(struct ipw_softc *, const char *, int); static int ipw_config(struct ipw_softc *); static void ipw_assoc(struct ieee80211com *, struct ieee80211vap *); static void ipw_disassoc(struct ieee80211com *, struct ieee80211vap *); static void ipw_init_task(void *, int); static void ipw_init(void *); static void ipw_init_locked(struct ipw_softc *); static void ipw_stop(void *); static void ipw_stop_locked(struct ipw_softc *); static int ipw_sysctl_stats(SYSCTL_HANDLER_ARGS); 
static int ipw_sysctl_radio(SYSCTL_HANDLER_ARGS); static uint32_t ipw_read_table1(struct ipw_softc *, uint32_t); static void ipw_write_table1(struct ipw_softc *, uint32_t, uint32_t); #if 0 static int ipw_read_table2(struct ipw_softc *, uint32_t, void *, uint32_t *); static void ipw_read_mem_1(struct ipw_softc *, bus_size_t, uint8_t *, bus_size_t); #endif static void ipw_write_mem_1(struct ipw_softc *, bus_size_t, const uint8_t *, bus_size_t); static int ipw_scan(struct ipw_softc *); static void ipw_scan_start(struct ieee80211com *); static void ipw_scan_end(struct ieee80211com *); static void ipw_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static void ipw_set_channel(struct ieee80211com *); static void ipw_scan_curchan(struct ieee80211_scan_state *, unsigned long maxdwell); static void ipw_scan_mindwell(struct ieee80211_scan_state *); static int ipw_probe(device_t); static int ipw_attach(device_t); static int ipw_detach(device_t); static int ipw_shutdown(device_t); static int ipw_suspend(device_t); static int ipw_resume(device_t); static device_method_t ipw_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ipw_probe), DEVMETHOD(device_attach, ipw_attach), DEVMETHOD(device_detach, ipw_detach), DEVMETHOD(device_shutdown, ipw_shutdown), DEVMETHOD(device_suspend, ipw_suspend), DEVMETHOD(device_resume, ipw_resume), DEVMETHOD_END }; static driver_t ipw_driver = { "ipw", ipw_methods, sizeof (struct ipw_softc) }; DRIVER_MODULE(ipw, pci, ipw_driver, NULL, NULL); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ipw, ipw_ident_table, nitems(ipw_ident_table) - 1); MODULE_VERSION(ipw, 1); static int ipw_probe(device_t dev) { const struct ipw_ident *ident; for (ident = ipw_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } return ENXIO; } /* Base Address Register */ static int ipw_attach(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; uint16_t val; int error, i; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); mbufq_init(&sc->sc_snd, ifqmaxlen); TASK_INIT(&sc->sc_init_task, 0, ipw_init_task, sc); callout_init_mtx(&sc->sc_wdtimer, &sc->sc_mtx, 0); pci_write_config(dev, 0x41, 0, 1); /* enable bus-mastering */ pci_enable_busmaster(dev); i = PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); goto fail; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); i = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_ACTIVE | RF_SHAREABLE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); goto fail1; } if (ipw_reset(sc) != 0) { device_printf(dev, "could not reset adapter\n"); goto fail2; } if (ipw_dma_alloc(sc) != 0) { device_printf(dev, "could not allocate DMA resources\n"); goto fail2; } ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_DS; /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_PMGT /* power save supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_WPA /* 802.11i supported */ 
; /* read MAC address from EEPROM */ val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 0); ic->ic_macaddr[0] = val >> 8; ic->ic_macaddr[1] = val & 0xff; val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 1); ic->ic_macaddr[2] = val >> 8; ic->ic_macaddr[3] = val & 0xff; val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 2); ic->ic_macaddr[4] = val >> 8; ic->ic_macaddr[5] = val & 0xff; sc->chanmask = ipw_read_chanmask(sc); ipw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); /* check support for radio transmitter switch in EEPROM */ if (!(ipw_read_prom_word(sc, IPW_EEPROM_RADIO) & 8)) sc->flags |= IPW_FLAG_HAS_RADIO_SWITCH; ieee80211_ifattach(ic); ic->ic_scan_start = ipw_scan_start; ic->ic_scan_end = ipw_scan_end; ic->ic_getradiocaps = ipw_getradiocaps; ic->ic_set_channel = ipw_set_channel; ic->ic_scan_curchan = ipw_scan_curchan; ic->ic_scan_mindwell = ipw_scan_mindwell; ic->ic_raw_xmit = ipw_raw_xmit; ic->ic_vap_create = ipw_vap_create; ic->ic_vap_delete = ipw_vap_delete; ic->ic_transmit = ipw_transmit; ic->ic_parent = ipw_parent; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), IPW_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), IPW_RX_RADIOTAP_PRESENT); /* * Add a few sysctl knobs. */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "radio", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, ipw_sysctl_radio, "I", "radio transmitter switch state (0=off, 1=on)"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, ipw_sysctl_stats, "S", "statistics"); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ipw_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt\n"); goto fail3; } if (bootverbose) ieee80211_announce(ic); return 0; fail3: ipw_release(sc); fail2: bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); fail1: bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); fail: mtx_destroy(&sc->sc_mtx); return ENXIO; } static int ipw_detach(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; bus_teardown_intr(dev, sc->irq, sc->sc_ih); ieee80211_draintask(ic, &sc->sc_init_task); ipw_stop(sc); ieee80211_ifdetach(ic); callout_drain(&sc->sc_wdtimer); mbufq_drain(&sc->sc_snd); ipw_release(sc); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); if (sc->sc_firmware != NULL) { firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD); sc->sc_firmware = NULL; } mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * ipw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ipw_softc *sc = ic->ic_softc; struct ipw_vap *ivp; struct ieee80211vap *vap; const struct firmware *fp; const struct ipw_firmware_hdr *hdr; const char *imagename; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; switch (opmode) { case IEEE80211_M_STA: imagename = "ipw_bss"; break; case IEEE80211_M_IBSS: imagename = "ipw_ibss"; break; case IEEE80211_M_MONITOR: imagename = "ipw_monitor"; break; default: return NULL; } /* * Load firmware image using the 
firmware(9) subsystem. Doing * this unlocked is ok since we're single-threaded by the * 802.11 layer. */ if (sc->sc_firmware == NULL || strcmp(sc->sc_firmware->name, imagename) != 0) { if (sc->sc_firmware != NULL) firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD); sc->sc_firmware = firmware_get(imagename); } if (sc->sc_firmware == NULL) { device_printf(sc->sc_dev, "could not load firmware image '%s'\n", imagename); return NULL; } fp = sc->sc_firmware; if (fp->datasize < sizeof *hdr) { device_printf(sc->sc_dev, "firmware image too short %zu\n", fp->datasize); firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD); sc->sc_firmware = NULL; return NULL; } hdr = (const struct ipw_firmware_hdr *)fp->data; if (fp->datasize < sizeof *hdr + le32toh(hdr->mainsz) + le32toh(hdr->ucodesz)) { device_printf(sc->sc_dev, "firmware image too short %zu\n", fp->datasize); firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD); sc->sc_firmware = NULL; return NULL; } ivp = malloc(sizeof(struct ipw_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &ivp->vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ ivp->newstate = vap->iv_newstate; vap->iv_newstate = ipw_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ipw_media_status, mac); ic->ic_opmode = opmode; return vap; } static void ipw_vap_delete(struct ieee80211vap *vap) { struct ipw_vap *ivp = IPW_VAP(vap); ieee80211_vap_detach(vap); free(ivp, M_80211_VAP); } static int ipw_dma_alloc(struct ipw_softc *sc) { struct ipw_soft_bd *sbd; struct ipw_soft_hdr *shdr; struct ipw_soft_buf *sbuf; bus_addr_t physaddr; int error, i; /* * Allocate parent DMA tag for subsequent allocations. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->parent_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create parent DMA tag\n"); goto fail; } /* * Allocate and map tx ring. */ error = bus_dma_tag_create(sc->parent_dmat, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, IPW_TBD_SZ, 1, IPW_TBD_SZ, 0, NULL, NULL, &sc->tbd_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create tx ring DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->tbd_dmat, (void **)&sc->tbd_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tbd_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate tx ring DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->tbd_dmat, sc->tbd_map, sc->tbd_list, IPW_TBD_SZ, ipw_dma_map_addr, &sc->tbd_phys, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map tx ring DMA memory\n"); goto fail; } /* * Allocate and map rx ring. */ error = bus_dma_tag_create(sc->parent_dmat, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, IPW_RBD_SZ, 1, IPW_RBD_SZ, 0, NULL, NULL, &sc->rbd_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create rx ring DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->rbd_dmat, (void **)&sc->rbd_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rbd_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate rx ring DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->rbd_dmat, sc->rbd_map, sc->rbd_list, IPW_RBD_SZ, ipw_dma_map_addr, &sc->rbd_phys, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map rx ring DMA memory\n"); goto fail; } /* * Allocate and map status ring. 
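* (Same three-step bus_dma(9) idiom as the tx and rx rings above: * create a tag describing the allocation's alignment and size, allocate * DMA-safe memory against it, then load the map to obtain the bus * address that will be handed to the device.)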
*/ error = bus_dma_tag_create(sc->parent_dmat, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, IPW_STATUS_SZ, 1, IPW_STATUS_SZ, 0, NULL, NULL, &sc->status_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create status ring DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->status_dmat, (void **)&sc->status_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->status_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate status ring DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->status_dmat, sc->status_map, sc->status_list, IPW_STATUS_SZ, ipw_dma_map_addr, &sc->status_phys, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map status ring DMA memory\n"); goto fail; } /* * Allocate command DMA map. */ error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof (struct ipw_cmd), 1, sizeof (struct ipw_cmd), 0, NULL, NULL, &sc->cmd_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create command DMA tag\n"); goto fail; } error = bus_dmamap_create(sc->cmd_dmat, 0, &sc->cmd_map); if (error != 0) { device_printf(sc->sc_dev, "could not create command DMA map\n"); goto fail; } /* * Allocate headers DMA maps. */ error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof (struct ipw_hdr), 1, sizeof (struct ipw_hdr), 0, NULL, NULL, &sc->hdr_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create header DMA tag\n"); goto fail; } SLIST_INIT(&sc->free_shdr); for (i = 0; i < IPW_NDATA; i++) { shdr = &sc->shdr_list[i]; error = bus_dmamap_create(sc->hdr_dmat, 0, &shdr->map); if (error != 0) { device_printf(sc->sc_dev, "could not create header DMA map\n"); goto fail; } SLIST_INSERT_HEAD(&sc->free_shdr, shdr, next); } /* * Allocate tx buffers DMA maps. */ error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IPW_MAX_NSEG, MCLBYTES, 0, NULL, NULL, &sc->txbuf_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create tx DMA tag\n"); goto fail; } SLIST_INIT(&sc->free_sbuf); for (i = 0; i < IPW_NDATA; i++) { sbuf = &sc->tx_sbuf_list[i]; error = bus_dmamap_create(sc->txbuf_dmat, 0, &sbuf->map); if (error != 0) { device_printf(sc->sc_dev, "could not create tx DMA map\n"); goto fail; } SLIST_INSERT_HEAD(&sc->free_sbuf, sbuf, next); } /* * Initialize tx ring. */ for (i = 0; i < IPW_NTBD; i++) { sbd = &sc->stbd_list[i]; sbd->bd = &sc->tbd_list[i]; sbd->type = IPW_SBD_TYPE_NOASSOC; } /* * Pre-allocate rx buffers and DMA maps. 
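* (One cluster mbuf is attached to each receive buffer descriptor; the * bus address returned by the map load is written straight into the * descriptor below so the device can DMA incoming frames into the * cluster.)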
*/ error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rxbuf_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create rx DMA tag\n"); goto fail; } for (i = 0; i < IPW_NRBD; i++) { sbd = &sc->srbd_list[i]; sbuf = &sc->rx_sbuf_list[i]; sbd->bd = &sc->rbd_list[i]; sbuf->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (sbuf->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_create(sc->rxbuf_dmat, 0, &sbuf->map); if (error != 0) { device_printf(sc->sc_dev, "could not create rx DMA map\n"); goto fail; } error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map, mtod(sbuf->m, void *), MCLBYTES, ipw_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map rx DMA memory\n"); goto fail; } sbd->type = IPW_SBD_TYPE_DATA; sbd->priv = sbuf; sbd->bd->physaddr = htole32(physaddr); sbd->bd->len = htole32(MCLBYTES); } bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE); return 0; fail: ipw_release(sc); return error; } static void ipw_release(struct ipw_softc *sc) { struct ipw_soft_buf *sbuf; int i; if (sc->parent_dmat != NULL) { bus_dma_tag_destroy(sc->parent_dmat); } if (sc->tbd_dmat != NULL) { bus_dmamap_unload(sc->tbd_dmat, sc->tbd_map); bus_dmamem_free(sc->tbd_dmat, sc->tbd_list, sc->tbd_map); bus_dma_tag_destroy(sc->tbd_dmat); } if (sc->rbd_dmat != NULL) { if (sc->rbd_list != NULL) { bus_dmamap_unload(sc->rbd_dmat, sc->rbd_map); bus_dmamem_free(sc->rbd_dmat, sc->rbd_list, sc->rbd_map); } bus_dma_tag_destroy(sc->rbd_dmat); } if (sc->status_dmat != NULL) { if (sc->status_list != NULL) { bus_dmamap_unload(sc->status_dmat, sc->status_map); bus_dmamem_free(sc->status_dmat, sc->status_list, sc->status_map); } bus_dma_tag_destroy(sc->status_dmat); } for (i = 0; i < IPW_NTBD; i++) ipw_release_sbd(sc, &sc->stbd_list[i]); if (sc->cmd_dmat != NULL) { bus_dmamap_destroy(sc->cmd_dmat, sc->cmd_map); bus_dma_tag_destroy(sc->cmd_dmat); } if (sc->hdr_dmat != NULL) { for (i = 0; i < IPW_NDATA; i++) bus_dmamap_destroy(sc->hdr_dmat, sc->shdr_list[i].map); bus_dma_tag_destroy(sc->hdr_dmat); } if (sc->txbuf_dmat != NULL) { for (i = 0; i < IPW_NDATA; i++) { bus_dmamap_destroy(sc->txbuf_dmat, sc->tx_sbuf_list[i].map); } bus_dma_tag_destroy(sc->txbuf_dmat); } if (sc->rxbuf_dmat != NULL) { for (i = 0; i < IPW_NRBD; i++) { sbuf = &sc->rx_sbuf_list[i]; if (sbuf->m != NULL) { bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxbuf_dmat, sbuf->map); m_freem(sbuf->m); } bus_dmamap_destroy(sc->rxbuf_dmat, sbuf->map); } bus_dma_tag_destroy(sc->rxbuf_dmat); } } static int ipw_shutdown(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); ipw_stop(sc); return 0; } static int ipw_suspend(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; ieee80211_suspend_all(ic); return 0; } static int ipw_resume(device_t dev) { struct ipw_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; pci_write_config(dev, 0x41, 0, 1); ieee80211_resume_all(ic); return 0; } static int ipw_cvtrate(int ipwrate) { switch (ipwrate) { case IPW_RATE_DS1: return 2; case IPW_RATE_DS2: return 4; case IPW_RATE_DS5: return 11; case IPW_RATE_DS11: return 22; } return 0; } /* * The firmware automatically adapts the transmit speed. We report its current * value here. 
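* (ipw_cvtrate() above maps the firmware rate code to the net80211 * rate in 500 kb/s units: 2 = 1 Mb/s, 4 = 2 Mb/s, 11 = 5.5 Mb/s and * 22 = 11 Mb/s.)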
*/ static void -ipw_media_status(struct ifnet *ifp, struct ifmediareq *imr) +ipw_media_status(if_t ifp, struct ifmediareq *imr) { - struct ieee80211vap *vap = ifp->if_softc; + struct ieee80211vap *vap = if_getsoftc(ifp); struct ieee80211com *ic = vap->iv_ic; struct ipw_softc *sc = ic->ic_softc; /* read current transmission rate from adapter */ vap->iv_bss->ni_txrate = ipw_cvtrate( ipw_read_table1(sc, IPW_INFO_CURRENT_TX_RATE) & 0xf); ieee80211_media_status(ifp, imr); } static int ipw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ipw_vap *ivp = IPW_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ipw_softc *sc = ic->ic_softc; enum ieee80211_state ostate; DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate], sc->flags)); ostate = vap->iv_state; IEEE80211_UNLOCK(ic); switch (nstate) { case IEEE80211_S_RUN: if (ic->ic_opmode == IEEE80211_M_IBSS) { /* * XXX when joining an ibss network we are called * with a SCAN -> RUN transition on scan complete. * Use that to call ipw_assoc. On completing the * join we are then called again with an AUTH -> RUN * transition and we want to do nothing. This is * all totally bogus and needs to be redone. */ if (ostate == IEEE80211_S_SCAN) ipw_assoc(ic, vap); } break; case IEEE80211_S_INIT: if (sc->flags & IPW_FLAG_ASSOCIATED) ipw_disassoc(ic, vap); break; case IEEE80211_S_AUTH: /* * Move to ASSOC state after the ipw_assoc() call. Firmware * takes care of authentication; after the call we'll receive * only an assoc response which would otherwise be discarded * if we are still in AUTH state. */ nstate = IEEE80211_S_ASSOC; ipw_assoc(ic, vap); break; case IEEE80211_S_ASSOC: /* * If we are not transitioning from AUTH then resend the * association request. */ if (ostate != IEEE80211_S_AUTH) ipw_assoc(ic, vap); break; default: break; } IEEE80211_LOCK(ic); return ivp->newstate(vap, nstate, arg); } /* * Read 16 bits at address 'addr' from the serial EEPROM. 
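* (Bit-banged three-wire access, as the body below implements: clock in * a start bit, the two-bit READ opcode '10' and the eight address bits * MSB first, then clock the sixteen data bits out, one per C pulse.)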
*/ static uint16_t ipw_read_prom_word(struct ipw_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ IPW_EEPROM_CTL(sc, 0); IPW_EEPROM_CTL(sc, IPW_EEPROM_S); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C); IPW_EEPROM_CTL(sc, IPW_EEPROM_S); /* write start bit (1) */ IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D | IPW_EEPROM_C); /* write READ opcode (10) */ IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D | IPW_EEPROM_C); IPW_EEPROM_CTL(sc, IPW_EEPROM_S); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C); /* write address A7-A0 */ for (n = 7; n >= 0; n--) { IPW_EEPROM_CTL(sc, IPW_EEPROM_S | (((addr >> n) & 1) << IPW_EEPROM_SHIFT_D)); IPW_EEPROM_CTL(sc, IPW_EEPROM_S | (((addr >> n) & 1) << IPW_EEPROM_SHIFT_D) | IPW_EEPROM_C); } IPW_EEPROM_CTL(sc, IPW_EEPROM_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C); IPW_EEPROM_CTL(sc, IPW_EEPROM_S); tmp = MEM_READ_4(sc, IPW_MEM_EEPROM_CTL); val |= ((tmp & IPW_EEPROM_Q) >> IPW_EEPROM_SHIFT_Q) << n; } IPW_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ IPW_EEPROM_CTL(sc, IPW_EEPROM_S); IPW_EEPROM_CTL(sc, 0); IPW_EEPROM_CTL(sc, IPW_EEPROM_C); return le16toh(val); } static uint16_t ipw_read_chanmask(struct ipw_softc *sc) { uint16_t val; /* set supported .11b channels (read from EEPROM) */ if ((val = ipw_read_prom_word(sc, IPW_EEPROM_CHANNEL_LIST)) == 0) val = 0x7ff; /* default to channels 1-11 */ val <<= 1; return (val); } static void ipw_rx_cmd_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf) { struct ipw_cmd *cmd; bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD); cmd = mtod(sbuf->m, struct ipw_cmd *); DPRINTFN(9, ("cmd ack'ed %s(%u, %u, %u, %u, %u)\n", ipw_cmdname(le32toh(cmd->type)), le32toh(cmd->type), le32toh(cmd->subtype), le32toh(cmd->seq), le32toh(cmd->len), le32toh(cmd->status))); sc->flags &= ~IPW_FLAG_BUSY; wakeup(sc); } static void ipw_rx_newstate_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf) { #define IEEESTATE(vap) ieee80211_state_name[vap->iv_state] struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t state; bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD); state = le32toh(*mtod(sbuf->m, uint32_t *)); switch (state) { case IPW_STATE_ASSOCIATED: DPRINTFN(2, ("Association succeeded (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); /* XXX suppress state change in case the fw auto-associates */ if ((sc->flags & IPW_FLAG_ASSOCIATING) == 0) { DPRINTF(("Unexpected association (%s, flags 0x%x)\n", IEEESTATE(vap), sc->flags)); break; } sc->flags &= ~IPW_FLAG_ASSOCIATING; sc->flags |= IPW_FLAG_ASSOCIATED; break; case IPW_STATE_SCANNING: DPRINTFN(3, ("Scanning (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); /* * NB: Check driver state for association on assoc * loss as the firmware will immediately start to * scan and we would treat it as a beacon miss if * we checked the 802.11 layer state. */ if (sc->flags & IPW_FLAG_ASSOCIATED) { IPW_UNLOCK(sc); /* XXX probably need to issue disassoc to fw */ ieee80211_beacon_miss(ic); IPW_LOCK(sc); } break; case IPW_STATE_SCAN_COMPLETE: /* * XXX For some reason scan requests generate scan * started + scan done events before any traffic is * received (e.g. probe response frames). We work * around this by marking the HACK flag and skipping * the first scan complete event. 
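* (IPW_FLAG_HACK is set in ipw_scan() and cleared below on the first * completion event, so only the second, real completion is forwarded * to net80211.)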
*/ DPRINTFN(3, ("Scan complete (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); if (sc->flags & IPW_FLAG_HACK) { sc->flags &= ~IPW_FLAG_HACK; break; } if (sc->flags & IPW_FLAG_SCANNING) { IPW_UNLOCK(sc); ieee80211_scan_done(vap); IPW_LOCK(sc); sc->flags &= ~IPW_FLAG_SCANNING; sc->sc_scan_timer = 0; } break; case IPW_STATE_ASSOCIATION_LOST: DPRINTFN(2, ("Association lost (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); sc->flags &= ~(IPW_FLAG_ASSOCIATING | IPW_FLAG_ASSOCIATED); if (vap->iv_state == IEEE80211_S_RUN) { IPW_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); IPW_LOCK(sc); } break; case IPW_STATE_DISABLED: /* XXX? is this right? */ sc->flags &= ~(IPW_FLAG_HACK | IPW_FLAG_SCANNING | IPW_FLAG_ASSOCIATING | IPW_FLAG_ASSOCIATED); DPRINTFN(2, ("Firmware disabled (%s flags 0x%x)\n", IEEESTATE(vap), sc->flags)); break; case IPW_STATE_RADIO_DISABLED: device_printf(sc->sc_dev, "radio turned off\n"); ieee80211_notify_radio(ic, 0); ipw_stop_locked(sc); /* XXX start polling thread to detect radio on */ break; default: DPRINTFN(2, ("%s: unhandled state %u %s flags 0x%x\n", __func__, state, IEEESTATE(vap), sc->flags)); break; } #undef IEEESTATE } /* * Set driver state for current channel. */ static void ipw_setcurchan(struct ipw_softc *sc, struct ieee80211_channel *chan) { struct ieee80211com *ic = &sc->sc_ic; ic->ic_curchan = chan; ieee80211_radiotap_chan_change(ic); } /* * XXX: Hack to set the current channel to the value advertised in beacons or * probe responses. Only used during AP detection. */ static void ipw_fix_channel(struct ipw_softc *sc, struct mbuf *m) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_channel *c; struct ieee80211_frame *wh; uint8_t subtype; uint8_t *frm, *efrm; wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) return; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype != IEEE80211_FC0_SUBTYPE_BEACON && subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP) return; /* XXX use ieee80211_parse_beacon */ frm = (uint8_t *)(wh + 1); efrm = mtod(m, uint8_t *) + m->m_len; frm += 12; /* skip tstamp, bintval and capinfo fields */ while (frm < efrm) { if (*frm == IEEE80211_ELEMID_DSPARMS) #if IEEE80211_CHAN_MAX < 255 if (frm[2] <= IEEE80211_CHAN_MAX) #endif { DPRINTF(("Fixing channel to %d\n", frm[2])); c = ieee80211_find_channel(ic, ieee80211_ieee2mhz(frm[2], 0), IEEE80211_CHAN_B); if (c == NULL) c = &ic->ic_channels[0]; ipw_setcurchan(sc, c); } frm += frm[1] + 2; } } static void ipw_rx_data_intr(struct ipw_softc *sc, struct ipw_status *status, struct ipw_soft_bd *sbd, struct ipw_soft_buf *sbuf) { struct epoch_tracker et; struct ieee80211com *ic = &sc->sc_ic; struct mbuf *mnew, *m; struct ieee80211_node *ni; bus_addr_t physaddr; int error; int8_t rssi, nf; DPRINTFN(5, ("received frame len=%u, rssi=%u\n", le32toh(status->len), status->rssi)); if (le32toh(status->len) < sizeof (struct ieee80211_frame_min) || le32toh(status->len) > MCLBYTES) return; /* * Try to allocate a new mbuf for this ring element and load it before * processing the current mbuf. If the ring element cannot be loaded, * drop the received packet and reuse the old mbuf. In the unlikely * case that the old mbuf can't be reloaded either, explicitly panic. 
*/ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); return; } bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxbuf_dmat, sbuf->map); error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map, mtod(mnew, void *), MCLBYTES, ipw_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map, mtod(sbuf->m, void *), MCLBYTES, ipw_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } counter_u64_add(ic->ic_ierrors, 1); return; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = sbuf->m; sbuf->m = mnew; sbd->bd->physaddr = htole32(physaddr); m->m_pkthdr.len = m->m_len = le32toh(status->len); rssi = status->rssi + IPW_RSSI_TO_DBM; nf = -95; if (ieee80211_radiotap_active(ic)) { struct ipw_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_antsignal = rssi; tap->wr_antnoise = nf; } if (sc->flags & IPW_FLAG_SCANNING) ipw_fix_channel(sc, m); IPW_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); NET_EPOCH_ENTER(et); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi - nf, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi - nf, nf); NET_EPOCH_EXIT(et); IPW_LOCK(sc); bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE); } static void ipw_rx_intr(struct ipw_softc *sc) { struct ipw_status *status; struct ipw_soft_bd *sbd; struct ipw_soft_buf *sbuf; uint32_t r, i; if (!(sc->flags & IPW_FLAG_FW_INITED)) return; r = CSR_READ_4(sc, IPW_CSR_RX_READ); bus_dmamap_sync(sc->status_dmat, sc->status_map, BUS_DMASYNC_POSTREAD); for (i = (sc->rxcur + 1) % IPW_NRBD; i != r; i = (i + 1) % IPW_NRBD) { status = &sc->status_list[i]; sbd = &sc->srbd_list[i]; sbuf = sbd->priv; switch (le16toh(status->code) & 0xf) { case IPW_STATUS_CODE_COMMAND: ipw_rx_cmd_intr(sc, sbuf); break; case IPW_STATUS_CODE_NEWSTATE: ipw_rx_newstate_intr(sc, sbuf); break; case IPW_STATUS_CODE_DATA_802_3: case IPW_STATUS_CODE_DATA_802_11: ipw_rx_data_intr(sc, status, sbd, sbuf); break; case IPW_STATUS_CODE_NOTIFICATION: DPRINTFN(2, ("notification status, len %u flags 0x%x\n", le32toh(status->len), status->flags)); /* XXX maybe drive state machine AUTH->ASSOC? */ break; default: device_printf(sc->sc_dev, "unexpected status code %u\n", le16toh(status->code)); } /* firmware was killed, stop processing received frames */ if (!(sc->flags & IPW_FLAG_FW_INITED)) return; sbd->bd->flags = 0; } bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE); /* kick the firmware */ sc->rxcur = (r == 0) ? 
IPW_NRBD - 1 : r - 1; CSR_WRITE_4(sc, IPW_CSR_RX_WRITE, sc->rxcur); } static void ipw_release_sbd(struct ipw_softc *sc, struct ipw_soft_bd *sbd) { struct ipw_soft_hdr *shdr; struct ipw_soft_buf *sbuf; switch (sbd->type) { case IPW_SBD_TYPE_COMMAND: bus_dmamap_sync(sc->cmd_dmat, sc->cmd_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->cmd_dmat, sc->cmd_map); break; case IPW_SBD_TYPE_HEADER: shdr = sbd->priv; bus_dmamap_sync(sc->hdr_dmat, shdr->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->hdr_dmat, shdr->map); SLIST_INSERT_HEAD(&sc->free_shdr, shdr, next); break; case IPW_SBD_TYPE_DATA: sbuf = sbd->priv; bus_dmamap_sync(sc->txbuf_dmat, sbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txbuf_dmat, sbuf->map); SLIST_INSERT_HEAD(&sc->free_sbuf, sbuf, next); ieee80211_tx_complete(sbuf->ni, sbuf->m, 0/*XXX*/); sc->sc_tx_timer = 0; break; } sbd->type = IPW_SBD_TYPE_NOASSOC; } static void ipw_tx_intr(struct ipw_softc *sc) { struct ipw_soft_bd *sbd; uint32_t r, i; if (!(sc->flags & IPW_FLAG_FW_INITED)) return; r = CSR_READ_4(sc, IPW_CSR_TX_READ); for (i = (sc->txold + 1) % IPW_NTBD; i != r; i = (i + 1) % IPW_NTBD) { sbd = &sc->stbd_list[i]; ipw_release_sbd(sc, sbd); sc->txfree++; } /* remember what the firmware has processed */ sc->txold = (r == 0) ? IPW_NTBD - 1 : r - 1; ipw_start(sc); } static void ipw_fatal_error_intr(struct ipw_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); device_printf(sc->sc_dev, "firmware error\n"); if (vap != NULL) { IPW_UNLOCK(sc); ieee80211_cancel_scan(vap); IPW_LOCK(sc); } ieee80211_runtask(ic, &sc->sc_init_task); } static void ipw_intr(void *arg) { struct ipw_softc *sc = arg; uint32_t r; IPW_LOCK(sc); r = CSR_READ_4(sc, IPW_CSR_INTR); if (r == 0 || r == 0xffffffff) goto done; /* disable interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0); /* acknowledge all interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR, r); if (r & (IPW_INTR_FATAL_ERROR | IPW_INTR_PARITY_ERROR)) { ipw_fatal_error_intr(sc); goto done; } if (r & IPW_INTR_FW_INIT_DONE) wakeup(sc); if (r & IPW_INTR_RX_TRANSFER) ipw_rx_intr(sc); if (r & IPW_INTR_TX_TRANSFER) ipw_tx_intr(sc); /* re-enable interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK); done: IPW_UNLOCK(sc); } static void ipw_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static const char * ipw_cmdname(int cmd) { static const struct { int cmd; const char *name; } cmds[] = { { IPW_CMD_ADD_MULTICAST, "ADD_MULTICAST" }, { IPW_CMD_BROADCAST_SCAN, "BROADCAST_SCAN" }, { IPW_CMD_DISABLE, "DISABLE" }, { IPW_CMD_DISABLE_PHY, "DISABLE_PHY" }, { IPW_CMD_ENABLE, "ENABLE" }, { IPW_CMD_PREPARE_POWER_DOWN, "PREPARE_POWER_DOWN" }, { IPW_CMD_SET_BASIC_TX_RATES, "SET_BASIC_TX_RATES" }, { IPW_CMD_SET_BEACON_INTERVAL, "SET_BEACON_INTERVAL" }, { IPW_CMD_SET_CHANNEL, "SET_CHANNEL" }, { IPW_CMD_SET_CONFIGURATION, "SET_CONFIGURATION" }, { IPW_CMD_SET_DESIRED_BSSID, "SET_DESIRED_BSSID" }, { IPW_CMD_SET_ESSID, "SET_ESSID" }, { IPW_CMD_SET_FRAG_THRESHOLD, "SET_FRAG_THRESHOLD" }, { IPW_CMD_SET_MAC_ADDRESS, "SET_MAC_ADDRESS" }, { IPW_CMD_SET_MANDATORY_BSSID, "SET_MANDATORY_BSSID" }, { IPW_CMD_SET_MODE, "SET_MODE" }, { IPW_CMD_SET_MSDU_TX_RATES, "SET_MSDU_TX_RATES" }, { IPW_CMD_SET_POWER_MODE, "SET_POWER_MODE" }, { IPW_CMD_SET_RTS_THRESHOLD, "SET_RTS_THRESHOLD" }, { IPW_CMD_SET_SCAN_OPTIONS, "SET_SCAN_OPTIONS" }, { IPW_CMD_SET_SECURITY_INFO, 
"SET_SECURITY_INFO" }, { IPW_CMD_SET_TX_POWER_INDEX, "SET_TX_POWER_INDEX" }, { IPW_CMD_SET_TX_RATES, "SET_TX_RATES" }, { IPW_CMD_SET_WEP_FLAGS, "SET_WEP_FLAGS" }, { IPW_CMD_SET_WEP_KEY, "SET_WEP_KEY" }, { IPW_CMD_SET_WEP_KEY_INDEX, "SET_WEP_KEY_INDEX" }, { IPW_CMD_SET_WPA_IE, "SET_WPA_IE" }, }; static char buf[12]; int i; for (i = 0; i < nitems(cmds); i++) if (cmds[i].cmd == cmd) return cmds[i].name; snprintf(buf, sizeof(buf), "%u", cmd); return buf; } /* * Send a command to the firmware and wait for the acknowledgement. */ static int ipw_cmd(struct ipw_softc *sc, uint32_t type, void *data, uint32_t len) { struct ipw_soft_bd *sbd; bus_addr_t physaddr; int error; IPW_LOCK_ASSERT(sc); if (sc->flags & IPW_FLAG_BUSY) { device_printf(sc->sc_dev, "%s: %s not sent, busy\n", __func__, ipw_cmdname(type)); return EAGAIN; } sc->flags |= IPW_FLAG_BUSY; sbd = &sc->stbd_list[sc->txcur]; error = bus_dmamap_load(sc->cmd_dmat, sc->cmd_map, &sc->cmd, sizeof (struct ipw_cmd), ipw_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map command DMA memory\n"); sc->flags &= ~IPW_FLAG_BUSY; return error; } sc->cmd.type = htole32(type); sc->cmd.subtype = 0; sc->cmd.len = htole32(len); sc->cmd.seq = 0; memcpy(sc->cmd.data, data, len); sbd->type = IPW_SBD_TYPE_COMMAND; sbd->bd->physaddr = htole32(physaddr); sbd->bd->len = htole32(sizeof (struct ipw_cmd)); sbd->bd->nfrag = 1; sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_COMMAND | IPW_BD_FLAG_TX_LAST_FRAGMENT; bus_dmamap_sync(sc->cmd_dmat, sc->cmd_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->tbd_dmat, sc->tbd_map, BUS_DMASYNC_PREWRITE); #ifdef IPW_DEBUG if (ipw_debug >= 4) { printf("sending %s(%u, %u, %u, %u)", ipw_cmdname(type), type, 0, 0, len); /* Print the data buffer in the higher debug level */ if (ipw_debug >= 9 && len > 0) { printf(" data: 0x"); for (int i = 1; i <= len; i++) printf("%1D", (u_char *)data + len - i, ""); } printf("\n"); } #endif /* kick firmware */ sc->txfree--; sc->txcur = (sc->txcur + 1) % IPW_NTBD; CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur); /* wait at most one second for command to complete */ error = msleep(sc, &sc->sc_mtx, 0, "ipwcmd", hz); if (error != 0) { device_printf(sc->sc_dev, "%s: %s failed, timeout (error %u)\n", __func__, ipw_cmdname(type), error); sc->flags &= ~IPW_FLAG_BUSY; return (error); } return (0); } static int ipw_tx_start(struct ipw_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame *wh; struct ipw_soft_bd *sbd; struct ipw_soft_hdr *shdr; struct ipw_soft_buf *sbuf; struct ieee80211_key *k; struct mbuf *mnew; bus_dma_segment_t segs[IPW_MAX_NSEG]; bus_addr_t physaddr; int nsegs, error, i; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct ipw_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; ieee80211_radiotap_tx(vap, m0); } shdr = SLIST_FIRST(&sc->free_shdr); sbuf = SLIST_FIRST(&sc->free_sbuf); KASSERT(shdr != NULL && sbuf != NULL, ("empty sw hdr/buf pool")); shdr->hdr.type = htole32(IPW_HDR_TYPE_SEND); shdr->hdr.subtype = 0; shdr->hdr.encrypted = (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) ? 
1 : 0; shdr->hdr.encrypt = 0; shdr->hdr.keyidx = 0; shdr->hdr.keysz = 0; shdr->hdr.fragmentsz = 0; IEEE80211_ADDR_COPY(shdr->hdr.src_addr, wh->i_addr2); if (ic->ic_opmode == IEEE80211_M_STA) IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr3); else IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr1); /* trim IEEE802.11 header */ m_adj(m0, sizeof (struct ieee80211_frame)); error = bus_dmamap_load_mbuf_sg(sc->txbuf_dmat, sbuf->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(sc->txbuf_dmat, sbuf->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } } error = bus_dmamap_load(sc->hdr_dmat, shdr->map, &shdr->hdr, sizeof (struct ipw_hdr), ipw_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map header DMA memory\n"); bus_dmamap_unload(sc->txbuf_dmat, sbuf->map); m_freem(m0); return error; } SLIST_REMOVE_HEAD(&sc->free_sbuf, next); SLIST_REMOVE_HEAD(&sc->free_shdr, next); sbd = &sc->stbd_list[sc->txcur]; sbd->type = IPW_SBD_TYPE_HEADER; sbd->priv = shdr; sbd->bd->physaddr = htole32(physaddr); sbd->bd->len = htole32(sizeof (struct ipw_hdr)); sbd->bd->nfrag = 1 + nsegs; sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3 | IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT; DPRINTFN(5, ("sending tx hdr (%u, %u, %u, %u, %6D, %6D)\n", shdr->hdr.type, shdr->hdr.subtype, shdr->hdr.encrypted, shdr->hdr.encrypt, shdr->hdr.src_addr, ":", shdr->hdr.dst_addr, ":")); sc->txfree--; sc->txcur = (sc->txcur + 1) % IPW_NTBD; sbuf->m = m0; sbuf->ni = ni; for (i = 0; i < nsegs; i++) { sbd = &sc->stbd_list[sc->txcur]; sbd->bd->physaddr = htole32(segs[i].ds_addr); sbd->bd->len = htole32(segs[i].ds_len); sbd->bd->nfrag = 0; sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3; if (i == nsegs - 1) { sbd->type = IPW_SBD_TYPE_DATA; sbd->priv = sbuf; sbd->bd->flags |= IPW_BD_FLAG_TX_LAST_FRAGMENT; } else { sbd->type = IPW_SBD_TYPE_NOASSOC; sbd->bd->flags |= IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT; } DPRINTFN(5, ("sending fragment (%d)\n", i)); sc->txfree--; sc->txcur = (sc->txcur + 1) % IPW_NTBD; } bus_dmamap_sync(sc->hdr_dmat, shdr->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->txbuf_dmat, sbuf->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->tbd_dmat, sc->tbd_map, BUS_DMASYNC_PREWRITE); /* kick firmware */ CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur); return 0; } static int ipw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { /* no support; just discard */ m_freem(m); ieee80211_free_node(ni); return 0; } static int ipw_transmit(struct ieee80211com *ic, struct mbuf *m) { struct ipw_softc *sc = ic->ic_softc; int error; IPW_LOCK(sc); if ((sc->flags & IPW_FLAG_RUNNING) == 0) { IPW_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { IPW_UNLOCK(sc); return (error); } ipw_start(sc); IPW_UNLOCK(sc); return (0); } static void ipw_start(struct ipw_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; IPW_LOCK_ASSERT(sc); while (sc->txfree >= 1 + IPW_MAX_NSEG && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ipw_tx_start(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 
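/* ipw_tx_start() already freed the mbuf; drop the node reference it held */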
ieee80211_free_node(ni); break; } /* start watchdog timer */ sc->sc_tx_timer = 5; } } static void ipw_watchdog(void *arg) { struct ipw_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; IPW_LOCK_ASSERT(sc); if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(ic->ic_oerrors, 1); taskqueue_enqueue(taskqueue_swi, &sc->sc_init_task); } } if (sc->sc_scan_timer > 0) { if (--sc->sc_scan_timer == 0) { DPRINTFN(3, ("Scan timeout\n")); /* End the scan */ if (sc->flags & IPW_FLAG_SCANNING) { IPW_UNLOCK(sc); ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps)); IPW_LOCK(sc); sc->flags &= ~IPW_FLAG_SCANNING; } } } if (sc->flags & IPW_FLAG_RUNNING) callout_reset(&sc->sc_wdtimer, hz, ipw_watchdog, sc); } static void ipw_parent(struct ieee80211com *ic) { struct ipw_softc *sc = ic->ic_softc; int startall = 0; IPW_LOCK(sc); if (ic->ic_nrunning > 0) { if (!(sc->flags & IPW_FLAG_RUNNING)) { ipw_init_locked(sc); startall = 1; } } else if (sc->flags & IPW_FLAG_RUNNING) ipw_stop_locked(sc); IPW_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void ipw_stop_master(struct ipw_softc *sc) { uint32_t tmp; int ntries; /* disable interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0); CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_STOP_MASTER); for (ntries = 0; ntries < 50; ntries++) { if (CSR_READ_4(sc, IPW_CSR_RST) & IPW_RST_MASTER_DISABLED) break; DELAY(10); } if (ntries == 50) device_printf(sc->sc_dev, "timeout waiting for master\n"); tmp = CSR_READ_4(sc, IPW_CSR_RST); CSR_WRITE_4(sc, IPW_CSR_RST, tmp | IPW_RST_PRINCETON_RESET); /* Clear all flags except the following */ sc->flags &= IPW_FLAG_HAS_RADIO_SWITCH; } static int ipw_reset(struct ipw_softc *sc) { uint32_t tmp; int ntries; ipw_stop_master(sc); /* move adapter to D0 state */ tmp = CSR_READ_4(sc, IPW_CSR_CTL); CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_INIT); /* wait for clock stabilization */ for (ntries = 0; ntries < 1000; ntries++) { if (CSR_READ_4(sc, IPW_CSR_CTL) & IPW_CTL_CLOCK_READY) break; DELAY(200); } if (ntries == 1000) return EIO; tmp = CSR_READ_4(sc, IPW_CSR_RST); CSR_WRITE_4(sc, IPW_CSR_RST, tmp | IPW_RST_SW_RESET); DELAY(10); tmp = CSR_READ_4(sc, IPW_CSR_CTL); CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_INIT); return 0; } static int ipw_waitfordisable(struct ipw_softc *sc, int waitfor) { int ms = hz < 1000 ? 1 : hz/10; int i, error; for (i = 0; i < 100; i++) { if (ipw_read_table1(sc, IPW_INFO_CARD_DISABLED) == waitfor) return 0; error = msleep(sc, &sc->sc_mtx, PCATCH, __func__, ms); if (error == 0 || error != EWOULDBLOCK) return 0; } DPRINTF(("%s: timeout waiting for %s\n", __func__, waitfor ? "disable" : "enable")); return ETIMEDOUT; } static int ipw_enable(struct ipw_softc *sc) { int error; if ((sc->flags & IPW_FLAG_ENABLED) == 0) { DPRINTF(("Enable adapter\n")); error = ipw_cmd(sc, IPW_CMD_ENABLE, NULL, 0); if (error != 0) return error; error = ipw_waitfordisable(sc, 0); if (error != 0) return error; sc->flags |= IPW_FLAG_ENABLED; } return 0; } static int ipw_disable(struct ipw_softc *sc) { int error; if (sc->flags & IPW_FLAG_ENABLED) { DPRINTF(("Disable adapter\n")); error = ipw_cmd(sc, IPW_CMD_DISABLE, NULL, 0); if (error != 0) return error; error = ipw_waitfordisable(sc, 1); if (error != 0) return error; sc->flags &= ~IPW_FLAG_ENABLED; } return 0; } /* * Upload the microcode to the device. 
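* The register pokes below are a fixed bring-up sequence of magic, * apparently undocumented values; the microcode bytes are streamed * byte-wise through the port at 0x210010 and bit 0 of 0x210000 is then * polled until the ucode signals it is alive.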
*/ static int ipw_load_ucode(struct ipw_softc *sc, const char *uc, int size) { int ntries; MEM_WRITE_4(sc, 0x3000e0, 0x80000000); CSR_WRITE_4(sc, IPW_CSR_RST, 0); MEM_WRITE_2(sc, 0x220000, 0x0703); MEM_WRITE_2(sc, 0x220000, 0x0707); MEM_WRITE_1(sc, 0x210014, 0x72); MEM_WRITE_1(sc, 0x210014, 0x72); MEM_WRITE_1(sc, 0x210000, 0x40); MEM_WRITE_1(sc, 0x210000, 0x00); MEM_WRITE_1(sc, 0x210000, 0x40); MEM_WRITE_MULTI_1(sc, 0x210010, uc, size); MEM_WRITE_1(sc, 0x210000, 0x00); MEM_WRITE_1(sc, 0x210000, 0x00); MEM_WRITE_1(sc, 0x210000, 0x80); MEM_WRITE_2(sc, 0x220000, 0x0703); MEM_WRITE_2(sc, 0x220000, 0x0707); MEM_WRITE_1(sc, 0x210014, 0x72); MEM_WRITE_1(sc, 0x210014, 0x72); MEM_WRITE_1(sc, 0x210000, 0x00); MEM_WRITE_1(sc, 0x210000, 0x80); for (ntries = 0; ntries < 10; ntries++) { if (MEM_READ_1(sc, 0x210000) & 1) break; DELAY(10); } if (ntries == 10) { device_printf(sc->sc_dev, "timeout waiting for ucode to initialize\n"); return EIO; } MEM_WRITE_4(sc, 0x3000e0, 0); return 0; } /* set of macros to handle unaligned little endian data in firmware image */ #define GETLE32(p) ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24) #define GETLE16(p) ((p)[0] | (p)[1] << 8) static int ipw_load_firmware(struct ipw_softc *sc, const char *fw, int size) { const uint8_t *p, *end; uint32_t tmp, dst; uint16_t len; int error; p = fw; end = fw + size; while (p < end) { dst = GETLE32(p); p += 4; len = GETLE16(p); p += 2; ipw_write_mem_1(sc, dst, p, len); p += len; } CSR_WRITE_4(sc, IPW_CSR_IO, IPW_IO_GPIO1_ENABLE | IPW_IO_GPIO3_MASK | IPW_IO_LED_OFF); /* enable interrupts */ CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK); /* kick the firmware */ CSR_WRITE_4(sc, IPW_CSR_RST, 0); tmp = CSR_READ_4(sc, IPW_CSR_CTL); CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_ALLOW_STANDBY); /* wait at most one second for firmware initialization to complete */ if ((error = msleep(sc, &sc->sc_mtx, 0, "ipwinit", hz)) != 0) { device_printf(sc->sc_dev, "timeout waiting for firmware " "initialization to complete\n"); return error; } tmp = CSR_READ_4(sc, IPW_CSR_IO); CSR_WRITE_4(sc, IPW_CSR_IO, tmp | IPW_IO_GPIO1_MASK | IPW_IO_GPIO3_MASK); return 0; } static int ipw_setwepkeys(struct ipw_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ipw_wep_key wepkey; struct ieee80211_key *wk; int error, i; for (i = 0; i < IEEE80211_WEP_NKID; i++) { wk = &vap->iv_nw_keys[i]; if (wk->wk_cipher == NULL || wk->wk_cipher->ic_cipher != IEEE80211_CIPHER_WEP) continue; wepkey.idx = i; wepkey.len = wk->wk_keylen; memset(wepkey.key, 0, sizeof wepkey.key); memcpy(wepkey.key, wk->wk_key, wk->wk_keylen); DPRINTF(("Setting wep key index %u len %u\n", wepkey.idx, wepkey.len)); error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY, &wepkey, sizeof wepkey); if (error != 0) return error; } return 0; } static int ipw_setwpaie(struct ipw_softc *sc, const void *ie, int ielen) { struct ipw_wpa_ie wpaie; memset(&wpaie, 0, sizeof(wpaie)); wpaie.len = htole32(ielen); /* XXX verify length */ memcpy(&wpaie.ie, ie, ielen); DPRINTF(("Setting WPA IE\n")); return ipw_cmd(sc, IPW_CMD_SET_WPA_IE, &wpaie, sizeof(wpaie)); } static int ipw_setbssid(struct ipw_softc *sc, uint8_t *bssid) { static const uint8_t zerobssid[IEEE80211_ADDR_LEN]; if (bssid == NULL || bcmp(bssid, zerobssid, IEEE80211_ADDR_LEN) == 0) { DPRINTF(("Setting mandatory BSSID to null\n")); return ipw_cmd(sc, IPW_CMD_SET_MANDATORY_BSSID, NULL, 0); } else { DPRINTF(("Setting mandatory BSSID to %6D\n", bssid, ":")); return ipw_cmd(sc, IPW_CMD_SET_MANDATORY_BSSID, bssid, 
IEEE80211_ADDR_LEN); } } static int ipw_setssid(struct ipw_softc *sc, void *ssid, size_t ssidlen) { if (ssidlen == 0) { /* * A bug in the firmware breaks the ``don't associate'' * bit in the scan options command. To compensate for * this, install a bogus ssid when no ssid is specified * so the firmware won't try to associate. */ DPRINTF(("Setting bogus ESSID to WAR firmware bug\n")); return ipw_cmd(sc, IPW_CMD_SET_ESSID, "\x18\x19\x20\x21\x22\x23\x24\x25\x26\x27" "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31" "\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b" "\x3c\x3d", IEEE80211_NWID_LEN); } else { #ifdef IPW_DEBUG if (ipw_debug > 0) { printf("Setting ESSID to "); ieee80211_print_essid(ssid, ssidlen); printf("\n"); } #endif return ipw_cmd(sc, IPW_CMD_SET_ESSID, ssid, ssidlen); } } static int ipw_setscanopts(struct ipw_softc *sc, uint32_t chanmask, uint32_t flags) { struct ipw_scan_options opts; DPRINTF(("Scan options: mask 0x%x flags 0x%x\n", chanmask, flags)); opts.channels = htole32(chanmask); opts.flags = htole32(flags); return ipw_cmd(sc, IPW_CMD_SET_SCAN_OPTIONS, &opts, sizeof(opts)); } static int ipw_scan(struct ipw_softc *sc) { uint32_t params; int error; DPRINTF(("%s: flags 0x%x\n", __func__, sc->flags)); if (sc->flags & IPW_FLAG_SCANNING) return (EBUSY); sc->flags |= IPW_FLAG_SCANNING | IPW_FLAG_HACK; /* NB: IPW_SCAN_DO_NOT_ASSOCIATE does not work (we set it anyway) */ error = ipw_setscanopts(sc, 0x3fff, IPW_SCAN_DO_NOT_ASSOCIATE); if (error != 0) goto done; /* * Setup null/bogus ssid so firmware doesn't use any previous * ssid to try and associate. This is because the ``don't * associate'' option bit is broken (sigh). */ error = ipw_setssid(sc, NULL, 0); if (error != 0) goto done; /* * NB: the adapter may be disabled on association lost; * if so just re-enable it to kick off scanning. */ DPRINTF(("Starting scan\n")); sc->sc_scan_timer = 3; if (sc->flags & IPW_FLAG_ENABLED) { params = 0; /* XXX? */ error = ipw_cmd(sc, IPW_CMD_BROADCAST_SCAN, &params, sizeof(params)); } else error = ipw_enable(sc); done: if (error != 0) { DPRINTF(("Scan failed\n")); sc->flags &= ~(IPW_FLAG_SCANNING | IPW_FLAG_HACK); } return (error); } static int ipw_setchannel(struct ipw_softc *sc, struct ieee80211_channel *chan) { struct ieee80211com *ic = &sc->sc_ic; uint32_t data; int error; data = htole32(ieee80211_chan2ieee(ic, chan)); DPRINTF(("Setting channel to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_CHANNEL, &data, sizeof data); if (error == 0) ipw_setcurchan(sc, chan); return error; } static void ipw_assoc(struct ieee80211com *ic, struct ieee80211vap *vap) { struct ipw_softc *sc = ic->ic_softc; struct ieee80211_node *ni = vap->iv_bss; struct ipw_security security; uint32_t data; int error; IPW_LOCK(sc); error = ipw_disable(sc); if (error != 0) goto done; memset(&security, 0, sizeof security); security.authmode = (ni->ni_authmode == IEEE80211_AUTH_SHARED) ?
IPW_AUTH_SHARED : IPW_AUTH_OPEN; security.ciphers = htole32(IPW_CIPHER_NONE); DPRINTF(("Setting authmode to %u\n", security.authmode)); error = ipw_cmd(sc, IPW_CMD_SET_SECURITY_INFO, &security, sizeof security); if (error != 0) goto done; data = htole32(vap->iv_rtsthreshold); DPRINTF(("Setting RTS threshold to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_RTS_THRESHOLD, &data, sizeof data); if (error != 0) goto done; data = htole32(vap->iv_fragthreshold); DPRINTF(("Setting frag threshold to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_FRAG_THRESHOLD, &data, sizeof data); if (error != 0) goto done; if (vap->iv_flags & IEEE80211_F_PRIVACY) { error = ipw_setwepkeys(sc); if (error != 0) goto done; if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE) { data = htole32(vap->iv_def_txkey); DPRINTF(("Setting wep tx key index to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY_INDEX, &data, sizeof data); if (error != 0) goto done; } } data = htole32((vap->iv_flags & IEEE80211_F_PRIVACY) ? IPW_WEPON : 0); DPRINTF(("Setting wep flags to 0x%x\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_WEP_FLAGS, &data, sizeof data); if (error != 0) goto done; error = ipw_setssid(sc, ni->ni_essid, ni->ni_esslen); if (error != 0) goto done; error = ipw_setbssid(sc, ni->ni_bssid); if (error != 0) goto done; if (vap->iv_appie_wpa != NULL) { struct ieee80211_appie *ie = vap->iv_appie_wpa; error = ipw_setwpaie(sc, ie->ie_data, ie->ie_len); if (error != 0) goto done; } if (ic->ic_opmode == IEEE80211_M_IBSS) { error = ipw_setchannel(sc, ni->ni_chan); if (error != 0) goto done; } /* lock scan to ap's channel and enable associate */ error = ipw_setscanopts(sc, 1<<(ieee80211_chan2ieee(ic, ni->ni_chan)-1), 0); if (error != 0) goto done; error = ipw_enable(sc); /* finally, enable adapter */ if (error == 0) sc->flags |= IPW_FLAG_ASSOCIATING; done: IPW_UNLOCK(sc); } static void ipw_disassoc(struct ieee80211com *ic, struct ieee80211vap *vap) { struct ieee80211_node *ni = vap->iv_bss; struct ipw_softc *sc = ic->ic_softc; IPW_LOCK(sc); DPRINTF(("Disassociate from %6D\n", ni->ni_bssid, ":")); /* * NB: don't try to do this if ipw_stop_master has * shutdown the firmware and disabled interrupts. */ if (sc->flags & IPW_FLAG_FW_INITED) { sc->flags &= ~IPW_FLAG_ASSOCIATED; /* * NB: firmware currently ignores bssid parameter, but * supply it in case this changes (follow linux driver). */ (void) ipw_cmd(sc, IPW_CMD_DISASSOCIATE, ni->ni_bssid, IEEE80211_ADDR_LEN); } IPW_UNLOCK(sc); } /* * Handler for sc_init_task. This is a simple wrapper around ipw_init(). * It is called on firmware panics or on watchdog timeouts. */ static void ipw_init_task(void *context, int pending) { ipw_init(context); } static void ipw_init(void *priv) { struct ipw_softc *sc = priv; struct ieee80211com *ic = &sc->sc_ic; IPW_LOCK(sc); ipw_init_locked(sc); IPW_UNLOCK(sc); if (sc->flags & IPW_FLAG_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void ipw_init_locked(struct ipw_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); const struct firmware *fp; const struct ipw_firmware_hdr *hdr; const char *fw; IPW_LOCK_ASSERT(sc); DPRINTF(("%s: state %s flags 0x%x\n", __func__, ieee80211_state_name[vap->iv_state], sc->flags)); /* * Avoid re-entrant calls. We need to release the mutex in ipw_init() * when loading the firmware and we don't want to be called during this * operation. 
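* (the IPW_FLAG_INIT_LOCKED flag tested below implements this guard)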
*/ if (sc->flags & IPW_FLAG_INIT_LOCKED) return; sc->flags |= IPW_FLAG_INIT_LOCKED; ipw_stop_locked(sc); if (ipw_reset(sc) != 0) { device_printf(sc->sc_dev, "could not reset adapter\n"); goto fail; } if (sc->sc_firmware == NULL) { device_printf(sc->sc_dev, "no firmware\n"); goto fail; } /* NB: consistency already checked on load */ fp = sc->sc_firmware; hdr = (const struct ipw_firmware_hdr *)fp->data; DPRINTF(("Loading firmware image '%s'\n", fp->name)); fw = (const char *)fp->data + sizeof *hdr + le32toh(hdr->mainsz); if (ipw_load_ucode(sc, fw, le32toh(hdr->ucodesz)) != 0) { device_printf(sc->sc_dev, "could not load microcode\n"); goto fail; } ipw_stop_master(sc); /* * Setup tx, rx and status rings. */ sc->txold = IPW_NTBD - 1; sc->txcur = 0; sc->txfree = IPW_NTBD - 2; sc->rxcur = IPW_NRBD - 1; CSR_WRITE_4(sc, IPW_CSR_TX_BASE, sc->tbd_phys); CSR_WRITE_4(sc, IPW_CSR_TX_SIZE, IPW_NTBD); CSR_WRITE_4(sc, IPW_CSR_TX_READ, 0); CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur); CSR_WRITE_4(sc, IPW_CSR_RX_BASE, sc->rbd_phys); CSR_WRITE_4(sc, IPW_CSR_RX_SIZE, IPW_NRBD); CSR_WRITE_4(sc, IPW_CSR_RX_READ, 0); CSR_WRITE_4(sc, IPW_CSR_RX_WRITE, sc->rxcur); CSR_WRITE_4(sc, IPW_CSR_STATUS_BASE, sc->status_phys); fw = (const char *)fp->data + sizeof *hdr; if (ipw_load_firmware(sc, fw, le32toh(hdr->mainsz)) != 0) { device_printf(sc->sc_dev, "could not load firmware\n"); goto fail; } sc->flags |= IPW_FLAG_FW_INITED; /* retrieve information tables base addresses */ sc->table1_base = CSR_READ_4(sc, IPW_CSR_TABLE1_BASE); sc->table2_base = CSR_READ_4(sc, IPW_CSR_TABLE2_BASE); ipw_write_table1(sc, IPW_INFO_LOCK, 0); if (ipw_config(sc) != 0) { device_printf(sc->sc_dev, "device configuration failed\n"); goto fail; } callout_reset(&sc->sc_wdtimer, hz, ipw_watchdog, sc); sc->flags |= IPW_FLAG_RUNNING; sc->flags &= ~IPW_FLAG_INIT_LOCKED; return; fail: ipw_stop_locked(sc); sc->flags &= ~IPW_FLAG_INIT_LOCKED; } static int ipw_config(struct ipw_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ipw_configuration config; uint32_t data; int error; error = ipw_disable(sc); if (error != 0) return error; switch (ic->ic_opmode) { case IEEE80211_M_STA: case IEEE80211_M_HOSTAP: case IEEE80211_M_WDS: /* XXX */ data = htole32(IPW_MODE_BSS); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: data = htole32(IPW_MODE_IBSS); break; case IEEE80211_M_MONITOR: data = htole32(IPW_MODE_MONITOR); break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", ic->ic_opmode); return EINVAL; } DPRINTF(("Setting mode to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_MODE, &data, sizeof data); if (error != 0) return error; if (ic->ic_opmode == IEEE80211_M_IBSS || ic->ic_opmode == IEEE80211_M_MONITOR) { error = ipw_setchannel(sc, ic->ic_curchan); if (error != 0) return error; } if (ic->ic_opmode == IEEE80211_M_MONITOR) return ipw_enable(sc); config.flags = htole32(IPW_CFG_BSS_MASK | IPW_CFG_IBSS_MASK | IPW_CFG_PREAMBLE_AUTO | IPW_CFG_802_1x_ENABLE); if (ic->ic_opmode == IEEE80211_M_IBSS) config.flags |= htole32(IPW_CFG_IBSS_AUTO_START); if (ic->ic_promisc > 0) config.flags |= htole32(IPW_CFG_PROMISCUOUS); config.bss_chan = htole32(0x3fff); /* channels 1-14 */ config.ibss_chan = htole32(0x7ff); /* channels 1-11 */ DPRINTF(("Setting configuration to 0x%x\n", le32toh(config.flags))); error = ipw_cmd(sc, IPW_CMD_SET_CONFIGURATION, &config, sizeof config); if (error != 0) return error; data = htole32(0xf); /* 1, 2, 5.5, 11 */ DPRINTF(("Setting basic tx rates to 0x%x\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_BASIC_TX_RATES, 
&data, sizeof data); if (error != 0) return error; /* Use the same rate set */ DPRINTF(("Setting msdu tx rates to 0x%x\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_MSDU_TX_RATES, &data, sizeof data); if (error != 0) return error; /* Use the same rate set */ DPRINTF(("Setting tx rates to 0x%x\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_TX_RATES, &data, sizeof data); if (error != 0) return error; data = htole32(IPW_POWER_MODE_CAM); DPRINTF(("Setting power mode to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_POWER_MODE, &data, sizeof data); if (error != 0) return error; if (ic->ic_opmode == IEEE80211_M_IBSS) { data = htole32(32); /* default value */ DPRINTF(("Setting tx power index to %u\n", le32toh(data))); error = ipw_cmd(sc, IPW_CMD_SET_TX_POWER_INDEX, &data, sizeof data); if (error != 0) return error; } return 0; } static void ipw_stop(void *priv) { struct ipw_softc *sc = priv; IPW_LOCK(sc); ipw_stop_locked(sc); IPW_UNLOCK(sc); } static void ipw_stop_locked(struct ipw_softc *sc) { int i; IPW_LOCK_ASSERT(sc); callout_stop(&sc->sc_wdtimer); ipw_stop_master(sc); CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_SW_RESET); /* * Release tx buffers. */ for (i = 0; i < IPW_NTBD; i++) ipw_release_sbd(sc, &sc->stbd_list[i]); sc->sc_tx_timer = 0; sc->flags &= ~IPW_FLAG_RUNNING; } static int ipw_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct ipw_softc *sc = arg1; uint32_t i, size, buf[256]; memset(buf, 0, sizeof buf); if (!(sc->flags & IPW_FLAG_FW_INITED)) return SYSCTL_OUT(req, buf, sizeof buf); CSR_WRITE_4(sc, IPW_CSR_AUTOINC_ADDR, sc->table1_base); size = min(CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA), 256); for (i = 1; i < size; i++) buf[i] = MEM_READ_4(sc, CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA)); return SYSCTL_OUT(req, buf, size); } static int ipw_sysctl_radio(SYSCTL_HANDLER_ARGS) { struct ipw_softc *sc = arg1; int val; val = !((sc->flags & IPW_FLAG_HAS_RADIO_SWITCH) && (CSR_READ_4(sc, IPW_CSR_IO) & IPW_IO_RADIO_DISABLED)); return SYSCTL_OUT(req, &val, sizeof val); } static uint32_t ipw_read_table1(struct ipw_softc *sc, uint32_t off) { return MEM_READ_4(sc, MEM_READ_4(sc, sc->table1_base + off)); } static void ipw_write_table1(struct ipw_softc *sc, uint32_t off, uint32_t info) { MEM_WRITE_4(sc, MEM_READ_4(sc, sc->table1_base + off), info); } #if 0 static int ipw_read_table2(struct ipw_softc *sc, uint32_t off, void *buf, uint32_t *len) { uint32_t addr, info; uint16_t count, size; uint32_t total; /* addr[4] + count[2] + size[2] */ addr = MEM_READ_4(sc, sc->table2_base + off); info = MEM_READ_4(sc, sc->table2_base + off + 4); count = info >> 16; size = info & 0xffff; total = count * size; if (total > *len) { *len = total; return EINVAL; } *len = total; ipw_read_mem_1(sc, addr, buf, total); return 0; } static void ipw_read_mem_1(struct ipw_softc *sc, bus_size_t offset, uint8_t *datap, bus_size_t count) { for (; count > 0; offset++, datap++, count--) { CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3); *datap = CSR_READ_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3)); } } #endif static void ipw_write_mem_1(struct ipw_softc *sc, bus_size_t offset, const uint8_t *datap, bus_size_t count) { for (; count > 0; offset++, datap++, count--) { CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3); CSR_WRITE_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3), *datap); } } static void ipw_scan_start(struct ieee80211com *ic) { struct ipw_softc *sc = ic->ic_softc; IPW_LOCK(sc); ipw_scan(sc); IPW_UNLOCK(sc); } static void ipw_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel 
chans[]) { struct ipw_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; int i; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); for (i = 1; i < 16; i++) { if (sc->chanmask & (1 << i)) { ieee80211_add_channel(chans, maxchans, nchans, i, 0, 0, 0, bands); } } } static void ipw_set_channel(struct ieee80211com *ic) { struct ipw_softc *sc = ic->ic_softc; IPW_LOCK(sc); if (ic->ic_opmode == IEEE80211_M_MONITOR) { ipw_disable(sc); ipw_setchannel(sc, ic->ic_curchan); ipw_enable(sc); } IPW_UNLOCK(sc); } static void ipw_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { /* NB: all channels are scanned at once */ } static void ipw_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } static void ipw_scan_end(struct ieee80211com *ic) { struct ipw_softc *sc = ic->ic_softc; IPW_LOCK(sc); sc->flags &= ~IPW_FLAG_SCANNING; IPW_UNLOCK(sc); } diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c index d9220eaee0f9..71da0832d29e 100644 --- a/sys/dev/malo/if_malo.c +++ b/sys/dev/malo/if_malo.c @@ -1,2158 +1,2158 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Weongyo Jeong * Copyright (c) 2007 Marvell Semiconductor, Inc. * Copyright (c) 2007 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
*/ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif #include "opt_malo.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, malo, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Marvell 88w8335 driver parameters"); static int malo_txcoalesce = 8; /* # tx pkts to q before poking f/w*/ SYSCTL_INT(_hw_malo, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &malo_txcoalesce, 0, "tx buffers to send at once"); static int malo_rxbuf = MALO_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_malo, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &malo_rxbuf, 0, "rx buffers allocated"); static int malo_rxquota = MALO_RXBUF; /* # max buffers to process */ SYSCTL_INT(_hw_malo, OID_AUTO, rxquota, CTLFLAG_RWTUN, &malo_rxquota, 0, "max rx buffers to process per interrupt"); static int malo_txbuf = MALO_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_malo, OID_AUTO, txbuf, CTLFLAG_RWTUN, &malo_txbuf, 0, "tx buffers allocated"); #ifdef MALO_DEBUG static int malo_debug = 0; SYSCTL_INT(_hw_malo, OID_AUTO, debug, CTLFLAG_RWTUN, &malo_debug, 0, "control debugging printfs"); enum { MALO_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ MALO_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ MALO_DEBUG_RECV = 0x00000004, /* basic recv operation */ MALO_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ MALO_DEBUG_RESET = 0x00000010, /* reset processing */ MALO_DEBUG_INTR = 0x00000040, /* ISR */ MALO_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */ MALO_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */ MALO_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */ MALO_DEBUG_NODE = 0x00000800, /* node management */ MALO_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */ MALO_DEBUG_FW = 0x00008000, /* firmware */ MALO_DEBUG_ANY = 0xffffffff }; #define IS_BEACON(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | \ IEEE80211_FC0_SUBTYPE_MASK)) == \ (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON)) #define IFF_DUMPPKTS_RECV(sc, wh) \ (((sc->malo_debug & MALO_DEBUG_RECV) && \ ((sc->malo_debug & MALO_DEBUG_RECV_ALL) || !IS_BEACON(wh)))) #define IFF_DUMPPKTS_XMIT(sc) \ (sc->malo_debug & MALO_DEBUG_XMIT) #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->malo_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) 
do { \ (void) sc; \ } while (0) #endif static MALLOC_DEFINE(M_MALODEV, "malodev", "malo driver dma buffers"); static struct ieee80211vap *malo_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void malo_vap_delete(struct ieee80211vap *); static int malo_dma_setup(struct malo_softc *); static int malo_setup_hwdma(struct malo_softc *); static void malo_txq_init(struct malo_softc *, struct malo_txq *, int); static void malo_tx_cleanupq(struct malo_softc *, struct malo_txq *); static void malo_parent(struct ieee80211com *); static int malo_transmit(struct ieee80211com *, struct mbuf *); static void malo_start(struct malo_softc *); static void malo_watchdog(void *); static void malo_updateslot(struct ieee80211com *); static int malo_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void malo_scan_start(struct ieee80211com *); static void malo_scan_end(struct ieee80211com *); static void malo_set_channel(struct ieee80211com *); static int malo_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void malo_sysctlattach(struct malo_softc *); static void malo_announce(struct malo_softc *); static void malo_dma_cleanup(struct malo_softc *); static void malo_stop(struct malo_softc *); static int malo_chan_set(struct malo_softc *, struct ieee80211_channel *); static int malo_mode_init(struct malo_softc *); static void malo_tx_proc(void *, int); static void malo_rx_proc(void *, int); static void malo_init(void *); /* * Read/Write shorthands for accesses to BAR 0. Note that all BAR 1 * operations are done in the "hal" except getting H/W MAC address at * malo_attach, and there should be no reference to them here. */ static uint32_t malo_bar0_read4(struct malo_softc *sc, bus_size_t off) { return bus_space_read_4(sc->malo_io0t, sc->malo_io0h, off); } static void malo_bar0_write4(struct malo_softc *sc, bus_size_t off, uint32_t val) { DPRINTF(sc, MALO_DEBUG_FW, "%s: off 0x%jx val 0x%x\n", __func__, (uintmax_t)off, val); bus_space_write_4(sc->malo_io0t, sc->malo_io0h, off, val); } int malo_attach(uint16_t devid, struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; struct malo_hal *mh; int error; uint8_t bands[IEEE80211_MODE_BYTES]; MALO_LOCK_INIT(sc); callout_init_mtx(&sc->malo_watchdog_timer, &sc->malo_mtx, 0); mbufq_init(&sc->malo_snd, ifqmaxlen); mh = malo_hal_attach(sc->malo_dev, devid, sc->malo_io1h, sc->malo_io1t, sc->malo_dmat); if (mh == NULL) { device_printf(sc->malo_dev, "unable to attach HAL\n"); error = EIO; goto bad; } sc->malo_mh = mh; /* * Load firmware so we can get set up. We arbitrarily pick station * firmware; we'll re-load firmware as needed so setting up * the wrong mode isn't a big deal. */ error = malo_hal_fwload(mh, "malo8335-h", "malo8335-m"); if (error != 0) { device_printf(sc->malo_dev, "unable to setup firmware\n"); goto bad1; } /* XXX does gethwspecs() extract the correct information? maybe not!
*/ error = malo_hal_gethwspecs(mh, &sc->malo_hwspecs); if (error != 0) { device_printf(sc->malo_dev, "unable to fetch h/w specs\n"); goto bad1; } DPRINTF(sc, MALO_DEBUG_FW, "malo_hal_gethwspecs: hwversion 0x%x hostif 0x%x" "maxnum_wcb 0x%x maxnum_mcaddr 0x%x maxnum_tx_wcb 0x%x" "regioncode 0x%x num_antenna 0x%x fw_releasenum 0x%x" "wcbbase0 0x%x rxdesc_read 0x%x rxdesc_write 0x%x" "ul_fw_awakecookie 0x%x w[4] = %x %x %x %x", sc->malo_hwspecs.hwversion, sc->malo_hwspecs.hostinterface, sc->malo_hwspecs.maxnum_wcb, sc->malo_hwspecs.maxnum_mcaddr, sc->malo_hwspecs.maxnum_tx_wcb, sc->malo_hwspecs.regioncode, sc->malo_hwspecs.num_antenna, sc->malo_hwspecs.fw_releasenum, sc->malo_hwspecs.wcbbase0, sc->malo_hwspecs.rxdesc_read, sc->malo_hwspecs.rxdesc_write, sc->malo_hwspecs.ul_fw_awakecookie, sc->malo_hwspecs.wcbbase[0], sc->malo_hwspecs.wcbbase[1], sc->malo_hwspecs.wcbbase[2], sc->malo_hwspecs.wcbbase[3]); /* NB: the firmware does not appear to export a regdomain info API. */ memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, bands); sc->malo_txantenna = 0x2; /* h/w default */ sc->malo_rxantenna = 0xffff; /* h/w default */ /* * Allocate tx + rx descriptors and populate the lists. * We immediately push the information to the firmware * as otherwise it gets upset. */ error = malo_dma_setup(sc); if (error != 0) { device_printf(sc->malo_dev, "failed to setup descriptors: %d\n", error); goto bad1; } error = malo_setup_hwdma(sc); /* push to firmware */ if (error != 0) /* NB: malo_setupdma prints msg */ goto bad2; sc->malo_tq = taskqueue_create_fast("malo_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->malo_tq); taskqueue_start_threads(&sc->malo_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->malo_dev)); NET_TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc); TASK_INIT(&sc->malo_txtask, 0, malo_tx_proc, sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->malo_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ ; IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->malo_hwspecs.macaddr); /* * Transmit requires space in the packet for a special format transmit * record and optional padding between this record and the payload. * Ask the net80211 layer to arrange this when encapsulating * packets so we can add it efficiently. */ ic->ic_headroom = sizeof(struct malo_txrec) - sizeof(struct ieee80211_frame); /* call MI attach routine. */ ieee80211_ifattach(ic); /* override default methods */ ic->ic_vap_create = malo_vap_create; ic->ic_vap_delete = malo_vap_delete; ic->ic_raw_xmit = malo_raw_xmit; ic->ic_updateslot = malo_updateslot; ic->ic_scan_start = malo_scan_start; ic->ic_scan_end = malo_scan_end; ic->ic_set_channel = malo_set_channel; ic->ic_parent = malo_parent; ic->ic_transmit = malo_transmit; sc->malo_invalid = 0; /* ready to go, enable int handling */ ieee80211_radiotap_attach(ic, &sc->malo_tx_th.wt_ihdr, sizeof(sc->malo_tx_th), MALO_TX_RADIOTAP_PRESENT, &sc->malo_rx_th.wr_ihdr, sizeof(sc->malo_rx_th), MALO_RX_RADIOTAP_PRESENT); /* * Setup dynamic sysctl's.
*/ malo_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); malo_announce(sc); return 0; bad2: malo_dma_cleanup(sc); bad1: malo_hal_detach(mh); bad: sc->malo_invalid = 1; return error; } static struct ieee80211vap * malo_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct malo_softc *sc = ic->ic_softc; struct malo_vap *mvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) { device_printf(sc->malo_dev, "multiple vaps not supported\n"); return NULL; } switch (opmode) { case IEEE80211_M_STA: if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; /* fall thru... */ case IEEE80211_M_MONITOR: break; default: device_printf(sc->malo_dev, "%s mode not supported\n", ieee80211_opmode_name[opmode]); return NULL; /* unsupported */ } mvp = malloc(sizeof(struct malo_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &mvp->malo_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override state transition machine */ mvp->malo_newstate = vap->iv_newstate; vap->iv_newstate = malo_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void malo_vap_delete(struct ieee80211vap *vap) { struct malo_vap *mvp = MALO_VAP(vap); ieee80211_vap_detach(vap); free(mvp, M_80211_VAP); } int malo_intr(void *arg) { struct malo_softc *sc = arg; struct malo_hal *mh = sc->malo_mh; uint32_t status; if (sc->malo_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid; ignored\n", __func__); return (FILTER_STRAY); } /* * Figure out the reason(s) for the interrupt. */ malo_hal_getisr(mh, &status); /* NB: clears ISR too */ if (status == 0) /* must be a shared irq */ return (FILTER_STRAY); DPRINTF(sc, MALO_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n", __func__, status, sc->malo_imask); if (status & MALO_A2HRIC_BIT_RX_RDY) taskqueue_enqueue(sc->malo_tq, &sc->malo_rxtask); if (status & MALO_A2HRIC_BIT_TX_DONE) taskqueue_enqueue(sc->malo_tq, &sc->malo_txtask); if (status & MALO_A2HRIC_BIT_OPC_DONE) malo_hal_cmddone(mh); if (status & MALO_A2HRIC_BIT_MAC_EVENT) ; if (status & MALO_A2HRIC_BIT_RX_PROBLEM) ; if (status & MALO_A2HRIC_BIT_ICV_ERROR) { /* TKIP ICV error */ sc->malo_stats.mst_rx_badtkipicv++; } #ifdef MALO_DEBUG if (((status | sc->malo_imask) ^ sc->malo_imask) != 0) DPRINTF(sc, MALO_DEBUG_INTR, "%s: can't handle interrupt status 0x%x\n", __func__, status); #endif return (FILTER_HANDLED); } static void malo_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; KASSERT(error == 0, ("error %u on bus_dma callback", error)); *paddr = segs->ds_addr; } static int malo_desc_setup(struct malo_softc *sc, const char *name, struct malo_descdma *dd, int nbuf, size_t bufsize, int ndesc, size_t descsize) { int error; uint8_t *ds; DPRINTF(sc, MALO_DEBUG_RESET, "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n", __func__, name, nbuf, (uintmax_t) bufsize, ndesc, (uintmax_t) descsize); dd->dd_name = name; dd->dd_desc_len = nbuf * ndesc * descsize; /* * Setup DMA descriptor area. 
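* All descriptors are carved from a single page-aligned, physically * contiguous allocation kept below 4GB (one segment, * BUS_SPACE_MAXADDR_32BIT); DS2PHYS() later derives each descriptor's * bus address from the loaded base.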
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->malo_dev),/* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dd->dd_desc_len, /* maxsize */ 1, /* nsegments */ dd->dd_desc_len, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dd->dd_dmat); if (error != 0) { device_printf(sc->malo_dev, "cannot allocate %s DMA tag\n", dd->dd_name); return error; } /* allocate descriptors */ error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap); if (error != 0) { device_printf(sc->malo_dev, "unable to alloc memory for %u %s descriptors, " "error %u\n", nbuf * ndesc, dd->dd_name, error); goto fail1; } error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, dd->dd_desc_len, malo_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->malo_dev, "unable to map %s descriptors, error %u\n", dd->dd_name, error); goto fail2; } ds = dd->dd_desc; memset(ds, 0, dd->dd_desc_len); DPRINTF(sc, MALO_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n", __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); return 0; fail2: bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); fail1: bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); return error; } #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) static int malo_rxdma_setup(struct malo_softc *sc) { int error, bsize, i; struct malo_rxbuf *bf; struct malo_rxdesc *ds; error = malo_desc_setup(sc, "rx", &sc->malo_rxdma, malo_rxbuf, sizeof(struct malo_rxbuf), 1, sizeof(struct malo_rxdesc)); if (error != 0) return error; /* * Allocate rx buffers and set them up. 
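* Each buffer gets its own DMA map and is linked onto the malo_rxbuf * list in descriptor order.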
*/ bsize = malo_rxbuf * sizeof(struct malo_rxbuf); bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->malo_dev, "malloc of %u rx buffers failed\n", bsize); return ENOMEM; } sc->malo_rxdma.dd_bufptr = bf; STAILQ_INIT(&sc->malo_rxbuf); ds = sc->malo_rxdma.dd_desc; for (i = 0; i < malo_rxbuf; i++, bf++, ds++) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&sc->malo_rxdma, ds); error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { device_printf(sc->malo_dev, "%s: unable to dmamap for rx buffer, error %d\n", __func__, error); return error; } /* NB: tail is intentional to preserve descriptor order */ STAILQ_INSERT_TAIL(&sc->malo_rxbuf, bf, bf_list); } return 0; } static int malo_txdma_setup(struct malo_softc *sc, struct malo_txq *txq) { int error, bsize, i; struct malo_txbuf *bf; struct malo_txdesc *ds; error = malo_desc_setup(sc, "tx", &txq->dma, malo_txbuf, sizeof(struct malo_txbuf), MALO_TXDESC, sizeof(struct malo_txdesc)); if (error != 0) return error; /* allocate and setup tx buffers */ bsize = malo_txbuf * sizeof(struct malo_txbuf); bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->malo_dev, "malloc of %u tx buffers failed\n", malo_txbuf); return ENOMEM; } txq->dma.dd_bufptr = bf; STAILQ_INIT(&txq->free); txq->nfree = 0; ds = txq->dma.dd_desc; for (i = 0; i < malo_txbuf; i++, bf++, ds += MALO_TXDESC) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&txq->dma, ds); error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { device_printf(sc->malo_dev, "unable to create dmamap for tx " "buffer %u, error %u\n", i, error); return error; } STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; } return 0; } static void malo_desc_cleanup(struct malo_softc *sc, struct malo_descdma *dd) { bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); } static void malo_rxdma_cleanup(struct malo_softc *sc) { struct malo_rxbuf *bf; STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) { if (bf->bf_m != NULL) { m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } } STAILQ_INIT(&sc->malo_rxbuf); if (sc->malo_rxdma.dd_bufptr != NULL) { free(sc->malo_rxdma.dd_bufptr, M_MALODEV); sc->malo_rxdma.dd_bufptr = NULL; } if (sc->malo_rxdma.dd_desc_len != 0) malo_desc_cleanup(sc, &sc->malo_rxdma); } static void malo_txdma_cleanup(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; struct ieee80211_node *ni; STAILQ_FOREACH(bf, &txq->free, bf_list) { if (bf->bf_m != NULL) { m_freem(bf->bf_m); bf->bf_m = NULL; } ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } } STAILQ_INIT(&txq->free); txq->nfree = 0; if (txq->dma.dd_bufptr != NULL) { free(txq->dma.dd_bufptr, M_MALODEV); txq->dma.dd_bufptr = NULL; } if (txq->dma.dd_desc_len != 0) malo_desc_cleanup(sc, &txq->dma); } static void malo_dma_cleanup(struct malo_softc *sc) { int i; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_txdma_cleanup(sc, &sc->malo_txq[i]); malo_rxdma_cleanup(sc); } static int malo_dma_setup(struct malo_softc *sc) { int error, i; /* rxdma initializing. */ error = malo_rxdma_setup(sc); if (error != 0) return error; /* NB: we just have 1 tx queue now.
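 *
 * The setup loops above pair buffer i with its block of descriptors
 * using a stride walk; a compact sketch of that pairing follows, with
 * hypothetical ex_ types standing in for the driver's.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

struct ex_desc { uint32_t words[8]; };
struct ex_buf { struct ex_desc *bf_desc; };

/* Buffer i owns the ndesc descriptors starting at ds[i * ndesc]. */
static void
ex_pair_bufs(struct ex_buf *bf, struct ex_desc *ds, int nbuf, int ndesc)
{
	int i;

	for (i = 0; i < nbuf; i++, bf++, ds += ndesc)
		bf->bf_desc = ds;
}
#endif
/*
 * Each queue below gets its own ring and free list.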
*/ for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { error = malo_txdma_setup(sc, &sc->malo_txq[i]); if (error != 0) { malo_dma_cleanup(sc); return error; } malo_txq_init(sc, &sc->malo_txq[i], i); } return 0; } static void malo_hal_set_rxtxdma(struct malo_softc *sc) { int i; malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read, sc->malo_hwdma.rxdesc_read); malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_write, sc->malo_hwdma.rxdesc_read); for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { malo_bar0_write4(sc, sc->malo_hwspecs.wcbbase[i], sc->malo_hwdma.wcbbase[i]); } } /* * Inform firmware of our tx/rx dma setup. The BAR 0 writes below are * for compatibility with older firmware. For current firmware we send * this information with a cmd block via malo_hal_sethwdma. */ static int malo_setup_hwdma(struct malo_softc *sc) { int i; struct malo_txq *txq; sc->malo_hwdma.rxdesc_read = sc->malo_rxdma.dd_desc_paddr; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { txq = &sc->malo_txq[i]; sc->malo_hwdma.wcbbase[i] = txq->dma.dd_desc_paddr; } sc->malo_hwdma.maxnum_txwcb = malo_txbuf; sc->malo_hwdma.maxnum_wcb = MALO_NUM_TX_QUEUES; malo_hal_set_rxtxdma(sc); return 0; } static void malo_txq_init(struct malo_softc *sc, struct malo_txq *txq, int qnum) { struct malo_txbuf *bf, *bn; struct malo_txdesc *ds; MALO_TXQ_LOCK_INIT(sc, txq); txq->qnum = qnum; txq->txpri = 0; /* XXX */ STAILQ_FOREACH(bf, &txq->free, bf_list) { bf->bf_txq = txq; ds = bf->bf_desc; bn = STAILQ_NEXT(bf, bf_list); if (bn == NULL) bn = STAILQ_FIRST(&txq->free); ds->physnext = htole32(bn->bf_daddr); } STAILQ_INIT(&txq->active); } /* * Reclaim resources for a setup queue. */ static void malo_tx_cleanupq(struct malo_softc *sc, struct malo_txq *txq) { /* XXX hal work? */ MALO_TXQ_LOCK_DESTROY(txq); } /* * Allocate a tx buffer for sending a frame. */ static struct malo_txbuf * malo_getbuf(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->free); if (bf != NULL) { STAILQ_REMOVE_HEAD(&txq->free, bf_list); txq->nfree--; } MALO_TXQ_UNLOCK(txq); if (bf == NULL) { DPRINTF(sc, MALO_DEBUG_XMIT, "%s: out of xmit buffers on q %d\n", __func__, txq->qnum); sc->malo_stats.mst_tx_qstop++; } return bf; } static int malo_tx_dmasetup(struct malo_softc *sc, struct malo_txbuf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This also calculates * the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = MALO_TXDESC + 1; } else if (error != 0) { sc->malo_stats.mst_tx_busdma++; m_freem(m0); return error; } /* * Discard null packets and check for packets that require too many * TX descriptors. We try to convert the latter to a cluster. 
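 *
 * malo_txq_init() above chains descriptors into a circular,
 * hardware-visible list: each entry's physnext holds the bus address
 * of its successor and the tail wraps to the head. A sketch, with
 * hypothetical ex_ names (daddr[i] is the bus address of ds[i]):
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

struct ex_ringdesc { uint32_t physnext; };	/* bus addr of successor */

static void
ex_ring_link(struct ex_ringdesc *ds, const uint32_t *daddr, int n)
{
	int i;

	for (i = 0; i < n; i++)
		ds[i].physnext = daddr[(i + 1) % n];	/* tail -> head */
}
#endif
/*
 * The EFBIG case below linearizes the mbuf chain and retries the load.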
*/ if (error == EFBIG) { /* too many desc's, linearize */ sc->malo_stats.mst_tx_linear++; m = m_defrag(m0, M_NOWAIT); if (m == NULL) { m_freem(m0); sc->malo_stats.mst_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->malo_stats.mst_tx_busdma++; m_freem(m0); return error; } KASSERT(bf->bf_nseg <= MALO_TXDESC, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->malo_stats.mst_tx_nodata++; m_freem(m0); return EIO; } DPRINTF(sc, MALO_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } #ifdef MALO_DEBUG static void malo_printrxbuf(const struct malo_rxbuf *bf, u_int ix) { const struct malo_rxdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->status); printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n" " STAT:%02x LEN:%04x SNR:%02x NF:%02x CHAN:%02x" " RATE:%02x QOS:%04x\n", ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->physnext), le32toh(ds->physbuffdata), ds->rxcontrol, ds->rxcontrol != MALO_RXD_CTRL_DRIVER_OWN ? "" : (status & MALO_RXD_STATUS_OK) ? " *" : " !", ds->status, le16toh(ds->pktlen), ds->snr, ds->nf, ds->channel, ds->rate, le16toh(ds->qosctrl)); } static void malo_printtxbuf(const struct malo_txbuf *bf, u_int qnum, u_int ix) { const struct malo_txdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->status); printf("Q%u[%3u]", qnum, ix); printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr); printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n", le32toh(ds->physnext), le32toh(ds->pktptr), le16toh(ds->pktlen), status, status & MALO_TXD_STATUS_USED ? "" : (status & 3) != 0 ? " *" : " !"); printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n", ds->datarate, ds->txpriority, le16toh(ds->qosctrl), le32toh(ds->sap_pktinfo), le16toh(ds->format)); #if 0 { const uint8_t *cp = (const uint8_t *) ds; int i; for (i = 0; i < sizeof(struct malo_txdesc); i++) { printf("%02x ", cp[i]); if (((i+1) % 16) == 0) printf("\n"); } printf("\n"); } #endif } #endif /* MALO_DEBUG */ static __inline void malo_updatetxrate(struct ieee80211_node *ni, int rix) { static const int ieeerates[] = { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 96, 108 }; if (rix < nitems(ieeerates)) ni->ni_txrate = ieeerates[rix]; } static int malo_fix2rate(int fix_rate) { static const int rates[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 96, 108 }; return (fix_rate < nitems(rates) ? rates[fix_rate] : 0); } /* * Process completed xmit descriptors from the specified queue. 
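 *
 * Before that: malo_updatetxrate()/malo_fix2rate() above are
 * bounds-checked table lookups from a firmware rate index to a
 * net80211 rate in half-Mb/s units. A sketch (table contents copied
 * from above; ex_fix2rate is hypothetical):
 */
#if 0	/* illustrative sketch only */
static int
ex_fix2rate(int fix_rate)
{
	static const int rates[] =
	    { 2, 4, 11, 22, 12, 18, 24, 36, 48, 96, 108 };
	int n = (int)(sizeof(rates) / sizeof(rates[0]));

	return ((fix_rate >= 0 && fix_rate < n) ? rates[fix_rate] : 0);
}
#endif
/*
 * malo_tx_processq() below walks the active list until it finds a
 * descriptor the firmware still owns.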
*/ static int malo_tx_processq(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; struct malo_txdesc *ds; struct ieee80211_node *ni; int nreaped; uint32_t status; DPRINTF(sc, MALO_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum); for (nreaped = 0;; nreaped++) { MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MALO_TXQ_UNLOCK(txq); break; } ds = bf->bf_desc; MALO_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->status & htole32(MALO_TXD_STATUS_FW_OWNED)) { MALO_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MALO_TXQ_UNLOCK(txq); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_XMIT_DESC) malo_printtxbuf(bf, txq->qnum, nreaped); #endif ni = bf->bf_node; if (ni != NULL) { status = le32toh(ds->status); if (status & MALO_TXD_STATUS_OK) { uint16_t format = le16toh(ds->format); uint8_t txant =_IEEE80211_MASKSHIFT( format, MALO_TXD_ANTENNA); sc->malo_stats.mst_ant_tx[txant]++; if (status & MALO_TXD_STATUS_OK_RETRY) sc->malo_stats.mst_tx_retries++; if (status & MALO_TXD_STATUS_OK_MORE_RETRY) sc->malo_stats.mst_tx_mretries++; malo_updatetxrate(ni, ds->datarate); sc->malo_stats.mst_tx_rate = ds->datarate; } else { if (status & MALO_TXD_STATUS_FAILED_LINK_ERROR) sc->malo_stats.mst_tx_linkerror++; if (status & MALO_TXD_STATUS_FAILED_XRETRY) sc->malo_stats.mst_tx_xretries++; if (status & MALO_TXD_STATUS_FAILED_AGING) sc->malo_stats.mst_tx_aging++; } /* XXX strip fw len in case header inspected */ m_adj(bf->bf_m, sizeof(uint16_t)); ieee80211_tx_complete(ni, bf->bf_m, (status & MALO_TXD_STATUS_OK) == 0); } else m_freem(bf->bf_m); ds->status = htole32(MALO_TXD_STATUS_IDLE); ds->pktlen = htole32(0); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap); bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); } return nreaped; } /* * Deferred processing of transmit interrupt. */ static void malo_tx_proc(void *arg, int npending) { struct malo_softc *sc = arg; int i, nreaped; /* * Process each active queue. */ nreaped = 0; MALO_LOCK(sc); for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { if (!STAILQ_EMPTY(&sc->malo_txq[i].active)) nreaped += malo_tx_processq(sc, &sc->malo_txq[i]); } if (nreaped != 0) { sc->malo_timer = 0; malo_start(sc); } MALO_UNLOCK(sc); } static int malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni, struct malo_txbuf *bf, struct mbuf *m0) { #define IS_DATA_FRAME(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK)) == IEEE80211_FC0_TYPE_DATA) int error, iswep; int hdrlen, pktlen; struct ieee80211_frame *wh; struct ieee80211com *ic = &sc->malo_ic; struct ieee80211vap *vap = ni->ni_vap; struct malo_txdesc *ds; struct malo_txrec *tr; struct malo_txq *txq; uint16_t qos; wh = mtod(m0, struct ieee80211_frame *); iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; hdrlen = ieee80211_anyhdrsize(wh); pktlen = m0->m_pkthdr.len; if (IEEE80211_QOS_HAS_SEQ(wh)) { qos = *(uint16_t *)ieee80211_getqos(wh); } else qos = 0; if (iswep) { struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. * * NB: we do this even though the firmware will ignore * what we've done for WEP and TKIP as we need the * ExtIV filled in for CCMP and this also adjusts * the headers which simplifies our work below. 
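 *
 * An aside on the mask-and-shift decode used in malo_tx_processq()
 * above to pull the tx antenna out of the descriptor's format word.
 * EX_ANT_MASK/EX_ANT_SHIFT below are an assumed layout for
 * illustration, not the chip's real one:
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

#define EX_ANT_MASK	0x0600U
#define EX_ANT_SHIFT	9

static uint8_t
ex_get_txant(uint16_t format)
{
	return ((uint8_t)((format & EX_ANT_MASK) >> EX_ANT_SHIFT));
}
#endif
/*
 * Continuing with the crypto encapsulation: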
*/ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ m_freem(m0); return EIO; } /* * Adjust the packet length for the crypto additions * done during encap and any other bits that the f/w * will add later on. */ pktlen = m0->m_pkthdr.len; /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { sc->malo_tx_th.wt_flags = 0; /* XXX */ if (iswep) sc->malo_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; sc->malo_tx_th.wt_txpower = ni->ni_txpower; sc->malo_tx_th.wt_antenna = sc->malo_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Copy up/down the 802.11 header; the firmware requires * we present a 2-byte payload length followed by a * 4-address header (w/o QoS), followed (optionally) by * any WEP/ExtIV header (but only filled in for CCMP). * We are assured the mbuf has sufficient headroom to * prepend in-place by the setup of ic_headroom in * malo_attach. */ if (hdrlen < sizeof(struct malo_txrec)) { const int space = sizeof(struct malo_txrec) - hdrlen; if (M_LEADINGSPACE(m0) < space) { /* NB: should never happen */ device_printf(sc->malo_dev, "not enough headroom, need %d found %zd, " "m_flags 0x%x m_len %d\n", space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len); ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 0, -1); m_freem(m0); /* XXX stat */ return EIO; } M_PREPEND(m0, space, M_NOWAIT); } tr = mtod(m0, struct malo_txrec *); if (wh != (struct ieee80211_frame *) &tr->wh) ovbcopy(wh, &tr->wh, hdrlen); /* * Note: the "firmware length" is actually the length of the fully * formed "802.11 payload". That is, it's everything except for * the 802.11 header. In particular this includes all crypto * material including the MIC! */ tr->fwlen = htole16(pktlen - hdrlen); /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = malo_tx_dmasetup(sc, bf, m0); if (error != 0) return error; bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ tr = mtod(m0, struct malo_txrec *); wh = (struct ieee80211_frame *)&tr->wh; /* * Formulate tx descriptor. */ ds = bf->bf_desc; txq = bf->bf_txq; ds->qosctrl = qos; /* NB: already little-endian */ ds->pktptr = htole32(bf->bf_segs[0].ds_addr); ds->pktlen = htole16(bf->bf_segs[0].ds_len); /* NB: pPhysNext setup once, don't touch */ ds->datarate = IS_DATA_FRAME(wh) ? 1 : 0; ds->sap_pktinfo = 0; ds->format = 0; /* * Select transmit rate. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: sc->malo_stats.mst_tx_mgmt++; /* fall thru... 
*/ case IEEE80211_FC0_TYPE_CTL: ds->txpriority = 1; break; case IEEE80211_FC0_TYPE_DATA: ds->txpriority = txq->qnum; break; default: device_printf(sc->malo_dev, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); /* XXX statistic */ m_freem(m0); return EIO; } #ifdef MALO_DEBUG if (IFF_DUMPPKTS_XMIT(sc)) ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *)+sizeof(uint16_t), m0->m_len - sizeof(uint16_t), ds->datarate, -1); #endif MALO_TXQ_LOCK(txq); if (!IS_DATA_FRAME(wh)) ds->status |= htole32(1); ds->status |= htole32(MALO_TXD_STATUS_FW_OWNED); STAILQ_INSERT_TAIL(&txq->active, bf, bf_list); MALO_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->malo_timer = 5; MALO_TXQ_UNLOCK(txq); return 0; } static int malo_transmit(struct ieee80211com *ic, struct mbuf *m) { struct malo_softc *sc = ic->ic_softc; int error; MALO_LOCK(sc); if (!sc->malo_running) { MALO_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->malo_snd, m); if (error) { MALO_UNLOCK(sc); return (error); } malo_start(sc); MALO_UNLOCK(sc); return (0); } static void malo_start(struct malo_softc *sc) { struct ieee80211_node *ni; struct malo_txq *txq = &sc->malo_txq[0]; struct malo_txbuf *bf = NULL; struct mbuf *m; int nqueued = 0; MALO_LOCK_ASSERT(sc); if (!sc->malo_running || sc->malo_invalid) return; while ((m = mbufq_dequeue(&sc->malo_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; bf = malo_getbuf(sc, txq); if (bf == NULL) { mbufq_prepend(&sc->malo_snd, m); sc->malo_stats.mst_tx_qstop++; break; } /* * Pass the frame to the h/w for transmission. */ if (malo_tx_start(sc, ni, bf, m)) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); if (bf != NULL) { bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); MALO_TXQ_UNLOCK(txq); } ieee80211_free_node(ni); continue; } nqueued++; if (nqueued >= malo_txcoalesce) { /* * Poke the firmware to process queued frames; * see below about (lack of) locking. */ nqueued = 0; malo_hal_txstart(sc->malo_mh, 0/*XXX*/); } } if (nqueued) { /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * malo_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ malo_hal_txstart(sc->malo_mh, 0/*XXX*/); } } static void malo_watchdog(void *arg) { struct malo_softc *sc = arg; callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc); if (sc->malo_timer == 0 || --sc->malo_timer > 0) return; if (sc->malo_running && !sc->malo_invalid) { device_printf(sc->malo_dev, "watchdog timeout\n"); /* XXX no way to reset h/w. now */ counter_u64_add(sc->malo_ic.ic_oerrors, 1); sc->malo_stats.mst_watchdog++; } } static int malo_hal_reset(struct malo_softc *sc) { static int first = 0; struct ieee80211com *ic = &sc->malo_ic; struct malo_hal *mh = sc->malo_mh; if (first == 0) { /* * NB: when the device firstly is initialized, sometimes * firmware could override rx/tx dma registers so we re-set * these values once. 
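 *
 * An aside on malo_start() above: it batches up to malo_txcoalesce
 * frames per firmware kick and flushes any partial batch at the end.
 * A sketch of that idiom, with hypothetical ex_ helpers:
 */
#if 0	/* illustrative sketch only */
extern int ex_stage_one(void);	/* stage next frame; 0 while frames remain */
extern void ex_kick(void);	/* prod hw to fetch staged descriptors */

static void
ex_drain_and_kick(int coalesce)
{
	int nqueued = 0;

	while (ex_stage_one() == 0) {
		if (++nqueued >= coalesce) {
			ex_kick();
			nqueued = 0;
		}
	}
	if (nqueued > 0)
		ex_kick();	/* don't strand a partial batch */
}
#endif
/*
 * Re-push the DMA state on the first reset: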
*/ malo_hal_set_rxtxdma(sc); first = 1; } malo_hal_setantenna(mh, MHA_ANTENNATYPE_RX, sc->malo_rxantenna); malo_hal_setantenna(mh, MHA_ANTENNATYPE_TX, sc->malo_txantenna); malo_hal_setradio(mh, 1, MHP_AUTO_PREAMBLE); malo_chan_set(sc, ic->ic_curchan); /* XXX needs other stuffs? */ return 1; } static __inline struct mbuf * malo_getrxmbuf(struct malo_softc *sc, struct malo_rxbuf *bf) { struct mbuf *m; bus_addr_t paddr; int error; /* XXX don't need mbuf, just dma buffer */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) { sc->malo_stats.mst_rx_nombuf++; /* XXX */ return NULL; } error = bus_dmamap_load(sc->malo_dmat, bf->bf_dmamap, mtod(m, caddr_t), MJUMPAGESIZE, malo_load_cb, &paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->malo_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m); return NULL; } bf->bf_data = paddr; bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); return m; } static int malo_rxbuf_init(struct malo_softc *sc, struct malo_rxbuf *bf) { struct malo_rxdesc *ds; ds = bf->bf_desc; if (bf->bf_m == NULL) { bf->bf_m = malo_getrxmbuf(sc, bf); if (bf->bf_m == NULL) { /* mark descriptor to be skipped */ ds->rxcontrol = MALO_RXD_CTRL_OS_OWN; /* NB: don't need PREREAD */ MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE); return ENOMEM; } } /* * Setup descriptor. */ ds->qosctrl = 0; ds->snr = 0; ds->status = MALO_RXD_STATUS_IDLE; ds->channel = 0; ds->pktlen = htole16(MALO_RXSIZE); ds->nf = 0; ds->physbuffdata = htole32(bf->bf_data); /* NB: don't touch pPhysNext, set once */ ds->rxcontrol = MALO_RXD_CTRL_DRIVER_OWN; MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return 0; } /* * Setup the rx data structures. This should only be done once or we may get * out of sync with the firmware. */ static int malo_startrecv(struct malo_softc *sc) { struct malo_rxbuf *bf, *prev; struct malo_rxdesc *ds; if (sc->malo_recvsetup == 1) { malo_mode_init(sc); /* set filters, etc. */ return 0; } prev = NULL; STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) { int error = malo_rxbuf_init(sc, bf); if (error != 0) { DPRINTF(sc, MALO_DEBUG_RECV, "%s: malo_rxbuf_init failed %d\n", __func__, error); return error; } if (prev != NULL) { ds = prev->bf_desc; ds->physnext = htole32(bf->bf_daddr); } prev = bf; } if (prev != NULL) { ds = prev->bf_desc; ds->physnext = htole32(STAILQ_FIRST(&sc->malo_rxbuf)->bf_daddr); } sc->malo_recvsetup = 1; malo_mode_init(sc); /* set filters, etc. */ return 0; } static void malo_init_locked(struct malo_softc *sc) { struct malo_hal *mh = sc->malo_mh; int error; MALO_LOCK_ASSERT(sc); /* * Stop anything previously setup. This is safe whether this is * the first time through or not. */ malo_stop(sc); /* * Push state to the firmware. */ if (!malo_hal_reset(sc)) { device_printf(sc->malo_dev, "%s: unable to reset hardware\n", __func__); return; } /* * Setup recv (once); transmit is already good to go. */ error = malo_startrecv(sc); if (error != 0) { device_printf(sc->malo_dev, "%s: unable to start recv logic, error %d\n", __func__, error); return; } /* * Enable interrupts. 
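 *
 * First, an aside on malo_rxbuf_init() above: a descriptor is armed
 * for DMA only when it has a backing buffer; otherwise it stays
 * host-owned so the engine skips it. A sketch with hypothetical
 * ex_ names and ownership values:
 */
#if 0	/* illustrative sketch only */
#include <errno.h>

struct ex_rxdesc {
	void		*buf;
	unsigned char	own;	/* 0: host-owned, 1: engine-owned */
};

static int
ex_rxdesc_arm(struct ex_rxdesc *ds, void *buf)
{
	if (buf == NULL) {
		ds->own = 0;	/* leave host-owned; hw skips the slot */
		return (ENOMEM);
	}
	ds->buf = buf;
	ds->own = 1;		/* hand the slot to the engine */
	return (0);
}
#endif
/*
 * The mask below selects which firmware events raise interrupts.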
*/ sc->malo_imask = MALO_A2HRIC_BIT_RX_RDY | MALO_A2HRIC_BIT_TX_DONE | MALO_A2HRIC_BIT_OPC_DONE | MALO_A2HRIC_BIT_MAC_EVENT | MALO_A2HRIC_BIT_RX_PROBLEM | MALO_A2HRIC_BIT_ICV_ERROR | MALO_A2HRIC_BIT_RADAR_DETECT | MALO_A2HRIC_BIT_CHAN_SWITCH; sc->malo_running = 1; malo_hal_intrset(mh, sc->malo_imask); callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc); } static void malo_init(void *arg) { struct malo_softc *sc = (struct malo_softc *) arg; struct ieee80211com *ic = &sc->malo_ic; MALO_LOCK(sc); malo_init_locked(sc); MALO_UNLOCK(sc); if (sc->malo_running) ieee80211_start_all(ic); /* start all vap's */ } struct malo_copy_maddr_ctx { uint8_t macs[IEEE80211_ADDR_LEN * MALO_HAL_MCAST_MAX]; int nmc; }; static u_int malo_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int nmc) { struct malo_copy_maddr_ctx *ctx = arg; if (ctx->nmc == MALO_HAL_MCAST_MAX) return (0); IEEE80211_ADDR_COPY(ctx->macs + (ctx->nmc * IEEE80211_ADDR_LEN), LLADDR(sdl)); ctx->nmc++; return (1); } /* * Set the multicast filter contents into the hardware. */ static void malo_setmcastfilter(struct malo_softc *sc) { struct malo_copy_maddr_ctx ctx; struct ieee80211com *ic = &sc->malo_ic; struct ieee80211vap *vap; if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_allmulti > 0 || ic->ic_promisc > 0) goto all; ctx.nmc = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if_foreach_llmaddr(vap->iv_ifp, malo_copy_maddr, &ctx); malo_hal_setmcast(sc->malo_mh, ctx.nmc, ctx.macs); all: /* * XXX we don't know how to set the f/w for supporting * IFF_ALLMULTI | IFF_PROMISC cases */ return; } static int malo_mode_init(struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; struct malo_hal *mh = sc->malo_mh; malo_hal_setpromisc(mh, ic->ic_promisc > 0); malo_setmcastfilter(sc); return 0; } static void malo_tx_draintxq(struct malo_softc *sc, struct malo_txq *txq) { struct ieee80211_node *ni; struct malo_txbuf *bf; u_int ix __unused; /* * NB: this assumes output has been stopped and * we do not need to block malo_tx_tasklet */ for (ix = 0;; ix++) { MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MALO_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MALO_TXQ_UNLOCK(txq); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_RESET) { struct ieee80211com *ic = &sc->malo_ic; const struct malo_txrec *tr = mtod(bf->bf_m, const struct malo_txrec *); malo_printtxbuf(bf, txq->qnum, ix); ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh, bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1); } #endif /* MALO_DEBUG */ bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap); ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } m_freem(bf->bf_m); bf->bf_m = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); } } static void malo_stop(struct malo_softc *sc) { struct malo_hal *mh = sc->malo_mh; int i; DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid %u running %u\n", __func__, sc->malo_invalid, sc->malo_running); MALO_LOCK_ASSERT(sc); if (!sc->malo_running) return; /* * Shutdown the hardware and driver: * disable interrupts * turn off the radio * drain and release tx queues * * Note that some of this work is not possible if the hardware * is gone (invalid). */ sc->malo_running = 0; callout_stop(&sc->malo_watchdog_timer); sc->malo_timer = 0; /* disable interrupt. */ malo_hal_intrset(mh, 0); /* turn off the radio. */ malo_hal_setradio(mh, 0, MHP_AUTO_PREAMBLE); /* drain and release tx queues.
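 *
 * An aside on malo_copy_maddr() above: the multicast walk uses a
 * callback that accumulates link-level addresses into a flat array
 * sized for the firmware, returning 0 once the table is full. A
 * sketch, with assumed sizes and hypothetical ex_ names:
 */
#if 0	/* illustrative sketch only */
#include <string.h>

#define EX_ADDR_LEN	6
#define EX_MCAST_MAX	32	/* assumed table size */

struct ex_maddr_ctx {
	unsigned char	macs[EX_ADDR_LEN * EX_MCAST_MAX];
	int		nmc;
};

/* Returns how many addresses this call accepted (0 or 1). */
static unsigned int
ex_copy_maddr(struct ex_maddr_ctx *ctx, const unsigned char *lladdr)
{
	if (ctx->nmc == EX_MCAST_MAX)
		return (0);	/* table full; ignore the rest */
	memcpy(ctx->macs + ctx->nmc * EX_ADDR_LEN, lladdr, EX_ADDR_LEN);
	ctx->nmc++;
	return (1);
}
#endif
/*
 * Any frames still on the active lists are reclaimed below.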
*/ for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_tx_draintxq(sc, &sc->malo_txq[i]); } static void malo_parent(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; int startall = 0; MALO_LOCK(sc); if (ic->ic_nrunning > 0) { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ if (!sc->malo_running && !sc->malo_invalid) { malo_init(sc); startall = 1; } /* * To avoid rescanning another access point, * do not call malo_init() here. Instead, * only reflect promisc mode settings. */ malo_mode_init(sc); } else if (sc->malo_running) malo_stop(sc); MALO_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. */ static void malo_updateslot(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; struct malo_hal *mh = sc->malo_mh; int error; /* NB: can be called early; suppress needless cmds */ if (!sc->malo_running) return; DPRINTF(sc, MALO_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x %s slot, (ic_flags 0x%x)\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", ic->ic_flags); if (ic->ic_flags & IEEE80211_F_SHSLOT) error = malo_hal_set_slot(mh, 1); else error = malo_hal_set_slot(mh, 0); if (error != 0) device_printf(sc->malo_dev, "setting %s slot failed\n", ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long"); } static int malo_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct malo_softc *sc = ic->ic_softc; struct malo_hal *mh = sc->malo_mh; int error; DPRINTF(sc, MALO_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); /* * Invoke the net80211 layer first so iv_bss is setup. */ error = MALO_VAP(vap)->malo_newstate(vap, nstate, arg); if (error != 0) return error; if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; enum ieee80211_phymode mode = ieee80211_chan2mode(ni->ni_chan); const struct ieee80211_txparam *tp = &vap->iv_txparms[mode]; DPRINTF(sc, MALO_DEBUG_STATE, "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d associd 0x%x mode %d rate %d\n", - vap->iv_ifp->if_xname, __func__, vap->iv_flags, + if_name(vap->iv_ifp), __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan), ni->ni_associd, mode, tp->ucastrate); malo_hal_setradio(mh, 1, (ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? MHP_SHORT_PREAMBLE : MHP_LONG_PREAMBLE); malo_hal_setassocid(sc->malo_mh, ni->ni_bssid, ni->ni_associd); malo_hal_set_rate(mh, mode, tp->ucastrate == IEEE80211_FIXED_RATE_NONE ? 0 : malo_fix2rate(tp->ucastrate)); } return 0; } static int malo_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct malo_softc *sc = ic->ic_softc; struct malo_txbuf *bf; struct malo_txq *txq; if (!sc->malo_running || sc->malo_invalid) { m_freem(m); return ENETDOWN; } /* * Grab a TX buffer and associated resources. 
Note that we depend * on the classification by the 802.11 layer to get to the right h/w * queue. Management frames must ALWAYS go on queue 1 but we * cannot just force that here because we may receive non-mgt frames. */ txq = &sc->malo_txq[0]; bf = malo_getbuf(sc, txq); if (bf == NULL) { m_freem(m); return ENOBUFS; } /* * Pass the frame to the h/w for transmission. */ if (malo_tx_start(sc, ni, bf, m) != 0) { bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); return EIO; /* XXX */ } /* * NB: We don't need to lock against tx done because this just * prods the firmware to check the transmit descriptors. The firmware * will also start fetching descriptors by itself if it notices * new ones are present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing it's ok. * Delivering the kick here rather than in malo_tx_start is * an optimization to avoid poking the firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ malo_hal_txstart(sc->malo_mh, 0/*XXX*/); return 0; } static void malo_sysctlattach(struct malo_softc *sc) { #ifdef MALO_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->malo_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->malo_dev); sc->malo_debug = malo_debug; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->malo_debug, 0, "control debugging printfs"); #endif } static void malo_announce(struct malo_softc *sc) { device_printf(sc->malo_dev, "versions [hw %d fw %d.%d.%d.%d] (regioncode %d)\n", sc->malo_hwspecs.hwversion, (sc->malo_hwspecs.fw_releasenum >> 24) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 16) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 8) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 0) & 0xff, sc->malo_hwspecs.regioncode); if (bootverbose || malo_rxbuf != MALO_RXBUF) device_printf(sc->malo_dev, "using %u rx buffers\n", malo_rxbuf); if (bootverbose || malo_txbuf != MALO_TXBUF) device_printf(sc->malo_dev, "using %u tx buffers\n", malo_txbuf); } /* * Convert net80211 channel to a HAL channel. */ static void malo_mapchan(struct malo_hal_channel *hc, const struct ieee80211_channel *chan) { hc->channel = chan->ic_ieee; *(uint32_t *)&hc->flags = 0; if (IEEE80211_IS_CHAN_2GHZ(chan)) hc->flags.freqband = MALO_FREQ_BAND_2DOT4GHZ; } /* * Set/change channels. If the channel is really being changed, * it's done by resetting the chip. To accomplish this we must * first cleanup any pending DMA, then restart stuff after a la * malo_init. */ static int malo_chan_set(struct malo_softc *sc, struct ieee80211_channel *chan) { struct malo_hal *mh = sc->malo_mh; struct malo_hal_channel hchan; DPRINTF(sc, MALO_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n", __func__, chan->ic_freq, chan->ic_flags); /* * Convert to a HAL channel description with the flags constrained * to reflect the current operating mode. */ malo_mapchan(&hchan, chan); malo_hal_intrset(mh, 0); /* disable interrupts */ malo_hal_setchannel(mh, &hchan); malo_hal_settxpower(mh, &hchan); /* * Update internal state.
*/ sc->malo_tx_th.wt_chan_freq = htole16(chan->ic_freq); sc->malo_rx_th.wr_chan_freq = htole16(chan->ic_freq); if (IEEE80211_IS_CHAN_ANYG(chan)) { sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G); sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G); } else { sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B); sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B); } sc->malo_curchan = hchan; malo_hal_intrset(mh, sc->malo_imask); return 0; } static void malo_scan_start(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__); } static void malo_scan_end(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__); } static void malo_set_channel(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; (void) malo_chan_set(sc, ic->ic_curchan); } static void malo_rx_proc(void *arg, int npending) { struct epoch_tracker et; struct malo_softc *sc = arg; struct ieee80211com *ic = &sc->malo_ic; struct malo_rxbuf *bf; struct malo_rxdesc *ds; struct mbuf *m, *mnew; struct ieee80211_qosframe *wh; struct ieee80211_node *ni; int off, len, hdrlen, pktlen, rssi, ntodo; uint8_t *data, status; uint32_t readptr, writeptr; DPRINTF(sc, MALO_DEBUG_RX_PROC, "%s: pending %u rdptr(0x%x) 0x%x wrptr(0x%x) 0x%x\n", __func__, npending, sc->malo_hwspecs.rxdesc_read, malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read), sc->malo_hwspecs.rxdesc_write, malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write)); readptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read); writeptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write); if (readptr == writeptr) return; bf = sc->malo_rxnext; for (ntodo = malo_rxquota; ntodo > 0 && readptr != writeptr; ntodo--) { if (bf == NULL) { bf = STAILQ_FIRST(&sc->malo_rxbuf); break; } ds = bf->bf_desc; if (bf->bf_m == NULL) { /* * If data allocation failed previously there * will be no buffer; try again to re-populate it. * Note the firmware will not advance to the next * descriptor with a dma buffer so we must mimic * this or we'll get out of sync. */ DPRINTF(sc, MALO_DEBUG_ANY, "%s: rx buf w/o dma memory\n", __func__); (void)malo_rxbuf_init(sc, bf); break; } MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->rxcontrol != MALO_RXD_CTRL_DMA_OWN) break; readptr = le32toh(ds->physnext); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_RECV_DESC) malo_printrxbuf(bf, 0); #endif status = ds->status; if (status & MALO_RXD_STATUS_DECRYPT_ERR_MASK) { counter_u64_add(ic->ic_ierrors, 1); goto rx_next; } /* * Sync the data buffer. */ len = le16toh(ds->pktlen); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); /* * The 802.11 header is provided all or in part at the front; * use it to calculate the true size of the header that we'll * construct below. We use this to figure out where to copy * payload prior to constructing the header. */ m = bf->bf_m; data = mtod(m, uint8_t *); hdrlen = ieee80211_anyhdrsize(data + sizeof(uint16_t)); off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4); /* * Calculate RSSI. XXX wrong */ rssi = 2 * ((int) ds->snr - ds->nf); /* NB: .5 dBm */ if (rssi > 100) rssi = 100; pktlen = hdrlen + (len - off); /* * NB: we know our frame is at least as large as * IEEE80211_MIN_LEN because there is a 4-address frame at * the front. Hence there's no need to vet the packet length. * If the frame in fact is too small it should be discarded * at the net80211 layer. 
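 *
 * A sketch of the offset arithmetic above: the chip deposits a 2-byte
 * firmware length plus a fixed 4-address header ahead of the payload,
 * so the reconstructed frame starts (off - hdrlen) into the buffer
 * and is hdrlen + (len - off) bytes long. EX_ADDR4_LEN assumes the
 * usual 30-byte 4-address header; ex_rx_pktlen is hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>
#include <stdint.h>

#define EX_FWLEN	sizeof(uint16_t)
#define EX_ADDR4_LEN	30

static size_t
ex_rx_pktlen(size_t len, size_t hdrlen, size_t *shiftp)
{
	size_t off = EX_FWLEN + EX_ADDR4_LEN;

	*shiftp = off - hdrlen;		/* how far m_data advances */
	return (hdrlen + (len - off));	/* final packet length */
}
#endif
/*
 * A replacement buffer is allocated before the frame is passed up.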
*/ /* XXX don't need mbuf, just dma buffer */ mnew = malo_getrxmbuf(sc, bf); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto rx_next; } /* * Attach the dma buffer to the mbuf; malo_rxbuf_init will * re-setup the rx descriptor using the replacement dma * buffer we just installed above. */ bf->bf_m = mnew; m->m_data += off - hdrlen; m->m_pkthdr.len = m->m_len = pktlen; /* * Piece 802.11 header together. */ wh = mtod(m, struct ieee80211_qosframe *); /* NB: don't need to do this sometimes but ... */ /* XXX special case so we can memcpy after m_devget? */ ovbcopy(data + sizeof(uint16_t), wh, hdrlen); if (IEEE80211_QOS_HAS_SEQ(wh)) *(uint16_t *)ieee80211_getqos(wh) = ds->qosctrl; if (ieee80211_radiotap_active(ic)) { sc->malo_rx_th.wr_flags = 0; sc->malo_rx_th.wr_rate = ds->rate; sc->malo_rx_th.wr_antsignal = rssi; sc->malo_rx_th.wr_antnoise = ds->nf; } #ifdef MALO_DEBUG if (IFF_DUMPPKTS_RECV(sc, wh)) { ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, ds->rate, rssi); } #endif /* dispatch */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); NET_EPOCH_ENTER(et); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, ds->nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, ds->nf); NET_EPOCH_EXIT(et); rx_next: /* NB: ignore ENOMEM so we process more descriptors */ (void) malo_rxbuf_init(sc, bf); bf = STAILQ_NEXT(bf, bf_list); } malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read, readptr); sc->malo_rxnext = bf; if (mbufq_first(&sc->malo_snd) != NULL) malo_start(sc); } /* * Reclaim all tx queue resources. */ static void malo_tx_cleanup(struct malo_softc *sc) { int i; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_tx_cleanupq(sc, &sc->malo_txq[i]); } int malo_detach(struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; malo_stop(sc); if (sc->malo_tq != NULL) { taskqueue_drain(sc->malo_tq, &sc->malo_rxtask); taskqueue_drain(sc->malo_tq, &sc->malo_txtask); taskqueue_free(sc->malo_tq); sc->malo_tq = NULL; } /* * NB: the order of these is important: * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... */ ieee80211_ifdetach(ic); callout_drain(&sc->malo_watchdog_timer); malo_dma_cleanup(sc); malo_tx_cleanup(sc); malo_hal_detach(sc->malo_mh); mbufq_drain(&sc->malo_snd); MALO_LOCK_DESTROY(sc); return 0; } void malo_shutdown(struct malo_softc *sc) { malo_stop(sc); } void malo_suspend(struct malo_softc *sc) { malo_stop(sc); } void malo_resume(struct malo_softc *sc) { if (sc->malo_ic.ic_nrunning > 0) malo_init(sc); } diff --git a/sys/dev/nfe/if_nfevar.h b/sys/dev/nfe/if_nfevar.h index c2e34f1687f7..3e814ce01404 100644 --- a/sys/dev/nfe/if_nfevar.h +++ b/sys/dev/nfe/if_nfevar.h @@ -1,163 +1,163 @@ /* $OpenBSD: if_nfevar.h,v 1.11 2006/02/19 13:57:02 damien Exp $ */ /*- * Copyright (c) 2005 Jonathan Gray * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $FreeBSD$ */ struct nfe_tx_data { bus_dmamap_t tx_data_map; struct mbuf *m; }; struct nfe_tx_ring { bus_dma_tag_t tx_desc_tag; bus_dmamap_t tx_desc_map; bus_addr_t physaddr; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; bus_dma_tag_t tx_data_tag; struct nfe_tx_data data[NFE_TX_RING_COUNT]; int queued; int cur; int next; }; struct nfe_rx_data { bus_dmamap_t rx_data_map; bus_addr_t paddr; struct mbuf *m; }; struct nfe_rx_ring { bus_dma_tag_t rx_desc_tag; bus_dmamap_t rx_desc_map; bus_addr_t physaddr; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; bus_dma_tag_t rx_data_tag; bus_dmamap_t rx_spare_map; struct nfe_rx_data data[NFE_RX_RING_COUNT]; int cur; int next; }; struct nfe_jrx_ring { bus_dma_tag_t jrx_desc_tag; bus_dmamap_t jrx_desc_map; bus_dma_tag_t jrx_jumbo_tag; bus_dmamap_t jrx_jumbo_map; bus_addr_t jphysaddr; struct nfe_desc32 *jdesc32; struct nfe_desc64 *jdesc64; bus_dma_tag_t jrx_data_tag; bus_dmamap_t jrx_spare_map; struct nfe_rx_data jdata[NFE_JUMBO_RX_RING_COUNT]; int jcur; int jnext; }; struct nfe_hw_stats { uint64_t rx_octets; uint32_t rx_frame_errors; uint32_t rx_extra_bytes; uint32_t rx_late_cols; uint32_t rx_runts; uint32_t rx_jumbos; uint32_t rx_fifo_overuns; uint32_t rx_crc_errors; uint32_t rx_fae; uint32_t rx_len_errors; uint32_t rx_unicast; uint32_t rx_multicast; uint32_t rx_broadcast; uint32_t rx_pause; uint32_t rx_drops; uint64_t tx_octets; uint32_t tx_zero_rexmits; uint32_t tx_one_rexmits; uint32_t tx_multi_rexmits; uint32_t tx_late_cols; uint32_t tx_fifo_underuns; uint32_t tx_carrier_losts; uint32_t tx_excess_deferals; uint32_t tx_retry_errors; uint32_t tx_deferals; uint32_t tx_frames; uint32_t tx_pause; uint32_t tx_unicast; uint32_t tx_multicast; uint32_t tx_broadcast; }; struct nfe_softc { - struct ifnet *nfe_ifp; + if_t nfe_ifp; device_t nfe_dev; uint16_t nfe_devid; uint16_t nfe_revid; device_t nfe_miibus; struct mtx nfe_mtx; struct resource *nfe_res[1]; struct resource *nfe_msix_res; struct resource *nfe_msix_pba_res; struct resource *nfe_irq[NFE_MSI_MESSAGES]; void *nfe_intrhand[NFE_MSI_MESSAGES]; struct callout nfe_stat_ch; int nfe_watchdog_timer; bus_dma_tag_t nfe_parent_tag; int nfe_if_flags; uint32_t nfe_flags; #define NFE_JUMBO_SUP 0x0001 #define NFE_40BIT_ADDR 0x0002 #define NFE_HW_CSUM 0x0004 #define NFE_HW_VLAN 0x0008 #define NFE_PWR_MGMT 0x0010 #define NFE_CORRECT_MACADDR 0x0020 #define NFE_TX_FLOW_CTRL 0x0040 #define NFE_MIB_V1 0x0080 #define NFE_MIB_V2 0x0100 #define NFE_MIB_V3 0x0200 int nfe_jumbo_disable; uint32_t rxtxctl; uint8_t mii_phyaddr; uint8_t eaddr[ETHER_ADDR_LEN]; struct nfe_hw_stats nfe_stats; struct taskqueue *nfe_tq; struct task nfe_int_task; int nfe_link; int nfe_suspended; int nfe_framesize; int nfe_process_limit; int nfe_force_tx; uint32_t nfe_irq_status; uint32_t nfe_irq_mask; uint32_t nfe_intrs; uint32_t nfe_nointrs; uint32_t nfe_msi; uint32_t nfe_msix; struct nfe_tx_ring txq; struct nfe_rx_ring rxq; struct nfe_jrx_ring jrxq; }; struct nfe_type { uint16_t vid_id; uint16_t dev_id; char *name; }; diff --git a/sys/dev/otus/if_otus.c b/sys/dev/otus/if_otus.c index d0c8fc93ef92..76f71604567b 100644 --- a/sys/dev/otus/if_otus.c +++ b/sys/dev/otus/if_otus.c @@ -1,3318 +1,3318 @@ /* $OpenBSD: 
if_otus.c,v 1.49 2015/11/24 13:33:18 mpi Exp $ */ /*- * Copyright (c) 2009 Damien Bergamini * Copyright (c) 2015 Adrian Chadd * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Driver for Atheros AR9001U chipset. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #include #include #include "usbdevs.h" #define USB_DEBUG_VAR otus_debug #include #include "if_otusreg.h" static int otus_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, otus, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB otus"); SYSCTL_INT(_hw_usb_otus, OID_AUTO, debug, CTLFLAG_RWTUN, &otus_debug, 0, "Debug level"); #define OTUS_DEBUG_XMIT 0x00000001 #define OTUS_DEBUG_RECV 0x00000002 #define OTUS_DEBUG_TXDONE 0x00000004 #define OTUS_DEBUG_RXDONE 0x00000008 #define OTUS_DEBUG_CMD 0x00000010 #define OTUS_DEBUG_CMDDONE 0x00000020 #define OTUS_DEBUG_RESET 0x00000040 #define OTUS_DEBUG_STATE 0x00000080 #define OTUS_DEBUG_CMDNOTIFY 0x00000100 #define OTUS_DEBUG_REGIO 0x00000200 #define OTUS_DEBUG_IRQ 0x00000400 #define OTUS_DEBUG_TXCOMP 0x00000800 #define OTUS_DEBUG_RX_BUFFER 0x00001000 #define OTUS_DEBUG_ANY 0xffffffff #define OTUS_DPRINTF(sc, dm, ...) 
\ do { \ if ((dm == OTUS_DEBUG_ANY) || (dm & otus_debug)) \ device_printf(sc->sc_dev, __VA_ARGS__); \ } while (0) #define OTUS_DEV(v, p) { USB_VPI(v, p, 0) } static const STRUCT_USB_HOST_ID otus_devs[] = { OTUS_DEV(USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_WN7512), OTUS_DEV(USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_3CRUSBN275), OTUS_DEV(USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_TG121N), OTUS_DEV(USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_AR9170), OTUS_DEV(USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_WN612), OTUS_DEV(USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_WN821NV2), OTUS_DEV(USB_VENDOR_AVM, USB_PRODUCT_AVM_FRITZWLAN), OTUS_DEV(USB_VENDOR_CACE, USB_PRODUCT_CACE_AIRPCAPNX), OTUS_DEV(USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWA130D1), OTUS_DEV(USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWA160A1), OTUS_DEV(USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWA160A2), OTUS_DEV(USB_VENDOR_IODATA, USB_PRODUCT_IODATA_WNGDNUS2), OTUS_DEV(USB_VENDOR_NEC, USB_PRODUCT_NEC_WL300NUG), OTUS_DEV(USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_WN111V2), OTUS_DEV(USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_WNA1000), OTUS_DEV(USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_WNDA3100), OTUS_DEV(USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GW_US300), OTUS_DEV(USB_VENDOR_WISTRONNEWEB, USB_PRODUCT_WISTRONNEWEB_O8494), OTUS_DEV(USB_VENDOR_WISTRONNEWEB, USB_PRODUCT_WISTRONNEWEB_WNC0600), OTUS_DEV(USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_UB81), OTUS_DEV(USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_UB82), OTUS_DEV(USB_VENDOR_ZYDAS, USB_PRODUCT_ZYDAS_ZD1221), OTUS_DEV(USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_NWD271N), }; static device_probe_t otus_match; static device_attach_t otus_attach; static device_detach_t otus_detach; static int otus_attachhook(struct otus_softc *); static void otus_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); int otus_load_firmware(struct otus_softc *, const char *, uint32_t); int otus_open_pipes(struct otus_softc *); void otus_close_pipes(struct otus_softc *); static int otus_alloc_tx_cmd_list(struct otus_softc *); static void otus_free_tx_cmd_list(struct otus_softc *); static int otus_alloc_rx_list(struct otus_softc *); static void otus_free_rx_list(struct otus_softc *); static int otus_alloc_tx_list(struct otus_softc *); static void otus_free_tx_list(struct otus_softc *); static void otus_free_list(struct otus_softc *, struct otus_data [], int); static struct otus_data *_otus_getbuf(struct otus_softc *); static struct otus_data *otus_getbuf(struct otus_softc *); static void otus_freebuf(struct otus_softc *, struct otus_data *); static struct otus_tx_cmd *_otus_get_txcmd(struct otus_softc *); static struct otus_tx_cmd *otus_get_txcmd(struct otus_softc *); static void otus_free_txcmd(struct otus_softc *, struct otus_tx_cmd *); void otus_next_scan(void *, int); static void otus_tx_task(void *, int pending); void otus_do_async(struct otus_softc *, void (*)(struct otus_softc *, void *), void *, int); int otus_newstate(struct ieee80211vap *, enum ieee80211_state, int); int otus_cmd(struct otus_softc *, uint8_t, const void *, int, void *, int); void otus_write(struct otus_softc *, uint32_t, uint32_t); int otus_write_barrier(struct otus_softc *); static struct ieee80211_node *otus_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]); int otus_read_eeprom(struct otus_softc *); void otus_newassoc(struct ieee80211_node *, int); void otus_cmd_rxeof(struct otus_softc *, uint8_t *, int); void otus_sub_rxeof(struct otus_softc *, uint8_t *, int, struct mbufq *); static int otus_tx(struct otus_softc *, struct ieee80211_node *, 
struct mbuf *, struct otus_data *, const struct ieee80211_bpf_params *); -int otus_ioctl(struct ifnet *, u_long, caddr_t); +int otus_ioctl(if_t, u_long, caddr_t); int otus_set_multi(struct otus_softc *); static int otus_updateedca(struct ieee80211com *); static void otus_updateedca_locked(struct otus_softc *); static void otus_updateslot(struct otus_softc *); static void otus_set_operating_mode(struct otus_softc *sc); static void otus_set_rx_filter(struct otus_softc *sc); int otus_init_mac(struct otus_softc *); uint32_t otus_phy_get_def(struct otus_softc *, uint32_t); int otus_set_board_values(struct otus_softc *, struct ieee80211_channel *); int otus_program_phy(struct otus_softc *, struct ieee80211_channel *); int otus_set_rf_bank4(struct otus_softc *, struct ieee80211_channel *); void otus_get_delta_slope(uint32_t, uint32_t *, uint32_t *); static int otus_set_chan(struct otus_softc *, struct ieee80211_channel *, int); int otus_set_key(struct ieee80211com *, struct ieee80211_node *, struct ieee80211_key *); void otus_set_key_cb(struct otus_softc *, void *); void otus_delete_key(struct ieee80211com *, struct ieee80211_node *, struct ieee80211_key *); void otus_delete_key_cb(struct otus_softc *, void *); void otus_calibrate_to(void *, int); int otus_set_bssid(struct otus_softc *, const uint8_t *); int otus_set_macaddr(struct otus_softc *, const uint8_t *); void otus_led_newstate_type1(struct otus_softc *); void otus_led_newstate_type2(struct otus_softc *); void otus_led_newstate_type3(struct otus_softc *); int otus_init(struct otus_softc *sc); void otus_stop(struct otus_softc *sc); static device_method_t otus_methods[] = { DEVMETHOD(device_probe, otus_match), DEVMETHOD(device_attach, otus_attach), DEVMETHOD(device_detach, otus_detach), DEVMETHOD_END }; static driver_t otus_driver = { .name = "otus", .methods = otus_methods, .size = sizeof(struct otus_softc) }; DRIVER_MODULE(otus, uhub, otus_driver, NULL, NULL); MODULE_DEPEND(otus, wlan, 1, 1, 1); MODULE_DEPEND(otus, usb, 1, 1, 1); MODULE_DEPEND(otus, firmware, 1, 1, 1); MODULE_VERSION(otus, 1); static usb_callback_t otus_bulk_tx_callback; static usb_callback_t otus_bulk_rx_callback; static usb_callback_t otus_bulk_irq_callback; static usb_callback_t otus_bulk_cmd_callback; static const struct usb_config otus_config[OTUS_N_XFER] = { [OTUS_BULK_TX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = 0x200, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = otus_bulk_tx_callback, .timeout = 5000, /* ms */ }, [OTUS_BULK_RX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = OTUS_RXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1,.short_xfer_ok = 1,}, .callback = otus_bulk_rx_callback, }, [OTUS_BULK_IRQ] = { .type = UE_INTERRUPT, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = OTUS_MAX_CTRLSZ, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = otus_bulk_irq_callback, }, [OTUS_BULK_CMD] = { .type = UE_INTERRUPT, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = OTUS_MAX_CTRLSZ, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = otus_bulk_cmd_callback, .timeout = 5000, /* ms */ }, }; static int otus_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST || uaa->info.bIfaceIndex != 0 || uaa->info.bConfigIndex != 0) return (ENXIO); return (usbd_lookup_id_by_uaa(otus_devs, sizeof(otus_devs), uaa)); } static int otus_attach(device_t self) { struct usb_attach_arg *uaa = 
device_get_ivars(self); struct otus_softc *sc = device_get_softc(self); int error; uint8_t iface_index; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); TIMEOUT_TASK_INIT(taskqueue_thread, &sc->scan_to, 0, otus_next_scan, sc); TIMEOUT_TASK_INIT(taskqueue_thread, &sc->calib_to, 0, otus_calibrate_to, sc); TASK_INIT(&sc->tx_task, 0, otus_tx_task, sc); mbufq_init(&sc->sc_snd, ifqmaxlen); iface_index = 0; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, otus_config, OTUS_N_XFER, sc, &sc->sc_mtx); if (error) { device_printf(sc->sc_dev, "could not allocate USB transfers, err=%s\n", usbd_errstr(error)); goto fail_usb; } if ((error = otus_open_pipes(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not open pipes\n", __func__); goto fail; } /* XXX check return status; fail out if appropriate */ if (otus_attachhook(sc) != 0) goto fail; return (0); fail: otus_close_pipes(sc); fail_usb: mtx_destroy(&sc->sc_mtx); return (ENXIO); } static int otus_detach(device_t self) { struct otus_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; otus_stop(sc); usbd_transfer_unsetup(sc->sc_xfer, OTUS_N_XFER); taskqueue_drain_timeout(taskqueue_thread, &sc->scan_to); taskqueue_drain_timeout(taskqueue_thread, &sc->calib_to); taskqueue_drain(taskqueue_thread, &sc->tx_task); otus_close_pipes(sc); #if 0 /* Wait for all queued asynchronous commands to complete. */ usb_rem_wait_task(sc->sc_udev, &sc->sc_task); usbd_ref_wait(sc->sc_udev); #endif ieee80211_ifdetach(ic); mtx_destroy(&sc->sc_mtx); return 0; } static void otus_delay_ms(struct otus_softc *sc, int ms) { DELAY(1000 * ms); } static struct ieee80211vap * otus_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct otus_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); uvp = malloc(sizeof(struct otus_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &uvp->vap; if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = otus_newstate; vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return (vap); } static void otus_vap_delete(struct ieee80211vap *vap) { struct otus_vap *uvp = OTUS_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static void otus_parent(struct ieee80211com *ic) { struct otus_softc *sc = ic->ic_softc; int startall = 0; if (ic->ic_nrunning > 0) { if (!sc->sc_running) { otus_init(sc); startall = 1; } else { (void) otus_set_multi(sc); } } else if (sc->sc_running) otus_stop(sc); if (startall) ieee80211_start_all(ic); } static void otus_drain_mbufq(struct otus_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; OTUS_LOCK_ASSERT(sc); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; ieee80211_free_node(ni); m_freem(m); } } static void otus_tx_start(struct otus_softc *sc) { taskqueue_enqueue(taskqueue_thread, &sc->tx_task); } 
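/*
 * otus_transmit() below only enqueues the frame under the lock and
 * kicks a task; the actual send happens when the task drains the
 * queue. A minimal sketch of that handoff, with hypothetical ex_
 * helpers standing in for the mbufq/taskqueue pairing:
 */
#if 0	/* illustrative sketch only */
#include <errno.h>

extern int ex_enqueue(void *frame);	/* bounded queue; 0 on success */
extern void ex_schedule_drain(void);	/* e.g. a taskqueue enqueue */

static int
ex_transmit(void *frame, int running)
{
	int error;

	if (!running)
		return (ENXIO);		/* interface is down */
	error = ex_enqueue(frame);
	if (error != 0)
		return (error);		/* queue full; caller disposes */
	ex_schedule_drain();		/* send happens in task context */
	return (0);
}
#endif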
static int otus_transmit(struct ieee80211com *ic, struct mbuf *m) { struct otus_softc *sc = ic->ic_softc; int error; OTUS_LOCK(sc); if (! sc->sc_running) { OTUS_UNLOCK(sc); return (ENXIO); } /* XXX TODO: handle fragments */ error = mbufq_enqueue(&sc->sc_snd, m); if (error) { OTUS_DPRINTF(sc, OTUS_DEBUG_XMIT, "%s: mbufq_enqueue failed: %d\n", __func__, error); OTUS_UNLOCK(sc); return (error); } OTUS_UNLOCK(sc); /* Kick TX */ otus_tx_start(sc); return (0); } static void _otus_start(struct otus_softc *sc) { struct ieee80211_node *ni; struct otus_data *bf; struct mbuf *m; OTUS_LOCK_ASSERT(sc); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { bf = otus_getbuf(sc); if (bf == NULL) { OTUS_DPRINTF(sc, OTUS_DEBUG_XMIT, "%s: failed to get buffer\n", __func__); mbufq_prepend(&sc->sc_snd, m); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; if (otus_tx(sc, ni, m, bf, NULL) != 0) { OTUS_DPRINTF(sc, OTUS_DEBUG_XMIT, "%s: failed to transmit\n", __func__); if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); otus_freebuf(sc, bf); ieee80211_free_node(ni); m_freem(m); break; } } } static void otus_tx_task(void *arg, int pending) { struct otus_softc *sc = arg; OTUS_LOCK(sc); _otus_start(sc); OTUS_UNLOCK(sc); } static int otus_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic= ni->ni_ic; struct otus_softc *sc = ic->ic_softc; struct otus_data *bf = NULL; int error = 0; /* Don't transmit if we're not running */ OTUS_LOCK(sc); if (! sc->sc_running) { error = ENETDOWN; goto error; } bf = otus_getbuf(sc); if (bf == NULL) { error = ENOBUFS; goto error; } if (otus_tx(sc, ni, m, bf, params) != 0) { error = EIO; goto error; } OTUS_UNLOCK(sc); return (0); error: if (bf) otus_freebuf(sc, bf); OTUS_UNLOCK(sc); m_freem(m); return (error); } static void otus_update_chw(struct ieee80211com *ic) { printf("%s: TODO\n", __func__); } static void otus_set_channel(struct ieee80211com *ic) { struct otus_softc *sc = ic->ic_softc; OTUS_DPRINTF(sc, OTUS_DEBUG_RESET, "%s: set channel: %d\n", __func__, ic->ic_curchan->ic_freq); OTUS_LOCK(sc); (void) otus_set_chan(sc, ic->ic_curchan, 0); OTUS_UNLOCK(sc); } static int otus_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { /* For now, no A-MPDU TX support in the driver */ return (0); } static void otus_scan_start(struct ieee80211com *ic) { // printf("%s: TODO\n", __func__); } static void otus_scan_end(struct ieee80211com *ic) { // printf("%s: TODO\n", __func__); } static void otus_update_mcast(struct ieee80211com *ic) { struct otus_softc *sc = ic->ic_softc; (void) otus_set_multi(sc); } static int otus_attachhook(struct otus_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; usb_device_request_t req; uint32_t in, out; int error; /* Not locked */ error = otus_load_firmware(sc, "otusfw_init", AR_FW_INIT_ADDR); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load %s firmware\n", __func__, "init"); return (ENXIO); } /* XXX not locked? */ otus_delay_ms(sc, 1000); /* Not locked */ error = otus_load_firmware(sc, "otusfw_main", AR_FW_MAIN_ADDR); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load %s firmware\n", __func__, "main"); return (ENXIO); } OTUS_LOCK(sc); /* Tell device that firmware transfer is complete. 
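 *
 * An aside on _otus_start() above: when no tx buffer is available the
 * frame goes back to the head of the queue and the drain stops, so
 * ordering is preserved until buffers are reclaimed. A sketch with
 * hypothetical ex_ helpers:
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>

extern void *ex_dequeue(void);		/* NULL when the queue is empty */
extern void ex_prepend(void *frame);	/* push back at the head */
extern void *ex_getbuf(void);		/* NULL when out of tx buffers */
extern int ex_send(void *frame, void *buf);

static void
ex_start(void)
{
	void *frame, *buf;

	while ((frame = ex_dequeue()) != NULL) {
		if ((buf = ex_getbuf()) == NULL) {
			ex_prepend(frame);	/* retry later, in order */
			break;
		}
		(void)ex_send(frame, buf);
	}
}
#endif
/*
 * The zero-length vendor request below delivers that notification.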
*/ req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = AR_FW_DOWNLOAD_COMPLETE; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, 0); if (usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, NULL, 0, NULL, 250) != 0) { OTUS_UNLOCK(sc); device_printf(sc->sc_dev, "%s: firmware initialization failed\n", __func__); return (ENXIO); } /* Send an ECHO command to check that everything is settled. */ in = 0xbadc0ffe; if (otus_cmd(sc, AR_CMD_ECHO, &in, sizeof in, &out, sizeof(out)) != 0) { OTUS_UNLOCK(sc); device_printf(sc->sc_dev, "%s: echo command failed\n", __func__); return (ENXIO); } if (in != out) { OTUS_UNLOCK(sc); device_printf(sc->sc_dev, "%s: echo reply mismatch: 0x%08x!=0x%08x\n", __func__, in, out); return (ENXIO); } /* Read entire EEPROM. */ if (otus_read_eeprom(sc) != 0) { OTUS_UNLOCK(sc); device_printf(sc->sc_dev, "%s: could not read EEPROM\n", __func__); return (ENXIO); } OTUS_UNLOCK(sc); sc->txmask = sc->eeprom.baseEepHeader.txMask; sc->rxmask = sc->eeprom.baseEepHeader.rxMask; sc->capflags = sc->eeprom.baseEepHeader.opCapFlags; IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->eeprom.baseEepHeader.macAddr); sc->sc_led_newstate = otus_led_newstate_type3; /* XXX */ if (sc->txmask == 0x5) ic->ic_txstream = 2; else ic->ic_txstream = 1; if (sc->rxmask == 0x5) ic->ic_rxstream = 2; else ic->ic_rxstream = 1; device_printf(sc->sc_dev, "MAC/BBP AR9170, RF AR%X, MIMO %dT%dR, address %s\n", (sc->capflags & AR5416_OPFLAGS_11A) ? 0x9104 : ((sc->txmask == 0x5) ? 0x9102 : 0x9101), (sc->txmask == 0x5) ? 2 : 1, (sc->rxmask == 0x5) ? 2 : 1, ether_sprintf(ic->ic_macaddr)); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* Set device capabilities. */ ic->ic_caps = IEEE80211_C_STA | /* station mode */ #if 0 IEEE80211_C_BGSCAN | /* Background scan. */ #endif IEEE80211_C_SHPREAMBLE | /* Short preamble supported. */ IEEE80211_C_WME | /* WME/QoS */ IEEE80211_C_SHSLOT | /* Short slot time supported. */ IEEE80211_C_FF | /* Atheros fast-frames supported. */ IEEE80211_C_MONITOR | /* Enable monitor mode */ IEEE80211_C_SWAMSDUTX | /* Do software A-MSDU TX */ IEEE80211_C_WPA; /* WPA/RSN. 
*/ ic->ic_htcaps = IEEE80211_HTC_HT | #if 0 IEEE80211_HTC_AMPDU | #endif IEEE80211_HTC_AMSDU | IEEE80211_HTCAP_MAXAMSDU_3839 | IEEE80211_HTCAP_SMPS_OFF; otus_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); ic->ic_raw_xmit = otus_raw_xmit; ic->ic_scan_start = otus_scan_start; ic->ic_scan_end = otus_scan_end; ic->ic_set_channel = otus_set_channel; ic->ic_getradiocaps = otus_getradiocaps; ic->ic_vap_create = otus_vap_create; ic->ic_vap_delete = otus_vap_delete; ic->ic_update_mcast = otus_update_mcast; ic->ic_update_promisc = otus_update_mcast; ic->ic_parent = otus_parent; ic->ic_transmit = otus_transmit; ic->ic_update_chw = otus_update_chw; ic->ic_ampdu_enable = otus_ampdu_enable; ic->ic_wme.wme_update = otus_updateedca; ic->ic_newassoc = otus_newassoc; ic->ic_node_alloc = otus_node_alloc; #ifdef notyet ic->ic_set_key = otus_set_key; ic->ic_delete_key = otus_delete_key; #endif ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), OTUS_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), OTUS_RX_RADIOTAP_PRESENT); return (0); } static void otus_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct otus_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; /* Set supported .11b and .11g rates. */ memset(bands, 0, sizeof(bands)); if (sc->eeprom.baseEepHeader.opCapFlags & AR5416_OPFLAGS_11G) { setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); setbit(bands, IEEE80211_MODE_11NG); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, ar_chans, 14, bands, 0); } if (sc->eeprom.baseEepHeader.opCapFlags & AR5416_OPFLAGS_11A) { setbit(bands, IEEE80211_MODE_11A); setbit(bands, IEEE80211_MODE_11NA); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, &ar_chans[14], nitems(ar_chans) - 14, bands, 0); } } int otus_load_firmware(struct otus_softc *sc, const char *name, uint32_t addr) { usb_device_request_t req; char *ptr; const struct firmware *fw; int mlen, error, size; error = 0; /* Read firmware image from the filesystem. 
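 * (The loop below then uploads it with vendor request AR_FW_DOWNLOAD
 * in chunks of at most 4096 bytes; wValue carries the destination
 * address pre-shifted right by 8 and advances by mlen >> 8 per chunk,
 * so addresses are apparently expressed in 256-byte units.)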
*/ if ((fw = firmware_get(name)) == NULL) { device_printf(sc->sc_dev, "%s: failed loadfirmware of file %s\n", __func__, name); return (ENXIO); } req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = AR_FW_DOWNLOAD; USETW(req.wIndex, 0); OTUS_LOCK(sc); /* XXX const */ ptr = __DECONST(char *, fw->data); size = fw->datasize; addr >>= 8; while (size > 0) { mlen = MIN(size, 4096); USETW(req.wValue, addr); USETW(req.wLength, mlen); if (usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, ptr, 0, NULL, 250) != 0) { error = EIO; break; } addr += mlen >> 8; ptr += mlen; size -= mlen; } OTUS_UNLOCK(sc); firmware_put(fw, FIRMWARE_UNLOAD); if (error != 0) device_printf(sc->sc_dev, "%s: %s: error=%d\n", __func__, name, error); return error; } int otus_open_pipes(struct otus_softc *sc) { #if 0 int isize, error; int i; #endif int error; OTUS_UNLOCK_ASSERT(sc); if ((error = otus_alloc_tx_cmd_list(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not allocate command xfer\n", __func__); goto fail; } if ((error = otus_alloc_tx_list(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not allocate Tx xfers\n", __func__); goto fail; } if ((error = otus_alloc_rx_list(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not allocate Rx xfers\n", __func__); goto fail; } /* Enable RX transfers; needed for initial firmware messages */ OTUS_LOCK(sc); usbd_transfer_start(sc->sc_xfer[OTUS_BULK_RX]); usbd_transfer_start(sc->sc_xfer[OTUS_BULK_IRQ]); OTUS_UNLOCK(sc); return 0; fail: otus_close_pipes(sc); return error; } void otus_close_pipes(struct otus_softc *sc) { OTUS_LOCK(sc); otus_free_tx_cmd_list(sc); otus_free_tx_list(sc); otus_free_rx_list(sc); OTUS_UNLOCK(sc); usbd_transfer_unsetup(sc->sc_xfer, OTUS_N_XFER); } static void otus_free_cmd_list(struct otus_softc *sc, struct otus_tx_cmd cmd[], int ndata) { int i; /* XXX TODO: someone has to have waken up waiters! */ for (i = 0; i < ndata; i++) { struct otus_tx_cmd *dp = &cmd[i]; if (dp->buf != NULL) { free(dp->buf, M_USBDEV); dp->buf = NULL; } } } static int otus_alloc_cmd_list(struct otus_softc *sc, struct otus_tx_cmd cmd[], int ndata, int maxsz) { int i, error; for (i = 0; i < ndata; i++) { struct otus_tx_cmd *dp = &cmd[i]; dp->buf = malloc(maxsz, M_USBDEV, M_NOWAIT | M_ZERO); dp->odata = NULL; if (dp->buf == NULL) { device_printf(sc->sc_dev, "could not allocate buffer\n"); error = ENOMEM; goto fail; } } return (0); fail: otus_free_cmd_list(sc, cmd, ndata); return (error); } static int otus_alloc_tx_cmd_list(struct otus_softc *sc) { int error, i; error = otus_alloc_cmd_list(sc, sc->sc_cmd, OTUS_CMD_LIST_COUNT, OTUS_MAX_TXCMDSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_cmd_active); STAILQ_INIT(&sc->sc_cmd_inactive); STAILQ_INIT(&sc->sc_cmd_pending); STAILQ_INIT(&sc->sc_cmd_waiting); for (i = 0; i < OTUS_CMD_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_cmd_inactive, &sc->sc_cmd[i], next_cmd); return (0); } static void otus_free_tx_cmd_list(struct otus_softc *sc) { /* * XXX TODO: something needs to wake up any pending/sleeping * waiters! 
*/ STAILQ_INIT(&sc->sc_cmd_active); STAILQ_INIT(&sc->sc_cmd_inactive); STAILQ_INIT(&sc->sc_cmd_pending); STAILQ_INIT(&sc->sc_cmd_waiting); otus_free_cmd_list(sc, sc->sc_cmd, OTUS_CMD_LIST_COUNT); } static int otus_alloc_list(struct otus_softc *sc, struct otus_data data[], int ndata, int maxsz) { int i, error; for (i = 0; i < ndata; i++) { struct otus_data *dp = &data[i]; dp->sc = sc; dp->m = NULL; dp->buf = malloc(maxsz, M_USBDEV, M_NOWAIT | M_ZERO); if (dp->buf == NULL) { device_printf(sc->sc_dev, "could not allocate buffer\n"); error = ENOMEM; goto fail; } dp->ni = NULL; } return (0); fail: otus_free_list(sc, data, ndata); return (error); } static int otus_alloc_rx_list(struct otus_softc *sc) { int error, i; error = otus_alloc_list(sc, sc->sc_rx, OTUS_RX_LIST_COUNT, OTUS_RXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < OTUS_RX_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next); return (0); } static int otus_alloc_tx_list(struct otus_softc *sc) { int error, i; error = otus_alloc_list(sc, sc->sc_tx, OTUS_TX_LIST_COUNT, OTUS_TXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_tx_inactive); for (i = 0; i != OTUS_N_XFER; i++) { STAILQ_INIT(&sc->sc_tx_active[i]); STAILQ_INIT(&sc->sc_tx_pending[i]); } for (i = 0; i < OTUS_TX_LIST_COUNT; i++) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next); } return (0); } static void otus_free_tx_list(struct otus_softc *sc) { int i; /* prevent further allocations from TX list(s) */ STAILQ_INIT(&sc->sc_tx_inactive); for (i = 0; i != OTUS_N_XFER; i++) { STAILQ_INIT(&sc->sc_tx_active[i]); STAILQ_INIT(&sc->sc_tx_pending[i]); } otus_free_list(sc, sc->sc_tx, OTUS_TX_LIST_COUNT); } static void otus_free_rx_list(struct otus_softc *sc) { /* prevent further allocations from RX list(s) */ STAILQ_INIT(&sc->sc_rx_inactive); STAILQ_INIT(&sc->sc_rx_active); otus_free_list(sc, sc->sc_rx, OTUS_RX_LIST_COUNT); } static void otus_free_list(struct otus_softc *sc, struct otus_data data[], int ndata) { int i; for (i = 0; i < ndata; i++) { struct otus_data *dp = &data[i]; if (dp->buf != NULL) { free(dp->buf, M_USBDEV); dp->buf = NULL; } if (dp->ni != NULL) { ieee80211_free_node(dp->ni); dp->ni = NULL; } } } static struct otus_data * _otus_getbuf(struct otus_softc *sc) { struct otus_data *bf; bf = STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); else bf = NULL; /* XXX bzero? 
*/ return (bf); } static struct otus_data * otus_getbuf(struct otus_softc *sc) { struct otus_data *bf; OTUS_LOCK_ASSERT(sc); bf = _otus_getbuf(sc); return (bf); } static void otus_freebuf(struct otus_softc *sc, struct otus_data *bf) { OTUS_LOCK_ASSERT(sc); STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, bf, next); } static struct otus_tx_cmd * _otus_get_txcmd(struct otus_softc *sc) { struct otus_tx_cmd *bf; bf = STAILQ_FIRST(&sc->sc_cmd_inactive); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_cmd_inactive, next_cmd); else bf = NULL; return (bf); } static struct otus_tx_cmd * otus_get_txcmd(struct otus_softc *sc) { struct otus_tx_cmd *bf; OTUS_LOCK_ASSERT(sc); bf = _otus_get_txcmd(sc); if (bf == NULL) { device_printf(sc->sc_dev, "%s: no tx cmd buffers\n", __func__); } return (bf); } static void otus_free_txcmd(struct otus_softc *sc, struct otus_tx_cmd *bf) { OTUS_LOCK_ASSERT(sc); STAILQ_INSERT_TAIL(&sc->sc_cmd_inactive, bf, next_cmd); } void otus_next_scan(void *arg, int pending) { #if 0 struct otus_softc *sc = arg; if (usbd_is_dying(sc->sc_udev)) return; usbd_ref_incr(sc->sc_udev); if (sc->sc_ic.ic_state == IEEE80211_S_SCAN) ieee80211_next_scan(&sc->sc_ic.ic_if); usbd_ref_decr(sc->sc_udev); #endif } int otus_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct otus_vap *uvp = OTUS_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct otus_softc *sc = ic->ic_softc; enum ieee80211_state ostate; ostate = vap->iv_state; OTUS_DPRINTF(sc, OTUS_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); OTUS_LOCK(sc); /* XXX TODO: more fleshing out! */ switch (nstate) { case IEEE80211_S_INIT: otus_set_operating_mode(sc); otus_set_rx_filter(sc); break; case IEEE80211_S_RUN: if (ic->ic_opmode == IEEE80211_M_STA) { otus_updateslot(sc); otus_set_operating_mode(sc); otus_set_rx_filter(sc); /* Start calibration timer. */ taskqueue_enqueue_timeout(taskqueue_thread, &sc->calib_to, hz); } break; default: break; } /* XXX TODO: calibration? */ sc->sc_led_newstate(sc); OTUS_UNLOCK(sc); IEEE80211_LOCK(ic); return (uvp->newstate(vap, nstate, arg)); } int otus_cmd(struct otus_softc *sc, uint8_t code, const void *idata, int ilen, void *odata, int odatalen) { struct otus_tx_cmd *cmd; struct ar_cmd_hdr *hdr; int xferlen, error; OTUS_LOCK_ASSERT(sc); /* Always bulk-out a multiple of 4 bytes. */ xferlen = (sizeof (*hdr) + ilen + 3) & ~3; if (xferlen > OTUS_MAX_TXCMDSZ) { device_printf(sc->sc_dev, "%s: command (0x%02x) size (%d) > %d\n", __func__, code, xferlen, OTUS_MAX_TXCMDSZ); return (EIO); } cmd = otus_get_txcmd(sc); if (cmd == NULL) { device_printf(sc->sc_dev, "%s: failed to get buf\n", __func__); return (EIO); } hdr = (struct ar_cmd_hdr *)cmd->buf; hdr->code = code; hdr->len = ilen; hdr->token = ++sc->token; /* Don't care about endianness. */ cmd->token = hdr->token; /* XXX TODO: check max cmd length? */ memcpy((uint8_t *)&hdr[1], idata, ilen); OTUS_DPRINTF(sc, OTUS_DEBUG_CMD, "%s: sending command code=0x%02x len=%d token=%d\n", __func__, code, ilen, hdr->token); cmd->odata = odata; cmd->odatalen = odatalen; cmd->buflen = xferlen; /* Queue the command to the endpoint */ STAILQ_INSERT_TAIL(&sc->sc_cmd_pending, cmd, next_cmd); usbd_transfer_start(sc->sc_xfer[OTUS_BULK_CMD]); /* Sleep on the command; wait for it to complete */ error = msleep(cmd, &sc->sc_mtx, PCATCH, "otuscmd", hz); /* * At this point we don't own cmd any longer; it'll be * freed by the cmd bulk path or the RX notification * path. 
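 * (Lifecycle sketch, from the code in this file: otus_cmd() puts the
 * buffer on sc_cmd_pending and starts the OTUS_BULK_CMD transfer;
 * otus_bulk_cmd_callback() moves it pending -> active and, via
 * otus_txcmdeof(), either wakes us immediately (no response expected)
 * or parks it on sc_cmd_waiting until otus_cmd_handle_response()
 * matches hdr->token against cmd->token.)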
If the data is made available then it'll be copied * to the caller. All that is left to do is communicate * status back to the caller. */ if (error != 0) { device_printf(sc->sc_dev, "%s: timeout waiting for command 0x%02x reply\n", __func__, code); } return error; } void otus_write(struct otus_softc *sc, uint32_t reg, uint32_t val) { OTUS_LOCK_ASSERT(sc); sc->write_buf[sc->write_idx].reg = htole32(reg); sc->write_buf[sc->write_idx].val = htole32(val); if (++sc->write_idx > (AR_MAX_WRITE_IDX-1)) (void)otus_write_barrier(sc); } int otus_write_barrier(struct otus_softc *sc) { int error; OTUS_LOCK_ASSERT(sc); if (sc->write_idx == 0) return 0; /* Nothing to flush. */ OTUS_DPRINTF(sc, OTUS_DEBUG_REGIO, "%s: called; %d updates\n", __func__, sc->write_idx); error = otus_cmd(sc, AR_CMD_WREG, sc->write_buf, sizeof (sc->write_buf[0]) * sc->write_idx, NULL, 0); sc->write_idx = 0; return error; } static struct ieee80211_node * otus_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { return malloc(sizeof (struct otus_node), M_80211_NODE, M_NOWAIT | M_ZERO); } int otus_read_eeprom(struct otus_softc *sc) { uint32_t regs[8], reg; uint8_t *eep; int i, j, error; OTUS_LOCK_ASSERT(sc); /* Read EEPROM by blocks of 32 bytes. */ eep = (uint8_t *)&sc->eeprom; reg = AR_EEPROM_OFFSET; for (i = 0; i < sizeof (sc->eeprom) / 32; i++) { for (j = 0; j < 8; j++, reg += 4) regs[j] = htole32(reg); error = otus_cmd(sc, AR_CMD_RREG, regs, sizeof regs, eep, 32); if (error != 0) break; eep += 32; } return error; } void otus_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211com *ic = ni->ni_ic; struct otus_softc *sc = ic->ic_softc; struct otus_node *on = OTUS_NODE(ni); OTUS_DPRINTF(sc, OTUS_DEBUG_STATE, "new assoc isnew=%d addr=%s\n", isnew, ether_sprintf(ni->ni_macaddr)); on->tx_done = 0; on->tx_err = 0; on->tx_retries = 0; } static void otus_cmd_handle_response(struct otus_softc *sc, struct ar_cmd_hdr *hdr) { struct otus_tx_cmd *cmd; OTUS_LOCK_ASSERT(sc); OTUS_DPRINTF(sc, OTUS_DEBUG_CMDDONE, "%s: received reply code=0x%02x len=%d token=%d\n", __func__, hdr->code, hdr->len, hdr->token); /* * Walk the list, freeing items that aren't ours, * stopping when we hit our token. */ while ((cmd = STAILQ_FIRST(&sc->sc_cmd_waiting)) != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_cmd_waiting, next_cmd); OTUS_DPRINTF(sc, OTUS_DEBUG_CMDDONE, "%s: cmd=%p; hdr.token=%d, cmd.token=%d\n", __func__, cmd, (int) hdr->token, (int) cmd->token); if (hdr->token == cmd->token) { /* Copy answer into caller's supplied buffer. 
*/ if (cmd->odata != NULL) { if (hdr->len != cmd->odatalen) { device_printf(sc->sc_dev, "%s: code 0x%02x, len=%d, olen=%d\n", __func__, (int) hdr->code, (int) hdr->len, (int) cmd->odatalen); } memcpy(cmd->odata, &hdr[1], MIN(cmd->odatalen, hdr->len)); } wakeup(cmd); } STAILQ_INSERT_TAIL(&sc->sc_cmd_inactive, cmd, next_cmd); } } void otus_cmd_rxeof(struct otus_softc *sc, uint8_t *buf, int len) { struct ieee80211com *ic = &sc->sc_ic; struct ar_cmd_hdr *hdr; OTUS_LOCK_ASSERT(sc); if (__predict_false(len < sizeof (*hdr))) { OTUS_DPRINTF(sc, OTUS_DEBUG_CMDDONE, "cmd too small %d\n", len); return; } hdr = (struct ar_cmd_hdr *)buf; if (__predict_false(sizeof (*hdr) + hdr->len > len || sizeof (*hdr) + hdr->len > 64)) { OTUS_DPRINTF(sc, OTUS_DEBUG_CMDDONE, "cmd too large %d\n", hdr->len); return; } OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE, "%s: code=%.02x\n", __func__, hdr->code); /* * This has to reach into the cmd queue "waiting for * an RX response" list, grab the head entry and check * if we need to wake anyone up. */ if ((hdr->code & 0xc0) != 0xc0) { otus_cmd_handle_response(sc, hdr); return; } /* Received unsolicited notification. */ switch (hdr->code & 0x3f) { case AR_EVT_BEACON: break; case AR_EVT_TX_COMP: { struct ar_evt_tx_comp *tx = (struct ar_evt_tx_comp *)&hdr[1]; struct ieee80211_node *ni; ni = ieee80211_find_node(&ic->ic_sta, tx->macaddr); if (ni == NULL) { device_printf(sc->sc_dev, "%s: txcomp on unknown node (%s)\n", __func__, ether_sprintf(tx->macaddr)); break; } OTUS_DPRINTF(sc, OTUS_DEBUG_TXCOMP, "tx completed %s status=%d phy=0x%x\n", ether_sprintf(tx->macaddr), le16toh(tx->status), le32toh(tx->phy)); switch (le16toh(tx->status)) { case AR_TX_STATUS_COMP: #if 0 ackfailcnt = 0; ieee80211_ratectl_tx_complete(ni->ni_vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); #endif /* * We don't get the above; only error notifications. * Sigh. So, don't worry about this. */ break; case AR_TX_STATUS_RETRY_COMP: OTUS_NODE(ni)->tx_retries++; break; case AR_TX_STATUS_FAILED: OTUS_NODE(ni)->tx_err++; break; } ieee80211_free_node(ni); break; } case AR_EVT_TBTT: break; case AR_EVT_DO_BB_RESET: /* * This is "tell driver to reset baseband" from ar9170-fw. * * I'm not sure what we should do here, so I'm going to * fall through; it gets generated when RTSRetryCnt internally * reaches '5' - I guess the firmware authors thought that * meant that the BB may have gone deaf or something. */ default: device_printf(sc->sc_dev, "%s: received notification code=0x%02x len=%d\n", __func__, hdr->code, hdr->len); } } /* * Handle a single MPDU. * * This may be a single MPDU, or it may be a sub-frame from an A-MPDU. * In the latter case some of the header details need to be adjusted. */ void otus_sub_rxeof(struct otus_softc *sc, uint8_t *buf, int len, struct mbufq *rxq) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_rx_stats rxs; #if 0 struct ieee80211_node *ni; #endif struct ar_rx_macstatus *mac_status = NULL; struct ar_rx_phystatus *phy_status = NULL; struct ieee80211_frame *wh; struct mbuf *m; // int s; if (otus_debug & OTUS_DEBUG_RX_BUFFER) { device_printf(sc->sc_dev, "%s: %*D\n", __func__, len, buf, "-"); } /* * Before any data path stuff - check to see if this is a command * response. * * All bits in the PLCP header are set to 1 for non-MPDU. */ if ((len >= AR_PLCP_HDR_LEN) && memcmp(buf, AR_PLCP_HDR_INTR, AR_PLCP_HDR_LEN) == 0) { otus_cmd_rxeof(sc, buf + AR_PLCP_HDR_LEN, len - AR_PLCP_HDR_LEN); return; } /* * First step - get the status for the given frame. 
 * This will tell us whether it's a single MPDU or
 * an A-MPDU subframe.
 */
	if (len < sizeof(*mac_status)) {
		OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
		    "%s: sub-xfer too short (no mac_status) (len %d)\n",
		    __func__, len);
		counter_u64_add(ic->ic_ierrors, 1);
		return;
	}

	/*
	 * Remove the mac_status from the payload length.
	 *
	 * Note: cheating, don't reallocate the buffer!
	 */
	mac_status = (struct ar_rx_macstatus *)(buf + len -
	    sizeof(*mac_status));
	len -= sizeof(*mac_status);

	OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE, "%s: mac status=0x%x\n",
	    __func__, mac_status->status);

	/*
	 * Next - check the MAC status before doing anything else.
	 * Extract out the PLCP header for single and first frames;
	 * since there's a single RX path we can shove PLCP headers
	 * from both into sc->ar_last_rx_plcp[] so it can be reused.
	 */
	if (((mac_status->status & AR_RX_STATUS_MPDU_MASK) ==
	      AR_RX_STATUS_MPDU_SINGLE) ||
	    ((mac_status->status & AR_RX_STATUS_MPDU_MASK) ==
	      AR_RX_STATUS_MPDU_FIRST)) {
		/*
		 * Ok, we need to at least have a PLCP header at
		 * this point.
		 */
		if (len < AR_PLCP_HDR_LEN) {
			OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
			    "%s: sub-xfer too short (no mac+plcp) (len %d)\n",
			    __func__, len);
			counter_u64_add(ic->ic_ierrors, 1);
			return;
		}
		memcpy(sc->ar_last_rx_plcp, buf, AR_PLCP_HDR_LEN);

		/*
		 * At this point we can just consume the PLCP header.
		 * The beginning of the frame should thus be data.
		 */
		buf += AR_PLCP_HDR_LEN;
		len -= AR_PLCP_HDR_LEN;
	}

	/*
	 * Next - see if we have a PHY status.
	 *
	 * The PHY status is at the end of the final A-MPDU subframe
	 * or a single MPDU frame.
	 *
	 * We'll use this to tag frames with noise floor / RSSI
	 * if they have valid information.
	 */
	if (((mac_status->status & AR_RX_STATUS_MPDU_MASK) ==
	      AR_RX_STATUS_MPDU_SINGLE) ||
	    ((mac_status->status & AR_RX_STATUS_MPDU_MASK) ==
	      AR_RX_STATUS_MPDU_LAST)) {
		if (len < sizeof(*phy_status)) {
			OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
			    "%s: sub-xfer too short (no phy status) (len %d)\n",
			    __func__, len);
			counter_u64_add(ic->ic_ierrors, 1);
			return;
		}
		/*
		 * Take a pointer to the phy status and remove the length
		 * from the end of the buffer.
		 *
		 * Note: we're cheating here; don't reallocate the buffer!
		 */
		phy_status = (struct ar_rx_phystatus *)
		    (buf + len - sizeof(*phy_status));
		len -= sizeof(*phy_status);
	}

	/*
	 * Middle frames just have a MAC status (stripped above.)
	 * No PHY status, and PLCP is from ar_last_rx_plcp.
	 */

	/*
	 * Discard error frames; don't discard BAD_RA (eg monitor mode);
	 * let net80211 do that
	 */
	if (__predict_false((mac_status->error & ~AR_RX_ERROR_BAD_RA) != 0)) {
		OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE, "error frame 0x%02x\n",
		    mac_status->error);
		if (mac_status->error & AR_RX_ERROR_FCS) {
			OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE, "bad FCS\n");
		} else if (mac_status->error & AR_RX_ERROR_MMIC) {
			/* Report Michael MIC failures to net80211. */
#if 0
			ieee80211_notify_michael_failure(ni->ni_vap, wh,
			    keyidx);
#endif
			device_printf(sc->sc_dev, "%s: MIC failure\n",
			    __func__);
		}
		counter_u64_add(ic->ic_ierrors, 1);
		return;
	}

	/*
	 * Make sure there's room for an 802.11 header + FCS.
	 *
	 * Note: a CTS/ACK is 14 bytes (FC, DUR, RA, FCS).
	 * Making it IEEE80211_MIN_LEN misses CTS/ACKs.
	 *
	 * This won't be tossed at this point; eventually once
	 * rx radiotap is implemented this will allow for
	 * CTS/ACK frames.  Passing them up to net80211 will
	 * currently make it angry (too-short packets.)
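	 *
	 * (Arithmetic for the bound below: FC(2) + Duration(2) +
	 * RA(IEEE80211_ADDR_LEN = 6) + FCS(IEEE80211_CRC_LEN = 4) = 14
	 * bytes, i.e. exactly a CTS or ACK.)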
	 */
	if (len < 2 + 2 + IEEE80211_ADDR_LEN + IEEE80211_CRC_LEN) {
		OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
		    "%s: too short for 802.11 (len %d)\n",
		    __func__, len);
		counter_u64_add(ic->ic_ierrors, 1);
		return;
	}

	len -= IEEE80211_CRC_LEN;	/* strip 802.11 FCS */

	wh = (struct ieee80211_frame *) buf;

	/*
	 * The firmware does seem to spit out a bunch of frames
	 * with invalid frame control values here.  Just toss them
	 * rather than letting net80211 get angry and log.
	 */
	if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
	    IEEE80211_FC0_VERSION_0) {
		OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
		    "%s: invalid 802.11 fc version (firmware bug?)\n",
		    __func__);
		counter_u64_add(ic->ic_ierrors, 1);
		return;
	}

	m = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: failed m_get2() (len=%d)\n",
		    __func__, len);
		counter_u64_add(ic->ic_ierrors, 1);
		return;
	}

	/* Finalize mbuf. */
	memcpy(mtod(m, uint8_t *), wh, len);
	m->m_pkthdr.len = m->m_len = len;

	/* XXX TODO: add setting rx radiotap fields here */

	/*
	 * Ok, check the frame length and toss if it's too short
	 * for net80211.  This will toss ACK/CTS.
	 */
	if (m->m_len < IEEE80211_MIN_LEN) {
		/* XXX TODO: add radiotap receive here */
		m_free(m);
		m = NULL;
		return;
	}

	/* Add RSSI to this mbuf if we have a PHY header */
	bzero(&rxs, sizeof(rxs));
	rxs.r_flags = IEEE80211_R_NF;
	rxs.c_nf = sc->sc_nf[0];	/* XXX chain 0 != combined rssi/nf */
	if (phy_status != NULL) {
		rxs.r_flags |= IEEE80211_R_RSSI;
		rxs.c_rssi = phy_status->rssi;
	}
	/* XXX TODO: add MIMO RSSI/NF as well */
	if (ieee80211_add_rx_params(m, &rxs) == 0) {
		counter_u64_add(ic->ic_ierrors, 1);
		m_freem(m);	/* don't leak the mbuf on failure */
		return;
	}

	/* XXX make a method */
	STAILQ_INSERT_TAIL(&rxq->mq_head, m, m_stailqpkt);

#if 0
	OTUS_UNLOCK(sc);
	ni = ieee80211_find_rxnode(ic, wh);
	rxi.rxi_flags = 0;
	rxi.rxi_rssi = tail->rssi;
	rxi.rxi_tstamp = 0;	/* unused */
	ieee80211_input(ifp, m, ni, &rxi);

	/* Node is no longer needed. */
	ieee80211_release_node(ic, ni);
	OTUS_LOCK(sc);
#endif
}

static void
otus_rxeof(struct usb_xfer *xfer, struct otus_data *data, struct mbufq *rxq)
{
	struct otus_softc *sc = usbd_xfer_softc(xfer);
	caddr_t buf = data->buf;
	struct ar_rx_head *head;
	uint16_t hlen;
	int len, offset = 0;

	usbd_xfer_status(xfer, &len, NULL, NULL, NULL);
	OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
	    "%s: transfer completed; len=%d\n", __func__, len);
	if (otus_debug & OTUS_DEBUG_RX_BUFFER) {
		device_printf(sc->sc_dev, "%s: %*D\n",
		    __func__, len, buf, "-");
	}

	while (len >= sizeof (*head)) {
		head = (struct ar_rx_head *)buf;
		if (__predict_false(head->tag != htole16(AR_RX_HEAD_TAG))) {
			OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
			    "tag not valid 0x%x\n", le16toh(head->tag));
			break;
		}
		hlen = le16toh(head->len);
		OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE, "%s: hlen=%d\n",
		    __func__, hlen);
		if (__predict_false(sizeof (*head) + hlen > len)) {
			OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
			    "xfer too short %d/%d\n", len, hlen);
			break;
		}
		/* Process sub-xfer. */
		otus_sub_rxeof(sc, (uint8_t *)buf + 4, hlen, rxq);

		/*
		 * Next sub-xfer is aligned on a 32-bit boundary.
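		 * ((x + 3) & ~3 rounds up to the next multiple of 4;
		 * for example a 37-byte sub-frame plus the 4-byte
		 * ar_rx_head gives 41, so the cursor advances by 44.)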
		 */
		hlen = (sizeof (*head) + hlen + 3) & ~3;
		offset += hlen;
		OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
		    "%s: rounded size is %d, next packet starts at %d\n",
		    __func__, hlen, offset);
		buf += hlen;
		len -= hlen;
	}
	OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE, "%s: done!\n", __func__);
}

static void
otus_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct epoch_tracker et;
	struct otus_softc *sc = usbd_xfer_softc(xfer);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct mbuf *m;
	struct mbufq scrx;
	struct otus_data *data;

	OTUS_LOCK_ASSERT(sc);

	mbufq_init(&scrx, 1024);

#if 0
	device_printf(sc->sc_dev, "%s: called; state=%d; error=%d\n",
	    __func__, USB_GET_STATE(xfer), error);
#endif

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		data = STAILQ_FIRST(&sc->sc_rx_active);
		if (data == NULL)
			goto tr_setup;
		STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
		otus_rxeof(xfer, data, &scrx);
		STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		/*
		 * XXX TODO: what if sc_rx isn't empty, but data
		 * is empty?  Then we leak mbufs.
		 */
		data = STAILQ_FIRST(&sc->sc_rx_inactive);
		if (data == NULL) {
			//KASSERT(m == NULL, ("mbuf isn't NULL"));
			return;
		}
		STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next);
		STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next);
		usbd_xfer_set_frame_data(xfer, 0, data->buf,
		    usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);

		/*
		 * To avoid a LOR, unlock our private mutex here before
		 * calling ieee80211_input(); we are at the end of the
		 * USB callback, where it is safe to do so.
		 */
		OTUS_UNLOCK(sc);
		NET_EPOCH_ENTER(et);
		while ((m = mbufq_dequeue(&scrx)) != NULL) {
			wh = mtod(m, struct ieee80211_frame *);
			ni = ieee80211_find_rxnode(ic,
			    (struct ieee80211_frame_min *)wh);
			if (ni != NULL) {
				if (ni->ni_flags & IEEE80211_NODE_HT)
					m->m_flags |= M_AMPDU;
				(void)ieee80211_input_mimo(ni, m);
				ieee80211_free_node(ni);
			} else
				(void)ieee80211_input_mimo_all(ic, m);
		}
		NET_EPOCH_EXIT(et);
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		OTUS_LOCK(sc);
		break;
	default:
		/* Requeue it on the inactive list due to an error. */
		data = STAILQ_FIRST(&sc->sc_rx_active);
		if (data != NULL) {
			STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
			STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
		}
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			counter_u64_add(ic->ic_ierrors, 1);
			goto tr_setup;
		}
		break;
	}
}

static void
otus_txeof(struct usb_xfer *xfer, struct otus_data *data)
{
	struct otus_softc *sc = usbd_xfer_softc(xfer);

	OTUS_DPRINTF(sc, OTUS_DEBUG_TXDONE, "%s: called; data=%p\n",
	    __func__, data);

	OTUS_LOCK_ASSERT(sc);

	if (sc->sc_tx_n_active == 0) {
		device_printf(sc->sc_dev,
		    "%s: completed but tx_active=0\n", __func__);
	} else {
		sc->sc_tx_n_active--;
	}

	if (data->m) {
		/* XXX status? */
		/* XXX we get TX status via the RX path.. */
		ieee80211_tx_complete(data->ni, data->m, 0);
		data->m = NULL;
		data->ni = NULL;
	}
}

static void
otus_txcmdeof(struct usb_xfer *xfer, struct otus_tx_cmd *cmd)
{
	struct otus_softc *sc = usbd_xfer_softc(xfer);

	OTUS_LOCK_ASSERT(sc);

	OTUS_DPRINTF(sc, OTUS_DEBUG_CMDDONE,
	    "%s: called; data=%p; odata=%p\n",
	    __func__, cmd, cmd->odata);

	/*
	 * Non-response commands still need wakeup so the caller
	 * knows the command was submitted and completed OK; response
	 * commands should wait until they're ACKed by the firmware
	 * with a response.
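	 *
	 * (That is: cmd->odata == NULL marks a fire-and-forget command,
	 * so wakeup() the sleeper in otus_cmd() and recycle the buffer
	 * now; otherwise the buffer is parked on sc_cmd_waiting and
	 * recycled by otus_cmd_handle_response() once the reply
	 * arrives.)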
*/ if (cmd->odata) { STAILQ_INSERT_TAIL(&sc->sc_cmd_waiting, cmd, next_cmd); } else { wakeup(cmd); otus_free_txcmd(sc, cmd); } } static void otus_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error) { uint8_t which = OTUS_BULK_TX; struct otus_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct otus_data *data; OTUS_LOCK_ASSERT(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active[which]); if (data == NULL) goto tr_setup; OTUS_DPRINTF(sc, OTUS_DEBUG_TXDONE, "%s: transfer done %p\n", __func__, data); STAILQ_REMOVE_HEAD(&sc->sc_tx_active[which], next); otus_txeof(xfer, data); otus_freebuf(sc, data); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_tx_pending[which]); if (data == NULL) { OTUS_DPRINTF(sc, OTUS_DEBUG_XMIT, "%s: empty pending queue sc %p\n", __func__, sc); sc->sc_tx_n_active = 0; goto finish; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending[which], next); STAILQ_INSERT_TAIL(&sc->sc_tx_active[which], data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); OTUS_DPRINTF(sc, OTUS_DEBUG_XMIT, "%s: submitting transfer %p\n", __func__, data); usbd_transfer_submit(xfer); sc->sc_tx_n_active++; break; default: data = STAILQ_FIRST(&sc->sc_tx_active[which]); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_tx_active[which], next); otus_txeof(xfer, data); otus_freebuf(sc, data); } counter_u64_add(ic->ic_oerrors, 1); if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto tr_setup; } break; } finish: #ifdef IEEE80211_SUPPORT_SUPERG /* * If the TX active queue drops below a certain * threshold, ensure we age fast-frames out so they're * transmitted. */ if (sc->sc_tx_n_active < 2) { /* XXX ew - net80211 should defer this for us! */ OTUS_UNLOCK(sc); ieee80211_ff_flush(ic, WME_AC_VO); ieee80211_ff_flush(ic, WME_AC_VI); ieee80211_ff_flush(ic, WME_AC_BE); ieee80211_ff_flush(ic, WME_AC_BK); OTUS_LOCK(sc); } #endif /* Kick TX */ otus_tx_start(sc); } static void otus_bulk_cmd_callback(struct usb_xfer *xfer, usb_error_t error) { struct otus_softc *sc = usbd_xfer_softc(xfer); #if 0 struct ieee80211com *ic = &sc->sc_ic; #endif struct otus_tx_cmd *cmd; OTUS_LOCK_ASSERT(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: cmd = STAILQ_FIRST(&sc->sc_cmd_active); if (cmd == NULL) goto tr_setup; OTUS_DPRINTF(sc, OTUS_DEBUG_CMDDONE, "%s: transfer done %p\n", __func__, cmd); STAILQ_REMOVE_HEAD(&sc->sc_cmd_active, next_cmd); otus_txcmdeof(xfer, cmd); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: cmd = STAILQ_FIRST(&sc->sc_cmd_pending); if (cmd == NULL) { OTUS_DPRINTF(sc, OTUS_DEBUG_CMD, "%s: empty pending queue sc %p\n", __func__, sc); return; } STAILQ_REMOVE_HEAD(&sc->sc_cmd_pending, next_cmd); STAILQ_INSERT_TAIL(&sc->sc_cmd_active, cmd, next_cmd); usbd_xfer_set_frame_data(xfer, 0, cmd->buf, cmd->buflen); OTUS_DPRINTF(sc, OTUS_DEBUG_CMD, "%s: submitting transfer %p; buf=%p, buflen=%d\n", __func__, cmd, cmd->buf, cmd->buflen); usbd_transfer_submit(xfer); break; default: cmd = STAILQ_FIRST(&sc->sc_cmd_active); if (cmd != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_cmd_active, next_cmd); otus_txcmdeof(xfer, cmd); } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } /* * This isn't used by carl9170; it however may be used by the * initial bootloader. 
*/ static void otus_bulk_irq_callback(struct usb_xfer *xfer, usb_error_t error) { struct otus_softc *sc = usbd_xfer_softc(xfer); int actlen; int sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); OTUS_DPRINTF(sc, OTUS_DEBUG_IRQ, "%s: called; state=%d\n", __func__, USB_GET_STATE(xfer)); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: /* * Read usb frame data, if any. * "actlen" has the total length for all frames * transferred. */ OTUS_DPRINTF(sc, OTUS_DEBUG_IRQ, "%s: comp; %d bytes\n", __func__, actlen); #if 0 pc = usbd_xfer_get_frame(xfer, 0); otus_dump_usb_rx_page(sc, pc, actlen); #endif /* XXX fallthrough */ case USB_ST_SETUP: /* * Setup xfer frame lengths/count and data */ OTUS_DPRINTF(sc, OTUS_DEBUG_IRQ, "%s: setup\n", __func__); usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); break; default: /* Error */ /* * Print error message and clear stall * for example. */ OTUS_DPRINTF(sc, OTUS_DEBUG_IRQ, "%s: ERROR?\n", __func__); break; } } /* * Map net80211 rate to hw rate for otus MAC/PHY. */ static uint8_t otus_rate_to_hw_rate(struct otus_softc *sc, uint8_t rate) { int is_2ghz; is_2ghz = !! (IEEE80211_IS_CHAN_2GHZ(sc->sc_ic.ic_curchan)); /* MCS check */ if (rate & 0x80) { return rate; } switch (rate) { /* CCK */ case 2: return (0x0); case 4: return (0x1); case 11: return (0x2); case 22: return (0x3); /* OFDM */ case 12: return (0xb); case 18: return (0xf); case 24: return (0xa); case 36: return (0xe); case 48: return (0x9); case 72: return (0xd); case 96: return (0x8); case 108: return (0xc); default: device_printf(sc->sc_dev, "%s: unknown rate '%d'\n", __func__, (int) rate); case 0: if (is_2ghz) return (0x0); /* 1MB CCK */ else return (0xb); /* 6MB OFDM */ } } static int otus_hw_rate_is_ht(struct otus_softc *sc, uint8_t hw_rate) { return !! (hw_rate & 0x80); } static int otus_hw_rate_is_ofdm(struct otus_softc *sc, uint8_t hw_rate) { switch (hw_rate) { case 0x0: case 0x1: case 0x2: case 0x3: return (0); default: return (1); } } static void otus_tx_update_ratectl(struct otus_softc *sc, struct ieee80211_node *ni) { struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs; struct otus_node *on = OTUS_NODE(ni); txs->flags = IEEE80211_RATECTL_TX_STATS_NODE | IEEE80211_RATECTL_TX_STATS_RETRIES; txs->ni = ni; txs->nframes = on->tx_done; txs->nsuccess = on->tx_done - on->tx_err; txs->nretries = on->tx_retries; ieee80211_ratectl_tx_update(ni->ni_vap, txs); on->tx_done = on->tx_err = on->tx_retries = 0; } /* * XXX TODO: support tx bpf parameters for configuration! 
* * Relevant pieces: * * ac = params->ibp_pri & 3; * rate = params->ibp_rate0; * params->ibp_flags & IEEE80211_BPF_NOACK * params->ibp_flags & IEEE80211_BPF_RTS * params->ibp_flags & IEEE80211_BPF_CTS * tx->rts_ntries = params->ibp_try1; * tx->data_ntries = params->ibp_try0; */ static int otus_tx(struct otus_softc *sc, struct ieee80211_node *ni, struct mbuf *m, struct otus_data *data, const struct ieee80211_bpf_params *params) { const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame *wh; struct ieee80211_key *k; struct ar_tx_head *head; uint32_t phyctl; uint16_t macctl, qos; uint8_t qid, rate; int hasqos, xferlen, type, ismcast; wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { device_printf(sc->sc_dev, "%s: m=%p: ieee80211_crypto_encap returns NULL\n", __func__, m); return (ENOBUFS); } wh = mtod(m, struct ieee80211_frame *); } /* Calculate transfer length; ensure data buffer is large enough */ xferlen = sizeof (*head) + m->m_pkthdr.len; if (xferlen > OTUS_TXBUFSZ) { device_printf(sc->sc_dev, "%s: 802.11 TX frame is %d bytes, max %d bytes\n", __func__, xferlen, OTUS_TXBUFSZ); return (ENOBUFS); } hasqos = !! IEEE80211_QOS_HAS_SEQ(wh); if (hasqos) { uint8_t tid; qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; tid = qos & IEEE80211_QOS_TID; qid = TID_TO_WME_AC(tid); } else { qos = 0; qid = WME_AC_BE; } type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); /* Pickup a rate index. */ if (params != NULL) rate = otus_rate_to_hw_rate(sc, params->ibp_rate0); else if (!!(m->m_flags & M_EAPOL) || type != IEEE80211_FC0_TYPE_DATA) rate = otus_rate_to_hw_rate(sc, tp->mgmtrate); else if (ismcast) rate = otus_rate_to_hw_rate(sc, tp->mcastrate); else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = otus_rate_to_hw_rate(sc, tp->ucastrate); else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = otus_rate_to_hw_rate(sc, ni->ni_txrate); } phyctl = 0; macctl = AR_TX_MAC_BACKOFF | AR_TX_MAC_HW_DUR | AR_TX_MAC_QID(qid); /* * XXX TODO: params for NOACK, ACK, RTS, CTS, etc */ if (ismcast || (hasqos && ((qos & IEEE80211_QOS_ACKPOLICY) == IEEE80211_QOS_ACKPOLICY_NOACK))) macctl |= AR_TX_MAC_NOACK; if (!ismcast) { if (m->m_pkthdr.len + IEEE80211_CRC_LEN >= vap->iv_rtsthreshold) macctl |= AR_TX_MAC_RTS; else if (otus_hw_rate_is_ht(sc, rate)) { if (ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) macctl |= AR_TX_MAC_RTS; } else if (ic->ic_flags & IEEE80211_F_USEPROT) { if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) macctl |= AR_TX_MAC_CTS; else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) macctl |= AR_TX_MAC_RTS; } } phyctl |= AR_TX_PHY_MCS(rate & 0x7f); /* Note: MCS rates are 0x80 and above */ if (otus_hw_rate_is_ht(sc, rate)) { phyctl |= AR_TX_PHY_MT_HT; /* Always use all tx antennas for now, just to be safe */ phyctl |= AR_TX_PHY_ANTMSK(sc->txmask); /* Heavy clip */ phyctl |= (rate & 0x7) << AR_TX_PHY_TX_HEAVY_CLIP_SHIFT; } else if (otus_hw_rate_is_ofdm(sc, rate)) { phyctl |= AR_TX_PHY_MT_OFDM; /* Always use all tx antennas for now, just to be safe */ phyctl |= AR_TX_PHY_ANTMSK(sc->txmask); } else { /* CCK */ phyctl |= AR_TX_PHY_MT_CCK; phyctl |= AR_TX_PHY_ANTMSK(sc->txmask); } /* Update net80211 with the current counters */ otus_tx_update_ratectl(sc, ni); /* Update rate control stats for frames that are ACK'ed. 
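 * (tx_done below only counts frames sent without AR_TX_MAC_NOACK;
 * retries and failures arrive asynchronously as AR_EVT_TX_COMP events
 * in otus_cmd_rxeof(), and otus_tx_update_ratectl() feeds the
 * accumulated counters to net80211's rate control.)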
*/ if (!(macctl & AR_TX_MAC_NOACK)) OTUS_NODE(ni)->tx_done++; /* Fill Tx descriptor. */ head = (struct ar_tx_head *)data->buf; head->len = htole16(m->m_pkthdr.len + IEEE80211_CRC_LEN); head->macctl = htole16(macctl); head->phyctl = htole32(phyctl); m_copydata(m, 0, m->m_pkthdr.len, (caddr_t)&head[1]); data->buflen = xferlen; data->ni = ni; data->m = m; OTUS_DPRINTF(sc, OTUS_DEBUG_XMIT, "%s: tx: m=%p; data=%p; len=%d mac=0x%04x phy=0x%08x rate=0x%02x, ni_txrate=%d\n", __func__, m, data, le16toh(head->len), macctl, phyctl, (int) rate, (int) ni->ni_txrate); /* Submit transfer */ STAILQ_INSERT_TAIL(&sc->sc_tx_pending[OTUS_BULK_TX], data, next); usbd_transfer_start(sc->sc_xfer[OTUS_BULK_TX]); return 0; } static u_int otus_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t val, *hashes = arg; val = le32dec(LLADDR(sdl) + 4); /* Get address byte 5 */ val = val & 0x0000ff00; val = val >> 8; /* As per below, shift it >> 2 to get only 6 bits */ val = val >> 2; if (val < 32) hashes[0] |= 1 << val; else hashes[1] |= 1 << (val - 32); return (1); } int otus_set_multi(struct otus_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t hashes[2]; int r; if (ic->ic_allmulti > 0 || ic->ic_promisc > 0 || ic->ic_opmode == IEEE80211_M_MONITOR) { hashes[0] = 0xffffffff; hashes[1] = 0xffffffff; } else { struct ieee80211vap *vap; hashes[0] = hashes[1] = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if_foreach_llmaddr(vap->iv_ifp, otus_hash_maddr, hashes); } #if 0 /* XXX openbsd code */ while (enm != NULL) { bit = enm->enm_addrlo[5] >> 2; if (bit < 32) hashes[0] |= 1 << bit; else hashes[1] |= 1 << (bit - 32); ETHER_NEXT_MULTI(step, enm); } #endif hashes[1] |= 1U << 31; /* Make sure the broadcast bit is set. */ OTUS_LOCK(sc); otus_write(sc, AR_MAC_REG_GROUP_HASH_TBL_L, hashes[0]); otus_write(sc, AR_MAC_REG_GROUP_HASH_TBL_H, hashes[1]); r = otus_write_barrier(sc); /* XXX operating mode? filter? */ OTUS_UNLOCK(sc); return (r); } static int otus_updateedca(struct ieee80211com *ic) { struct otus_softc *sc = ic->ic_softc; OTUS_LOCK(sc); /* * XXX TODO: take temporary copy of EDCA information * when scheduling this so we have a more time-correct view * of things. * XXX TODO: this can be done on the net80211 level */ otus_updateedca_locked(sc); OTUS_UNLOCK(sc); return (0); } static void otus_updateedca_locked(struct otus_softc *sc) { #define EXP2(val) ((1 << (val)) - 1) #define AIFS(val) ((val) * 9 + 10) struct chanAccParams chp; struct ieee80211com *ic = &sc->sc_ic; const struct wmeParams *edca; ieee80211_wme_ic_getparams(ic, &chp); OTUS_LOCK_ASSERT(sc); edca = chp.cap_wmeParams; /* Set CWmin/CWmax values. */ otus_write(sc, AR_MAC_REG_AC0_CW, EXP2(edca[WME_AC_BE].wmep_logcwmax) << 16 | EXP2(edca[WME_AC_BE].wmep_logcwmin)); otus_write(sc, AR_MAC_REG_AC1_CW, EXP2(edca[WME_AC_BK].wmep_logcwmax) << 16 | EXP2(edca[WME_AC_BK].wmep_logcwmin)); otus_write(sc, AR_MAC_REG_AC2_CW, EXP2(edca[WME_AC_VI].wmep_logcwmax) << 16 | EXP2(edca[WME_AC_VI].wmep_logcwmin)); otus_write(sc, AR_MAC_REG_AC3_CW, EXP2(edca[WME_AC_VO].wmep_logcwmax) << 16 | EXP2(edca[WME_AC_VO].wmep_logcwmin)); otus_write(sc, AR_MAC_REG_AC4_CW, /* Special TXQ. */ EXP2(edca[WME_AC_VO].wmep_logcwmax) << 16 | EXP2(edca[WME_AC_VO].wmep_logcwmin)); /* Set AIFSN values. */ otus_write(sc, AR_MAC_REG_AC1_AC0_AIFS, AIFS(edca[WME_AC_VI].wmep_aifsn) << 24 | AIFS(edca[WME_AC_BK].wmep_aifsn) << 12 | AIFS(edca[WME_AC_BE].wmep_aifsn)); otus_write(sc, AR_MAC_REG_AC3_AC2_AIFS, AIFS(edca[WME_AC_VO].wmep_aifsn) << 16 | /* Special TXQ. 
*/ AIFS(edca[WME_AC_VO].wmep_aifsn) << 4 | AIFS(edca[WME_AC_VI].wmep_aifsn) >> 8); /* Set TXOP limit. */ otus_write(sc, AR_MAC_REG_AC1_AC0_TXOP, edca[WME_AC_BK].wmep_txopLimit << 16 | edca[WME_AC_BE].wmep_txopLimit); otus_write(sc, AR_MAC_REG_AC3_AC2_TXOP, edca[WME_AC_VO].wmep_txopLimit << 16 | edca[WME_AC_VI].wmep_txopLimit); /* XXX ACK policy? */ (void)otus_write_barrier(sc); #undef AIFS #undef EXP2 } static void otus_updateslot(struct otus_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t slottime; OTUS_LOCK_ASSERT(sc); slottime = IEEE80211_GET_SLOTTIME(ic); otus_write(sc, AR_MAC_REG_SLOT_TIME, slottime << 10); (void)otus_write_barrier(sc); } /* * Things to do based on 2GHz or 5GHz: * * + slottime * + dyn_sifs_ack * + rts_cts_rate * + slot time * + mac_rates * + mac_tpc * * And in the transmit path * + tpc: carl9170_tx_rate_tpc_chains * + carl9170_tx_physet() * + disable short premable tx */ int otus_init_mac(struct otus_softc *sc) { int error; OTUS_LOCK_ASSERT(sc); otus_write(sc, AR_MAC_REG_ACK_EXTENSION, 0x40); otus_write(sc, AR_MAC_REG_RETRY_MAX, 0); otus_write(sc, AR_MAC_REG_RX_THRESHOLD, 0xc1f80); otus_write(sc, AR_MAC_REG_RX_PE_DELAY, 0x70); otus_write(sc, AR_MAC_REG_EIFS_AND_SIFS, 0xa144000); otus_write(sc, AR_MAC_REG_SLOT_TIME, 9 << 10); otus_write(sc, AR_MAC_REG_TID_CFACK_CFEND_RATE, 0x19000000); /* NAV protects ACK only (in TXOP). */ otus_write(sc, AR_MAC_REG_TXOP_DURATION, 0x201); /* Set beacon Tx power to 0x7. */ otus_write(sc, AR_MAC_REG_BCN_HT1, 0x8000170); otus_write(sc, AR_MAC_REG_BACKOFF_PROTECT, 0x105); otus_write(sc, AR_MAC_REG_AMPDU_FACTOR, 0x10000a); otus_set_rx_filter(sc); otus_write(sc, AR_MAC_REG_BASIC_RATE, 0x150f); otus_write(sc, AR_MAC_REG_MANDATORY_RATE, 0x150f); otus_write(sc, AR_MAC_REG_RTS_CTS_RATE, 0x10b01bb); otus_write(sc, AR_MAC_REG_ACK_TPC, 0x4003c1e); /* Enable LED0 and LED1. */ otus_write(sc, AR_GPIO_REG_PORT_TYPE, 0x3); otus_write(sc, AR_GPIO_REG_PORT_DATA, 0x3); /* Switch MAC to OTUS interface. */ otus_write(sc, 0x1c3600, 0x3); otus_write(sc, AR_MAC_REG_AMPDU_RX_THRESH, 0xffff); otus_write(sc, AR_MAC_REG_MISC_680, 0xf00008); /* Disable Rx timeout (workaround). */ otus_write(sc, AR_MAC_REG_RX_TIMEOUT, 0); /* Set USB Rx stream mode maximum frame number to 2. */ otus_write(sc, 0x1e1110, 0x4); /* Set USB Rx stream mode timeout to 10us. */ otus_write(sc, 0x1e1114, 0x80); /* Set clock frequency to 88/80MHz. */ otus_write(sc, AR_PWR_REG_CLOCK_SEL, 0x73); /* Set WLAN DMA interrupt mode: generate intr per packet. */ otus_write(sc, AR_MAC_REG_TXRX_MPI, 0x110011); otus_write(sc, AR_MAC_REG_FCS_SELECT, 0x4); otus_write(sc, AR_MAC_REG_TXOP_NOT_ENOUGH_INDICATION, 0x141e0f48); /* Disable HW decryption for now. */ otus_write(sc, AR_MAC_REG_ENCRYPTION, 0x78); if ((error = otus_write_barrier(sc)) != 0) return error; /* Set default EDCA parameters. */ otus_updateedca_locked(sc); return 0; } /* * Return default value for PHY register based on current operating mode. */ uint32_t otus_phy_get_def(struct otus_softc *sc, uint32_t reg) { int i; for (i = 0; i < nitems(ar5416_phy_regs); i++) if (AR_PHY(ar5416_phy_regs[i]) == reg) return sc->phy_vals[i]; return 0; /* Register not found. */ } /* * Update PHY's programming based on vendor-specific data stored in EEPROM. * This is for FEM-type devices only. */ int otus_set_board_values(struct otus_softc *sc, struct ieee80211_channel *c) { const struct ModalEepHeader *eep; uint32_t tmp, offset; if (IEEE80211_IS_CHAN_5GHZ(c)) eep = &sc->eeprom.modalHeader[0]; else eep = &sc->eeprom.modalHeader[1]; /* Offset of chain 2. 
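 * (Editor's note: per-chain copies of these PHY registers sit at
 * 0x1000-byte strides, and the second RF path is apparently wired as
 * chain 2, hence the 2 * 0x1000 offset applied to
 * AR_PHY_SWITCH_CHAIN_0, AR_PHY_RXGAIN, etc. below.)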
*/ offset = 2 * 0x1000; tmp = le32toh(eep->antCtrlCommon); otus_write(sc, AR_PHY_SWITCH_COM, tmp); tmp = le32toh(eep->antCtrlChain[0]); otus_write(sc, AR_PHY_SWITCH_CHAIN_0, tmp); tmp = le32toh(eep->antCtrlChain[1]); otus_write(sc, AR_PHY_SWITCH_CHAIN_0 + offset, tmp); if (1 /* sc->sc_sco == AR_SCO_SCN */) { tmp = otus_phy_get_def(sc, AR_PHY_SETTLING); tmp &= ~(0x7f << 7); tmp |= (eep->switchSettling & 0x7f) << 7; otus_write(sc, AR_PHY_SETTLING, tmp); } tmp = otus_phy_get_def(sc, AR_PHY_DESIRED_SZ); tmp &= ~0xffff; tmp |= eep->pgaDesiredSize << 8 | eep->adcDesiredSize; otus_write(sc, AR_PHY_DESIRED_SZ, tmp); tmp = eep->txEndToXpaOff << 24 | eep->txEndToXpaOff << 16 | eep->txFrameToXpaOn << 8 | eep->txFrameToXpaOn; otus_write(sc, AR_PHY_RF_CTL4, tmp); tmp = otus_phy_get_def(sc, AR_PHY_RF_CTL3); tmp &= ~(0xff << 16); tmp |= eep->txEndToRxOn << 16; otus_write(sc, AR_PHY_RF_CTL3, tmp); tmp = otus_phy_get_def(sc, AR_PHY_CCA); tmp &= ~(0x7f << 12); tmp |= (eep->thresh62 & 0x7f) << 12; otus_write(sc, AR_PHY_CCA, tmp); tmp = otus_phy_get_def(sc, AR_PHY_RXGAIN); tmp &= ~(0x3f << 12); tmp |= (eep->txRxAttenCh[0] & 0x3f) << 12; otus_write(sc, AR_PHY_RXGAIN, tmp); tmp = otus_phy_get_def(sc, AR_PHY_RXGAIN + offset); tmp &= ~(0x3f << 12); tmp |= (eep->txRxAttenCh[1] & 0x3f) << 12; otus_write(sc, AR_PHY_RXGAIN + offset, tmp); tmp = otus_phy_get_def(sc, AR_PHY_GAIN_2GHZ); tmp &= ~(0x3f << 18); tmp |= (eep->rxTxMarginCh[0] & 0x3f) << 18; if (IEEE80211_IS_CHAN_5GHZ(c)) { tmp &= ~(0xf << 10); tmp |= (eep->bswMargin[0] & 0xf) << 10; } otus_write(sc, AR_PHY_GAIN_2GHZ, tmp); tmp = otus_phy_get_def(sc, AR_PHY_GAIN_2GHZ + offset); tmp &= ~(0x3f << 18); tmp |= (eep->rxTxMarginCh[1] & 0x3f) << 18; otus_write(sc, AR_PHY_GAIN_2GHZ + offset, tmp); tmp = otus_phy_get_def(sc, AR_PHY_TIMING_CTRL4); tmp &= ~(0x3f << 5 | 0x1f); tmp |= (eep->iqCalICh[0] & 0x3f) << 5 | (eep->iqCalQCh[0] & 0x1f); otus_write(sc, AR_PHY_TIMING_CTRL4, tmp); tmp = otus_phy_get_def(sc, AR_PHY_TIMING_CTRL4 + offset); tmp &= ~(0x3f << 5 | 0x1f); tmp |= (eep->iqCalICh[1] & 0x3f) << 5 | (eep->iqCalQCh[1] & 0x1f); otus_write(sc, AR_PHY_TIMING_CTRL4 + offset, tmp); tmp = otus_phy_get_def(sc, AR_PHY_TPCRG1); tmp &= ~(0xf << 16); tmp |= (eep->xpd & 0xf) << 16; otus_write(sc, AR_PHY_TPCRG1, tmp); return otus_write_barrier(sc); } int otus_program_phy(struct otus_softc *sc, struct ieee80211_channel *c) { const uint32_t *vals; int error, i; /* Select PHY programming based on band and bandwidth. */ if (IEEE80211_IS_CHAN_2GHZ(c)) { if (IEEE80211_IS_CHAN_HT40(c)) vals = ar5416_phy_vals_2ghz_40mhz; else vals = ar5416_phy_vals_2ghz_20mhz; } else { if (IEEE80211_IS_CHAN_HT40(c)) vals = ar5416_phy_vals_5ghz_40mhz; else vals = ar5416_phy_vals_5ghz_20mhz; } for (i = 0; i < nitems(ar5416_phy_regs); i++) otus_write(sc, AR_PHY(ar5416_phy_regs[i]), vals[i]); sc->phy_vals = vals; if (sc->eeprom.baseEepHeader.deviceType == 0x80) /* FEM */ if ((error = otus_set_board_values(sc, c)) != 0) return error; /* Initial Tx power settings. 
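 * (Assumption: each AR_PHY_POWER_TX_RATE register packs four per-rate
 * power bytes, so 0x3f3f3f3f programs every field to the 0x3f maximum
 * as an initial value; tighter per-channel limits would come from the
 * calibration data handling elsewhere.)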
*/ otus_write(sc, AR_PHY_POWER_TX_RATE_MAX, 0x7f); otus_write(sc, AR_PHY_POWER_TX_RATE1, 0x3f3f3f3f); otus_write(sc, AR_PHY_POWER_TX_RATE2, 0x3f3f3f3f); otus_write(sc, AR_PHY_POWER_TX_RATE3, 0x3f3f3f3f); otus_write(sc, AR_PHY_POWER_TX_RATE4, 0x3f3f3f3f); otus_write(sc, AR_PHY_POWER_TX_RATE5, 0x3f3f3f3f); otus_write(sc, AR_PHY_POWER_TX_RATE6, 0x3f3f3f3f); otus_write(sc, AR_PHY_POWER_TX_RATE7, 0x3f3f3f3f); otus_write(sc, AR_PHY_POWER_TX_RATE8, 0x3f3f3f3f); otus_write(sc, AR_PHY_POWER_TX_RATE9, 0x3f3f3f3f); if (IEEE80211_IS_CHAN_2GHZ(c)) otus_write(sc, AR_PWR_REG_PLL_ADDAC, 0x5163); else otus_write(sc, AR_PWR_REG_PLL_ADDAC, 0x5143); return otus_write_barrier(sc); } static __inline uint8_t otus_reverse_bits(uint8_t v) { v = ((v >> 1) & 0x55) | ((v & 0x55) << 1); v = ((v >> 2) & 0x33) | ((v & 0x33) << 2); v = ((v >> 4) & 0x0f) | ((v & 0x0f) << 4); return v; } int otus_set_rf_bank4(struct otus_softc *sc, struct ieee80211_channel *c) { uint8_t chansel, d0, d1; uint16_t data; int error; OTUS_LOCK_ASSERT(sc); d0 = 0; if (IEEE80211_IS_CHAN_5GHZ(c)) { chansel = (c->ic_freq - 4800) / 5; if (chansel & 1) d0 |= AR_BANK4_AMODE_REFSEL(2); else d0 |= AR_BANK4_AMODE_REFSEL(1); } else { d0 |= AR_BANK4_AMODE_REFSEL(2); if (c->ic_freq == 2484) { /* CH 14 */ d0 |= AR_BANK4_BMODE_LF_SYNTH_FREQ; chansel = 10 + (c->ic_freq - 2274) / 5; } else chansel = 16 + (c->ic_freq - 2272) / 5; chansel <<= 2; } d0 |= AR_BANK4_ADDR(1) | AR_BANK4_CHUP; d1 = otus_reverse_bits(chansel); /* Write bits 0-4 of d0 and d1. */ data = (d1 & 0x1f) << 5 | (d0 & 0x1f); otus_write(sc, AR_PHY(44), data); /* Write bits 5-7 of d0 and d1. */ data = (d1 >> 5) << 5 | (d0 >> 5); otus_write(sc, AR_PHY(58), data); if ((error = otus_write_barrier(sc)) == 0) otus_delay_ms(sc, 10); return error; } void otus_get_delta_slope(uint32_t coeff, uint32_t *exponent, uint32_t *mantissa) { #define COEFF_SCALE_SHIFT 24 uint32_t exp, man; /* exponent = 14 - floor(log2(coeff)) */ for (exp = 31; exp > 0; exp--) if (coeff & (1 << exp)) break; KASSERT(exp != 0, ("exp")); exp = 14 - (exp - COEFF_SCALE_SHIFT); /* mantissa = floor(coeff * 2^exponent + 0.5) */ man = coeff + (1 << (COEFF_SCALE_SHIFT - exp - 1)); *mantissa = man >> (COEFF_SCALE_SHIFT - exp); *exponent = exp - 16; #undef COEFF_SCALE_SHIFT } static int otus_set_chan(struct otus_softc *sc, struct ieee80211_channel *c, int assoc) { struct ieee80211com *ic = &sc->sc_ic; struct ar_cmd_frequency cmd; struct ar_rsp_frequency rsp; const uint32_t *vals; uint32_t coeff, exp, man, tmp; uint8_t code; int error, chan, i; error = 0; chan = ieee80211_chan2ieee(ic, c); OTUS_DPRINTF(sc, OTUS_DEBUG_RESET, "setting channel %d (%dMHz)\n", chan, c->ic_freq); tmp = IEEE80211_IS_CHAN_2GHZ(c) ? 0x105 : 0x104; otus_write(sc, AR_MAC_REG_DYNAMIC_SIFS_ACK, tmp); if ((error = otus_write_barrier(sc)) != 0) goto finish; /* Disable BB Heavy Clip. */ otus_write(sc, AR_PHY_HEAVY_CLIP_ENABLE, 0x200); if ((error = otus_write_barrier(sc)) != 0) goto finish; /* XXX Is that FREQ_START ? */ error = otus_cmd(sc, AR_CMD_FREQ_STRAT, NULL, 0, NULL, 0); if (error != 0) goto finish; /* Reprogram PHY and RF on channel band or bandwidth changes. */ if (sc->bb_reset || c->ic_flags != sc->sc_curchan->ic_flags) { OTUS_DPRINTF(sc, OTUS_DEBUG_RESET, "band switch\n"); /* Cold/Warm reset BB/ADDA. */ otus_write(sc, AR_PWR_REG_RESET, sc->bb_reset ? 
0x800 : 0x400); if ((error = otus_write_barrier(sc)) != 0) goto finish; otus_write(sc, AR_PWR_REG_RESET, 0); if ((error = otus_write_barrier(sc)) != 0) goto finish; sc->bb_reset = 0; if ((error = otus_program_phy(sc, c)) != 0) { device_printf(sc->sc_dev, "%s: could not program PHY\n", __func__); goto finish; } /* Select RF programming based on band. */ if (IEEE80211_IS_CHAN_5GHZ(c)) vals = ar5416_banks_vals_5ghz; else vals = ar5416_banks_vals_2ghz; for (i = 0; i < nitems(ar5416_banks_regs); i++) otus_write(sc, AR_PHY(ar5416_banks_regs[i]), vals[i]); if ((error = otus_write_barrier(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not program RF\n", __func__); goto finish; } code = AR_CMD_RF_INIT; } else { code = AR_CMD_FREQUENCY; } if ((error = otus_set_rf_bank4(sc, c)) != 0) goto finish; tmp = (sc->txmask == 0x5) ? 0x340 : 0x240; otus_write(sc, AR_PHY_TURBO, tmp); if ((error = otus_write_barrier(sc)) != 0) goto finish; /* Send firmware command to set channel. */ cmd.freq = htole32((uint32_t)c->ic_freq * 1000); cmd.dynht2040 = htole32(0); cmd.htena = htole32(1); /* Set Delta Slope (exponent and mantissa). */ coeff = (100 << 24) / c->ic_freq; otus_get_delta_slope(coeff, &exp, &man); cmd.dsc_exp = htole32(exp); cmd.dsc_man = htole32(man); OTUS_DPRINTF(sc, OTUS_DEBUG_RESET, "ds coeff=%u exp=%u man=%u\n", coeff, exp, man); /* For Short GI, coeff is 9/10 that of normal coeff. */ coeff = (9 * coeff) / 10; otus_get_delta_slope(coeff, &exp, &man); cmd.dsc_shgi_exp = htole32(exp); cmd.dsc_shgi_man = htole32(man); OTUS_DPRINTF(sc, OTUS_DEBUG_RESET, "ds shgi coeff=%u exp=%u man=%u\n", coeff, exp, man); /* Set wait time for AGC and noise calibration (100 or 200ms). */ cmd.check_loop_count = assoc ? htole32(2000) : htole32(1000); OTUS_DPRINTF(sc, OTUS_DEBUG_RESET, "%s\n", (code == AR_CMD_RF_INIT) ? "RF_INIT" : "FREQUENCY"); error = otus_cmd(sc, code, &cmd, sizeof cmd, &rsp, sizeof(rsp)); if (error != 0) goto finish; if ((rsp.status & htole32(AR_CAL_ERR_AGC | AR_CAL_ERR_NF_VAL)) != 0) { OTUS_DPRINTF(sc, OTUS_DEBUG_RESET, "status=0x%x\n", le32toh(rsp.status)); /* Force cold reset on next channel. */ sc->bb_reset = 1; } #ifdef USB_DEBUG if (otus_debug & OTUS_DEBUG_RESET) { device_printf(sc->sc_dev, "calibration status=0x%x\n", le32toh(rsp.status)); for (i = 0; i < 2; i++) { /* 2 Rx chains */ /* Sign-extend 9-bit NF values. */ device_printf(sc->sc_dev, "noisefloor chain %d=%d\n", i, (((int32_t)le32toh(rsp.nf[i])) << 4) >> 23); device_printf(sc->sc_dev, "noisefloor ext chain %d=%d\n", i, ((int32_t)le32toh(rsp.nf_ext[i])) >> 23); } } #endif for (i = 0; i < OTUS_NUM_CHAINS; i++) { sc->sc_nf[i] = ((((int32_t)le32toh(rsp.nf[i])) << 4) >> 23); } sc->sc_curchan = c; finish: return (error); } #ifdef notyet int otus_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, struct ieee80211_key *k) { struct otus_softc *sc = ic->ic_softc; struct otus_cmd_key cmd; /* Defer setting of WEP keys until interface is brought up. */ if ((ic->ic_if.if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) return 0; /* Do it in a process context. */ cmd.key = *k; cmd.associd = (ni != NULL) ? 
ni->ni_associd : 0; otus_do_async(sc, otus_set_key_cb, &cmd, sizeof cmd); return 0; } void otus_set_key_cb(struct otus_softc *sc, void *arg) { struct otus_cmd_key *cmd = arg; struct ieee80211_key *k = &cmd->key; struct ar_cmd_ekey key; uint16_t cipher; int error; memset(&key, 0, sizeof key); if (k->k_flags & IEEE80211_KEY_GROUP) { key.uid = htole16(k->k_id); IEEE80211_ADDR_COPY(key.macaddr, sc->sc_ic.ic_myaddr); key.macaddr[0] |= 0x80; } else { key.uid = htole16(OTUS_UID(cmd->associd)); IEEE80211_ADDR_COPY(key.macaddr, ni->ni_macaddr); } key.kix = htole16(0); /* Map net80211 cipher to hardware. */ switch (k->k_cipher) { case IEEE80211_CIPHER_WEP40: cipher = AR_CIPHER_WEP64; break; case IEEE80211_CIPHER_WEP104: cipher = AR_CIPHER_WEP128; break; case IEEE80211_CIPHER_TKIP: cipher = AR_CIPHER_TKIP; break; case IEEE80211_CIPHER_CCMP: cipher = AR_CIPHER_AES; break; default: return; } key.cipher = htole16(cipher); memcpy(key.key, k->k_key, MIN(k->k_len, 16)); error = otus_cmd(sc, AR_CMD_EKEY, &key, sizeof key, NULL, 0); if (error != 0 || k->k_cipher != IEEE80211_CIPHER_TKIP) return; /* TKIP: set Tx/Rx MIC Key. */ key.kix = htole16(1); memcpy(key.key, k->k_key + 16, 16); (void)otus_cmd(sc, AR_CMD_EKEY, &key, sizeof key, NULL, 0); } void otus_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, struct ieee80211_key *k) { struct otus_softc *sc = ic->ic_softc; struct otus_cmd_key cmd; if (!(ic->ic_if.if_flags & IFF_RUNNING) || ic->ic_state != IEEE80211_S_RUN) return; /* Nothing to do. */ /* Do it in a process context. */ cmd.key = *k; cmd.associd = (ni != NULL) ? ni->ni_associd : 0; otus_do_async(sc, otus_delete_key_cb, &cmd, sizeof cmd); } void otus_delete_key_cb(struct otus_softc *sc, void *arg) { struct otus_cmd_key *cmd = arg; struct ieee80211_key *k = &cmd->key; uint32_t uid; if (k->k_flags & IEEE80211_KEY_GROUP) uid = htole32(k->k_id); else uid = htole32(OTUS_UID(cmd->associd)); (void)otus_cmd(sc, AR_CMD_DKEY, &uid, sizeof uid, NULL, 0); } #endif /* * XXX TODO: check if we have to be doing any calibration in the host * or whether it's purely a firmware thing. */ void otus_calibrate_to(void *arg, int pending) { #if 0 struct otus_softc *sc = arg; device_printf(sc->sc_dev, "%s: called\n", __func__); struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; int s; if (usbd_is_dying(sc->sc_udev)) return; usbd_ref_incr(sc->sc_udev); s = splnet(); ni = ic->ic_bss; ieee80211_amrr_choose(&sc->amrr, ni, &((struct otus_node *)ni)->amn); splx(s); if (!usbd_is_dying(sc->sc_udev)) timeout_add_sec(&sc->calib_to, 1); usbd_ref_decr(sc->sc_udev); #endif } int otus_set_bssid(struct otus_softc *sc, const uint8_t *bssid) { OTUS_LOCK_ASSERT(sc); otus_write(sc, AR_MAC_REG_BSSID_L, bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24); otus_write(sc, AR_MAC_REG_BSSID_H, bssid[4] | bssid[5] << 8); return otus_write_barrier(sc); } int otus_set_macaddr(struct otus_softc *sc, const uint8_t *addr) { OTUS_LOCK_ASSERT(sc); otus_write(sc, AR_MAC_REG_MAC_ADDR_L, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); otus_write(sc, AR_MAC_REG_MAC_ADDR_H, addr[4] | addr[5] << 8); return otus_write_barrier(sc); } /* Default single-LED. */ void otus_led_newstate_type1(struct otus_softc *sc) { /* TBD */ device_printf(sc->sc_dev, "%s: TODO\n", __func__); } /* NETGEAR, dual-LED. */ void otus_led_newstate_type2(struct otus_softc *sc) { /* TBD */ device_printf(sc->sc_dev, "%s: TODO\n", __func__); } /* NETGEAR, single-LED/3 colors (blue, red, purple.) 
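 * (Per the #if 0 sketch in the body: LED0 is red for 2 GHz, LED1 is
 * blue for 5 GHz; solid when associated, alternating blink while
 * scanning, and presumably both lit at once reads as purple.)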
/* Default single-LED. */
void
otus_led_newstate_type1(struct otus_softc *sc)
{
	/* TBD */
	device_printf(sc->sc_dev, "%s: TODO\n", __func__);
}

/* NETGEAR, dual-LED. */
void
otus_led_newstate_type2(struct otus_softc *sc)
{
	/* TBD */
	device_printf(sc->sc_dev, "%s: TODO\n", __func__);
}

/* NETGEAR, single-LED/3 colors (blue, red, purple). */
void
otus_led_newstate_type3(struct otus_softc *sc)
{
#if 0
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	uint32_t state = sc->led_state;

	OTUS_LOCK_ASSERT(sc);

	if (!vap) {
		state = 0;	/* LED off. */
	} else if (vap->iv_state == IEEE80211_S_INIT) {
		state = 0;	/* LED off. */
	} else if (vap->iv_state == IEEE80211_S_RUN) {
		/* Associated, LED always on. */
		if (IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan))
			state = AR_LED0_ON;	/* 2GHz => Red. */
		else
			state = AR_LED1_ON;	/* 5GHz => Blue. */
	} else {
		/* Scanning, blink LED. */
		state ^= AR_LED0_ON | AR_LED1_ON;
		if (IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan))
			state &= ~AR_LED1_ON;
		else
			state &= ~AR_LED0_ON;
	}
	if (state != sc->led_state) {
		otus_write(sc, AR_GPIO_REG_PORT_DATA, state);
		if (otus_write_barrier(sc) == 0)
			sc->led_state = state;
	}
#endif
}

static uint8_t zero_macaddr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

/*
 * Set up operating mode, MAC/BSS address and RX filter.
 */
static void
otus_set_operating_mode(struct otus_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap;
	uint32_t cam_mode = AR_MAC_CAM_DEFAULTS;
	uint32_t rx_ctrl = AR_MAC_RX_CTRL_DEAGG | AR_MAC_RX_CTRL_SHORT_FILTER;
	uint32_t sniffer = AR_MAC_SNIFFER_DEFAULTS;
	uint32_t enc_mode = 0x78;	/* XXX */
	const uint8_t *macaddr;
	uint8_t bssid[IEEE80211_ADDR_LEN];
	struct ieee80211_node *ni;

	OTUS_LOCK_ASSERT(sc);

	/*
	 * If we're in sniffer mode or we don't have a MAC
	 * address assigned, ensure it gets reset to all-zero.
	 */
	IEEE80211_ADDR_COPY(bssid, zero_macaddr);
	vap = TAILQ_FIRST(&ic->ic_vaps);
	macaddr = vap ? vap->iv_myaddr : ic->ic_macaddr;

	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		if (vap) {
			ni = ieee80211_ref_node(vap->iv_bss);
			IEEE80211_ADDR_COPY(bssid, ni->ni_bssid);
			ieee80211_free_node(ni);
		}
		cam_mode |= AR_MAC_CAM_STA;
		rx_ctrl |= AR_MAC_RX_CTRL_PASS_TO_HOST;
		break;
	case IEEE80211_M_MONITOR:
		/*
		 * Note: monitor mode ends up causing the MAC to
		 * generate ACK frames for everything it sees.
		 * So don't do that; instead just put it in STA mode
		 * and disable RX filters.
		 */
	default:
		cam_mode |= AR_MAC_CAM_STA;
		rx_ctrl |= AR_MAC_RX_CTRL_PASS_TO_HOST;
		break;
	}

	/*
	 * TODO: if/when we do hardware encryption, ensure it's
	 * disabled if the NIC is in monitor mode.
	 */
	otus_write(sc, AR_MAC_REG_SNIFFER, sniffer);
	otus_write(sc, AR_MAC_REG_CAM_MODE, cam_mode);
	otus_write(sc, AR_MAC_REG_ENCRYPTION, enc_mode);
	otus_write(sc, AR_MAC_REG_RX_CONTROL, rx_ctrl);
	otus_set_macaddr(sc, macaddr);
	otus_set_bssid(sc, bssid);
	/* XXX barrier? */
}

static void
otus_set_rx_filter(struct otus_softc *sc)
{
//	struct ieee80211com *ic = &sc->sc_ic;

	OTUS_LOCK_ASSERT(sc);

#if 0
	if (ic->ic_allmulti > 0 || ic->ic_promisc > 0 ||
	    ic->ic_opmode == IEEE80211_M_MONITOR) {
		otus_write(sc, AR_MAC_REG_FRAMETYPE_FILTER, 0xff00ffff);
	} else {
#endif
		/* Filter any control frames, BAR is bit 24. */
		otus_write(sc, AR_MAC_REG_FRAMETYPE_FILTER, 0x0500ffff);
#if 0
	}
#endif
}
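#if 0
/*
 * Illustrative decode, not driver code, of the 0x0500ffff mask
 * written by otus_set_rx_filter() above.  Assumption: control-frame
 * subtype n maps to bit (16 + n), which matches the "BAR is bit 24"
 * comment, since BlockAck Request is control subtype 8.  The macro
 * names are hypothetical.
 */
#define FTF_LOW_ALL	0x0000ffffU		/* bits 0..15 all set */
#define FTF_CTL_BAR	(1U << (16 + 8))	/* 0x01000000, BlockAck Req */
#define FTF_CTL_PSPOLL	(1U << (16 + 10))	/* 0x04000000, PS-Poll */

/* FTF_LOW_ALL | FTF_CTL_BAR | FTF_CTL_PSPOLL == 0x0500ffff */
#endif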
int
otus_init(struct otus_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	OTUS_UNLOCK_ASSERT(sc);

	OTUS_LOCK(sc);

	/* Drain any pending TX frames. */
	otus_drain_mbufq(sc);

	/* Init MAC. */
	if ((error = otus_init_mac(sc)) != 0) {
		OTUS_UNLOCK(sc);
		device_printf(sc->sc_dev,
		    "%s: could not initialize MAC\n", __func__);
		return error;
	}

	otus_set_operating_mode(sc);
	otus_set_rx_filter(sc);
	/*
	 * XXX the operating mode was already set above; this second
	 * call looks redundant and is kept as-is.
	 */
	(void) otus_set_operating_mode(sc);

	sc->bb_reset = 1;	/* Force cold reset. */

	if ((error = otus_set_chan(sc, ic->ic_curchan, 0)) != 0) {
		OTUS_UNLOCK(sc);
		device_printf(sc->sc_dev,
		    "%s: could not set channel\n", __func__);
		return error;
	}

	/* Start Rx. */
	otus_write(sc, AR_MAC_REG_DMA_TRIGGER, 0x100);
	(void)otus_write_barrier(sc);

	sc->sc_running = 1;

	OTUS_UNLOCK(sc);
	return 0;
}

void
otus_stop(struct otus_softc *sc)
{
#if 0
	int s;
#endif

	OTUS_UNLOCK_ASSERT(sc);

	OTUS_LOCK(sc);
	sc->sc_running = 0;
	sc->sc_tx_timer = 0;
	OTUS_UNLOCK(sc);

	taskqueue_drain_timeout(taskqueue_thread, &sc->scan_to);
	taskqueue_drain_timeout(taskqueue_thread, &sc->calib_to);
	taskqueue_drain(taskqueue_thread, &sc->tx_task);

	OTUS_LOCK(sc);
	sc->sc_running = 0;

	/* Stop Rx. */
	otus_write(sc, AR_MAC_REG_DMA_TRIGGER, 0);
	(void)otus_write_barrier(sc);

	/* Drain any pending TX frames. */
	otus_drain_mbufq(sc);
	OTUS_UNLOCK(sc);
}
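#if 0
/*
 * Illustrative sketch, not driver code: the otus_write() /
 * otus_write_barrier() pattern used throughout this file.  As this
 * editor understands it, otus_write() only queues a (reg, val) pair
 * in a software buffer, and otus_write_barrier() flushes all queued
 * pairs to the firmware in a single write command; verify against
 * the definitions earlier in this file.
 */
static int
example_write_then_flush(struct otus_softc *sc)
{
	/* Nothing goes on the wire for these two calls... */
	otus_write(sc, AR_MAC_REG_BSSID_L, 0x33221100);
	otus_write(sc, AR_MAC_REG_BSSID_H, 0x00005544);

	/* ...until the barrier pushes both in one firmware command. */
	return otus_write_barrier(sc);
}
#endif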
diff --git a/sys/dev/rtwn/rtl8812a/r12a_caps.c b/sys/dev/rtwn/rtl8812a/r12a_caps.c
index ab175b41ec70..dfe18deb504f 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_caps.c
+++ b/sys/dev/rtwn/rtl8812a/r12a_caps.c
@@ -1,119 +1,119 @@
/*-
 * Copyright (c) 2016 Andriy Voskoboinyk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

int
r12a_ioctl_net(struct ieee80211com *ic, u_long cmd, void *data)
{
	struct rtwn_softc *sc = ic->ic_softc;
	struct r12a_softc *rs = sc->sc_priv;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFCAP:
	{
		struct ieee80211vap *vap;
		int changed, rxmask;

		rxmask = ifr->ifr_reqcap & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);

		RTWN_LOCK(sc);
		changed = 0;
		if (!(rs->rs_flags & R12A_RXCKSUM_EN) ^
		    !(ifr->ifr_reqcap & IFCAP_RXCSUM)) {
			rs->rs_flags ^= R12A_RXCKSUM_EN;
			changed = 1;
		}
		if (!(rs->rs_flags & R12A_RXCKSUM6_EN) ^
		    !(ifr->ifr_reqcap & IFCAP_RXCSUM_IPV6)) {
			rs->rs_flags ^= R12A_RXCKSUM6_EN;
			changed = 1;
		}
		if (changed) {
			if (rxmask == 0)
				sc->rcr &= ~R12A_RCR_TCP_OFFLD_EN;
			else
				sc->rcr |= R12A_RCR_TCP_OFFLD_EN;

			if (sc->sc_flags & RTWN_RUNNING)
				rtwn_rxfilter_set(sc);
		}
		RTWN_UNLOCK(sc);

		IEEE80211_LOCK(ic);	/* XXX */
		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
-			struct ifnet *ifp = vap->iv_ifp;
+			if_t ifp = vap->iv_ifp;

-			ifp->if_capenable &=
-			    ~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
-			ifp->if_capenable |= rxmask;
+			if_setcapenablebit(ifp, 0,
+			    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
+			if_setcapenablebit(ifp, rxmask, 0);
		}
		IEEE80211_UNLOCK(ic);
		break;
	}
	default:
		error = ENOTTY;	/* for net80211 */
		break;
	}

	return (error);
}
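#if 0
/*
 * Illustrative note, not driver code: the "!(a & A) ^ !(b & B)"
 * tests in r12a_ioctl_net() above.  '!' collapses each bit-test to
 * 0 or 1, so the XOR is true exactly when the driver flag and the
 * requested capability disagree, i.e. when the flag needs toggling.
 * The helper name is hypothetical.
 */
static int
must_toggle(uint32_t flags, uint32_t flag_bit, int reqcap, int cap_bit)
{
	return (!(flags & flag_bit) ^ !(reqcap & cap_bit));
}
#endif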
diff --git a/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c b/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c
index dd5223ce969b..abb786132fb7 100644
--- a/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c
+++ b/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c
@@ -1,297 +1,297 @@
/*-
 * Copyright (c) 2016 Andriy Voskoboinyk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

void	r12au_attach(struct rtwn_usb_softc *);

static void
r12au_postattach(struct rtwn_softc *sc)
{
	struct rtwn_usb_softc *uc = RTWN_USB_SOFTC(sc);
	struct r12a_softc *rs = sc->sc_priv;

	if (usbd_get_speed(uc->uc_udev) == USB_SPEED_SUPER) {
		rs->ac_usb_dma_size = 0x07;
		rs->ac_usb_dma_time = 0x1a;
	} else {
		rs->ac_usb_dma_size = 0x01;
		rs->ac_usb_dma_time = 0x10;
	}

	if (rs->chip & R12A_CHIP_C_CUT)
		sc->sc_rf_read = r12a_c_cut_rf_read;
	else
		sc->sc_rf_read = r12a_rf_read;

	if (rs->board_type == R92C_BOARD_TYPE_MINICARD ||
	    rs->board_type == R92C_BOARD_TYPE_SOLO ||
	    rs->board_type == R92C_BOARD_TYPE_COMBO)
		sc->sc_set_led = r88e_set_led;
	else
		sc->sc_set_led = r12a_set_led;

	if (!(rs->ext_pa_2g || rs->ext_lna_2g ||
	    rs->ext_pa_5g || rs->ext_lna_5g))
		sc->mac_prog = &rtl8812au_mac_no_ext_pa_lna[0];

	sc->sc_ic.ic_ioctl = r12a_ioctl_net;
}

void
r12a_vap_preattach(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
	struct r12a_softc *rs = sc->sc_priv;
-	struct ifnet *ifp = vap->iv_ifp;
+	if_t ifp = vap->iv_ifp;

-	ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
+	if_setcapabilities(ifp, IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
	RTWN_LOCK(sc);
	if (rs->rs_flags & R12A_RXCKSUM_EN)
-		ifp->if_capenable |= IFCAP_RXCSUM;
+		if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
	if (rs->rs_flags & R12A_RXCKSUM6_EN)
-		ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
+		if_setcapenablebit(ifp, IFCAP_RXCSUM_IPV6, 0);
	RTWN_UNLOCK(sc);
}

static void
r12a_attach_private(struct rtwn_softc *sc)
{
	struct r12a_softc *rs;

	rs = malloc(sizeof(struct r12a_softc), M_RTWN_PRIV,
	    M_WAITOK | M_ZERO);

	rs->rs_flags = R12A_RXCKSUM_EN | R12A_RXCKSUM6_EN;

	rs->rs_fix_spur = r12a_fix_spur;
	rs->rs_set_band_2ghz = r12a_set_band_2ghz;
	rs->rs_set_band_5ghz = r12a_set_band_5ghz;
	rs->rs_init_burstlen = r12au_init_burstlen;
	rs->rs_init_ampdu_fwhw = r12au_init_ampdu_fwhw;
	rs->rs_crystalcap_write = r12a_crystalcap_write;
#ifndef RTWN_WITHOUT_UCODE
	rs->rs_iq_calib_fw_supported = r12a_iq_calib_fw_supported;
#endif
	rs->rs_iq_calib_sw = r12a_iq_calib_sw;

	rs->ampdu_max_time = 0x70;
	sc->sc_priv = rs;
}

void
r12a_detach_private(struct rtwn_softc *sc)
{
	struct r12a_softc *rs = sc->sc_priv;

	free(rs, M_RTWN_PRIV);
}

static void
r12a_read_chipid_vendor(struct rtwn_softc *sc, uint32_t reg_sys_cfg)
{
	struct r12a_softc *rs = sc->sc_priv;

	if (MS(reg_sys_cfg, R92C_SYS_CFG_CHIP_VER_RTL) == 1)
		rs->chip |= R12A_CHIP_C_CUT;
}

static void
r12au_adj_devcaps(struct rtwn_softc *sc)
{
	struct r12a_softc *rs = sc->sc_priv;
	struct ieee80211com *ic = &sc->sc_ic;

	if (rs->chip & R12A_CHIP_C_CUT) {
		ic->ic_htcaps |= IEEE80211_HTCAP_LDPC |
		    IEEE80211_HTC_TXLDPC;
	}

	/* TODO: STBC, VHT etc */
}

void
r12au_attach(struct rtwn_usb_softc *uc)
{
	struct rtwn_softc *sc = &uc->uc_sc;

	/* USB part. */
	uc->uc_align_rx = r12au_align_rx;
	uc->tx_agg_desc_num = 1;
	/* Common part. */
	sc->sc_flags = RTWN_FLAG_EXT_HDR;

	sc->sc_set_chan = r12a_set_chan;
	sc->sc_fill_tx_desc = r12a_fill_tx_desc;
	sc->sc_fill_tx_desc_raw = r12a_fill_tx_desc_raw;
	sc->sc_fill_tx_desc_null = r12a_fill_tx_desc_null;
	sc->sc_dump_tx_desc = r12au_dump_tx_desc;
	sc->sc_tx_radiotap_flags = r12a_tx_radiotap_flags;
	sc->sc_rx_radiotap_flags = r12a_rx_radiotap_flags;
	sc->sc_get_rx_stats = r12a_get_rx_stats;
	sc->sc_get_rssi_cck = r88e_get_rssi_cck;
	sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm;
	sc->sc_classify_intr = r12au_classify_intr;
	sc->sc_handle_tx_report = r12a_ratectl_tx_complete;
	sc->sc_handle_c2h_report = r12a_handle_c2h_report;
	sc->sc_check_frame = r12a_check_frame_checksum;
	sc->sc_rf_write = r12a_rf_write;
	sc->sc_check_condition = r12a_check_condition;
	sc->sc_efuse_postread = rtwn_nop_softc;
	sc->sc_parse_rom = r12a_parse_rom;
	sc->sc_power_on = r12a_power_on;
	sc->sc_power_off = r12a_power_off;
#ifndef RTWN_WITHOUT_UCODE
	sc->sc_fw_reset = r12a_fw_reset;
	sc->sc_fw_download_enable = r12a_fw_download_enable;
#endif
	sc->sc_llt_init = r92c_llt_init;
	sc->sc_set_page_size = r12a_set_page_size;
	sc->sc_lc_calib = r12a_lc_calib;
	sc->sc_iq_calib = r12a_iq_calib;
	sc->sc_read_chipid_vendor = r12a_read_chipid_vendor;
	sc->sc_adj_devcaps = r12au_adj_devcaps;
	sc->sc_vap_preattach = r12a_vap_preattach;
	sc->sc_postattach = r12au_postattach;
	sc->sc_detach_private = r12a_detach_private;
#ifndef RTWN_WITHOUT_UCODE
	sc->sc_set_media_status = r12a_set_media_status;
	sc->sc_set_rsvd_page = r88e_set_rsvd_page;
	sc->sc_set_pwrmode = r12a_set_pwrmode;
	sc->sc_set_rssi = rtwn_nop_softc;	/* XXX TODO */
#else
	sc->sc_set_media_status = rtwn_nop_softc_int;
#endif
	sc->sc_beacon_init = r12a_beacon_init;
	sc->sc_beacon_enable = r92c_beacon_enable;
	sc->sc_beacon_set_rate = r12a_beacon_set_rate;
	sc->sc_beacon_select = rtwn_nop_softc_int;
	sc->sc_temp_measure = r88e_temp_measure;
	sc->sc_temp_read = r88e_temp_read;
	sc->sc_init_tx_agg = r92cu_init_tx_agg;
	sc->sc_init_rx_agg = r12au_init_rx_agg;
	sc->sc_init_ampdu = r12au_init_ampdu;
	sc->sc_init_intr = r12a_init_intr;
	sc->sc_init_edca = r12a_init_edca;
	sc->sc_init_bb = r12a_init_bb;
	sc->sc_init_rf = r12a_init_rf;
	sc->sc_init_antsel = r12a_init_antsel;
	sc->sc_post_init = r12au_post_init;
	sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc;

	sc->chan_list_5ghz[0] = r12a_chan_5ghz_0;
	sc->chan_list_5ghz[1] = r12a_chan_5ghz_1;
	sc->chan_list_5ghz[2] = r12a_chan_5ghz_2;
	sc->chan_num_5ghz[0] = nitems(r12a_chan_5ghz_0);
	sc->chan_num_5ghz[1] = nitems(r12a_chan_5ghz_1);
	sc->chan_num_5ghz[2] = nitems(r12a_chan_5ghz_2);

	sc->mac_prog = &rtl8812au_mac[0];
	sc->mac_size = nitems(rtl8812au_mac);
	sc->bb_prog = &rtl8812au_bb[0];
	sc->bb_size = nitems(rtl8812au_bb);
	sc->agc_prog = &rtl8812au_agc[0];
	sc->agc_size = nitems(rtl8812au_agc);
	sc->rf_prog = &rtl8812au_rf[0];

	sc->name = "RTL8812AU";
	sc->fwname = "rtwn-rtl8812aufw";
	sc->fwsig = 0x950;

	sc->page_count = R12A_TX_PAGE_COUNT;
	sc->pktbuf_count = R12A_TXPKTBUF_COUNT;
	sc->ackto = 0x80;
	sc->npubqpages = R12A_PUBQ_NPAGES;
	sc->page_size = R12A_TX_PAGE_SIZE;
	sc->txdesc_len = sizeof(struct r12au_tx_desc);
	sc->efuse_maxlen = R12A_EFUSE_MAX_LEN;
	sc->efuse_maplen = R12A_EFUSE_MAP_LEN;
	sc->rx_dma_size = R12A_RX_DMA_BUFFER_SIZE;

	sc->macid_limit = R12A_MACID_MAX + 1;
	sc->cam_entry_limit = R12A_CAM_ENTRY_COUNT;
	sc->fwsize_limit = R12A_MAX_FW_SIZE;
	sc->temp_delta = R88E_CALIB_THRESHOLD;

	sc->bcn_status_reg[0] = R92C_TDECTRL;
	sc->bcn_status_reg[1] = R92C_TDECTRL;
	sc->rcr = R12A_RCR_DIS_CHK_14 | R12A_RCR_VHT_ACK |
	    R12A_RCR_TCP_OFFLD_EN;

	sc->ntxchains = 2;
	sc->nrxchains = 2;
	r12a_attach_private(sc);
}
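#if 0
/*
 * Illustrative summary, not part of the diff: the if_t accessor
 * conversion this change performs on the two rtwn files above.
 * Direct struct ifnet field access is replaced with the net/if
 * accessor KPI:
 *
 *   ifp->if_capabilities = caps;   ->  if_setcapabilities(ifp, caps);
 *   ifp->if_capenable &= ~mask;    ->  if_setcapenablebit(ifp, 0, mask);
 *   ifp->if_capenable |= bits;     ->  if_setcapenablebit(ifp, bits, 0);
 *
 * In r12a_ioctl_net() the clear and the set are kept as two separate
 * calls because the set mask (rxmask) overlaps the cleared mask;
 * folding them into a single if_setcapenablebit() would depend on
 * the order in which the KPI applies its set and clear arguments.
 */
#endif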