diff --git a/sys/arm/broadcom/bcm2835/bcm2835_bsc.c b/sys/arm/broadcom/bcm2835/bcm2835_bsc.c
index c7de4df0e8fb..a4cbef7d528f 100644
--- a/sys/arm/broadcom/bcm2835/bcm2835_bsc.c
+++ b/sys/arm/broadcom/bcm2835/bcm2835_bsc.c
@@ -1,724 +1,722 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Tsubai Masanari.
 * Copyright (c) 2012 Oleksandr Tymoshenko
 * Copyright (c) 2013 Luiz Otavio O Souza
 * Copyright (c) 2017 Ian Lepore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include

/*
 * Driver for bcm2835 i2c-compatible two-wire bus, named 'BSC' on this SoC.
 *
 * This controller can only perform complete transfers, it does not provide
 * low-level control over sending start/repeat-start/stop sequences on the bus.
 * In addition, bugs in the silicon make it somewhat difficult to perform a
 * repeat-start, and limit the repeat-start to a read following a write on
 * the same slave device. (The i2c protocol allows a repeat start to change
 * direction or not, and change slave address or not at any time.)
 *
 * The repeat-start bug and workaround are described in a problem report at
 * https://github.com/raspberrypi/linux/issues/254 with the crucial part being
 * in a comment block from a fragment of a GPU i2c driver, containing this:
 *
 * -----------------------------------------------------------------------------
 * - See i2c.v: The I2C peripheral samples the values for rw_bit and xfer_count
 * - in the IDLE state if start is set.
 * -
 * - We want to generate a ReSTART not a STOP at the end of the TX phase. In
 * - order to do that we must ensure the state machine goes RACK1 -> RACK2 ->
 * - SRSTRT1 (not RACK1 -> RACK2 -> SSTOP1).
 * -
 * - So, in the RACK2 state when (TX) xfer_count==0 we must therefore have
 * - already set, ready to be sampled:
 * - READ ; rw_bit <= I2CC bit 0 -- must be "read"
 * - ST; start <= I2CC bit 7 -- must be "Go" in order to not issue STOP
 * - DLEN; xfer_count <= I2CDLEN -- must be equal to our read amount
 * -
 * - The plan to do this is:
 * - 1. Start the sub-address write, but don't let it finish
 * - (keep xfer_count > 0)
 * - 2. Populate READ, DLEN and ST in preparation for ReSTART read sequence
 * - 3. Let TX finish (write the rest of the data)
 * - 4.
Read back data as it arrives * ----------------------------------------------------------------------------- * * The transfer function below scans the list of messages passed to it, looking * for a read following a write to the same slave. When it finds that, it * starts the write without prefilling the tx fifo, which holds xfer_count>0, * then presets the direction, length, and start command for the following read, * as described above. Then the tx fifo is filled and the rest of the transfer * proceeds as normal, with the controller automatically supplying a * repeat-start on the bus when the write operation finishes. * * XXX I suspect the controller may be able to do a repeat-start on any * write->read or write->write transition, even when the slave addresses differ. * It's unclear whether the slave address can be prestaged along with the * direction and length while the write xfer_count is being held at zero. In * fact, if it can't do this, then it couldn't be used to read EDID data. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" static struct ofw_compat_data compat_data[] = { {"broadcom,bcm2835-bsc", 1}, {"brcm,bcm2708-i2c", 1}, {"brcm,bcm2835-i2c", 1}, {NULL, 0} }; #define DEVICE_DEBUGF(sc, lvl, fmt, args...) \ if ((lvl) <= (sc)->sc_debug) \ device_printf((sc)->sc_dev, fmt, ##args) #define DEBUGF(sc, lvl, fmt, args...) \ if ((lvl) <= (sc)->sc_debug) \ printf(fmt, ##args) static void bcm_bsc_intr(void *); static int bcm_bsc_detach(device_t); static void bcm_bsc_modifyreg(struct bcm_bsc_softc *sc, uint32_t off, uint32_t mask, uint32_t value) { uint32_t reg; mtx_assert(&sc->sc_mtx, MA_OWNED); reg = BCM_BSC_READ(sc, off); reg &= ~mask; reg |= value; BCM_BSC_WRITE(sc, off, reg); } static int bcm_bsc_clock_proc(SYSCTL_HANDLER_ARGS) { struct bcm_bsc_softc *sc; uint32_t clk; sc = (struct bcm_bsc_softc *)arg1; BCM_BSC_LOCK(sc); clk = BCM_BSC_READ(sc, BCM_BSC_CLOCK); BCM_BSC_UNLOCK(sc); clk &= 0xffff; if (clk == 0) clk = 32768; clk = BCM_BSC_CORE_CLK / clk; return (sysctl_handle_int(oidp, &clk, 0, req)); } static int bcm_bsc_clkt_proc(SYSCTL_HANDLER_ARGS) { struct bcm_bsc_softc *sc; uint32_t clkt; int error; sc = (struct bcm_bsc_softc *)arg1; BCM_BSC_LOCK(sc); clkt = BCM_BSC_READ(sc, BCM_BSC_CLKT); BCM_BSC_UNLOCK(sc); clkt &= 0xffff; error = sysctl_handle_int(oidp, &clkt, sizeof(clkt), req); if (error != 0 || req->newptr == NULL) return (error); BCM_BSC_LOCK(sc); BCM_BSC_WRITE(sc, BCM_BSC_CLKT, clkt & 0xffff); BCM_BSC_UNLOCK(sc); return (0); } static int bcm_bsc_fall_proc(SYSCTL_HANDLER_ARGS) { struct bcm_bsc_softc *sc; uint32_t clk, reg; int error; sc = (struct bcm_bsc_softc *)arg1; BCM_BSC_LOCK(sc); reg = BCM_BSC_READ(sc, BCM_BSC_DELAY); BCM_BSC_UNLOCK(sc); reg >>= 16; error = sysctl_handle_int(oidp, ®, sizeof(reg), req); if (error != 0 || req->newptr == NULL) return (error); BCM_BSC_LOCK(sc); clk = BCM_BSC_READ(sc, BCM_BSC_CLOCK); clk = BCM_BSC_CORE_CLK / clk; if (reg > clk / 2) reg = clk / 2 - 1; bcm_bsc_modifyreg(sc, BCM_BSC_DELAY, 0xffff0000, reg << 16); BCM_BSC_UNLOCK(sc); return (0); } static int bcm_bsc_rise_proc(SYSCTL_HANDLER_ARGS) { struct bcm_bsc_softc *sc; uint32_t clk, reg; int error; sc = (struct bcm_bsc_softc *)arg1; BCM_BSC_LOCK(sc); reg = BCM_BSC_READ(sc, BCM_BSC_DELAY); BCM_BSC_UNLOCK(sc); reg &= 0xffff; error = sysctl_handle_int(oidp, ®, sizeof(reg), req); if (error != 0 || req->newptr == NULL) return (error); BCM_BSC_LOCK(sc); clk = 
BCM_BSC_READ(sc, BCM_BSC_CLOCK); clk = BCM_BSC_CORE_CLK / clk; if (reg > clk / 2) reg = clk / 2 - 1; bcm_bsc_modifyreg(sc, BCM_BSC_DELAY, 0xffff, reg); BCM_BSC_UNLOCK(sc); return (0); } static void bcm_bsc_sysctl_init(struct bcm_bsc_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree_node; struct sysctl_oid_list *tree; /* * Add system sysctl tree/handlers. */ ctx = device_get_sysctl_ctx(sc->sc_dev); tree_node = device_get_sysctl_tree(sc->sc_dev); tree = SYSCTL_CHILDREN(tree_node); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "frequency", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, sc, sizeof(*sc), bcm_bsc_clock_proc, "IU", "I2C BUS clock frequency"); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "clock_stretch", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, sc, sizeof(*sc), bcm_bsc_clkt_proc, "IU", "I2C BUS clock stretch timeout"); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "fall_edge_delay", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, sc, sizeof(*sc), bcm_bsc_fall_proc, "IU", "I2C BUS falling edge delay"); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "rise_edge_delay", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, sc, sizeof(*sc), bcm_bsc_rise_proc, "IU", "I2C BUS rising edge delay"); SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0, "Enable debug; 1=reads/writes, 2=add starts/stops"); } static void bcm_bsc_reset(struct bcm_bsc_softc *sc) { /* Enable the BSC Controller, disable interrupts. */ BCM_BSC_WRITE(sc, BCM_BSC_CTRL, BCM_BSC_CTRL_I2CEN); /* Clear pending interrupts. */ BCM_BSC_WRITE(sc, BCM_BSC_STATUS, BCM_BSC_STATUS_CLKT | BCM_BSC_STATUS_ERR | BCM_BSC_STATUS_DONE); /* Clear the FIFO. */ bcm_bsc_modifyreg(sc, BCM_BSC_CTRL, BCM_BSC_CTRL_CLEAR0, BCM_BSC_CTRL_CLEAR0); } static int bcm_bsc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "BCM2708/2835 BSC controller"); return (BUS_PROBE_DEFAULT); } static int bcm_bsc_attach(device_t dev) { struct bcm_bsc_softc *sc; int rid; sc = device_get_softc(dev); sc->sc_dev = dev; rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } /* Hook up our interrupt handler. */ if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, bcm_bsc_intr, sc, &sc->sc_intrhand)) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot setup the interrupt handler\n"); return (ENXIO); } mtx_init(&sc->sc_mtx, "bcm_bsc", NULL, MTX_DEF); bcm_bsc_sysctl_init(sc); /* Enable the BSC controller. Flush the FIFO. */ BCM_BSC_LOCK(sc); bcm_bsc_reset(sc); BCM_BSC_UNLOCK(sc); sc->sc_iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY); if (sc->sc_iicbus == NULL) { bcm_bsc_detach(dev); return (ENXIO); } /* Probe and attach the iicbus when interrupts are available. 
*/ bus_delayed_attach_children(dev); return (0); } static int bcm_bsc_detach(device_t dev) { struct bcm_bsc_softc *sc; bus_generic_detach(dev); sc = device_get_softc(dev); - if (sc->sc_iicbus != NULL) - device_delete_child(dev, sc->sc_iicbus); mtx_destroy(&sc->sc_mtx); if (sc->sc_intrhand) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); return (0); } static void bcm_bsc_empty_rx_fifo(struct bcm_bsc_softc *sc) { uint32_t status; /* Assumes sc_totlen > 0 and BCM_BSC_STATUS_RXD is asserted on entry. */ do { if (sc->sc_resid == 0) { sc->sc_data = sc->sc_curmsg->buf; sc->sc_dlen = sc->sc_curmsg->len; sc->sc_resid = sc->sc_dlen; ++sc->sc_curmsg; } do { *sc->sc_data = BCM_BSC_READ(sc, BCM_BSC_DATA); DEBUGF(sc, 1, "0x%02x ", *sc->sc_data); ++sc->sc_data; --sc->sc_resid; --sc->sc_totlen; status = BCM_BSC_READ(sc, BCM_BSC_STATUS); } while (sc->sc_resid > 0 && (status & BCM_BSC_STATUS_RXD)); } while (sc->sc_totlen > 0 && (status & BCM_BSC_STATUS_RXD)); } static void bcm_bsc_fill_tx_fifo(struct bcm_bsc_softc *sc) { uint32_t status; /* Assumes sc_totlen > 0 and BCM_BSC_STATUS_TXD is asserted on entry. */ do { if (sc->sc_resid == 0) { sc->sc_data = sc->sc_curmsg->buf; sc->sc_dlen = sc->sc_curmsg->len; sc->sc_resid = sc->sc_dlen; ++sc->sc_curmsg; } do { BCM_BSC_WRITE(sc, BCM_BSC_DATA, *sc->sc_data); DEBUGF(sc, 1, "0x%02x ", *sc->sc_data); ++sc->sc_data; --sc->sc_resid; --sc->sc_totlen; status = BCM_BSC_READ(sc, BCM_BSC_STATUS); } while (sc->sc_resid > 0 && (status & BCM_BSC_STATUS_TXD)); /* * If a repeat-start was pending and we just hit the end of a tx * buffer, see if it's also the end of the writes that preceeded * the repeat-start. If so, log the repeat-start and the start * of the following read, and return because we're not writing * anymore (and TXD will be true because there's room to write * in the fifo). */ if (sc->sc_replen > 0 && sc->sc_resid == 0) { sc->sc_replen -= sc->sc_dlen; if (sc->sc_replen == 0) { DEBUGF(sc, 1, " err=0\n"); DEVICE_DEBUGF(sc, 2, "rstart 0x%02x\n", sc->sc_curmsg->slave | 0x01); DEVICE_DEBUGF(sc, 1, "read 0x%02x len %d: ", sc->sc_curmsg->slave | 0x01, sc->sc_totlen); sc->sc_flags |= BCM_I2C_READ; return; } } } while (sc->sc_totlen > 0 && (status & BCM_BSC_STATUS_TXD)); } static void bcm_bsc_intr(void *arg) { struct bcm_bsc_softc *sc; uint32_t status; sc = (struct bcm_bsc_softc *)arg; BCM_BSC_LOCK(sc); /* The I2C interrupt is shared among all the BSC controllers. */ if ((sc->sc_flags & BCM_I2C_BUSY) == 0) { BCM_BSC_UNLOCK(sc); return; } status = BCM_BSC_READ(sc, BCM_BSC_STATUS); DEBUGF(sc, 4, " ", status); /* RXD and DONE can assert together, empty fifo before checking done. */ if ((sc->sc_flags & BCM_I2C_READ) && (status & BCM_BSC_STATUS_RXD)) bcm_bsc_empty_rx_fifo(sc); /* Check for completion. */ if (status & (BCM_BSC_STATUS_ERRBITS | BCM_BSC_STATUS_DONE)) { sc->sc_flags |= BCM_I2C_DONE; if (status & BCM_BSC_STATUS_ERRBITS) sc->sc_flags |= BCM_I2C_ERROR; /* Disable interrupts. */ bcm_bsc_reset(sc); wakeup(sc); } else if (!(sc->sc_flags & BCM_I2C_READ)) { /* * Don't check for TXD until after determining whether the * transfer is complete; TXD will be asserted along with ERR or * DONE if there is room in the fifo. 
*/ if ((status & BCM_BSC_STATUS_TXD) && sc->sc_totlen > 0) bcm_bsc_fill_tx_fifo(sc); } BCM_BSC_UNLOCK(sc); } static int bcm_bsc_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { struct bcm_bsc_softc *sc; struct iic_msg *endmsgs, *nxtmsg; uint32_t readctl, status; int err; uint16_t curlen; uint8_t curisread, curslave, nxtisread, nxtslave; sc = device_get_softc(dev); BCM_BSC_LOCK(sc); /* If the controller is busy wait until it is available. */ while (sc->sc_flags & BCM_I2C_BUSY) mtx_sleep(dev, &sc->sc_mtx, 0, "bscbusw", 0); /* Now we have control over the BSC controller. */ sc->sc_flags = BCM_I2C_BUSY; DEVICE_DEBUGF(sc, 3, "Transfer %d msgs\n", nmsgs); /* Clear the FIFO and the pending interrupts. */ bcm_bsc_reset(sc); /* * Perform all the transfers requested in the array of msgs. Note that * it is bcm_bsc_empty_rx_fifo() and bcm_bsc_fill_tx_fifo() that advance * sc->sc_curmsg through the array of messages, as the data from each * message is fully consumed, but it is this loop that notices when we * have no more messages to process. */ err = 0; sc->sc_resid = 0; sc->sc_curmsg = msgs; endmsgs = &msgs[nmsgs]; while (sc->sc_curmsg < endmsgs) { readctl = 0; curslave = sc->sc_curmsg->slave >> 1; curisread = sc->sc_curmsg->flags & IIC_M_RD; sc->sc_replen = 0; sc->sc_totlen = sc->sc_curmsg->len; /* * Scan for scatter/gather IO (same slave and direction) or * repeat-start (read following write for the same slave). */ for (nxtmsg = sc->sc_curmsg + 1; nxtmsg < endmsgs; ++nxtmsg) { nxtslave = nxtmsg->slave >> 1; if (curslave == nxtslave) { nxtisread = nxtmsg->flags & IIC_M_RD; if (curisread == nxtisread) { /* * Same slave and direction, this * message will be part of the same * transfer as the previous one. */ sc->sc_totlen += nxtmsg->len; continue; } else if (curisread == IIC_M_WR) { /* * Read after write to same slave means * repeat-start, remember how many bytes * come before the repeat-start, switch * the direction to IIC_M_RD, and gather * up following reads to the same slave. */ curisread = IIC_M_RD; sc->sc_replen = sc->sc_totlen; sc->sc_totlen += nxtmsg->len; continue; } } break; } /* * curslave and curisread temporaries from above may refer to * the after-repstart msg, reset them to reflect sc_curmsg. */ curisread = (sc->sc_curmsg->flags & IIC_M_RD) ? 1 : 0; curslave = sc->sc_curmsg->slave | curisread; /* Write the slave address. */ BCM_BSC_WRITE(sc, BCM_BSC_SLAVE, curslave >> 1); DEVICE_DEBUGF(sc, 2, "start 0x%02x\n", curslave); /* * Either set up read length and direction variables for a * simple transfer or get the hardware started on the first * piece of a transfer that involves a repeat-start and set up * the read length and direction vars for the second piece. */ if (sc->sc_replen == 0) { DEVICE_DEBUGF(sc, 1, "%-6s 0x%02x len %d: ", (curisread) ? "read" : "write", curslave, sc->sc_totlen); curlen = sc->sc_totlen; if (curisread) { readctl = BCM_BSC_CTRL_READ; sc->sc_flags |= BCM_I2C_READ; } else { readctl = 0; sc->sc_flags &= ~BCM_I2C_READ; } } else { DEVICE_DEBUGF(sc, 1, "%-6s 0x%02x len %d: ", (curisread) ? "read" : "write", curslave, sc->sc_replen); /* * Start the write transfer with an empty fifo and wait * for the 'transfer active' status bit to light up; * that indicates that the hardware has latched the * direction and length for the write, and we can safely * reload those registers and issue the start for the * following read; interrupts are not enabled here. 
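 *
 * As an illustrative sketch (editorial addition, not part of this change), the
 * caller-visible pattern that reaches this repeat-start path is an IIC_M_WR
 * message followed by an IIC_M_RD message to the same slave, for example a
 * register read built on the generic iicbus API; "addr", "regaddr" and
 * "busdev" below are hypothetical names used only for illustration:
 *
 *	uint8_t regaddr = 0x10, val;
 *	struct iic_msg msgs[2] = {
 *	    { .slave = addr << 1, .flags = IIC_M_WR, .len = 1, .buf = &regaddr },
 *	    { .slave = addr << 1, .flags = IIC_M_RD, .len = 1, .buf = &val },
 *	};
 *	error = iicbus_transfer(busdev, msgs, nitems(msgs));
 *
 * The scan loop above coalesces such a pair into a single
 * write/repeat-start/read sequence on the wire instead of two transactions
 * separated by a stop.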
*/ BCM_BSC_WRITE(sc, BCM_BSC_DLEN, sc->sc_replen); BCM_BSC_WRITE(sc, BCM_BSC_CTRL, BCM_BSC_CTRL_I2CEN | BCM_BSC_CTRL_ST); do { status = BCM_BSC_READ(sc, BCM_BSC_STATUS); if (status & BCM_BSC_STATUS_ERR) { /* no ACK on slave addr */ err = EIO; goto xfer_done; } } while ((status & BCM_BSC_STATUS_TA) == 0); /* * Set curlen and readctl for the repeat-start read that * we need to set up below, but set sc_flags to write, * because that is the operation in progress right now. */ curlen = sc->sc_totlen - sc->sc_replen; readctl = BCM_BSC_CTRL_READ; sc->sc_flags &= ~BCM_I2C_READ; } /* * Start the transfer with interrupts enabled, then if doing a * write, fill the tx fifo. Not prefilling the fifo until after * this start command is the key workaround for making * repeat-start work, and it's harmless to do it in this order * for a regular write too. */ BCM_BSC_WRITE(sc, BCM_BSC_DLEN, curlen); BCM_BSC_WRITE(sc, BCM_BSC_CTRL, readctl | BCM_BSC_CTRL_I2CEN | BCM_BSC_CTRL_ST | BCM_BSC_CTRL_INT_ALL); if (!(sc->sc_curmsg->flags & IIC_M_RD)) { bcm_bsc_fill_tx_fifo(sc); } /* Wait for the transaction to complete. */ while (err == 0 && !(sc->sc_flags & BCM_I2C_DONE)) { err = mtx_sleep(sc, &sc->sc_mtx, 0, "bsciow", hz); } /* Check for errors. */ if (err == 0 && (sc->sc_flags & BCM_I2C_ERROR)) err = EIO; xfer_done: DEBUGF(sc, 1, " err=%d\n", err); DEVICE_DEBUGF(sc, 2, "stop\n"); if (err != 0) break; } /* Disable interrupts, clean fifo, etc. */ bcm_bsc_reset(sc); /* Clean the controller flags. */ sc->sc_flags = 0; /* Wake up the threads waiting for bus. */ wakeup(dev); BCM_BSC_UNLOCK(sc); return (err); } static int bcm_bsc_iicbus_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct bcm_bsc_softc *sc; uint32_t busfreq; sc = device_get_softc(dev); BCM_BSC_LOCK(sc); bcm_bsc_reset(sc); if (sc->sc_iicbus == NULL) busfreq = 100000; else busfreq = IICBUS_GET_FREQUENCY(sc->sc_iicbus, speed); BCM_BSC_WRITE(sc, BCM_BSC_CLOCK, BCM_BSC_CORE_CLK / busfreq); BCM_BSC_UNLOCK(sc); return (IIC_ENOADDR); } static phandle_t bcm_bsc_get_node(device_t bus, device_t dev) { /* We only have one child, the I2C bus, which needs our own node. */ return (ofw_bus_get_node(bus)); } static device_method_t bcm_bsc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bcm_bsc_probe), DEVMETHOD(device_attach, bcm_bsc_attach), DEVMETHOD(device_detach, bcm_bsc_detach), /* iicbus interface */ DEVMETHOD(iicbus_reset, bcm_bsc_iicbus_reset), DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_transfer, bcm_bsc_transfer), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, bcm_bsc_get_node), DEVMETHOD_END }; static driver_t bcm_bsc_driver = { "iichb", bcm_bsc_methods, sizeof(struct bcm_bsc_softc), }; DRIVER_MODULE(iicbus, bcm2835_bsc, iicbus_driver, 0, 0); DRIVER_MODULE(bcm2835_bsc, simplebus, bcm_bsc_driver, 0, 0); diff --git a/sys/arm/freescale/imx/imx_i2c.c b/sys/arm/freescale/imx/imx_i2c.c index c1d5c8c974a8..3190ec294ab4 100644 --- a/sys/arm/freescale/imx/imx_i2c.c +++ b/sys/arm/freescale/imx/imx_i2c.c @@ -1,729 +1,726 @@ /*- * Copyright (C) 2008-2009 Semihalf, Michal Hajduk * Copyright (c) 2012, 2013 The FreeBSD Foundation * Copyright (c) 2015 Ian Lepore * All rights reserved. * * Portions of this software were developed by Oleksandr Rybalko * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * I2C driver for Freescale i.MX hardware. * * Note that the hardware is capable of running as both a master and a slave. * This driver currently implements only master-mode operations. * * This driver supports multi-master i2c buses, by detecting bus arbitration * loss and returning IIC_EBUSBSY status. Notably, it does not do any kind of * retries if some other master jumps onto the bus and interrupts one of our * transfer cycles resulting in arbitration loss in mid-transfer. The caller * must handle retries in a way that makes sense for the slave being addressed. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #include #include #include #include #include #if defined(__aarch64__) #define IMX_ENABLE_CLOCKS #endif #ifdef IMX_ENABLE_CLOCKS #include #endif #define I2C_ADDR_REG 0x00 /* I2C slave address register */ #define I2C_FDR_REG 0x04 /* I2C frequency divider register */ #define I2C_CONTROL_REG 0x08 /* I2C control register */ #define I2C_STATUS_REG 0x0C /* I2C status register */ #define I2C_DATA_REG 0x10 /* I2C data register */ #define I2C_DFSRR_REG 0x14 /* I2C Digital Filter Sampling rate */ #define I2CCR_MEN (1 << 7) /* Module enable */ #define I2CCR_MSTA (1 << 5) /* Master/slave mode */ #define I2CCR_MTX (1 << 4) /* Transmit/receive mode */ #define I2CCR_TXAK (1 << 3) /* Transfer acknowledge */ #define I2CCR_RSTA (1 << 2) /* Repeated START */ #define I2CSR_MCF (1 << 7) /* Data transfer */ #define I2CSR_MASS (1 << 6) /* Addressed as a slave */ #define I2CSR_MBB (1 << 5) /* Bus busy */ #define I2CSR_MAL (1 << 4) /* Arbitration lost */ #define I2CSR_SRW (1 << 2) /* Slave read/write */ #define I2CSR_MIF (1 << 1) /* Module interrupt */ #define I2CSR_RXAK (1 << 0) /* Received acknowledge */ #define I2C_BAUD_RATE_FAST 0x31 #define I2C_BAUD_RATE_DEF 0x3F #define I2C_DFSSR_DIV 0x10 /* * A table of available divisors and the associated coded values to put in the * FDR register to achieve that divisor.. There is no algorithmic relationship I * can see between divisors and the codes that go into the register. The table * begins and ends with entries that handle insane configuration values. 
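 *
 * Illustrative worked example (editorial addition, assuming the 66 MHz IPG
 * clock cited in i2c_reset() below): a requested 400 kHz bus gives
 * div = howmany(66000000, 400000) = 165; the first table entry with a divisor
 * of at least 165 is 192 (regcode 0x31), so the actual bus clock becomes
 * 66 MHz / 192, roughly 344 kHz, the nearest rate that does not exceed the
 * requested speed.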
*/ struct clkdiv { u_int divisor; u_int regcode; }; static struct clkdiv clkdiv_table[] = { { 0, 0x20 }, { 22, 0x20 }, { 24, 0x21 }, { 26, 0x22 }, { 28, 0x23 }, { 30, 0x00 }, { 32, 0x24 }, { 36, 0x25 }, { 40, 0x26 }, { 42, 0x03 }, { 44, 0x27 }, { 48, 0x28 }, { 52, 0x05 }, { 56, 0x29 }, { 60, 0x06 }, { 64, 0x2a }, { 72, 0x2b }, { 80, 0x2c }, { 88, 0x09 }, { 96, 0x2d }, { 104, 0x0a }, { 112, 0x2e }, { 128, 0x2f }, { 144, 0x0c }, { 160, 0x30 }, { 192, 0x31 }, { 224, 0x32 }, { 240, 0x0f }, { 256, 0x33 }, { 288, 0x10 }, { 320, 0x34 }, { 384, 0x35 }, { 448, 0x36 }, { 480, 0x13 }, { 512, 0x37 }, { 576, 0x14 }, { 640, 0x38 }, { 768, 0x39 }, { 896, 0x3a }, { 960, 0x17 }, { 1024, 0x3b }, { 1152, 0x18 }, { 1280, 0x3c }, { 1536, 0x3d }, { 1792, 0x3e }, { 1920, 0x1b }, { 2048, 0x3f }, { 2304, 0x1c }, { 2560, 0x1d }, { 3072, 0x1e }, { 3840, 0x1f }, {UINT_MAX, 0x1f} }; static struct ofw_compat_data compat_data[] = { {"fsl,imx21-i2c", 1}, {"fsl,imx6q-i2c", 1}, {"fsl,imx-i2c", 1}, {NULL, 0} }; struct i2c_softc { device_t dev; device_t iicbus; struct resource *res; int rid; sbintime_t byte_time_sbt; int rb_pinctl_idx; gpio_pin_t rb_sclpin; gpio_pin_t rb_sdapin; u_int debug; u_int slave; #ifdef IMX_ENABLE_CLOCKS clk_t ipgclk; #endif }; #define DEVICE_DEBUGF(sc, lvl, fmt, args...) \ if ((lvl) <= (sc)->debug) \ device_printf((sc)->dev, fmt, ##args) #define DEBUGF(sc, lvl, fmt, args...) \ if ((lvl) <= (sc)->debug) \ printf(fmt, ##args) static phandle_t i2c_get_node(device_t, device_t); static int i2c_probe(device_t); static int i2c_attach(device_t); static int i2c_detach(device_t); static int i2c_repeated_start(device_t, u_char, int); static int i2c_start(device_t, u_char, int); static int i2c_stop(device_t); static int i2c_reset(device_t, u_char, u_char, u_char *); static int i2c_read(device_t, char *, int, int *, int, int); static int i2c_write(device_t, const char *, int, int *, int); static device_method_t i2c_methods[] = { DEVMETHOD(device_probe, i2c_probe), DEVMETHOD(device_attach, i2c_attach), DEVMETHOD(device_detach, i2c_detach), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, i2c_get_node), DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_repeated_start, i2c_repeated_start), DEVMETHOD(iicbus_start, i2c_start), DEVMETHOD(iicbus_stop, i2c_stop), DEVMETHOD(iicbus_reset, i2c_reset), DEVMETHOD(iicbus_read, i2c_read), DEVMETHOD(iicbus_write, i2c_write), DEVMETHOD(iicbus_transfer, iicbus_transfer_gen), DEVMETHOD_END }; static driver_t i2c_driver = { "imx_i2c", i2c_methods, sizeof(struct i2c_softc), }; DRIVER_MODULE(imx_i2c, simplebus, i2c_driver, 0, 0); DRIVER_MODULE(ofw_iicbus, imx_i2c, ofw_iicbus_driver, 0, 0); MODULE_DEPEND(imx_i2c, iicbus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); static phandle_t i2c_get_node(device_t bus, device_t dev) { /* * Share controller node with iicbus device */ return ofw_bus_get_node(bus); } static __inline void i2c_write_reg(struct i2c_softc *sc, bus_size_t off, uint8_t val) { bus_write_1(sc->res, off, val); } static __inline uint8_t i2c_read_reg(struct i2c_softc *sc, bus_size_t off) { return (bus_read_1(sc->res, off)); } static __inline void i2c_flag_set(struct i2c_softc *sc, bus_size_t off, uint8_t mask) { uint8_t status; status = i2c_read_reg(sc, off); status |= mask; i2c_write_reg(sc, off, status); } /* Wait for bus to become busy or not-busy. 
*/ static int wait_for_busbusy(struct i2c_softc *sc, int wantbusy) { int retry, srb; retry = 1000; while (retry --) { srb = i2c_read_reg(sc, I2C_STATUS_REG) & I2CSR_MBB; if ((srb && wantbusy) || (!srb && !wantbusy)) return (IIC_NOERR); DELAY(1); } return (IIC_ETIMEOUT); } /* Wait for transfer to complete, optionally check RXAK. */ static int wait_for_xfer(struct i2c_softc *sc, int checkack) { int retry, sr; /* * Sleep for about the time it takes to transfer a byte (with precision * set to tolerate 5% oversleep). We calculate the approximate byte * transfer time when we set the bus speed divisor. Slaves are allowed * to do clock-stretching so the actual transfer time can be larger, but * this gets the bulk of the waiting out of the way without tying up the * processor the whole time. */ pause_sbt("imxi2c", sc->byte_time_sbt, sc->byte_time_sbt / 20, 0); retry = 10000; while (retry --) { sr = i2c_read_reg(sc, I2C_STATUS_REG); if (sr & I2CSR_MIF) { if (sr & I2CSR_MAL) return (IIC_EBUSERR); else if (checkack && (sr & I2CSR_RXAK)) return (IIC_ENOACK); else return (IIC_NOERR); } DELAY(1); } return (IIC_ETIMEOUT); } /* * Implement the error handling shown in the state diagram of the imx6 reference * manual. If there was an error, then: * - Clear master mode (MSTA and MTX). * - Wait for the bus to become free or for a timeout to happen. * - Disable the controller. */ static int i2c_error_handler(struct i2c_softc *sc, int error) { if (error != 0) { i2c_write_reg(sc, I2C_STATUS_REG, 0); i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN); wait_for_busbusy(sc, false); i2c_write_reg(sc, I2C_CONTROL_REG, 0); } return (error); } static int i2c_recover_getsda(void *ctx) { bool active; gpio_pin_is_active(((struct i2c_softc *)ctx)->rb_sdapin, &active); return (active); } static void i2c_recover_setsda(void *ctx, int value) { gpio_pin_set_active(((struct i2c_softc *)ctx)->rb_sdapin, value); } static int i2c_recover_getscl(void *ctx) { bool active; gpio_pin_is_active(((struct i2c_softc *)ctx)->rb_sclpin, &active); return (active); } static void i2c_recover_setscl(void *ctx, int value) { gpio_pin_set_active(((struct i2c_softc *)ctx)->rb_sclpin, value); } static int i2c_recover_bus(struct i2c_softc *sc) { struct iicrb_pin_access pins; int err; /* * If we have gpio pinmux config, reconfigure the pins to gpio mode, * invoke iic_recover_bus which checks for a hung bus and bitbangs a * recovery sequence if necessary, then configure the pins back to i2c * mode (idx 0). 
*/ if (sc->rb_pinctl_idx == 0) return (0); fdt_pinctrl_configure(sc->dev, sc->rb_pinctl_idx); pins.ctx = sc; pins.getsda = i2c_recover_getsda; pins.setsda = i2c_recover_setsda; pins.getscl = i2c_recover_getscl; pins.setscl = i2c_recover_setscl; err = iic_recover_bus(&pins); fdt_pinctrl_configure(sc->dev, 0); return (err); } static int i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Freescale i.MX I2C"); return (BUS_PROBE_DEFAULT); } static int i2c_attach(device_t dev) { char wrkstr[16]; struct i2c_softc *sc; phandle_t node; int err, cfgidx; sc = device_get_softc(dev); sc->dev = dev; sc->rid = 0; #ifdef IMX_ENABLE_CLOCKS if (clk_get_by_ofw_index(sc->dev, 0, 0, &sc->ipgclk) != 0) { device_printf(dev, "could not get ipg clock"); return (ENOENT); } err = clk_enable(sc->ipgclk); if (err != 0) { device_printf(sc->dev, "could not enable ipg clock\n"); return (err); } #endif sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not allocate resources"); return (ENXIO); } sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY); if (sc->iicbus == NULL) { device_printf(dev, "could not add iicbus child"); return (ENXIO); } /* Set up debug-enable sysctl. */ SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->debug, 0, "Enable debug; 1=reads/writes, 2=add starts/stops"); /* * Set up for bus recovery using gpio pins, if the pinctrl and gpio * properties are present. This is optional. If all the config data is * not in place, we just don't do gpio bitbang bus recovery. */ node = ofw_bus_get_node(sc->dev); err = gpio_pin_get_by_ofw_property(dev, node, "scl-gpios", &sc->rb_sclpin); if (err != 0) goto no_recovery; err = gpio_pin_get_by_ofw_property(dev, node, "sda-gpios", &sc->rb_sdapin); if (err != 0) goto no_recovery; /* * Preset the gpio pins to output high (idle bus state). The signal * won't actually appear on the pins until the bus recovery code changes * the pinmux config from i2c to gpio. */ gpio_pin_setflags(sc->rb_sclpin, GPIO_PIN_OUTPUT); gpio_pin_setflags(sc->rb_sdapin, GPIO_PIN_OUTPUT); gpio_pin_set_active(sc->rb_sclpin, true); gpio_pin_set_active(sc->rb_sdapin, true); /* * Obtain the index of pinctrl node for bus recovery using gpio pins, * then confirm that pinctrl properties exist for that index and for the * default pinctrl-0. If sc->rb_pinctl_idx is non-zero, the reset code * will also do a bus recovery, so setting this value must be last. */ err = ofw_bus_find_string_index(node, "pinctrl-names", "gpio", &cfgidx); if (err == 0) { snprintf(wrkstr, sizeof(wrkstr), "pinctrl-%d", cfgidx); if (OF_hasprop(node, "pinctrl-0") && OF_hasprop(node, wrkstr)) sc->rb_pinctl_idx = cfgidx; } no_recovery: /* We don't do a hardware reset here because iicbus_attach() does it. */ /* Probe and attach the iicbus when interrupts are available. 
*/ bus_delayed_attach_children(dev); return (0); } static int i2c_detach(device_t dev) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); #ifdef IMX_ENABLE_CLOCKS error = clk_disable(sc->ipgclk); if (error != 0) { device_printf(sc->dev, "could not disable ipg clock\n"); return (error); } #endif if ((error = bus_generic_detach(sc->dev)) != 0) { device_printf(sc->dev, "cannot detach child devices\n"); return (error); } - if (sc->iicbus != NULL) - device_delete_child(dev, sc->iicbus); - /* Release bus-recover pins; gpio_pin_release() handles NULL args. */ gpio_pin_release(sc->rb_sclpin); gpio_pin_release(sc->rb_sdapin); if (sc->res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); return (0); } static int i2c_repeated_start(device_t dev, u_char slave, int timeout) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); if ((i2c_read_reg(sc, I2C_STATUS_REG) & I2CSR_MBB) == 0) { return (IIC_EBUSERR); } /* * Set repeated start condition, delay (per reference manual, min 156nS) * before writing slave address, wait for ack after write. */ i2c_flag_set(sc, I2C_CONTROL_REG, I2CCR_RSTA); DELAY(1); i2c_write_reg(sc, I2C_STATUS_REG, 0x0); i2c_write_reg(sc, I2C_DATA_REG, slave); sc->slave = slave; DEVICE_DEBUGF(sc, 2, "rstart 0x%02x\n", sc->slave); error = wait_for_xfer(sc, true); return (i2c_error_handler(sc, error)); } static int i2c_start_ll(device_t dev, u_char slave, int timeout) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN); DELAY(10); /* Delay for controller to sample bus state. */ if (i2c_read_reg(sc, I2C_STATUS_REG) & I2CSR_MBB) { return (i2c_error_handler(sc, IIC_EBUSERR)); } i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_MSTA | I2CCR_MTX); if ((error = wait_for_busbusy(sc, true)) != IIC_NOERR) return (i2c_error_handler(sc, error)); i2c_write_reg(sc, I2C_STATUS_REG, 0); i2c_write_reg(sc, I2C_DATA_REG, slave); sc->slave = slave; DEVICE_DEBUGF(sc, 2, "start 0x%02x\n", sc->slave); error = wait_for_xfer(sc, true); return (i2c_error_handler(sc, error)); } static int i2c_start(device_t dev, u_char slave, int timeout) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); /* * Invoke the low-level code to put the bus into master mode and address * the given slave. If that fails, idle the controller and attempt a * bus recovery, and then try again one time. Signaling a start and * addressing the slave is the only operation that a low-level driver * can safely retry without any help from the upper layers that know * more about the slave device. */ if ((error = i2c_start_ll(dev, slave, timeout)) != 0) { i2c_write_reg(sc, I2C_CONTROL_REG, 0x0); if ((error = i2c_recover_bus(sc)) != 0) return (error); error = i2c_start_ll(dev, slave, timeout); } return (error); } static int i2c_stop(device_t dev) { struct i2c_softc *sc; sc = device_get_softc(dev); i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN); wait_for_busbusy(sc, false); i2c_write_reg(sc, I2C_CONTROL_REG, 0); DEVICE_DEBUGF(sc, 2, "stop 0x%02x\n", sc->slave); return (IIC_NOERR); } static int i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldadr) { struct i2c_softc *sc; u_int busfreq, div, i, ipgfreq; #ifdef IMX_ENABLE_CLOCKS int err; uint64_t freq; #endif sc = device_get_softc(dev); DEVICE_DEBUGF(sc, 1, "reset\n"); /* * Look up the divisor that gives the nearest speed that doesn't exceed * the configured value for the bus. 
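 *
 * For instance (editorial note, using the 66 MHz IPG figure cited in the
 * comment below): a nominal 100 kHz bus gives
 * div = howmany(66000000, 100000) = 660, which the clkdiv table rounds up to
 * 768 (regcode 0x39), for an actual bus clock of about 85.9 kHz; at nine clock
 * cycles per byte that works out to roughly the 104 us byte time mentioned
 * below.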
*/ #ifdef IMX_ENABLE_CLOCKS err = clk_get_freq(sc->ipgclk, &freq); if (err != 0) { device_printf(sc->dev, "cannot get frequency\n"); return (err); } ipgfreq = (int32_t)freq; #else ipgfreq = imx_ccm_ipg_hz(); #endif busfreq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); div = howmany(ipgfreq, busfreq); for (i = 0; i < nitems(clkdiv_table); i++) { if (clkdiv_table[i].divisor >= div) break; } /* * Calculate roughly how long it will take to transfer a byte (which * requires 9 clock cycles) at the new bus speed. This value is used to * pause() while waiting for transfer-complete. With a 66MHz IPG clock * and the actual i2c bus speeds that leads to, for nominal 100KHz and * 400KHz bus speeds the transfer times are roughly 104uS and 22uS. */ busfreq = ipgfreq / clkdiv_table[i].divisor; sc->byte_time_sbt = SBT_1US * (9000000 / busfreq); /* * Disable the controller (do the reset), and set the new clock divisor. */ i2c_write_reg(sc, I2C_STATUS_REG, 0x0); i2c_write_reg(sc, I2C_CONTROL_REG, 0x0); i2c_write_reg(sc, I2C_FDR_REG, (uint8_t)clkdiv_table[i].regcode); /* * Now that the controller is idle, perform bus recovery. If the bus * isn't hung, this a fairly fast no-op. */ return (i2c_recover_bus(sc)); } static int i2c_read(device_t dev, char *buf, int len, int *read, int last, int delay) { struct i2c_softc *sc; int error, reg; sc = device_get_softc(dev); *read = 0; DEVICE_DEBUGF(sc, 1, "read 0x%02x len %d: ", sc->slave, len); if (len) { if (len == 1) i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_MSTA | I2CCR_TXAK); else i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_MSTA); /* Dummy read to prime the receiver. */ i2c_write_reg(sc, I2C_STATUS_REG, 0x0); i2c_read_reg(sc, I2C_DATA_REG); } error = 0; *read = 0; while (*read < len) { if ((error = wait_for_xfer(sc, false)) != IIC_NOERR) break; i2c_write_reg(sc, I2C_STATUS_REG, 0x0); if (last) { if (*read == len - 2) { /* NO ACK on last byte */ i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_MSTA | I2CCR_TXAK); } else if (*read == len - 1) { /* Transfer done, signal stop. */ i2c_write_reg(sc, I2C_CONTROL_REG, I2CCR_MEN | I2CCR_TXAK); wait_for_busbusy(sc, false); } } reg = i2c_read_reg(sc, I2C_DATA_REG); DEBUGF(sc, 1, "0x%02x ", reg); *buf++ = reg; (*read)++; } DEBUGF(sc, 1, "\n"); return (i2c_error_handler(sc, error)); } static int i2c_write(device_t dev, const char *buf, int len, int *sent, int timeout) { struct i2c_softc *sc; int error; sc = device_get_softc(dev); error = 0; *sent = 0; DEVICE_DEBUGF(sc, 1, "write 0x%02x len %d: ", sc->slave, len); while (*sent < len) { DEBUGF(sc, 1, "0x%02x ", *buf); i2c_write_reg(sc, I2C_STATUS_REG, 0x0); i2c_write_reg(sc, I2C_DATA_REG, *buf++); if ((error = wait_for_xfer(sc, true)) != IIC_NOERR) break; (*sent)++; } DEBUGF(sc, 1, "\n"); return (i2c_error_handler(sc, error)); } diff --git a/sys/arm/freescale/imx/imx_spi.c b/sys/arm/freescale/imx/imx_spi.c index 53d9f739b97c..dac42a800e9e 100644 --- a/sys/arm/freescale/imx/imx_spi.c +++ b/sys/arm/freescale/imx/imx_spi.c @@ -1,608 +1,605 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Driver for imx Enhanced Configurable SPI; master-mode only. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" #define ECSPI_RXDATA 0x00 #define ECSPI_TXDATA 0x04 #define ECSPI_CTLREG 0x08 #define CTLREG_BLEN_SHIFT 20 #define CTLREG_BLEN_MASK 0x0fff #define CTLREG_CSEL_SHIFT 18 #define CTLREG_CSEL_MASK 0x03 #define CTLREG_DRCTL_SHIFT 16 #define CTLREG_DRCTL_MASK 0x03 #define CTLREG_PREDIV_SHIFT 12 #define CTLREG_PREDIV_MASK 0x0f #define CTLREG_POSTDIV_SHIFT 8 #define CTLREG_POSTDIV_MASK 0x0f #define CTLREG_CMODE_SHIFT 4 #define CTLREG_CMODE_MASK 0x0f #define CTLREG_CMODES_MASTER (CTLREG_CMODE_MASK << CTLREG_CMODE_SHIFT) #define CTLREG_SMC (1u << 3) #define CTLREG_XCH (1u << 2) #define CTLREG_HT (1u << 1) #define CTLREG_EN (1u << 0) #define ECSPI_CFGREG 0x0c #define CFGREG_HTLEN_SHIFT 24 #define CFGREG_SCLKCTL_SHIFT 20 #define CFGREG_DATACTL_SHIFT 16 #define CFGREG_SSPOL_SHIFT 12 #define CFGREG_SSCTL_SHIFT 8 #define CFGREG_SCLKPOL_SHIFT 4 #define CFGREG_SCLKPHA_SHIFT 0 #define CFGREG_MASK 0x0f /* all CFGREG fields are 4 bits */ #define ECSPI_INTREG 0x10 #define INTREG_TCEN (1u << 7) #define INTREG_ROEN (1u << 6) #define INTREG_RFEN (1u << 5) #define INTREG_RDREN (1u << 4) #define INTREG_RREN (1u << 3) #define INTREG_TFEN (1u << 2) #define INTREG_TDREN (1u << 1) #define INTREG_TEEN (1u << 0) #define ECSPI_DMAREG 0x14 #define DMA_RX_THRESH_SHIFT 16 #define DMA_RX_THRESH_MASK 0x3f #define DMA_TX_THRESH_SHIFT 0 #define DMA_TX_THRESH_MASK 0x3f #define ECSPI_STATREG 0x18 #define SREG_TC (1u << 7) #define SREG_RO (1u << 6) #define SREG_RF (1u << 5) #define SREG_RDR (1u << 4) #define SREG_RR (1u << 3) #define SREG_TF (1u << 2) #define SREG_TDR (1u << 1) #define SREG_TE (1u << 0) #define ECSPI_PERIODREG 0x1c #define ECSPI_TESTREG 0x20 #define CS_MAX 4 /* Max number of chip selects. */ #define CS_MASK 0x03 /* Mask flag bits out of chipsel. 
*/ #define FIFO_SIZE 64 #define FIFO_RXTHRESH 32 #define FIFO_TXTHRESH 32 struct spi_softc { device_t dev; device_t spibus; struct mtx mtx; struct resource *memres; struct resource *intres; void *inthandle; gpio_pin_t cspins[CS_MAX]; u_int debug; u_int basefreq; uint32_t ctlreg; uint32_t intreg; uint32_t fifocnt; uint8_t *rxbuf; uint32_t rxidx; uint32_t rxlen; uint8_t *txbuf; uint32_t txidx; uint32_t txlen; }; static struct ofw_compat_data compat_data[] = { {"fsl,imx51-ecspi", true}, {"fsl,imx53-ecspi", true}, {"fsl,imx6dl-ecspi", true}, {"fsl,imx6q-ecspi", true}, {"fsl,imx6sx-ecspi", true}, {"fsl,imx6ul-ecspi", true}, {NULL, false} }; static inline uint32_t RD4(struct spi_softc *sc, bus_size_t offset) { return (bus_read_4(sc->memres, offset)); } static inline void WR4(struct spi_softc *sc, bus_size_t offset, uint32_t value) { bus_write_4(sc->memres, offset, value); } static u_int spi_calc_clockdiv(struct spi_softc *sc, u_int busfreq) { u_int post, pre; /* Returning 0 effectively sets both dividers to 1. */ if (sc->basefreq <= busfreq) return (0); /* * Brute-force this; all real-world bus speeds are going to be found on * the 1st or 2nd time through this loop. */ for (post = 0; post < 16; ++post) { pre = ((sc->basefreq >> post) / busfreq) - 1; if (pre < 16) break; } if (post == 16) { /* The lowest we can go is ~115 Hz. */ pre = 15; post = 15; } if (sc->debug >= 2) { device_printf(sc->dev, "base %u bus %u; pre %u, post %u; actual busfreq %u\n", sc->basefreq, busfreq, pre, post, (sc->basefreq / (pre + 1)) / (1 << post)); } return (pre << CTLREG_PREDIV_SHIFT) | (post << CTLREG_POSTDIV_SHIFT); } static void spi_set_chipsel(struct spi_softc *sc, u_int cs, bool active) { bool pinactive; /* * This is kinda crazy... the gpio pins for chipsel are defined as * active-high in the dts, but are supposed to be treated as active-low * by this driver. So to turn on chipsel we have to invert the value * passed to gpio_pin_set_active(). Then, to make it more fun, any * slave can say its chipsel is active-high, so if that option is * on, we have to invert the value again. */ pinactive = !active ^ (bool)(cs & SPIBUS_CS_HIGH); if (sc->debug >= 2) { device_printf(sc->dev, "chipsel %u changed to %u\n", (cs & ~SPIBUS_CS_HIGH), pinactive); } /* * Change the pin, then do a dummy read of its current state to ensure * that the state change reaches the hardware before proceeding. */ gpio_pin_set_active(sc->cspins[cs & ~SPIBUS_CS_HIGH], pinactive); gpio_pin_is_active(sc->cspins[cs & ~SPIBUS_CS_HIGH], &pinactive); } static void spi_hw_setup(struct spi_softc *sc, u_int cs, u_int mode, u_int freq) { uint32_t reg; /* * Set up control register, and write it first to bring the device out * of reset. */ sc->ctlreg = CTLREG_EN | CTLREG_CMODES_MASTER | CTLREG_SMC; sc->ctlreg |= spi_calc_clockdiv(sc, freq); sc->ctlreg |= 7 << CTLREG_BLEN_SHIFT; /* XXX byte at a time */ WR4(sc, ECSPI_CTLREG, sc->ctlreg); /* * Set up the config register. Note that we do all transfers with the * SPI hardware's chip-select set to zero. The actual chip select is * handled with a gpio pin. */ reg = 0; if (cs & SPIBUS_CS_HIGH) reg |= 1u << CFGREG_SSPOL_SHIFT; if (mode & SPIBUS_MODE_CPHA) reg |= 1u << CFGREG_SCLKPHA_SHIFT; if (mode & SPIBUS_MODE_CPOL) { reg |= 1u << CFGREG_SCLKPOL_SHIFT; reg |= 1u << CFGREG_SCLKCTL_SHIFT; } WR4(sc, ECSPI_CFGREG, reg); /* * Set up the rx/tx FIFO interrupt thresholds. 
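 *
 * (Editorial note.) With the FIFO_RXTHRESH and FIFO_TXTHRESH values of 32
 * defined above, the value written to ECSPI_DMAREG below works out to
 * (32 << 16) | 32, i.e. 0x00200020.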
*/ reg = (FIFO_RXTHRESH << DMA_RX_THRESH_SHIFT); reg |= (FIFO_TXTHRESH << DMA_TX_THRESH_SHIFT); WR4(sc, ECSPI_DMAREG, reg); /* * Do a dummy read, to make sure the preceding writes reach the spi * hardware before we assert any gpio chip select. */ (void)RD4(sc, ECSPI_CFGREG); } static void spi_empty_rxfifo(struct spi_softc *sc) { while (sc->rxidx < sc->rxlen && (RD4(sc, ECSPI_STATREG) & SREG_RR)) { sc->rxbuf[sc->rxidx++] = (uint8_t)RD4(sc, ECSPI_RXDATA); --sc->fifocnt; } } static void spi_fill_txfifo(struct spi_softc *sc) { while (sc->txidx < sc->txlen && sc->fifocnt < FIFO_SIZE) { WR4(sc, ECSPI_TXDATA, sc->txbuf[sc->txidx++]); ++sc->fifocnt; } /* * If we're out of data, disable tx data ready (threshold) interrupts, * and enable tx fifo empty interrupts. */ if (sc->txidx == sc->txlen) sc->intreg = (sc->intreg & ~INTREG_TDREN) | INTREG_TEEN; } static void spi_intr(void *arg) { struct spi_softc *sc = arg; uint32_t intreg, status; mtx_lock(&sc->mtx); sc = arg; intreg = sc->intreg; status = RD4(sc, ECSPI_STATREG); WR4(sc, ECSPI_STATREG, status); /* Clear w1c bits. */ /* * If we get an overflow error, just signal that the transfer is done * and wakeup the waiting thread, which will see that txidx != txlen and * return an IO error to the caller. */ if (__predict_false(status & SREG_RO)) { if (sc->debug || bootverbose) { device_printf(sc->dev, "rxoverflow rxidx %u txidx %u\n", sc->rxidx, sc->txidx); } sc->intreg = 0; wakeup(sc); mtx_unlock(&sc->mtx); return; } if (status & SREG_RR) spi_empty_rxfifo(sc); if (status & SREG_TDR) spi_fill_txfifo(sc); /* * If we're out of bytes to send... * - If Transfer Complete is set (shift register is empty) and we've * received everything we expect, we're all done. * - Else if Tx Fifo Empty is set, we need to stop waiting for that and * switch to waiting for Transfer Complete (wait for shift register * to empty out), and also for Receive Ready (last of incoming data). */ if (sc->txidx == sc->txlen) { if ((status & SREG_TC) && sc->fifocnt == 0) { sc->intreg = 0; wakeup(sc); } else if (status & SREG_TE) { sc->intreg &= ~(sc->intreg & ~INTREG_TEEN); sc->intreg |= INTREG_TCEN | INTREG_RREN; } } /* * If interrupt flags changed, write the new flags to the hardware and * do a dummy readback to ensure the changes reach the hardware before * we exit the isr. 
*/ if (sc->intreg != intreg) { WR4(sc, ECSPI_INTREG, sc->intreg); (void)RD4(sc, ECSPI_INTREG); } if (sc->debug >= 3) { device_printf(sc->dev, "spi_intr, sreg 0x%08x intreg was 0x%08x now 0x%08x\n", status, intreg, sc->intreg); } mtx_unlock(&sc->mtx); } static int spi_xfer_buf(struct spi_softc *sc, void *rxbuf, void *txbuf, uint32_t len) { int err; if (sc->debug >= 1) { device_printf(sc->dev, "spi_xfer_buf, rxbuf %p txbuf %p len %u\n", rxbuf, txbuf, len); } if (len == 0) return (0); sc->rxbuf = rxbuf; sc->rxlen = len; sc->rxidx = 0; sc->txbuf = txbuf; sc->txlen = len; sc->txidx = 0; sc->intreg = INTREG_RDREN | INTREG_TDREN; spi_fill_txfifo(sc); /* Enable interrupts last; spi_fill_txfifo() can change sc->intreg */ WR4(sc, ECSPI_INTREG, sc->intreg); err = 0; while (err == 0 && sc->intreg != 0) err = msleep(sc, &sc->mtx, 0, "imxspi", 10 * hz); if (sc->rxidx != sc->rxlen || sc->txidx != sc->txlen) err = EIO; return (err); } static int spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct spi_softc *sc = device_get_softc(dev); uint32_t cs, mode, clock; int err; spibus_get_cs(child, &cs); spibus_get_clock(child, &clock); spibus_get_mode(child, &mode); if (cs > CS_MAX || sc->cspins[cs] == NULL) { if (sc->debug || bootverbose) device_printf(sc->dev, "Invalid chip select %u\n", cs); return (EINVAL); } mtx_lock(&sc->mtx); device_busy(sc->dev); if (sc->debug >= 1) { device_printf(sc->dev, "spi_transfer, cs 0x%x clock %u mode %u\n", cs, clock, mode); } /* Set up the hardware and select the device. */ spi_hw_setup(sc, cs, mode, clock); spi_set_chipsel(sc, cs, true); /* Transfer command then data bytes. */ err = 0; if (cmd->tx_cmd_sz > 0) err = spi_xfer_buf(sc, cmd->rx_cmd, cmd->tx_cmd, cmd->tx_cmd_sz); if (cmd->tx_data_sz > 0 && err == 0) err = spi_xfer_buf(sc, cmd->rx_data, cmd->tx_data, cmd->tx_data_sz); /* Deselect the device, turn off (and reset) hardware. */ spi_set_chipsel(sc, cs, false); WR4(sc, ECSPI_CTLREG, 0); device_unbusy(sc->dev); mtx_unlock(&sc->mtx); return (err); } static phandle_t spi_get_node(device_t bus, device_t dev) { /* * Share our controller node with our spibus child; it instantiates * devices by walking the children contained within our node. */ return ofw_bus_get_node(bus); } static int spi_detach(device_t dev) { struct spi_softc *sc = device_get_softc(dev); int error, idx; if ((error = bus_generic_detach(sc->dev)) != 0) return (error); - if (sc->spibus != NULL) - device_delete_child(dev, sc->spibus); - for (idx = 0; idx < nitems(sc->cspins); ++idx) { if (sc->cspins[idx] != NULL) gpio_pin_release(sc->cspins[idx]); } if (sc->inthandle != NULL) bus_teardown_intr(sc->dev, sc->intres, sc->inthandle); if (sc->intres != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->intres); if (sc->memres != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, 0, sc->memres); mtx_destroy(&sc->mtx); return (0); } static int spi_attach(device_t dev) { struct spi_softc *sc = device_get_softc(dev); phandle_t node; int err, idx, rid; sc->dev = dev; sc->basefreq = imx_ccm_ecspi_hz(); mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); /* Set up debug-enable sysctl. */ SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->debug, 0, "Enable debug, higher values = more info"); /* Allocate mmio register access resources. 
*/ rid = 0; sc->memres = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->memres == NULL) { device_printf(sc->dev, "could not allocate registers\n"); spi_detach(sc->dev); return (ENXIO); } /* Allocate interrupt resources and set up handler. */ rid = 0; sc->intres = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->intres == NULL) { device_printf(sc->dev, "could not allocate interrupt\n"); device_detach(sc->dev); return (ENXIO); } err = bus_setup_intr(sc->dev, sc->intres, INTR_TYPE_MISC | INTR_MPSAFE, NULL, spi_intr, sc, &sc->inthandle); if (err != 0) { device_printf(sc->dev, "could not setup interrupt handler"); device_detach(sc->dev); return (ENXIO); } /* Allocate gpio pins for configured chip selects. */ node = ofw_bus_get_node(sc->dev); for (idx = 0; idx < nitems(sc->cspins); ++idx) { err = gpio_pin_get_by_ofw_propidx(sc->dev, node, "cs-gpios", idx, &sc->cspins[idx]); if (err == 0) { gpio_pin_setflags(sc->cspins[idx], GPIO_PIN_OUTPUT); } else if (sc->debug >= 2) { device_printf(sc->dev, "cannot configure gpio for chip select %u\n", idx); } } /* * Hardware init: put all channels into Master mode, turn off the enable * bit (gates off clocks); we only enable the hardware while xfers run. */ WR4(sc, ECSPI_CTLREG, CTLREG_CMODES_MASTER); /* * Add the spibus driver as a child, and setup a one-shot intrhook to * attach it after interrupts are working. It will attach actual SPI * devices as its children, and those devices may need to do IO during * their attach. We can't do IO until timers and interrupts are working. */ sc->spibus = device_add_child(dev, "spibus", DEVICE_UNIT_ANY); bus_delayed_attach_children(dev); return (0); } static int spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "i.MX ECSPI Master"); return (BUS_PROBE_DEFAULT); } static device_method_t spi_methods[] = { DEVMETHOD(device_probe, spi_probe), DEVMETHOD(device_attach, spi_attach), DEVMETHOD(device_detach, spi_detach), /* spibus_if */ DEVMETHOD(spibus_transfer, spi_transfer), /* ofw_bus_if */ DEVMETHOD(ofw_bus_get_node, spi_get_node), DEVMETHOD_END }; static driver_t spi_driver = { "imx_spi", spi_methods, sizeof(struct spi_softc), }; DRIVER_MODULE(imx_spi, simplebus, spi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, imx_spi, ofw_spibus_driver, 0, 0); MODULE_DEPEND(imx_spi, ofw_spibus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm/mv/a37x0_iic.c b/sys/arm/mv/a37x0_iic.c index f9a71b97240a..7d71cac23a40 100644 --- a/sys/arm/mv/a37x0_iic.c +++ b/sys/arm/mv/a37x0_iic.c @@ -1,481 +1,479 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018, 2019 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include /* * Driver for Armada 37x0 i2c controller. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" struct a37x0_iic_softc { boolean_t sc_fast_mode; bus_space_tag_t sc_bst; bus_space_handle_t sc_bsh; device_t sc_dev; device_t sc_iicbus; struct mtx sc_mtx; struct resource *sc_mem_res; struct resource *sc_irq_res; void *sc_intrhand; }; #define A37X0_IIC_WRITE(_sc, _off, _val) \ bus_space_write_4((_sc)->sc_bst, (_sc)->sc_bsh, _off, _val) #define A37X0_IIC_READ(_sc, _off) \ bus_space_read_4((_sc)->sc_bst, (_sc)->sc_bsh, _off) #define A37X0_IIC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define A37X0_IIC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) static struct ofw_compat_data compat_data[] = { { "marvell,armada-3700-i2c", 1 }, { NULL, 0 } }; #undef A37x0_IIC_DEBUG static void a37x0_iic_intr(void *); static int a37x0_iic_detach(device_t); static void a37x0_iic_rmw(struct a37x0_iic_softc *sc, uint32_t off, uint32_t mask, uint32_t value) { uint32_t reg; mtx_assert(&sc->sc_mtx, MA_OWNED); reg = A37X0_IIC_READ(sc, off); reg &= ~mask; reg |= value; A37X0_IIC_WRITE(sc, off, reg); } static int a37x0_iic_wait_clear(struct a37x0_iic_softc *sc, uint32_t mask) { int timeout; uint32_t status; mtx_assert(&sc->sc_mtx, MA_OWNED); timeout = 1000; do { DELAY(10); status = A37X0_IIC_READ(sc, A37X0_IIC_ISR); if (--timeout == 0) return (0); } while ((status & mask) != 0); return (1); } static int a37x0_iic_wait_set(struct a37x0_iic_softc *sc, uint32_t mask) { int timeout; uint32_t status; mtx_assert(&sc->sc_mtx, MA_OWNED); timeout = 1000; do { DELAY(10); status = A37X0_IIC_READ(sc, A37X0_IIC_ISR); if (--timeout == 0) return (0); } while ((status & mask) != mask); return (1); } #ifdef A37x0_IIC_DEBUG static void a37x0_iic_regdump(struct a37x0_iic_softc *sc) { mtx_assert(&sc->sc_mtx, MA_OWNED); printf("%s: IBMR: %#x\n", __func__, A37X0_IIC_READ(sc, A37X0_IIC_IBMR)); printf("%s: ICR: %#x\n", __func__, A37X0_IIC_READ(sc, A37X0_IIC_ICR)); printf("%s: ISR: %#x\n", __func__, A37X0_IIC_READ(sc, A37X0_IIC_ISR)); } #endif static void a37x0_iic_reset(struct a37x0_iic_softc *sc) { uint32_t mode, reg; mtx_assert(&sc->sc_mtx, MA_OWNED); /* Disable the controller. */ reg = A37X0_IIC_READ(sc, A37X0_IIC_ICR); mode = reg & ICR_MODE_MASK; A37X0_IIC_WRITE(sc, A37X0_IIC_ICR, reg & ~ICR_IUE); A37X0_IIC_WRITE(sc, A37X0_IIC_ICR, reg | ICR_UR); DELAY(100); A37X0_IIC_WRITE(sc, A37X0_IIC_ICR, reg & ~ICR_IUE); /* Enable the controller. 
*/ reg = A37X0_IIC_READ(sc, A37X0_IIC_ICR); reg |= mode | ICR_IUE | ICR_GCD | ICR_SCLE; A37X0_IIC_WRITE(sc, A37X0_IIC_ICR, reg); #ifdef A37x0_IIC_DEBUG a37x0_iic_regdump(sc); #endif } static int a37x0_iic_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Marvell Armada 37x0 IIC controller"); return (BUS_PROBE_DEFAULT); } static int a37x0_iic_attach(device_t dev) { int rid; phandle_t node; struct a37x0_iic_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } /* Hook up our interrupt handler. */ if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, a37x0_iic_intr, sc, &sc->sc_intrhand)) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot setup the interrupt handler\n"); return (ENXIO); } mtx_init(&sc->sc_mtx, "a37x0_iic", NULL, MTX_DEF); node = ofw_bus_get_node(dev); if (OF_hasprop(node, "mrvl,i2c-fast-mode")) sc->sc_fast_mode = true; /* Enable the controller. */ A37X0_IIC_LOCK(sc); a37x0_iic_reset(sc); A37X0_IIC_UNLOCK(sc); sc->sc_iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY); if (sc->sc_iicbus == NULL) { a37x0_iic_detach(dev); return (ENXIO); } /* Probe and attach the iicbus. */ bus_attach_children(dev); return (0); } static int a37x0_iic_detach(device_t dev) { struct a37x0_iic_softc *sc; bus_generic_detach(dev); sc = device_get_softc(dev); - if (sc->sc_iicbus != NULL) - device_delete_child(dev, sc->sc_iicbus); mtx_destroy(&sc->sc_mtx); if (sc->sc_intrhand) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); return (0); } static void a37x0_iic_intr(void *arg) { struct a37x0_iic_softc *sc; uint32_t status; /* Not used, the interrupts are not enabled. */ sc = (struct a37x0_iic_softc *)arg; A37X0_IIC_LOCK(sc); status = A37X0_IIC_READ(sc, A37X0_IIC_ISR); #ifdef A37x0_IIC_DEBUG a37x0_iic_regdump(sc); #endif /* Clear pending interrrupts. */ A37X0_IIC_WRITE(sc, A37X0_IIC_ISR, status); A37X0_IIC_UNLOCK(sc); } static int a37x0_iic_stop(device_t dev) { struct a37x0_iic_softc *sc; uint32_t reg; sc = device_get_softc(dev); A37X0_IIC_LOCK(sc); /* Clear the STOP condition. */ reg = A37X0_IIC_READ(sc, A37X0_IIC_ICR); if (reg & (ICR_ACKNAK | ICR_STOP)) { reg &= ~(ICR_START | ICR_ACKNAK | ICR_STOP); A37X0_IIC_WRITE(sc, A37X0_IIC_ICR, reg); } /* Clear interrupts. */ reg = A37X0_IIC_READ(sc, A37X0_IIC_ISR); A37X0_IIC_WRITE(sc, A37X0_IIC_ISR, reg); A37X0_IIC_UNLOCK(sc); return (IIC_NOERR); } static int a37x0_iic_start(device_t dev, u_char slave, int timeout) { int rv; struct a37x0_iic_softc *sc; uint32_t reg, status; sc = device_get_softc(dev); A37X0_IIC_LOCK(sc); /* Wait for the bus to be free before start a transaction. 
*/ if (a37x0_iic_wait_clear(sc, ISR_IBB) == 0) { A37X0_IIC_UNLOCK(sc); return (IIC_ETIMEOUT); } /* Write the slave address. */ A37X0_IIC_WRITE(sc, A37X0_IIC_IDBR, slave); /* Send Start condition (with slave address). */ reg = A37X0_IIC_READ(sc, A37X0_IIC_ICR); reg &= ~(ICR_STOP | ICR_ACKNAK); A37X0_IIC_WRITE(sc, A37X0_IIC_ICR, reg | ICR_START | ICR_TB); rv = IIC_NOERR; if (a37x0_iic_wait_set(sc, ISR_ITE) == 0) rv = IIC_ETIMEOUT; if (rv == IIC_NOERR) { status = A37X0_IIC_READ(sc, A37X0_IIC_ISR); A37X0_IIC_WRITE(sc, A37X0_IIC_ISR, status | ISR_ITE); if (a37x0_iic_wait_clear(sc, ISR_ACKNAK) == 0) rv = IIC_ENOACK; } A37X0_IIC_UNLOCK(sc); if (rv != IIC_NOERR) a37x0_iic_stop(dev); return (rv); } static int a37x0_iic_bus_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct a37x0_iic_softc *sc; uint32_t busfreq; sc = device_get_softc(dev); A37X0_IIC_LOCK(sc); a37x0_iic_reset(sc); if (sc->sc_iicbus == NULL) busfreq = 100000; else busfreq = IICBUS_GET_FREQUENCY(sc->sc_iicbus, speed); a37x0_iic_rmw(sc, A37X0_IIC_ICR, ICR_MODE_MASK, (busfreq > 100000) ? ICR_FAST_MODE : 0); A37X0_IIC_UNLOCK(sc); return (IIC_ENOADDR); } static int a37x0_iic_read(device_t dev, char *buf, int len, int *read, int last, int delay) { int rv; struct a37x0_iic_softc *sc; uint32_t reg, status; sc = device_get_softc(dev); A37X0_IIC_LOCK(sc); reg = A37X0_IIC_READ(sc, A37X0_IIC_ISR); if ((reg & (ISR_UB | ISR_IBB)) != ISR_UB) { A37X0_IIC_UNLOCK(sc); return (IIC_EBUSERR); } *read = 0; rv = IIC_NOERR; while (*read < len) { reg = A37X0_IIC_READ(sc, A37X0_IIC_ICR); reg &= ~(ICR_START | ICR_STOP | ICR_ACKNAK); if (*read == len - 1) reg |= ICR_ACKNAK | ICR_STOP; A37X0_IIC_WRITE(sc, A37X0_IIC_ICR, reg | ICR_TB); if (a37x0_iic_wait_set(sc, ISR_IRF) == 0) { rv = IIC_ETIMEOUT; break; } *buf++ = A37X0_IIC_READ(sc, A37X0_IIC_IDBR); (*read)++; status = A37X0_IIC_READ(sc, A37X0_IIC_ISR); A37X0_IIC_WRITE(sc, A37X0_IIC_ISR, status | ISR_IRF); } A37X0_IIC_UNLOCK(sc); return (rv); } static int a37x0_iic_write(device_t dev, const char *buf, int len, int *sent, int timeout) { int rv; struct a37x0_iic_softc *sc; uint32_t reg, status; sc = device_get_softc(dev); A37X0_IIC_LOCK(sc); reg = A37X0_IIC_READ(sc, A37X0_IIC_ISR); if ((reg & (ISR_UB | ISR_IBB)) != ISR_UB) { A37X0_IIC_UNLOCK(sc); return (IIC_EBUSERR); } rv = IIC_NOERR; *sent = 0; while (*sent < len) { A37X0_IIC_WRITE(sc, A37X0_IIC_IDBR, *buf++); reg = A37X0_IIC_READ(sc, A37X0_IIC_ICR); reg &= ~(ICR_START | ICR_STOP | ICR_ACKNAK); if (*sent == len - 1) reg |= ICR_STOP; A37X0_IIC_WRITE(sc, A37X0_IIC_ICR, reg | ICR_TB); if (a37x0_iic_wait_set(sc, ISR_ITE) == 0) { rv = IIC_ETIMEOUT; break; } (*sent)++; status = A37X0_IIC_READ(sc, A37X0_IIC_ISR); A37X0_IIC_WRITE(sc, A37X0_IIC_ISR, status | ISR_ITE); if (a37x0_iic_wait_clear(sc, ISR_ACKNAK) == 0) { rv = IIC_ENOACK; break; } } A37X0_IIC_UNLOCK(sc); return (rv); } static phandle_t a37x0_iic_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static device_method_t a37x0_iic_methods[] = { /* Device interface */ DEVMETHOD(device_probe, a37x0_iic_probe), DEVMETHOD(device_attach, a37x0_iic_attach), DEVMETHOD(device_detach, a37x0_iic_detach), /* iicbus interface */ DEVMETHOD(iicbus_reset, a37x0_iic_bus_reset), DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_transfer, iicbus_transfer_gen), DEVMETHOD(iicbus_repeated_start, a37x0_iic_start), DEVMETHOD(iicbus_start, a37x0_iic_start), DEVMETHOD(iicbus_stop, a37x0_iic_stop), DEVMETHOD(iicbus_read, a37x0_iic_read), DEVMETHOD(iicbus_write, a37x0_iic_write), 
/* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, a37x0_iic_get_node), DEVMETHOD_END }; static driver_t a37x0_iic_driver = { "iichb", a37x0_iic_methods, sizeof(struct a37x0_iic_softc), }; DRIVER_MODULE(iicbus, a37x0_iic, iicbus_driver, 0, 0); DRIVER_MODULE(a37x0_iic, simplebus, a37x0_iic_driver, 0, 0); diff --git a/sys/arm/nvidia/tegra_i2c.c b/sys/arm/nvidia/tegra_i2c.c index bed1bf0ed7e3..6c4d20cb78ff 100644 --- a/sys/arm/nvidia/tegra_i2c.c +++ b/sys/arm/nvidia/tegra_i2c.c @@ -1,803 +1,801 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * I2C driver for Tegra SoCs. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #define I2C_CNFG 0x000 #define I2C_CNFG_MSTR_CLR_BUS_ON_TIMEOUT (1 << 15) #define I2C_CNFG_DEBOUNCE_CNT(x) (((x) & 0x07) << 12) #define I2C_CNFG_NEW_MASTER_FSM (1 << 11) #define I2C_CNFG_PACKET_MODE_EN (1 << 10) #define I2C_CNFG_SEND (1 << 9) #define I2C_CNFG_NOACK (1 << 8) #define I2C_CNFG_CMD2 (1 << 7) #define I2C_CNFG_CMD1 (1 << 6) #define I2C_CNFG_START (1 << 5) #define I2C_CNFG_SLV2 (1 << 4) #define I2C_CNFG_LENGTH_SHIFT 1 #define I2C_CNFG_LENGTH_MASK 0x7 #define I2C_CNFG_A_MOD (1 << 0) #define I2C_CMD_ADDR0 0x004 #define I2C_CMD_ADDR1 0x008 #define I2C_CMD_DATA1 0x00c #define I2C_CMD_DATA2 0x010 #define I2C_STATUS 0x01c #define I2C_SL_CNFG 0x020 #define I2C_SL_RCVD 0x024 #define I2C_SL_STATUS 0x028 #define I2C_SL_ADDR1 0x02c #define I2C_SL_ADDR2 0x030 #define I2C_TLOW_SEXT 0x034 #define I2C_SL_DELAY_COUNT 0x03c #define I2C_SL_INT_MASK 0x040 #define I2C_SL_INT_SOURCE 0x044 #define I2C_SL_INT_SET 0x048 #define I2C_TX_PACKET_FIFO 0x050 #define I2C_RX_FIFO 0x054 #define I2C_PACKET_TRANSFER_STATUS 0x058 #define I2C_FIFO_CONTROL 0x05c #define I2C_FIFO_CONTROL_SLV_TX_FIFO_TRIG(x) (((x) & 0x07) << 13) #define I2C_FIFO_CONTROL_SLV_RX_FIFO_TRIG(x) (((x) & 0x07) << 10) #define I2C_FIFO_CONTROL_SLV_TX_FIFO_FLUSH (1 << 9) #define I2C_FIFO_CONTROL_SLV_RX_FIFO_FLUSH (1 << 8) #define I2C_FIFO_CONTROL_TX_FIFO_TRIG(x) (((x) & 0x07) << 5) #define I2C_FIFO_CONTROL_RX_FIFO_TRIG(x) (((x) & 0x07) << 2) #define I2C_FIFO_CONTROL_TX_FIFO_FLUSH (1 << 1) #define I2C_FIFO_CONTROL_RX_FIFO_FLUSH (1 << 0) #define I2C_FIFO_STATUS 0x060 #define I2C_FIFO_STATUS_SLV_XFER_ERR_REASON (1 << 25) #define I2C_FIFO_STATUS_TX_FIFO_SLV_EMPTY_CNT(x) (((x) >> 20) & 0xF) #define I2C_FIFO_STATUS_RX_FIFO_SLV_FULL_CNT(x) (((x) >> 16) & 0xF) #define I2C_FIFO_STATUS_TX_FIFO_EMPTY_CNT(x) (((x) >> 4) & 0xF) #define I2C_FIFO_STATUS_RX_FIFO_FULL_CNT(x) (((x) >> 0) & 0xF) #define I2C_INTERRUPT_MASK_REGISTER 0x064 #define I2C_INTERRUPT_STATUS_REGISTER 0x068 #define I2C_INT_SLV_ACK_WITHHELD (1 << 28) #define I2C_INT_SLV_RD2WR (1 << 27) #define I2C_INT_SLV_WR2RD (1 << 26) #define I2C_INT_SLV_PKT_XFER_ERR (1 << 25) #define I2C_INT_SLV_TX_BUFFER_REQ (1 << 24) #define I2C_INT_SLV_RX_BUFFER_FILLED (1 << 23) #define I2C_INT_SLV_PACKET_XFER_COMPLETE (1 << 22) #define I2C_INT_SLV_TFIFO_OVF (1 << 21) #define I2C_INT_SLV_RFIFO_UNF (1 << 20) #define I2C_INT_SLV_TFIFO_DATA_REQ (1 << 17) #define I2C_INT_SLV_RFIFO_DATA_REQ (1 << 16) #define I2C_INT_BUS_CLEAR_DONE (1 << 11) #define I2C_INT_TLOW_MEXT_TIMEOUT (1 << 10) #define I2C_INT_TLOW_SEXT_TIMEOUT (1 << 9) #define I2C_INT_TIMEOUT (1 << 8) #define I2C_INT_PACKET_XFER_COMPLETE (1 << 7) #define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1 << 6) #define I2C_INT_TFIFO_OVR (1 << 5) #define I2C_INT_RFIFO_UNF (1 << 4) #define I2C_INT_NOACK (1 << 3) #define I2C_INT_ARB_LOST (1 << 2) #define I2C_INT_TFIFO_DATA_REQ (1 << 1) #define I2C_INT_RFIFO_DATA_REQ (1 << 0) #define I2C_ERROR_MASK (I2C_INT_ARB_LOST | I2C_INT_NOACK | \ I2C_INT_RFIFO_UNF | I2C_INT_TFIFO_OVR) #define I2C_CLK_DIVISOR 0x06c #define I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT 16 #define I2C_CLK_DIVISOR_STD_FAST_MODE_MASK 0xffff #define I2C_CLK_DIVISOR_HSMODE_SHIFT 0 #define I2C_CLK_DIVISOR_HSMODE_MASK 0xffff #define I2C_INTERRUPT_SOURCE_REGISTER 0x070 #define I2C_INTERRUPT_SET_REGISTER 0x074 #define I2C_SLV_TX_PACKET_FIFO 0x07c #define I2C_SLV_PACKET_STATUS 0x080 #define 
I2C_BUS_CLEAR_CONFIG 0x084 #define I2C_BUS_CLEAR_CONFIG_BC_SCLK_THRESHOLD(x) (((x) & 0xFF) << 16) #define I2C_BUS_CLEAR_CONFIG_BC_STOP_COND (1 << 2) #define I2C_BUS_CLEAR_CONFIG_BC_TERMINATE (1 << 1) #define I2C_BUS_CLEAR_CONFIG_BC_ENABLE (1 << 0) #define I2C_BUS_CLEAR_STATUS 0x088 #define I2C_BUS_CLEAR_STATUS_BC_STATUS (1 << 0) #define I2C_CONFIG_LOAD 0x08c #define I2C_CONFIG_LOAD_TIMEOUT_CONFIG_LOAD (1 << 2) #define I2C_CONFIG_LOAD_SLV_CONFIG_LOAD (1 << 1) #define I2C_CONFIG_LOAD_MSTR_CONFIG_LOAD (1 << 0) #define I2C_INTERFACE_TIMING_0 0x094 #define I2C_INTERFACE_TIMING_1 0x098 #define I2C_HS_INTERFACE_TIMING_0 0x09c #define I2C_HS_INTERFACE_TIMING_1 0x0a0 /* Protocol header 0 */ #define PACKET_HEADER0_HEADER_SIZE_SHIFT 28 #define PACKET_HEADER0_HEADER_SIZE_MASK 0x3 #define PACKET_HEADER0_PACKET_ID_SHIFT 16 #define PACKET_HEADER0_PACKET_ID_MASK 0xff #define PACKET_HEADER0_CONT_ID_SHIFT 12 #define PACKET_HEADER0_CONT_ID_MASK 0xf #define PACKET_HEADER0_PROTOCOL_I2C (1 << 4) #define PACKET_HEADER0_TYPE_SHIFT 0 #define PACKET_HEADER0_TYPE_MASK 0x7 /* I2C header */ #define I2C_HEADER_HIGHSPEED_MODE (1 << 22) #define I2C_HEADER_CONT_ON_NAK (1 << 21) #define I2C_HEADER_SEND_START_BYTE (1 << 20) #define I2C_HEADER_READ (1 << 19) #define I2C_HEADER_10BIT_ADDR (1 << 18) #define I2C_HEADER_IE_ENABLE (1 << 17) #define I2C_HEADER_REPEAT_START (1 << 16) #define I2C_HEADER_CONTINUE_XFER (1 << 15) #define I2C_HEADER_MASTER_ADDR_SHIFT 12 #define I2C_HEADER_MASTER_ADDR_MASK 0x7 #define I2C_HEADER_SLAVE_ADDR_SHIFT 0 #define I2C_HEADER_SLAVE_ADDR_MASK 0x3ff #define I2C_CLK_DIVISOR_STD_FAST_MODE 0x19 #define I2C_CLK_MULTIPLIER_STD_FAST_MODE 8 #define I2C_REQUEST_TIMEOUT (5 * hz) #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) \ mtx_sleep(sc, &sc->mtx, 0, "i2cbuswait", timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_i2c", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-i2c", 1}, {"nvidia,tegra210-i2c", 1}, {NULL, 0} }; enum tegra_i2c_xfer_type { XFER_STOP, /* Send stop condition after xfer */ XFER_REPEAT_START, /* Send repeated start after xfer */ XFER_CONTINUE /* Don't send nothing */ } ; struct tegra_i2c_softc { device_t dev; struct mtx mtx; struct resource *mem_res; struct resource *irq_res; void *irq_h; device_t iicbus; clk_t clk; hwreset_t reset; uint32_t core_freq; uint32_t bus_freq; int bus_inuse; struct iic_msg *msg; int msg_idx; uint32_t bus_err; int done; }; static int tegra_i2c_flush_fifo(struct tegra_i2c_softc *sc) { int timeout; uint32_t reg; reg = RD4(sc, I2C_FIFO_CONTROL); reg |= I2C_FIFO_CONTROL_TX_FIFO_FLUSH | I2C_FIFO_CONTROL_RX_FIFO_FLUSH; WR4(sc, I2C_FIFO_CONTROL, reg); timeout = 10; while (timeout > 0) { reg = RD4(sc, I2C_FIFO_CONTROL); reg &= I2C_FIFO_CONTROL_TX_FIFO_FLUSH | I2C_FIFO_CONTROL_RX_FIFO_FLUSH; if (reg == 0) break; DELAY(10); } if (timeout <= 0) { device_printf(sc->dev, "FIFO flush timedout\n"); return (ETIMEDOUT); } return (0); } static void tegra_i2c_setup_clk(struct tegra_i2c_softc *sc, int clk_freq) { int div; div = ((sc->core_freq / clk_freq) / 10) - 1; if ((sc->core_freq / (10 * (div + 1))) > clk_freq) div++; if (div > 65535) div = 65535; WR4(sc, 
I2C_CLK_DIVISOR, (1 << I2C_CLK_DIVISOR_HSMODE_SHIFT) | (div << I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT)); } static void tegra_i2c_bus_clear(struct tegra_i2c_softc *sc) { int timeout; uint32_t reg, status; WR4(sc, I2C_BUS_CLEAR_CONFIG, I2C_BUS_CLEAR_CONFIG_BC_SCLK_THRESHOLD(18) | I2C_BUS_CLEAR_CONFIG_BC_STOP_COND | I2C_BUS_CLEAR_CONFIG_BC_TERMINATE); WR4(sc, I2C_CONFIG_LOAD, I2C_CONFIG_LOAD_MSTR_CONFIG_LOAD); for (timeout = 1000; timeout > 0; timeout--) { if (RD4(sc, I2C_CONFIG_LOAD) == 0) break; DELAY(10); } if (timeout <= 0) device_printf(sc->dev, "config load timeouted\n"); reg = RD4(sc, I2C_BUS_CLEAR_CONFIG); reg |= I2C_BUS_CLEAR_CONFIG_BC_ENABLE; WR4(sc, I2C_BUS_CLEAR_CONFIG,reg); for (timeout = 1000; timeout > 0; timeout--) { if ((RD4(sc, I2C_BUS_CLEAR_CONFIG) & I2C_BUS_CLEAR_CONFIG_BC_ENABLE) == 0) break; DELAY(10); } if (timeout <= 0) device_printf(sc->dev, "bus clear timeouted\n"); status = RD4(sc, I2C_BUS_CLEAR_STATUS); if ((status & I2C_BUS_CLEAR_STATUS_BC_STATUS) == 0) device_printf(sc->dev, "bus clear failed\n"); } static int tegra_i2c_hw_init(struct tegra_i2c_softc *sc) { int rv, timeout; /* Reset the core. */ rv = hwreset_assert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot assert reset\n"); return (rv); } DELAY(10); rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot clear reset\n"); return (rv); } WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, 0xFFFFFFFF); WR4(sc, I2C_CNFG, I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN | I2C_CNFG_DEBOUNCE_CNT(2)); tegra_i2c_setup_clk(sc, sc->bus_freq); WR4(sc, I2C_FIFO_CONTROL, I2C_FIFO_CONTROL_TX_FIFO_TRIG(7) | I2C_FIFO_CONTROL_RX_FIFO_TRIG(0)); WR4(sc, I2C_CONFIG_LOAD, I2C_CONFIG_LOAD_MSTR_CONFIG_LOAD); for (timeout = 1000; timeout > 0; timeout--) { if (RD4(sc, I2C_CONFIG_LOAD) == 0) break; DELAY(10); } if (timeout <= 0) device_printf(sc->dev, "config load timeouted\n"); tegra_i2c_bus_clear(sc); return (0); } static int tegra_i2c_tx(struct tegra_i2c_softc *sc) { uint32_t reg; int cnt, i; if (sc->msg_idx >= sc->msg->len) panic("Invalid call to tegra_i2c_tx\n"); while(sc->msg_idx < sc->msg->len) { reg = RD4(sc, I2C_FIFO_STATUS); if (I2C_FIFO_STATUS_TX_FIFO_EMPTY_CNT(reg) == 0) break; cnt = min(4, sc->msg->len - sc->msg_idx); reg = 0; for (i = 0; i < cnt; i++) { reg |= sc->msg->buf[sc->msg_idx] << (i * 8); sc->msg_idx++; } WR4(sc, I2C_TX_PACKET_FIFO, reg); } if (sc->msg_idx >= sc->msg->len) return (0); return (sc->msg->len - sc->msg_idx - 1); } static int tegra_i2c_rx(struct tegra_i2c_softc *sc) { uint32_t reg; int cnt, i; if (sc->msg_idx >= sc->msg->len) panic("Invalid call to tegra_i2c_rx\n"); while(sc->msg_idx < sc->msg->len) { reg = RD4(sc, I2C_FIFO_STATUS); if (I2C_FIFO_STATUS_RX_FIFO_FULL_CNT(reg) == 0) break; cnt = min(4, sc->msg->len - sc->msg_idx); reg = RD4(sc, I2C_RX_FIFO); for (i = 0; i < cnt; i++) { sc->msg->buf[sc->msg_idx] = (reg >> (i * 8)) & 0xFF; sc->msg_idx++; } } if (sc->msg_idx >= sc->msg->len) return (0); return (sc->msg->len - sc->msg_idx - 1); } static void tegra_i2c_intr(void *arg) { struct tegra_i2c_softc *sc; uint32_t status, reg; int rv; sc = (struct tegra_i2c_softc *)arg; LOCK(sc); status = RD4(sc, I2C_INTERRUPT_SOURCE_REGISTER); if (sc->msg == NULL) { /* Unexpected interrupt - disable FIFOs, clear reset. 
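/*
 * Illustration (standalone arithmetic only): the divisor computed by
 * tegra_i2c_setup_clk() above.  The driver assumes the standard/fast-mode
 * SCL rate comes out to roughly core_freq / (10 * (div + 1)) and that the
 * divisor field is 16 bits wide; the function name below is a stand-in.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
std_fast_divisor(uint32_t core_freq, uint32_t bus_freq)
{
	uint32_t div;

	div = ((core_freq / bus_freq) / 10) - 1;
	/* Bump the divisor if truncation would overshoot the bus frequency. */
	if ((core_freq / (10 * (div + 1))) > bus_freq)
		div++;
	if (div > 65535)
		div = 65535;
	return (div);
}

int
main(void)
{
	/* With the 136 MHz core clock requested in tegra_i2c_attach(): */
	printf("100 kHz -> div %u\n", std_fast_divisor(136000000, 100000));	/* 135 */
	printf("400 kHz -> div %u\n", std_fast_divisor(136000000, 400000));	/* 33 */
	return (0);
}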
*/ reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_TFIFO_DATA_REQ; reg &= ~I2C_INT_RFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status); UNLOCK(sc); return; } if ((status & I2C_ERROR_MASK) != 0) { if (status & I2C_INT_NOACK) sc->bus_err = IIC_ENOACK; if (status & I2C_INT_ARB_LOST) sc->bus_err = IIC_EBUSERR; if ((status & I2C_INT_TFIFO_OVR) || (status & I2C_INT_RFIFO_UNF)) sc->bus_err = IIC_EBUSERR; sc->done = 1; } else if ((status & I2C_INT_RFIFO_DATA_REQ) && (sc->msg != NULL) && (sc->msg->flags & IIC_M_RD)) { rv = tegra_i2c_rx(sc); if (rv == 0) { reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_RFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg); } } else if ((status & I2C_INT_TFIFO_DATA_REQ) && (sc->msg != NULL) && !(sc->msg->flags & IIC_M_RD)) { rv = tegra_i2c_tx(sc); if (rv == 0) { reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_TFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg); } } else if ((status & I2C_INT_RFIFO_DATA_REQ) || (status & I2C_INT_TFIFO_DATA_REQ)) { device_printf(sc->dev, "Unexpected data interrupt: 0x%08X\n", status); reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER); reg &= ~I2C_INT_TFIFO_DATA_REQ; reg &= ~I2C_INT_RFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg); } if (status & I2C_INT_PACKET_XFER_COMPLETE) sc->done = 1; WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status); if (sc->done) { WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); wakeup(&(sc->done)); } UNLOCK(sc); } static void tegra_i2c_start_msg(struct tegra_i2c_softc *sc, struct iic_msg *msg, enum tegra_i2c_xfer_type xtype) { uint32_t tmp, mask; /* Packet header. */ tmp = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) | PACKET_HEADER0_PROTOCOL_I2C | (1 << PACKET_HEADER0_CONT_ID_SHIFT) | (1 << PACKET_HEADER0_PACKET_ID_SHIFT); WR4(sc, I2C_TX_PACKET_FIFO, tmp); /* Packet size. */ WR4(sc, I2C_TX_PACKET_FIFO, msg->len - 1); /* I2C header. */ tmp = I2C_HEADER_IE_ENABLE; if (xtype == XFER_CONTINUE) tmp |= I2C_HEADER_CONTINUE_XFER; else if (xtype == XFER_REPEAT_START) tmp |= I2C_HEADER_REPEAT_START; tmp |= msg->slave << I2C_HEADER_SLAVE_ADDR_SHIFT; if (msg->flags & IIC_M_RD) { tmp |= I2C_HEADER_READ; tmp |= 1 << I2C_HEADER_SLAVE_ADDR_SHIFT; } else tmp &= ~(1 << I2C_HEADER_SLAVE_ADDR_SHIFT); WR4(sc, I2C_TX_PACKET_FIFO, tmp); /* Interrupt mask. */ mask = I2C_INT_NOACK | I2C_INT_ARB_LOST | I2C_INT_PACKET_XFER_COMPLETE; if (msg->flags & IIC_M_RD) mask |= I2C_INT_RFIFO_DATA_REQ; else mask |= I2C_INT_TFIFO_DATA_REQ; WR4(sc, I2C_INTERRUPT_MASK_REGISTER, mask); } static int tegra_i2c_poll(struct tegra_i2c_softc *sc) { int timeout; for(timeout = 10000; timeout > 0; timeout--) { UNLOCK(sc); tegra_i2c_intr(sc); LOCK(sc); if (sc->done != 0) break; DELAY(1); } if (timeout <= 0) return (ETIMEDOUT); return (0); } static int tegra_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { int rv, i; struct tegra_i2c_softc *sc; enum tegra_i2c_xfer_type xtype; sc = device_get_softc(dev); LOCK(sc); /* Get the bus. */ while (sc->bus_inuse == 1) SLEEP(sc, 0); sc->bus_inuse = 1; rv = 0; for (i = 0; i < nmsgs; i++) { sc->msg = &msgs[i]; sc->msg_idx = 0; sc->bus_err = 0; sc->done = 0; /* Check for valid parameters. */ if (sc->msg == NULL || sc->msg->buf == NULL || sc->msg->len == 0) { rv = EINVAL; break; } /* Get flags for next transfer. 
*/ if (i == (nmsgs - 1)) { if (msgs[i].flags & IIC_M_NOSTOP) xtype = XFER_CONTINUE; else xtype = XFER_STOP; } else { if (msgs[i + 1].flags & IIC_M_NOSTART) xtype = XFER_CONTINUE; else xtype = XFER_REPEAT_START; } tegra_i2c_start_msg(sc, sc->msg, xtype); if (cold) rv = tegra_i2c_poll(sc); else rv = msleep(&sc->done, &sc->mtx, PZERO, "iic", I2C_REQUEST_TIMEOUT); WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0); WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, 0xFFFFFFFF); if (rv == 0) rv = sc->bus_err; if (rv != 0) break; } if (rv != 0) { tegra_i2c_hw_init(sc); tegra_i2c_flush_fifo(sc); } sc->msg = NULL; sc->msg_idx = 0; sc->bus_err = 0; sc->done = 0; /* Wake up the processes that are waiting for the bus. */ sc->bus_inuse = 0; wakeup(sc); UNLOCK(sc); return (rv); } static int tegra_i2c_iicbus_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct tegra_i2c_softc *sc; int busfreq; sc = device_get_softc(dev); busfreq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); sc = device_get_softc(dev); LOCK(sc); tegra_i2c_setup_clk(sc, busfreq); UNLOCK(sc); return (0); } static int tegra_i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); return (BUS_PROBE_DEFAULT); } static int tegra_i2c_attach(device_t dev) { int rv, rid; phandle_t node; struct tegra_i2c_softc *sc; uint64_t freq; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); LOCK_INIT(sc); /* Get the memory resource for the register mapping. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); rv = ENXIO; goto fail; } /* Allocate our IRQ resource. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate interrupt.\n"); rv = ENXIO; goto fail; } /* FDT resources. */ rv = clk_get_by_ofw_name(dev, 0, "div-clk", &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get i2c clock: %d\n", rv); goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, 0, "i2c", &sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot get i2c reset\n"); return (ENXIO); } rv = OF_getencprop(node, "clock-frequency", &sc->bus_freq, sizeof(sc->bus_freq)); if (rv != sizeof(sc->bus_freq)) { sc->bus_freq = 100000; } /* Request maximum frequency for I2C block 136MHz (408MHz / 3). */ rv = clk_set_freq(sc->clk, 136000000, CLK_SET_ROUND_DOWN); if (rv != 0) { device_printf(dev, "Cannot set clock frequency\n"); goto fail; } rv = clk_get_freq(sc->clk, &freq); if (rv != 0) { device_printf(dev, "Cannot get clock frequency\n"); goto fail; } sc->core_freq = (uint32_t)freq; rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock: %d\n", rv); goto fail; } /* Init hardware. */ rv = tegra_i2c_hw_init(sc); if (rv) { device_printf(dev, "tegra_i2c_activate failed\n"); goto fail; } /* Setup interrupt. */ rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, tegra_i2c_intr, sc, &sc->irq_h); if (rv) { device_printf(dev, "Cannot setup interrupt.\n"); goto fail; } /* Attach the iicbus. */ sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY); if (sc->iicbus == NULL) { device_printf(dev, "Could not allocate iicbus instance.\n"); rv = ENXIO; goto fail; } /* Probe and attach the iicbus. 
*/ bus_attach_children(dev); return (0); fail: if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (rv); } static int tegra_i2c_detach(device_t dev) { struct tegra_i2c_softc *sc; int error; error = bus_generic_detach(dev); if (error != 0) return (error); sc = device_get_softc(dev); tegra_i2c_hw_init(sc); if (sc->irq_h != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_h); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); - if (sc->iicbus) - device_delete_child(dev, sc->iicbus); return (0); } static phandle_t tegra_i2c_get_node(device_t bus, device_t dev) { /* Share controller node with iibus device. */ return (ofw_bus_get_node(bus)); } static device_method_t tegra_i2c_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_i2c_probe), DEVMETHOD(device_attach, tegra_i2c_attach), DEVMETHOD(device_detach, tegra_i2c_detach), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, tegra_i2c_get_node), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_reset, tegra_i2c_iicbus_reset), DEVMETHOD(iicbus_transfer, tegra_i2c_transfer), DEVMETHOD_END }; static DEFINE_CLASS_0(iichb, tegra_i2c_driver, tegra_i2c_methods, sizeof(struct tegra_i2c_softc)); EARLY_DRIVER_MODULE(tegra_iic, simplebus, tegra_i2c_driver, NULL, NULL, 73); diff --git a/sys/arm/ti/am335x/am335x_ehrpwm.c b/sys/arm/ti/am335x/am335x_ehrpwm.c index 95301f9cfed3..59ef0931439d 100644 --- a/sys/arm/ti/am335x/am335x_ehrpwm.c +++ b/sys/arm/ti/am335x/am335x_ehrpwm.c @@ -1,599 +1,596 @@ /*- * Copyright (c) 2013 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
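/*
 * Illustration (standalone, not driver code): how the message loop in
 * tegra_i2c_transfer() above decides what to put on the bus after each
 * message -- a stop, a repeated start, or nothing.  The EX_M_* flag values
 * and all names below are stand-ins for this example; in the driver the
 * flags come from the iicbus headers.
 */
#include <stdint.h>
#include <stdio.h>

#define	EX_M_RD		0x0001	/* read transfer */
#define	EX_M_NOSTOP	0x0002	/* do not send a stop after this message */
#define	EX_M_NOSTART	0x0004	/* do not send a (re)start before this message */

enum xfer_type { XFER_STOP, XFER_REPEAT_START, XFER_CONTINUE };

struct ex_msg {
	uint16_t	flags;
};

static enum xfer_type
pick_xfer_type(const struct ex_msg *msgs, int i, int nmsgs)
{
	if (i == nmsgs - 1)	/* last message: stop unless asked not to */
		return ((msgs[i].flags & EX_M_NOSTOP) ?
		    XFER_CONTINUE : XFER_STOP);
	/* More messages follow: repeated start unless the next one elides it. */
	return ((msgs[i + 1].flags & EX_M_NOSTART) ?
	    XFER_CONTINUE : XFER_REPEAT_START);
}

int
main(void)
{
	/* A typical register read: write the register offset, then read back. */
	const struct ex_msg msgs[2] = { { 0 }, { EX_M_RD } };
	static const char *names[] = { "STOP", "REPEAT_START", "CONTINUE" };
	int i;

	for (i = 0; i < 2; i++)
		printf("msg %d -> %s\n", i, names[pick_xfer_type(msgs, i, 2)]);
	return (0);
}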
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pwmbus_if.h" #include "am335x_pwm.h" /******************************************************************************* * Enhanced resolution PWM driver. Many of the advanced featues of the hardware * are not supported by this driver. What is implemented here is simple * variable-duty-cycle PWM output. ******************************************************************************/ /* In ticks */ #define DEFAULT_PWM_PERIOD 1000 #define PWM_CLOCK 100000000UL #define NS_PER_SEC 1000000000 #define PWM_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define PWM_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define PWM_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED) #define PWM_LOCK_INIT(_sc) mtx_init(&(_sc)->sc_mtx, \ device_get_nameunit(_sc->sc_dev), "am335x_ehrpwm softc", MTX_DEF) #define PWM_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx) #define EPWM_READ2(_sc, reg) bus_read_2((_sc)->sc_mem_res, reg) #define EPWM_WRITE2(_sc, reg, value) \ bus_write_2((_sc)->sc_mem_res, reg, value) #define EPWM_TBCTL 0x00 /* see 15.2.2.11 for the first two, used in debug situations */ #define TBCTL_FREERUN_STOP_NEXT_TBC_INCREMENT (0 << 14) #define TBCTL_FREERUN_STOP_COMPLETE_CYCLE (1 << 14) /* ignore suspend control signal */ #define TBCTL_FREERUN (2 << 14) #define TBCTL_PHDIR_UP (1 << 13) #define TBCTL_PHDIR_DOWN (0 << 13) #define TBCTL_CLKDIV(x) ((x) << 10) #define TBCTL_CLKDIV_MASK (7 << 10) #define TBCTL_HSPCLKDIV(x) ((x) << 7) #define TBCTL_HSPCLKDIV_MASK (7 << 7) #define TBCTL_SYNCOSEL_DISABLED (3 << 4) #define TBCTL_PRDLD_SHADOW (0 << 3) #define TBCTL_PRDLD_IMMEDIATE (1 << 3) #define TBCTL_PHSEN_DISABLED (0 << 2) #define TBCTL_PHSEN_ENABLED (1 << 2) #define TBCTL_CTRMODE_MASK (3) #define TBCTL_CTRMODE_UP (0 << 0) #define TBCTL_CTRMODE_DOWN (1 << 0) #define TBCTL_CTRMODE_UPDOWN (2 << 0) #define TBCTL_CTRMODE_FREEZE (3 << 0) #define EPWM_TBSTS 0x02 #define EPWM_TBPHSHR 0x04 #define EPWM_TBPHS 0x06 #define EPWM_TBCNT 0x08 #define EPWM_TBPRD 0x0a /* Counter-compare */ #define EPWM_CMPCTL 0x0e #define CMPCTL_SHDWBMODE_SHADOW (1 << 6) #define CMPCTL_SHDWBMODE_IMMEDIATE (0 << 6) #define CMPCTL_SHDWAMODE_SHADOW (1 << 4) #define CMPCTL_SHDWAMODE_IMMEDIATE (0 << 4) #define CMPCTL_LOADBMODE_ZERO (0 << 2) #define CMPCTL_LOADBMODE_PRD (1 << 2) #define CMPCTL_LOADBMODE_EITHER (2 << 2) #define CMPCTL_LOADBMODE_FREEZE (3 << 2) #define CMPCTL_LOADAMODE_ZERO (0 << 0) #define CMPCTL_LOADAMODE_PRD (1 << 0) #define CMPCTL_LOADAMODE_EITHER (2 << 0) #define CMPCTL_LOADAMODE_FREEZE (3 << 0) #define EPWM_CMPAHR 0x10 #define EPWM_CMPA 0x12 #define EPWM_CMPB 0x14 /* CMPCTL_LOADAMODE_ZERO */ #define EPWM_AQCTLA 0x16 #define EPWM_AQCTLB 0x18 #define AQCTL_CBU_NONE (0 << 8) #define AQCTL_CBU_CLEAR (1 << 8) #define AQCTL_CBU_SET (2 << 8) #define AQCTL_CBU_TOGGLE (3 << 8) #define AQCTL_CAU_NONE (0 << 4) #define AQCTL_CAU_CLEAR (1 << 4) #define AQCTL_CAU_SET (2 << 4) #define AQCTL_CAU_TOGGLE (3 << 
4) #define AQCTL_ZRO_NONE (0 << 0) #define AQCTL_ZRO_CLEAR (1 << 0) #define AQCTL_ZRO_SET (2 << 0) #define AQCTL_ZRO_TOGGLE (3 << 0) #define EPWM_AQSFRC 0x1a #define EPWM_AQCSFRC 0x1c #define AQCSFRC_OFF 0 #define AQCSFRC_LO 1 #define AQCSFRC_HI 2 #define AQCSFRC_MASK 3 #define AQCSFRC(chan, hilo) ((hilo) << (2 * chan)) /* Trip-Zone module */ #define EPWM_TZSEL 0x24 #define EPWM_TZCTL 0x28 #define EPWM_TZFLG 0x2C /* Dead band */ #define EPWM_DBCTL 0x1E #define DBCTL_MASK (3 << 0) #define DBCTL_BYPASS 0 #define DBCTL_RISING_EDGE 1 #define DBCTL_FALLING_EDGE 2 #define DBCTL_BOTH_EDGE 3 /* PWM-chopper */ #define EPWM_PCCTL 0x3C #define PCCTL_CHPEN_MASK (1 << 0) #define PCCTL_CHPEN_DISABLE 0 #define PCCTL_CHPEN_ENABLE 1 /* High-Resolution PWM */ #define EPWM_HRCTL 0x40 #define HRCTL_DELMODE_BOTH 3 #define HRCTL_DELMODE_FALL 2 #define HRCTL_DELMODE_RISE 1 static device_probe_t am335x_ehrpwm_probe; static device_attach_t am335x_ehrpwm_attach; static device_detach_t am335x_ehrpwm_detach; struct ehrpwm_channel { u_int duty; /* on duration, in ns */ bool enabled; /* channel enabled? */ bool inverted; /* signal inverted? */ }; #define NUM_CHANNELS 2 struct am335x_ehrpwm_softc { device_t sc_dev; device_t sc_busdev; struct mtx sc_mtx; struct resource *sc_mem_res; int sc_mem_rid; /* Things used for configuration via pwm(9) api. */ u_int sc_clkfreq; /* frequency in Hz */ u_int sc_clktick; /* duration in ns */ u_int sc_period; /* duration in ns */ struct ehrpwm_channel sc_channels[NUM_CHANNELS]; }; static struct ofw_compat_data compat_data[] = { {"ti,am3352-ehrpwm", true}, {"ti,am33xx-ehrpwm", true}, {NULL, false}, }; SIMPLEBUS_PNP_INFO(compat_data); static void am335x_ehrpwm_cfg_duty(struct am335x_ehrpwm_softc *sc, u_int chan, u_int duty) { u_int tbcmp; if (duty == 0) tbcmp = 0; else tbcmp = max(1, duty / sc->sc_clktick); sc->sc_channels[chan].duty = tbcmp * sc->sc_clktick; PWM_LOCK_ASSERT(sc); EPWM_WRITE2(sc, (chan == 0) ? EPWM_CMPA : EPWM_CMPB, tbcmp); } static void am335x_ehrpwm_cfg_enable(struct am335x_ehrpwm_softc *sc, u_int chan, bool enable) { uint16_t regval; sc->sc_channels[chan].enabled = enable; /* * Turn off any existing software-force of the channel, then force * it in the right direction (high or low) if it's not being enabled. */ PWM_LOCK_ASSERT(sc); regval = EPWM_READ2(sc, EPWM_AQCSFRC); regval &= ~AQCSFRC(chan, AQCSFRC_MASK); if (!sc->sc_channels[chan].enabled) { if (sc->sc_channels[chan].inverted) regval |= AQCSFRC(chan, AQCSFRC_HI); else regval |= AQCSFRC(chan, AQCSFRC_LO); } EPWM_WRITE2(sc, EPWM_AQCSFRC, regval); } static bool am335x_ehrpwm_cfg_period(struct am335x_ehrpwm_softc *sc, u_int period) { uint16_t regval; u_int clkdiv, hspclkdiv, pwmclk, pwmtick, tbprd; /* Can't do a period shorter than 2 clock ticks. */ if (period < 2 * NS_PER_SEC / PWM_CLOCK) { sc->sc_clkfreq = 0; sc->sc_clktick = 0; sc->sc_period = 0; return (false); } /* * Figure out how much we have to divide down the base 100MHz clock so * that we can express the requested period as a 16-bit tick count. */ tbprd = 0; for (clkdiv = 0; clkdiv < 8; ++clkdiv) { const u_int cd = 1 << clkdiv; for (hspclkdiv = 0; hspclkdiv < 8; ++hspclkdiv) { const u_int cdhs = max(1, hspclkdiv * 2); pwmclk = PWM_CLOCK / (cd * cdhs); pwmtick = NS_PER_SEC / pwmclk; if (period / pwmtick < 65536) { tbprd = period / pwmtick; break; } } if (tbprd != 0) break; } /* Handle requested period too long for available clock divisors. 
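 * (Worked example, for illustration only: with the 100 MHz base clock, a
 * requested period of 20,000,000 ns -- a 50 Hz servo-style period -- first
 * fits a 16-bit count at clkdiv=2, hspclkdiv=4, i.e. an overall /32
 * prescale: pwmclk = 3.125 MHz, clktick = 320 ns, tbprd = 62500, which
 * reproduces the 20 ms period exactly.  Requests beyond roughly 1.17 s
 * exceed 65535 ticks even at the largest prescale and fall through to the
 * failure case below.)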
*/ if (tbprd == 0) return (false); /* * If anything has changed from the current settings, reprogram the * clock divisors and period register. */ if (sc->sc_clkfreq != pwmclk || sc->sc_clktick != pwmtick || sc->sc_period != tbprd * pwmtick) { sc->sc_clkfreq = pwmclk; sc->sc_clktick = pwmtick; sc->sc_period = tbprd * pwmtick; PWM_LOCK_ASSERT(sc); regval = EPWM_READ2(sc, EPWM_TBCTL); regval &= ~(TBCTL_CLKDIV_MASK | TBCTL_HSPCLKDIV_MASK); regval |= TBCTL_CLKDIV(clkdiv) | TBCTL_HSPCLKDIV(hspclkdiv); EPWM_WRITE2(sc, EPWM_TBCTL, regval); EPWM_WRITE2(sc, EPWM_TBPRD, tbprd - 1); #if 0 device_printf(sc->sc_dev, "clkdiv %u hspclkdiv %u tbprd %u " "clkfreq %u Hz clktick %u ns period got %u requested %u\n", clkdiv, hspclkdiv, tbprd - 1, sc->sc_clkfreq, sc->sc_clktick, sc->sc_period, period); #endif /* * If the period changed, that invalidates the current CMP * registers (duty values), just zero them out. */ am335x_ehrpwm_cfg_duty(sc, 0, 0); am335x_ehrpwm_cfg_duty(sc, 1, 0); } return (true); } static int am335x_ehrpwm_channel_count(device_t dev, u_int *nchannel) { *nchannel = NUM_CHANNELS; return (0); } static int am335x_ehrpwm_channel_config(device_t dev, u_int channel, u_int period, u_int duty) { struct am335x_ehrpwm_softc *sc; bool status; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); PWM_LOCK(sc); status = am335x_ehrpwm_cfg_period(sc, period); if (status) am335x_ehrpwm_cfg_duty(sc, channel, duty); PWM_UNLOCK(sc); return (status ? 0 : EINVAL); } static int am335x_ehrpwm_channel_get_config(device_t dev, u_int channel, u_int *period, u_int *duty) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); *period = sc->sc_period; *duty = sc->sc_channels[channel].duty; return (0); } static int am335x_ehrpwm_channel_set_flags(device_t dev, u_int channel, uint32_t flags) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); PWM_LOCK(sc); if (flags & PWM_POLARITY_INVERTED) { sc->sc_channels[channel].inverted = true; /* Action-Qualifier 15.2.2.5 */ if (channel == 0) EPWM_WRITE2(sc, EPWM_AQCTLA, (AQCTL_ZRO_CLEAR | AQCTL_CAU_SET)); else EPWM_WRITE2(sc, EPWM_AQCTLB, (AQCTL_ZRO_CLEAR | AQCTL_CBU_SET)); } else { sc->sc_channels[channel].inverted = false; if (channel == 0) EPWM_WRITE2(sc, EPWM_AQCTLA, (AQCTL_ZRO_SET | AQCTL_CAU_CLEAR)); else EPWM_WRITE2(sc, EPWM_AQCTLB, (AQCTL_ZRO_SET | AQCTL_CBU_CLEAR)); } PWM_UNLOCK(sc); return (0); } static int am335x_ehrpwm_channel_get_flags(device_t dev, u_int channel, uint32_t *flags) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); if (sc->sc_channels[channel].inverted == true) *flags = PWM_POLARITY_INVERTED; else *flags = 0; return (0); } static int am335x_ehrpwm_channel_enable(device_t dev, u_int channel, bool enable) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); PWM_LOCK(sc); am335x_ehrpwm_cfg_enable(sc, channel, enable); PWM_UNLOCK(sc); return (0); } static int am335x_ehrpwm_channel_is_enabled(device_t dev, u_int channel, bool *enabled) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); *enabled = sc->sc_channels[channel].enabled; return (0); } static int am335x_ehrpwm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "AM335x EHRPWM"); return 
(BUS_PROBE_DEFAULT); } static int am335x_ehrpwm_attach(device_t dev) { struct am335x_ehrpwm_softc *sc; uint16_t reg; sc = device_get_softc(dev); sc->sc_dev = dev; PWM_LOCK_INIT(sc); sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (sc->sc_mem_res == NULL) { device_printf(dev, "cannot allocate memory resources\n"); goto fail; } /* CONFIGURE EPWM */ reg = EPWM_READ2(sc, EPWM_TBCTL); reg &= ~(TBCTL_CLKDIV_MASK | TBCTL_HSPCLKDIV_MASK); EPWM_WRITE2(sc, EPWM_TBCTL, reg); EPWM_WRITE2(sc, EPWM_TBPRD, DEFAULT_PWM_PERIOD - 1); EPWM_WRITE2(sc, EPWM_CMPA, 0); EPWM_WRITE2(sc, EPWM_CMPB, 0); /* Action-Qualifier 15.2.2.5 */ EPWM_WRITE2(sc, EPWM_AQCTLA, (AQCTL_ZRO_SET | AQCTL_CAU_CLEAR)); EPWM_WRITE2(sc, EPWM_AQCTLB, (AQCTL_ZRO_SET | AQCTL_CBU_CLEAR)); /* Dead band 15.2.2.6 */ reg = EPWM_READ2(sc, EPWM_DBCTL); reg &= ~DBCTL_MASK; reg |= DBCTL_BYPASS; EPWM_WRITE2(sc, EPWM_DBCTL, reg); /* PWM-chopper described in 15.2.2.7 */ /* Acc. TRM used in pulse transformerbased gate drivers * to control the power switching-elements */ reg = EPWM_READ2(sc, EPWM_PCCTL); reg &= ~PCCTL_CHPEN_MASK; reg |= PCCTL_CHPEN_DISABLE; EPWM_WRITE2(sc, EPWM_PCCTL, PCCTL_CHPEN_DISABLE); /* Trip zone are described in 15.2.2.8. * Essential its used to detect faults and can be configured * to react on such faults.. */ /* disable TZn as one-shot / CVC trip source 15.2.4.18 */ EPWM_WRITE2(sc, EPWM_TZSEL, 0x0); /* reg described in 15.2.4.19 */ EPWM_WRITE2(sc, EPWM_TZCTL, 0xf); reg = EPWM_READ2(sc, EPWM_TZFLG); /* START EPWM */ reg &= ~TBCTL_CTRMODE_MASK; reg |= TBCTL_CTRMODE_UP | TBCTL_FREERUN; EPWM_WRITE2(sc, EPWM_TBCTL, reg); if ((sc->sc_busdev = device_add_child(dev, "pwmbus", -1)) == NULL) { device_printf(dev, "Cannot add child pwmbus\n"); // This driver can still do things even without the bus child. } bus_identify_children(dev); bus_attach_children(dev); return (0); fail: PWM_LOCK_DESTROY(sc); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); return(ENXIO); } static int am335x_ehrpwm_detach(device_t dev) { struct am335x_ehrpwm_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(sc->sc_dev)) != 0) return (error); PWM_LOCK(sc); - if (sc->sc_busdev != NULL) - device_delete_child(dev, sc->sc_busdev); - if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); PWM_UNLOCK(sc); PWM_LOCK_DESTROY(sc); return (0); } static phandle_t am335x_ehrpwm_get_node(device_t bus, device_t dev) { /* * Share our controller node with our pwmbus child; it instantiates * devices by walking the children contained within our node. 
*/ return ofw_bus_get_node(bus); } static device_method_t am335x_ehrpwm_methods[] = { DEVMETHOD(device_probe, am335x_ehrpwm_probe), DEVMETHOD(device_attach, am335x_ehrpwm_attach), DEVMETHOD(device_detach, am335x_ehrpwm_detach), /* ofw_bus_if */ DEVMETHOD(ofw_bus_get_node, am335x_ehrpwm_get_node), /* pwm interface */ DEVMETHOD(pwmbus_channel_count, am335x_ehrpwm_channel_count), DEVMETHOD(pwmbus_channel_config, am335x_ehrpwm_channel_config), DEVMETHOD(pwmbus_channel_get_config, am335x_ehrpwm_channel_get_config), DEVMETHOD(pwmbus_channel_set_flags, am335x_ehrpwm_channel_set_flags), DEVMETHOD(pwmbus_channel_get_flags, am335x_ehrpwm_channel_get_flags), DEVMETHOD(pwmbus_channel_enable, am335x_ehrpwm_channel_enable), DEVMETHOD(pwmbus_channel_is_enabled, am335x_ehrpwm_channel_is_enabled), DEVMETHOD_END }; static driver_t am335x_ehrpwm_driver = { "pwm", am335x_ehrpwm_methods, sizeof(struct am335x_ehrpwm_softc), }; DRIVER_MODULE(am335x_ehrpwm, am335x_pwmss, am335x_ehrpwm_driver, 0, 0); MODULE_VERSION(am335x_ehrpwm, 1); MODULE_DEPEND(am335x_ehrpwm, am335x_pwmss, 1, 1, 1); MODULE_DEPEND(am335x_ehrpwm, pwmbus, 1, 1, 1); diff --git a/sys/arm/ti/cpsw/if_cpsw.c b/sys/arm/ti/cpsw/if_cpsw.c index 732b5a81857a..674a7ed8575a 100644 --- a/sys/arm/ti/cpsw/if_cpsw.c +++ b/sys/arm/ti/cpsw/if_cpsw.c @@ -1,3016 +1,3014 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012 Damjan Marion * Copyright (c) 2016 Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * TI Common Platform Ethernet Switch (CPSW) Driver * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. * * This controller is documented in the AM335x Technical Reference * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. * * It is basically a single Ethernet port (port 0) wired internally to * a 3-port store-and-forward switch connected to two independent * "sliver" controllers (port 1 and port 2). You can operate the * controller in a variety of different ways by suitably configuring * the slivers and the Address Lookup Engine (ALE) that routes packets * between the ports. * * This code was developed and tested on a BeagleBone with * an AM335x SoC. 
*/ #include #include "opt_cpsw.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "syscon_if.h" #include #include #include #include #include #include #ifdef CPSW_ETHERSWITCH #include #include "etherswitch_if.h" #endif #include "if_cpswreg.h" #include "if_cpswvar.h" #include "miibus_if.h" /* Device probe/attach/detach. */ static int cpsw_probe(device_t); static int cpsw_attach(device_t); static int cpsw_detach(device_t); static int cpswp_probe(device_t); static int cpswp_attach(device_t); static int cpswp_detach(device_t); static phandle_t cpsw_get_node(device_t, device_t); /* Device Init/shutdown. */ static int cpsw_shutdown(device_t); static void cpswp_init(void *); static void cpswp_init_locked(void *); static void cpswp_stop_locked(struct cpswp_softc *); /* Device Suspend/Resume. */ static int cpsw_suspend(device_t); static int cpsw_resume(device_t); /* Ioctl. */ static int cpswp_ioctl(if_t, u_long command, caddr_t data); static int cpswp_miibus_readreg(device_t, int phy, int reg); static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); static void cpswp_miibus_statchg(device_t); /* Send/Receive packets. */ static void cpsw_intr_rx(void *arg); static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); static void cpsw_rx_enqueue(struct cpsw_softc *); static void cpswp_start(if_t); static void cpsw_intr_tx(void *); static void cpswp_tx_enqueue(struct cpswp_softc *); static int cpsw_tx_dequeue(struct cpsw_softc *); /* Misc interrupts and watchdog. */ static void cpsw_intr_rx_thresh(void *); static void cpsw_intr_misc(void *); static void cpswp_tick(void *); static void cpswp_ifmedia_sts(if_t, struct ifmediareq *); static int cpswp_ifmedia_upd(if_t); static void cpsw_tx_watchdog(void *); /* ALE support */ static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); static void cpsw_ale_dump_table(struct cpsw_softc *); static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, int); static int cpswp_ale_update_addresses(struct cpswp_softc *, int); /* Statistics and sysctls. */ static void cpsw_add_sysctls(struct cpsw_softc *); static void cpsw_stats_collect(struct cpsw_softc *); static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); #ifdef CPSW_ETHERSWITCH static etherswitch_info_t *cpsw_getinfo(device_t); static int cpsw_getport(device_t, etherswitch_port_t *); static int cpsw_setport(device_t, etherswitch_port_t *); static int cpsw_getconf(device_t, etherswitch_conf_t *); static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *); static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *); static int cpsw_readreg(device_t, int); static int cpsw_writereg(device_t, int, int); static int cpsw_readphy(device_t, int, int); static int cpsw_writephy(device_t, int, int, int); #endif /* * Arbitrary limit on number of segments in an mbuf to be transmitted. * Packets with more segments than this will be defragmented before * they are queued. */ #define CPSW_TXFRAGS 16 /* Shared resources. 
*/ static device_method_t cpsw_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpsw_probe), DEVMETHOD(device_attach, cpsw_attach), DEVMETHOD(device_detach, cpsw_detach), DEVMETHOD(device_shutdown, cpsw_shutdown), DEVMETHOD(device_suspend, cpsw_suspend), DEVMETHOD(device_resume, cpsw_resume), /* Bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, cpsw_get_node), #ifdef CPSW_ETHERSWITCH /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, cpsw_getinfo), DEVMETHOD(etherswitch_readreg, cpsw_readreg), DEVMETHOD(etherswitch_writereg, cpsw_writereg), DEVMETHOD(etherswitch_readphyreg, cpsw_readphy), DEVMETHOD(etherswitch_writephyreg, cpsw_writephy), DEVMETHOD(etherswitch_getport, cpsw_getport), DEVMETHOD(etherswitch_setport, cpsw_setport), DEVMETHOD(etherswitch_getvgroup, cpsw_getvgroup), DEVMETHOD(etherswitch_setvgroup, cpsw_setvgroup), DEVMETHOD(etherswitch_getconf, cpsw_getconf), #endif DEVMETHOD_END }; static driver_t cpsw_driver = { "cpswss", cpsw_methods, sizeof(struct cpsw_softc), }; DRIVER_MODULE(cpswss, simplebus, cpsw_driver, 0, 0); /* Port/Slave resources. */ static device_method_t cpswp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpswp_probe), DEVMETHOD(device_attach, cpswp_attach), DEVMETHOD(device_detach, cpswp_detach), /* MII interface */ DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), DEVMETHOD_END }; static driver_t cpswp_driver = { "cpsw", cpswp_methods, sizeof(struct cpswp_softc), }; #ifdef CPSW_ETHERSWITCH DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, 0, 0); MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1); #endif DRIVER_MODULE(cpsw, cpswss, cpswp_driver, 0, 0); DRIVER_MODULE(miibus, cpsw, miibus_driver, 0, 0); MODULE_DEPEND(cpsw, ether, 1, 1, 1); MODULE_DEPEND(cpsw, miibus, 1, 1, 1); #ifdef CPSW_ETHERSWITCH static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS]; #endif static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; static struct resource_spec irq_res_spec[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct { void (*cb)(void *); } cpsw_intr_cb[] = { { cpsw_intr_rx_thresh }, { cpsw_intr_rx }, { cpsw_intr_tx }, { cpsw_intr_misc }, }; /* Number of entries here must match size of stats * array in struct cpswp_softc. 
*/ static struct cpsw_stat { int reg; char *oid; } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { {0x00, "GoodRxFrames"}, {0x04, "BroadcastRxFrames"}, {0x08, "MulticastRxFrames"}, {0x0C, "PauseRxFrames"}, {0x10, "RxCrcErrors"}, {0x14, "RxAlignErrors"}, {0x18, "OversizeRxFrames"}, {0x1c, "RxJabbers"}, {0x20, "ShortRxFrames"}, {0x24, "RxFragments"}, {0x30, "RxOctets"}, {0x34, "GoodTxFrames"}, {0x38, "BroadcastTxFrames"}, {0x3c, "MulticastTxFrames"}, {0x40, "PauseTxFrames"}, {0x44, "DeferredTxFrames"}, {0x48, "CollisionsTxFrames"}, {0x4c, "SingleCollisionTxFrames"}, {0x50, "MultipleCollisionTxFrames"}, {0x54, "ExcessiveCollisions"}, {0x58, "LateCollisions"}, {0x5c, "TxUnderrun"}, {0x60, "CarrierSenseErrors"}, {0x64, "TxOctets"}, {0x68, "RxTx64OctetFrames"}, {0x6c, "RxTx65to127OctetFrames"}, {0x70, "RxTx128to255OctetFrames"}, {0x74, "RxTx256to511OctetFrames"}, {0x78, "RxTx512to1024OctetFrames"}, {0x7c, "RxTx1024upOctetFrames"}, {0x80, "NetOctets"}, {0x84, "RxStartOfFrameOverruns"}, {0x88, "RxMiddleOfFrameOverruns"}, {0x8c, "RxDmaOverruns"} }; /* * Basic debug support. */ static void cpsw_debugf_head(const char *funcname) { int t = (int)(time_second % (24 * 60 * 60)); printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); } static void cpsw_debugf(const char *fmt, ...) { va_list ap; va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } #define CPSW_DEBUGF(_sc, a) do { \ if ((_sc)->debug) { \ cpsw_debugf_head(__func__); \ cpsw_debugf a; \ } \ } while (0) /* * Locking macros */ #define CPSW_TX_LOCK(sc) do { \ mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->tx.lock); \ } while (0) #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) #define CPSW_RX_LOCK(sc) do { \ mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->rx.lock); \ } while (0) #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) #define CPSW_PORT_LOCK(_sc) do { \ mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ mtx_lock(&(_sc)->lock); \ } while (0) #define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) #define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) /* * Read/Write macros */ #define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) #define cpsw_write_4(_sc, _reg, _val) \ bus_write_4((_sc)->mem_res, (_reg), (_val)) #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) #define cpsw_cpdma_bd_paddr(sc, slot) \ BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) #define cpsw_cpdma_read_bd(sc, slot, val) \ bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd(sc, slot, val) \ bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) #define cpsw_cpdma_write_bd_flags(sc, slot, val) \ bus_write_2(sc->mem_res, slot->bd_offset + 14, val) #define cpsw_cpdma_read_bd_flags(sc, slot) \ bus_read_2(sc->mem_res, slot->bd_offset + 14) #define cpsw_write_hdp_slot(sc, queue, slot) \ cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) #define cpsw_read_cp(sc, queue) \ cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) #define cpsw_write_cp(sc, queue, val) \ cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) #define cpsw_write_cp_slot(sc, queue, slot) \ cpsw_write_cp(sc, queue, 
cpsw_cpdma_bd_paddr(sc, slot)) #if 0 /* XXX temporary function versions for debugging. */ static void cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t reg = queue->hdp_offset; uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); cpsw_write_4(sc, reg, v); } static void cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); cpsw_write_cp(sc, queue, v); } #endif /* * Expanded dump routines for verbose debugging. */ static void cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", "Port0"}; struct cpsw_cpdma_bd bd; const char *sep; int i; cpsw_cpdma_read_bd(sc, slot, &bd); printf("BD Addr : 0x%08x Next : 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next); printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); printf(" Flags: "); sep = ""; for (i = 0; i < 16; ++i) { if (bd.flags & (1 << (15 - i))) { printf("%s%s", sep, flags[i]); sep = ","; } } printf("\n"); if (slot->mbuf) { printf(" Ether: %14D\n", (char *)(slot->mbuf->m_data), " "); printf(" Packet: %16D\n", (char *)(slot->mbuf->m_data) + 14, " "); } } #define CPSW_DUMP_SLOT(cs, slot) do { \ IF_DEBUG(sc) { \ cpsw_dump_slot(sc, slot); \ } \ } while (0) static void cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) { struct cpsw_slot *slot; int i = 0; int others = 0; STAILQ_FOREACH(slot, q, next) { if (i > CPSW_TXFRAGS) ++others; else cpsw_dump_slot(sc, slot); ++i; } if (others) printf(" ... and %d more.\n", others); printf("\n"); } #define CPSW_DUMP_QUEUE(sc, q) do { \ IF_DEBUG(sc) { \ cpsw_dump_queue(sc, q); \ } \ } while (0) static void cpsw_init_slots(struct cpsw_softc *sc) { struct cpsw_slot *slot; int i; STAILQ_INIT(&sc->avail); /* Put the slot descriptors onto the global avail list. */ for (i = 0; i < nitems(sc->_slots); i++) { slot = &sc->_slots[i]; slot->bd_offset = cpsw_cpdma_bd_offset(i); STAILQ_INSERT_TAIL(&sc->avail, slot, next); } } static int cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) { const int max_slots = nitems(sc->_slots); struct cpsw_slot *slot; int i; if (requested < 0) requested = max_slots; for (i = 0; i < requested; ++i) { slot = STAILQ_FIRST(&sc->avail); if (slot == NULL) return (0); if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { device_printf(sc->dev, "failed to create dmamap\n"); return (ENOMEM); } STAILQ_REMOVE_HEAD(&sc->avail, next); STAILQ_INSERT_TAIL(&queue->avail, slot, next); ++queue->avail_queue_len; ++queue->queue_slots; } return (0); } static void cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { int error __diagused; if (slot->dmamap) { if (slot->mbuf) bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); KASSERT(error == 0, ("Mapping still active")); slot->dmamap = NULL; } if (slot->mbuf) { m_freem(slot->mbuf); slot->mbuf = NULL; } } static void cpsw_reset(struct cpsw_softc *sc) { int i; callout_stop(&sc->watchdog.callout); /* Reset RMII/RGMII wrapper. 
*/ cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) ; /* Disable TX and RX interrupts for all cores. */ for (i = 0; i < 3; ++i) { cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); } /* Reset CPSW subsystem. */ cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) ; /* Reset Sliver port 1 and 2 */ for (i = 0; i < 2; i++) { /* Reset */ cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) ; } /* Reset DMA controller. */ cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) ; /* Disable TX & RX DMA */ cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); /* Clear all queues. */ for (i = 0; i < 8; i++) { cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); } /* Clear all interrupt Masks */ cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); } static void cpsw_init(struct cpsw_softc *sc) { struct cpsw_slot *slot; uint32_t reg; /* Disable the interrupt pacing. */ reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg); /* Clear ALE */ cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL); /* Enable ALE */ reg = CPSW_ALE_CTL_ENABLE; if (sc->dualemac) reg |= CPSW_ALE_CTL_VLAN_AWARE; cpsw_write_4(sc, CPSW_ALE_CONTROL, reg); /* Set Host Port Mapping. */ cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); /* Initialize ALE: set host port to forwarding(3). */ cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); cpsw_write_4(sc, CPSW_SS_PTYPE, 0); /* Enable statistics for ports 0, 1 and 2 */ cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); /* Turn off flow control. */ cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); /* Make IP hdr aligned with 4 */ cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); /* Initialize RX Buffer Descriptors */ cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); /* Enable TX & RX DMA */ cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); /* Enable Interrupts for core 0 */ cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F); /* Enable host Error Interrupt */ cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); /* Enable interrupts for RX and TX on Channel 0 */ cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0)); cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1); /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff); /* Select MII in GMII_SEL, Internal Delay mode */ //ti_scm_reg_write_4(0x650, 0); /* Initialize active queues. 
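Writing the head descriptor pointer (HDP) with the first active slot, if any, hands the descriptor chain back to the DMA engine.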
*/ slot = STAILQ_FIRST(&sc->tx.active); if (slot != NULL) cpsw_write_hdp_slot(sc, &sc->tx, slot); slot = STAILQ_FIRST(&sc->rx.active); if (slot != NULL) cpsw_write_hdp_slot(sc, &sc->rx, slot); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len); cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS); /* Activate network interface. */ sc->rx.running = 1; sc->tx.running = 1; sc->watchdog.timer = 0; callout_init(&sc->watchdog.callout, 0); callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); } /* * * Device Probe, Attach, Detach. * */ static int cpsw_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,cpsw")) return (ENXIO); device_set_desc(dev, "3-port Switch Ethernet Subsystem"); return (BUS_PROBE_DEFAULT); } static int cpsw_intr_attach(struct cpsw_softc *sc) { int i; for (i = 0; i < CPSW_INTR_COUNT; i++) { if (bus_setup_intr(sc->dev, sc->irq_res[i], INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) { return (-1); } } return (0); } static void cpsw_intr_detach(struct cpsw_softc *sc) { int i; for (i = 0; i < CPSW_INTR_COUNT; i++) { if (sc->ih_cookie[i]) { bus_teardown_intr(sc->dev, sc->irq_res[i], sc->ih_cookie[i]); } } } static int cpsw_get_fdt_data(struct cpsw_softc *sc, int port) { char *name; int len, phy, vlan; pcell_t phy_id[3], vlan_id; phandle_t child; unsigned long mdio_child_addr; /* Find any slave with phy-handle/phy_id */ phy = -1; vlan = -1; for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { if (OF_getprop_alloc(child, "name", (void **)&name) < 0) continue; if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) { OF_prop_free(name); continue; } OF_prop_free(name); if (mdio_child_addr != slave_mdio_addr[port] && mdio_child_addr != (slave_mdio_addr[port] & 0xFFF)) continue; if (fdt_get_phyaddr(child, NULL, &phy, NULL) != 0){ /* Users with old DTB will have phy_id instead */ phy = -1; len = OF_getproplen(child, "phy_id"); if (len / sizeof(pcell_t) == 2) { /* Get phy address from fdt */ if (OF_getencprop(child, "phy_id", phy_id, len) > 0) phy = phy_id[1]; } } len = OF_getproplen(child, "dual_emac_res_vlan"); if (len / sizeof(pcell_t) == 1) { /* Get phy address from fdt */ if (OF_getencprop(child, "dual_emac_res_vlan", &vlan_id, len) > 0) { vlan = vlan_id; } } break; } if (phy == -1) return (ENXIO); sc->port[port].phy = phy; sc->port[port].vlan = vlan; return (0); } static int cpsw_attach(device_t dev) { int error, i; struct cpsw_softc *sc; uint32_t reg; sc = device_get_softc(dev); sc->dev = dev; sc->node = ofw_bus_get_node(dev); getbinuptime(&sc->attach_uptime); if (OF_getencprop(sc->node, "active_slave", &sc->active_slave, sizeof(sc->active_slave)) <= 0) { sc->active_slave = 0; } if (sc->active_slave > 1) sc->active_slave = 1; if (OF_hasprop(sc->node, "dual_emac")) sc->dualemac = 1; for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; if (cpsw_get_fdt_data(sc, i) != 0) { device_printf(dev, "failed to get PHY address from FDT\n"); return (ENXIO); } } /* Initialize mutexes */ mtx_init(&sc->tx.lock, device_get_nameunit(dev), "cpsw TX lock", MTX_DEF); mtx_init(&sc->rx.lock, device_get_nameunit(dev), "cpsw RX lock", MTX_DEF); /* Allocate IRQ resources */ error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); if (error) { device_printf(dev, "could not allocate IRQ resources\n"); cpsw_detach(dev); return (ENXIO); } sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
&sc->mem_rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(sc->dev, "failed to allocate memory resource\n"); cpsw_detach(dev); return (ENXIO); } reg = cpsw_read_4(sc, CPSW_SS_IDVER); device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), reg & 0xFF, (reg >> 11) & 0x1F); cpsw_add_sysctls(sc); /* Allocate a busdma tag and DMA safe memory for mbufs. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->mbuf_dtag); /* dmatag */ if (error) { device_printf(dev, "bus_dma_tag_create failed\n"); cpsw_detach(dev); return (error); } /* Allocate a NULL buffer for padding. */ sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO); cpsw_init_slots(sc); /* Allocate slots to TX and RX queues. */ STAILQ_INIT(&sc->rx.avail); STAILQ_INIT(&sc->rx.active); STAILQ_INIT(&sc->tx.avail); STAILQ_INIT(&sc->tx.active); // For now: 128 slots to TX, rest to RX. // XXX TODO: start with 32/64 and grow dynamically based on demand. if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) { device_printf(dev, "failed to allocate dmamaps\n"); cpsw_detach(dev); return (ENOMEM); } device_printf(dev, "Initial queue size TX=%d RX=%d\n", sc->tx.queue_slots, sc->rx.queue_slots); sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); if (cpsw_intr_attach(sc) == -1) { device_printf(dev, "failed to setup interrupts\n"); cpsw_detach(dev); return (ENXIO); } #ifdef CPSW_ETHERSWITCH for (i = 0; i < CPSW_VLANS; i++) cpsw_vgroups[i].vid = -1; #endif /* Reset the controller. */ cpsw_reset(sc); cpsw_init(sc); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; sc->port[i].dev = device_add_child(dev, "cpsw", i); if (sc->port[i].dev == NULL) { cpsw_detach(dev); return (ENXIO); } } bus_identify_children(dev); bus_attach_children(dev); return (0); } static int cpsw_detach(device_t dev) { struct cpsw_softc *sc; int error, i; - bus_generic_detach(dev); - sc = device_get_softc(dev); + error = bus_generic_detach(dev); + if (error != 0) + return (error); - for (i = 0; i < CPSW_PORTS; i++) { - if (sc->port[i].dev) - device_delete_child(dev, sc->port[i].dev); - } + sc = device_get_softc(dev); if (device_is_attached(dev)) { callout_stop(&sc->watchdog.callout); callout_drain(&sc->watchdog.callout); } /* Stop and release all interrupts */ cpsw_intr_detach(sc); /* Free dmamaps and mbufs */ for (i = 0; i < nitems(sc->_slots); ++i) cpsw_free_slot(sc, &sc->_slots[i]); /* Free null padding buffer. */ if (sc->nullpad) free(sc->nullpad, M_DEVBUF); /* Free DMA tag */ if (sc->mbuf_dtag) { error = bus_dma_tag_destroy(sc->mbuf_dtag); KASSERT(error == 0, ("Unable to destroy DMA tag")); } /* Free IO memory handler */ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); bus_release_resources(dev, irq_res_spec, sc->irq_res); /* Destroy mutexes */ mtx_destroy(&sc->rx.lock); mtx_destroy(&sc->tx.lock); /* Detach the switch device, if present. */ error = bus_generic_detach(dev); if (error != 0) return (error); return (device_delete_children(dev)); } static phandle_t cpsw_get_node(device_t bus, device_t dev) { /* Share controller node with port device. 
*/ return (ofw_bus_get_node(bus)); } static int cpswp_probe(device_t dev) { if (device_get_unit(dev) > 1) { device_printf(dev, "Only two ports are supported.\n"); return (ENXIO); } device_set_desc(dev, "Ethernet Switch Port"); return (BUS_PROBE_DEFAULT); } static int cpswp_attach(device_t dev) { int error; if_t ifp; struct cpswp_softc *sc; uint32_t reg; uint8_t mac_addr[ETHER_ADDR_LEN]; phandle_t opp_table; struct syscon *syscon; sc = device_get_softc(dev); sc->dev = dev; sc->pdev = device_get_parent(dev); sc->swsc = device_get_softc(sc->pdev); sc->unit = device_get_unit(dev); sc->phy = sc->swsc->port[sc->unit].phy; sc->vlan = sc->swsc->port[sc->unit].vlan; if (sc->swsc->dualemac && sc->vlan == -1) sc->vlan = sc->unit + 1; if (sc->unit == 0) { sc->physel = MDIOUSERPHYSEL0; sc->phyaccess = MDIOUSERACCESS0; } else { sc->physel = MDIOUSERPHYSEL1; sc->phyaccess = MDIOUSERACCESS1; } mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock", MTX_DEF); /* Allocate network interface */ ifp = sc->ifp = if_alloc(IFT_ETHER); if_initname(ifp, device_get_name(sc->dev), sc->unit); if_setsoftc(ifp, sc); if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST); if_setcapenable(ifp, if_getcapabilities(ifp)); if_setinitfn(ifp, cpswp_init); if_setstartfn(ifp, cpswp_start); if_setioctlfn(ifp, cpswp_ioctl); if_setsendqlen(ifp, sc->swsc->tx.queue_slots); if_setsendqready(ifp); /* FIXME: For now; Go and kidnap syscon from opp-table */ /* ti,cpsw actually have an optional syscon reference but only for am33xx?? */ opp_table = OF_finddevice("/opp-table"); if (opp_table == -1) { device_printf(dev, "Cant find /opp-table\n"); cpswp_detach(dev); return (ENXIO); } if (!OF_hasprop(opp_table, "syscon")) { device_printf(dev, "/opp-table doesnt have required syscon property\n"); cpswp_detach(dev); return (ENXIO); } if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &syscon) != 0) { device_printf(dev, "Failed to get syscon\n"); cpswp_detach(dev); return (ENXIO); } /* Get high part of MAC address from control module (mac_id[0|1]_hi) */ reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_HI + sc->unit * 8); mac_addr[0] = reg & 0xFF; mac_addr[1] = (reg >> 8) & 0xFF; mac_addr[2] = (reg >> 16) & 0xFF; mac_addr[3] = (reg >> 24) & 0xFF; /* Get low part of MAC address from control module (mac_id[0|1]_lo) */ reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_LO + sc->unit * 8); mac_addr[4] = reg & 0xFF; mac_addr[5] = (reg >> 8) & 0xFF; error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd, cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0); if (error) { device_printf(dev, "attaching PHYs failed\n"); cpswp_detach(dev); return (error); } sc->mii = device_get_softc(sc->miibus); /* Select PHY and enable interrupts */ cpsw_write_4(sc->swsc, sc->physel, MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F)); ether_ifattach(sc->ifp, mac_addr); callout_init(&sc->mii_callout, 0); return (0); } static int cpswp_detach(device_t dev) { struct cpswp_softc *sc; sc = device_get_softc(dev); CPSW_DEBUGF(sc->swsc, ("")); if (device_is_attached(dev)) { ether_ifdetach(sc->ifp); CPSW_PORT_LOCK(sc); cpswp_stop_locked(sc); CPSW_PORT_UNLOCK(sc); callout_drain(&sc->mii_callout); } bus_generic_detach(dev); if_free(sc->ifp); mtx_destroy(&sc->lock); return (0); } /* * * Init/Shutdown. 
* */ static int cpsw_ports_down(struct cpsw_softc *sc) { struct cpswp_softc *psc; if_t ifp1, ifp2; if (!sc->dualemac) return (1); psc = device_get_softc(sc->port[0].dev); ifp1 = psc->ifp; psc = device_get_softc(sc->port[1].dev); ifp2 = psc->ifp; if ((if_getflags(ifp1) & IFF_UP) == 0 && (if_getflags(ifp2) & IFF_UP) == 0) return (1); return (0); } static void cpswp_init(void *arg) { struct cpswp_softc *sc = arg; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); cpswp_init_locked(arg); CPSW_PORT_UNLOCK(sc); } static void cpswp_init_locked(void *arg) { #ifdef CPSW_ETHERSWITCH int i; #endif struct cpswp_softc *sc = arg; if_t ifp; uint32_t reg; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK_ASSERT(sc); ifp = sc->ifp; if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) return; getbinuptime(&sc->init_uptime); if (!sc->swsc->rx.running && !sc->swsc->tx.running) { /* Reset the controller. */ cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } /* Set Slave Mapping. */ cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 0x33221100); cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); /* Enable MAC RX/TX modules. */ /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg |= CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); /* Initialize ALE: set port to forwarding, initialize addrs */ cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); cpswp_ale_update_addresses(sc, 1); if (sc->swsc->dualemac) { /* Set Port VID. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), sc->vlan & 0xfff); cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ #ifdef CPSW_ETHERSWITCH for (i = 0; i < CPSW_VLANS; i++) { if (cpsw_vgroups[i].vid != -1) continue; cpsw_vgroups[i].vid = sc->vlan; break; } #endif } mii_mediachg(sc->mii); callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } static int cpsw_shutdown(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static void cpsw_rx_teardown(struct cpsw_softc *sc) { int i = 0; CPSW_RX_LOCK(sc); CPSW_DEBUGF(sc, ("starting RX teardown")); sc->rx.teardown = 1; cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); CPSW_RX_UNLOCK(sc); while (sc->rx.running) { if (++i > 10) { device_printf(sc->dev, "Unable to cleanly shutdown receiver\n"); return; } DELAY(200); } if (!sc->rx.running) CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i)); } static void cpsw_tx_teardown(struct cpsw_softc *sc) { int i = 0; CPSW_TX_LOCK(sc); CPSW_DEBUGF(sc, ("starting TX teardown")); /* Start the TX queue teardown if queue is not empty. 
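When descriptors are still outstanding the TX_TEARDOWN register is written and the teardown flag is set later, when cpsw_tx_dequeue() sees the TDOWNCMPLT completion; with an empty queue there is nothing for the hardware to tear down, so the flag is set directly.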
*/ if (STAILQ_FIRST(&sc->tx.active) != NULL) cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); else sc->tx.teardown = 1; cpsw_tx_dequeue(sc); while (sc->tx.running && ++i < 10) { DELAY(200); cpsw_tx_dequeue(sc); } if (sc->tx.running) { device_printf(sc->dev, "Unable to cleanly shutdown transmitter\n"); } CPSW_DEBUGF(sc, ("finished TX teardown (%d retries, %d idle buffers)", i, sc->tx.active_queue_len)); CPSW_TX_UNLOCK(sc); } static void cpswp_stop_locked(struct cpswp_softc *sc) { if_t ifp; uint32_t reg; ifp = sc->ifp; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK_ASSERT(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; /* Disable interface */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); /* Stop ticker */ callout_stop(&sc->mii_callout); /* Tear down the RX/TX queues. */ if (cpsw_ports_down(sc->swsc)) { cpsw_rx_teardown(sc->swsc); cpsw_tx_teardown(sc->swsc); } /* Stop MAC RX/TX modules. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg &= ~CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); if (cpsw_ports_down(sc->swsc)) { /* Capture stats before we reset controller. */ cpsw_stats_collect(sc->swsc); cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } } /* * Suspend/Resume. */ static int cpsw_suspend(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static int cpsw_resume(device_t dev) { struct cpsw_softc *sc; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("UNIMPLEMENTED")); return (0); } /* * * IOCTL * */ static void cpsw_set_promisc(struct cpswp_softc *sc, int set) { uint32_t reg; /* * Enabling promiscuous mode requires ALE_BYPASS to be enabled. * That disables the ALE forwarding logic and causes every * packet to be sent only to the host port. In bypass mode, * the ALE processes host port transmit packets the same as in * normal mode. 
*/ reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); reg &= ~CPSW_ALE_CTL_BYPASS; if (set) reg |= CPSW_ALE_CTL_BYPASS; cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); } static void cpsw_set_allmulti(struct cpswp_softc *sc, int set) { if (set) { printf("All-multicast mode unimplemented\n"); } } static int cpswp_ioctl(if_t ifp, u_long command, caddr_t data) { struct cpswp_softc *sc; struct ifreq *ifr; int error; uint32_t changed; error = 0; sc = if_getsoftc(ifp); ifr = (struct ifreq *)data; switch (command) { case SIOCSIFCAP: changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap; if (changed & IFCAP_HWCSUM) { if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) if_setcapenablebit(ifp, IFCAP_HWCSUM, 0); else if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); } error = 0; break; case SIOCSIFFLAGS: CPSW_PORT_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { changed = if_getflags(ifp) ^ sc->if_flags; CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed)); if (changed & IFF_PROMISC) cpsw_set_promisc(sc, if_getflags(ifp) & IFF_PROMISC); if (changed & IFF_ALLMULTI) cpsw_set_allmulti(sc, if_getflags(ifp) & IFF_ALLMULTI); } else { CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: starting up")); cpswp_init_locked(sc); } } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down")); cpswp_stop_locked(sc); } sc->if_flags = if_getflags(ifp); CPSW_PORT_UNLOCK(sc); break; case SIOCADDMULTI: cpswp_ale_update_addresses(sc, 0); break; case SIOCDELMULTI: /* Ugh. DELMULTI doesn't provide the specific address being removed, so the best we can do is remove everything and rebuild it all. */ cpswp_ale_update_addresses(sc, 1); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } /* * * MIIBUS * */ static int cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg) { uint32_t r, retries = CPSW_MIIBUS_RETRIES; while (--retries) { r = cpsw_read_4(sc, reg); if ((r & MDIO_PHYACCESS_GO) == 0) return (1); DELAY(CPSW_MIIBUS_DELAY); } return (0); } static int cpswp_miibus_readreg(device_t dev, int phy, int reg) { struct cpswp_softc *sc; uint32_t cmd, r; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to read\n"); return (0); } /* Set GO, reg, phy */ cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during read\n"); return (0); } r = cpsw_read_4(sc->swsc, sc->phyaccess); if ((r & MDIO_PHYACCESS_ACK) == 0) { device_printf(dev, "Failed to read from PHY.\n"); r = 0; } return (r & 0xFFFF); } static int cpswp_miibus_writereg(device_t dev, int phy, int reg, int value) { struct cpswp_softc *sc; uint32_t cmd; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to write\n"); return (0); } /* Set GO, WRITE, reg, phy, and value */ cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE | (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF); cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during write\n"); return (0); } return (0); } static void cpswp_miibus_statchg(device_t dev) { struct cpswp_softc *sc; uint32_t mac_control, reg; sc = device_get_softc(dev); CPSW_DEBUGF(sc->swsc, ("")); 
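/* Rebuild MACCONTROL from the negotiated media below: GIG for the 1000Mb subtypes, IFCTL_A for 100_TX, and FULLDUPLEX when FDX is active. */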
reg = CPSW_SL_MACCONTROL(sc->unit); mac_control = cpsw_read_4(sc->swsc, reg); mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A | CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX); switch(IFM_SUBTYPE(sc->mii->mii_media_active)) { case IFM_1000_SX: case IFM_1000_LX: case IFM_1000_CX: case IFM_1000_T: mac_control |= CPSW_SL_MACTL_GIG; break; case IFM_100_TX: mac_control |= CPSW_SL_MACTL_IFCTL_A; break; } if (sc->mii->mii_media_active & IFM_FDX) mac_control |= CPSW_SL_MACTL_FULLDUPLEX; cpsw_write_4(sc->swsc, reg, mac_control); } /* * * Transmit/Receive Packets. * */ static void cpsw_intr_rx(void *arg) { struct cpsw_softc *sc; if_t ifp; struct mbuf *received, *next; sc = (struct cpsw_softc *)arg; CPSW_RX_LOCK(sc); if (sc->rx.teardown) { sc->rx.running = 0; sc->rx.teardown = 0; cpsw_write_cp(sc, &sc->rx, 0xfffffffc); } received = cpsw_rx_dequeue(sc); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); CPSW_RX_UNLOCK(sc); while (received != NULL) { next = received->m_nextpkt; received->m_nextpkt = NULL; ifp = received->m_pkthdr.rcvif; if_input(ifp, received); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); received = next; } } static struct mbuf * cpsw_rx_dequeue(struct cpsw_softc *sc) { int nsegs, port, removed; struct cpsw_cpdma_bd bd; struct cpsw_slot *last, *slot; struct cpswp_softc *psc; struct mbuf *m, *m0, *mb_head, *mb_tail; uint16_t m0_flags; nsegs = 0; m0 = NULL; last = NULL; mb_head = NULL; mb_tail = NULL; removed = 0; /* Pull completed packets off hardware RX queue. */ while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { cpsw_cpdma_read_bd(sc, slot, &bd); /* * Stop on packets still in use by hardware, but do not stop * on packets with the teardown complete flag, they will be * discarded later. */ if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) == CPDMA_BD_OWNER) break; last = slot; ++removed; STAILQ_REMOVE_HEAD(&sc->rx.active, next); STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m = slot->mbuf; slot->mbuf = NULL; if (bd.flags & CPDMA_BD_TDOWNCMPLT) { CPSW_DEBUGF(sc, ("RX teardown is complete")); m_freem(m); sc->rx.running = 0; sc->rx.teardown = 0; break; } port = (bd.flags & CPDMA_BD_PORT_MASK) - 1; KASSERT(port >= 0 && port <= 1, ("patcket received with invalid port: %d", port)); psc = device_get_softc(sc->port[port].dev); /* Set up mbuf */ m->m_data += bd.bufoff; m->m_len = bd.buflen; if (bd.flags & CPDMA_BD_SOP) { m->m_pkthdr.len = bd.pktlen; m->m_pkthdr.rcvif = psc->ifp; m->m_flags |= M_PKTHDR; m0_flags = bd.flags; m0 = m; } nsegs++; m->m_next = NULL; m->m_nextpkt = NULL; if (bd.flags & CPDMA_BD_EOP && m0 != NULL) { if (m0_flags & CPDMA_BD_PASS_CRC) m_adj(m0, -ETHER_CRC_LEN); m0_flags = 0; m0 = NULL; if (nsegs > sc->rx.longest_chain) sc->rx.longest_chain = nsegs; nsegs = 0; } if ((if_getcapenable(psc->ifp) & IFCAP_RXCSUM) != 0) { /* check for valid CRC by looking into pkt_err[5:4] */ if ((bd.flags & (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) == CPDMA_BD_SOP) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; m->m_pkthdr.csum_data = 0xffff; } } if (STAILQ_FIRST(&sc->rx.active) != NULL && (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) == (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { cpsw_write_hdp_slot(sc, &sc->rx, STAILQ_FIRST(&sc->rx.active)); sc->rx.queue_restart++; } /* Add mbuf to packet list to be returned. 
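Complete packets are chained through m_nextpkt, additional fragments of the current packet are linked through m_next, and a fragment that arrives without a preceding SOP descriptor is dropped.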
*/ if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) { mb_tail->m_nextpkt = m; } else if (mb_tail != NULL) { mb_tail->m_next = m; } else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) { if (bootverbose) printf( "%s: %s: discanding fragment packet w/o header\n", __func__, if_name(psc->ifp)); m_freem(m); continue; } else { mb_head = m; } mb_tail = m; } if (removed != 0) { cpsw_write_cp_slot(sc, &sc->rx, last); sc->rx.queue_removes += removed; sc->rx.avail_queue_len += removed; sc->rx.active_queue_len -= removed; if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed)); } return (mb_head); } static void cpsw_rx_enqueue(struct cpsw_softc *sc) { bus_dma_segment_t seg[1]; struct cpsw_cpdma_bd bd; struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot; int error, nsegs, added = 0; /* Register new mbufs with hardware. */ first_new_slot = NULL; last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { if (first_new_slot == NULL) first_new_slot = slot; if (slot->mbuf == NULL) { slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (slot->mbuf == NULL) { device_printf(sc->dev, "Unable to fill RX queue\n"); break; } slot->mbuf->m_len = slot->mbuf->m_pkthdr.len = slot->mbuf->m_ext.ext_size; } error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); KASSERT(error == 0, ("DMA error (error=%d)", error)); if (error != 0 || nsegs != 1) { device_printf(sc->dev, "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", __func__, nsegs, error); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; } bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); /* Create and submit new rx descriptor. */ if ((next = STAILQ_NEXT(slot, next)) != NULL) bd.next = cpsw_cpdma_bd_paddr(sc, next); else bd.next = 0; bd.bufptr = seg->ds_addr; bd.bufoff = 0; bd.buflen = MCLBYTES - 1; bd.pktlen = bd.buflen; bd.flags = CPDMA_BD_OWNER; cpsw_cpdma_write_bd(sc, slot, &bd); ++added; STAILQ_REMOVE_HEAD(&sc->rx.avail, next); STAILQ_INSERT_TAIL(&sc->rx.active, slot, next); } if (added == 0 || first_new_slot == NULL) return; CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added)); /* Link new entries to hardware RX queue. */ if (last_old_slot == NULL) { /* Start a fresh queue. */ cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); } else { /* Add buffers to end of current queue. 
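The new chain is linked in by pointing the next field of the last descriptor already on the hardware queue at the first new slot.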
*/ cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); } sc->rx.queue_adds += added; sc->rx.avail_queue_len -= added; sc->rx.active_queue_len += added; cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added); if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) sc->rx.max_active_queue_len = sc->rx.active_queue_len; } static void cpswp_start(if_t ifp) { struct cpswp_softc *sc; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || sc->swsc->tx.running == 0) { return; } CPSW_TX_LOCK(sc->swsc); cpswp_tx_enqueue(sc); cpsw_tx_dequeue(sc->swsc); CPSW_TX_UNLOCK(sc->swsc); } static void cpsw_intr_tx(void *arg) { struct cpsw_softc *sc; sc = (struct cpsw_softc *)arg; CPSW_TX_LOCK(sc); if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) cpsw_write_cp(sc, &sc->tx, 0xfffffffc); cpsw_tx_dequeue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); CPSW_TX_UNLOCK(sc); } static void cpswp_tx_enqueue(struct cpswp_softc *sc) { bus_dma_segment_t segs[CPSW_TXFRAGS]; struct cpsw_cpdma_bd bd; struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot; struct mbuf *m0; int error, nsegs, seg, added = 0, padlen; /* Pull pending packets from IF queue and prep them for DMA. */ last = NULL; first_new_slot = NULL; last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { m0 = if_dequeue(sc->ifp); if (m0 == NULL) break; slot->mbuf = m0; padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len; if (padlen < 0) padlen = 0; else if (padlen > 0) m_append(slot->mbuf, padlen, sc->swsc->nullpad); /* Create mapping in DMA memory */ error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); /* If the packet is too fragmented, try to simplify. */ if (error == EFBIG || (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) { bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); m0 = m_defrag(slot->mbuf, M_NOWAIT); if (m0 == NULL) { device_printf(sc->dev, "Can't defragment packet; dropping\n"); m_freem(slot->mbuf); } else { CPSW_DEBUGF(sc->swsc, ("Requeueing defragmented packet")); if_sendq_prepend(sc->ifp, m0); } slot->mbuf = NULL; continue; } if (error != 0) { device_printf(sc->dev, "%s: Can't setup DMA (error=%d), dropping packet\n", __func__, error); bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; } bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREWRITE); CPSW_DEBUGF(sc->swsc, ("Queueing TX packet: %d segments + %d pad bytes", nsegs, padlen)); if (first_new_slot == NULL) first_new_slot = slot; /* Link from the previous descriptor. */ if (last != NULL) cpsw_cpdma_write_bd_next(sc->swsc, last, slot); slot->ifp = sc->ifp; /* If there is only one segment, the for() loop * gets skipped and the single buffer gets set up * as both SOP and EOP. */ if (nsegs > 1) { next = STAILQ_NEXT(slot, next); bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); } else bd.next = 0; /* Start by setting up the first buffer. 
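The first descriptor carries SOP, the total packet length and, in dual-EMAC mode, the destination port; the remaining segment descriptors below carry only the OWNER flag, with EOP added to the last one.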
*/ bd.bufptr = segs[0].ds_addr; bd.bufoff = 0; bd.buflen = segs[0].ds_len; bd.pktlen = m_length(slot->mbuf, NULL); bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER; if (sc->swsc->dualemac) { bd.flags |= CPDMA_BD_TO_PORT; bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK); } for (seg = 1; seg < nsegs; ++seg) { /* Save the previous buffer (which isn't EOP) */ cpsw_cpdma_write_bd(sc->swsc, slot, &bd); STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); slot = STAILQ_FIRST(&sc->swsc->tx.avail); /* Setup next buffer (which isn't SOP) */ if (nsegs > seg + 1) { next = STAILQ_NEXT(slot, next); bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); } else bd.next = 0; bd.bufptr = segs[seg].ds_addr; bd.bufoff = 0; bd.buflen = segs[seg].ds_len; bd.pktlen = 0; bd.flags = CPDMA_BD_OWNER; } /* Save the final buffer. */ bd.flags |= CPDMA_BD_EOP; cpsw_cpdma_write_bd(sc->swsc, slot, &bd); STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); last = slot; added += nsegs; if (nsegs > sc->swsc->tx.longest_chain) sc->swsc->tx.longest_chain = nsegs; BPF_MTAP(sc->ifp, m0); } if (first_new_slot == NULL) return; /* Attach the list of new buffers to the hardware TX queue. */ if (last_old_slot != NULL && (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & CPDMA_BD_EOQ) == 0) { /* Add buffers to end of current queue. */ cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, first_new_slot); } else { /* Start a fresh queue. */ cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); } sc->swsc->tx.queue_adds += added; sc->swsc->tx.avail_queue_len -= added; sc->swsc->tx.active_queue_len += added; if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; } CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added)); } static int cpsw_tx_dequeue(struct cpsw_softc *sc) { struct cpsw_slot *slot, *last_removed_slot = NULL; struct cpsw_cpdma_bd bd; uint32_t flags, removed = 0; /* Pull completed buffers off the hardware TX queue. */ slot = STAILQ_FIRST(&sc->tx.active); while (slot != NULL) { flags = cpsw_cpdma_read_bd_flags(sc, slot); /* TearDown complete is only marked on the SOP for the packet. */ if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) { sc->tx.teardown = 1; } if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) == (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0) break; /* Hardware is still using this packet. */ bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; if (slot->ifp) { if (sc->tx.teardown == 0) if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); else if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1); } /* Dequeue any additional buffers used by this packet. */ while (slot != NULL && slot->mbuf == NULL) { STAILQ_REMOVE_HEAD(&sc->tx.active, next); STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); ++removed; last_removed_slot = slot; slot = STAILQ_FIRST(&sc->tx.active); } cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); /* Restart the TX queue if necessary. 
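If the last completed descriptor carries EOQ but still has a non-zero next pointer, the DMA engine stopped at end-of-queue before a newly appended chain was picked up, so HDP is rewritten to restart transmission.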
*/ cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); if (slot != NULL && bd.next != 0 && (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) == (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { cpsw_write_hdp_slot(sc, &sc->tx, slot); sc->tx.queue_restart++; break; } } if (removed != 0) { sc->tx.queue_removes += removed; sc->tx.active_queue_len -= removed; sc->tx.avail_queue_len += removed; if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed)); } if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) { CPSW_DEBUGF(sc, ("TX teardown is complete")); sc->tx.teardown = 0; sc->tx.running = 0; } return (removed); } /* * * Miscellaneous interrupts. * */ static void cpsw_intr_rx_thresh(void *arg) { struct cpsw_softc *sc; if_t ifp; struct mbuf *received, *next; sc = (struct cpsw_softc *)arg; CPSW_RX_LOCK(sc); received = cpsw_rx_dequeue(sc); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); CPSW_RX_UNLOCK(sc); while (received != NULL) { next = received->m_nextpkt; received->m_nextpkt = NULL; ifp = received->m_pkthdr.rcvif; if_input(ifp, received); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); received = next; } } static void cpsw_intr_misc_host_error(struct cpsw_softc *sc) { uint32_t intstat; uint32_t dmastat; int txerr, rxerr, txchan, rxchan; printf("\n\n"); device_printf(sc->dev, "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); printf("\n\n"); intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); txerr = (dmastat >> 20) & 15; txchan = (dmastat >> 16) & 7; rxerr = (dmastat >> 12) & 15; rxchan = (dmastat >> 8) & 7; switch (txerr) { case 0: break; case 1: printf("SOP error on TX channel %d\n", txchan); break; case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); break; case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); break; case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); break; case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); break; case 6: printf("Packet length error on TX channel %d\n", txchan); break; default: printf("Unknown error on TX channel %d\n", txchan); break; } if (txerr != 0) { printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); printf("CPSW_CPDMA_TX%d_CP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); cpsw_dump_queue(sc, &sc->tx.active); } switch (rxerr) { case 0: break; case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); break; case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); break; case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); break; case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); break; default: printf("Unknown RX error on RX channel %d\n", rxchan); break; } if (rxerr != 0) { printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); printf("CPSW_CPDMA_RX%d_CP=0x%x\n", rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); cpsw_dump_queue(sc, &sc->rx.active); } printf("\nALE Table\n"); cpsw_ale_dump_table(sc); // XXX do something useful here?? panic("CPSW HOST ERROR INTERRUPT"); // Suppress this interrupt in the future. 
cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); // The watchdog will probably reset the controller // in a little while. It will probably fail again. } static void cpsw_intr_misc(void *arg) { struct cpsw_softc *sc = arg; uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); if (stat & CPSW_WR_C_MISC_EVNT_PEND) CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); if (stat & CPSW_WR_C_MISC_STAT_PEND) cpsw_stats_collect(sc); if (stat & CPSW_WR_C_MISC_HOST_PEND) cpsw_intr_misc_host_error(sc); if (stat & CPSW_WR_C_MISC_MDIOLINK) { cpsw_write_4(sc, MDIOLINKINTMASKED, cpsw_read_4(sc, MDIOLINKINTMASKED)); } if (stat & CPSW_WR_C_MISC_MDIOUSER) { CPSW_DEBUGF(sc, ("MDIO operation completed interrupt unimplemented")); } cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); } /* * * Periodic Checks and Watchdog. * */ static void cpswp_tick(void *msc) { struct cpswp_softc *sc = msc; /* Check for media type change */ mii_tick(sc->mii); if (sc->media_status != sc->mii->mii_media.ifm_media) { printf("%s: media type changed (ifm_media=%x)\n", __func__, sc->mii->mii_media.ifm_media); cpswp_ifmedia_upd(sc->ifp); } /* Schedule another timeout one second from now */ callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); } static void cpswp_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct cpswp_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; CPSW_PORT_UNLOCK(sc); } static int cpswp_ifmedia_upd(if_t ifp) { struct cpswp_softc *sc; sc = if_getsoftc(ifp); CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); mii_mediachg(sc->mii); sc->media_status = sc->mii->mii_media.ifm_media; CPSW_PORT_UNLOCK(sc); return (0); } static void cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) { struct cpswp_softc *psc; int i; cpsw_debugf_head("CPSW watchdog"); device_printf(sc->dev, "watchdog timeout\n"); printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0))); printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0))); cpsw_dump_queue(sc, &sc->tx.active); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } } static void cpsw_tx_watchdog(void *msc) { struct cpsw_softc *sc; sc = msc; CPSW_TX_LOCK(sc); if (sc->tx.active_queue_len == 0 || !sc->tx.running) { sc->watchdog.timer = 0; /* Nothing to do. */ } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ } else if (cpsw_tx_dequeue(sc) > 0) { sc->watchdog.timer = 0; /* We just did something. */ } else { /* There was something to do but it didn't get done. */ ++sc->watchdog.timer; if (sc->watchdog.timer > 5) { sc->watchdog.timer = 0; ++sc->watchdog.resets; cpsw_tx_watchdog_full_reset(sc); } } sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; CPSW_TX_UNLOCK(sc); /* Schedule another timeout one second from now */ callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); } /* * * ALE support routines. 
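* Each ALE table entry is read and written as three 32-bit words through the TBLW0..TBLW2 registers; the entry index is selected via TBLCTL, and setting bit 31 of TBLCTL turns the access into a write.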
* */ static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); } static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); } static void cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; /* First four entries are link address and broadcast. */ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); } } } static int cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, uint8_t *mac) { int free_index = -1, matching_index = -1, i; uint32_t ale_entry[3], ale_type; /* Find a matching entry or a free entry. */ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && (((ale_entry[0] >>24) & 0xFF) == mac[2]) && (((ale_entry[0] >>16) & 0xFF) == mac[3]) && (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (ENOMEM); i = free_index; } if (vlan != -1) ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; else ale_type = ALE_TYPE_ADDR << 28; /* Set MAC address */ ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = mac[0] << 8 | mac[1]; /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */ ale_entry[1] |= ALE_MCAST_FWD | ale_type; /* Set portmask [68:66] */ ale_entry[2] = (portmap & 7) << 2; cpsw_ale_write_entry(sc, i, ale_entry); return 0; } static void cpsw_ale_dump_table(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); switch (ALE_TYPE(ale_entry)) { case ALE_TYPE_VLAN: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); printf("\n"); break; case ALE_TYPE_ADDR: case ALE_TYPE_VLAN_ADDR: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", (ale_entry[1] >> 8) & 0xFF, (ale_entry[1] >> 0) & 0xFF, (ale_entry[0] >>24) & 0xFF, (ale_entry[0] >>16) & 0xFF, (ale_entry[0] >> 8) & 0xFF, (ale_entry[0] >> 0) & 0xFF); printf(ALE_MCAST(ale_entry) ? 
"mcast " : "ucast "); if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("port: %u ", ALE_PORTS(ale_entry)); printf("\n"); break; } } printf("\n"); } static u_int cpswp_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct cpswp_softc *sc = arg; uint32_t portmask; if (sc->swsc->dualemac) portmask = 1 << (sc->unit + 1) | 1 << 0; else portmask = 7; cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR(sdl)); return (1); } static int cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) { uint8_t *mac; uint32_t ale_entry[3], ale_type, portmask; if (sc->swsc->dualemac) { ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; portmask = 1 << (sc->unit + 1) | 1 << 0; } else { ale_type = ALE_TYPE_ADDR << 28; portmask = 7; } /* * Route incoming packets for our MAC address to Port 0 (host). * For simplicity, keep this entry at table index 0 for port 1 and * at index 2 for port 2 in the ALE. */ mac = LLADDR((struct sockaddr_dl *)if_getifaddr(sc->ifp)->ifa_addr); ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ ale_entry[2] = 0; /* port = 0 */ cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); /* Set outgoing MAC Address for slave port. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), mac[5] << 8 | mac[4]); /* Keep the broadcast address at table entry 1 (or 3). */ ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; ale_entry[2] = portmask << 2; cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); /* SIOCDELMULTI doesn't specify the particular address being removed, so we have to remove all and rebuild. */ if (purge) cpsw_ale_remove_all_mc_entries(sc->swsc); /* Set other multicast addrs desired. */ if_foreach_llmaddr(sc->ifp, cpswp_set_maddr, sc); return (0); } static int cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, int untag, int mcregflood, int mcunregflood) { int free_index, i, matching_index; uint32_t ale_entry[3]; free_index = matching_index = -1; /* Find a matching entry or a free entry. */ for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if (ALE_VLAN(ale_entry) == vlan) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (-1); i = free_index; } ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | (mcunregflood & 7) << 8 | (ports & 7); ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); return (0); } /* * * Statistics and Sysctls. 
* */ #if 0 static void cpsw_stats_dump(struct cpsw_softc *sc) { int i; uint32_t r; for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, (intmax_t)sc->shadow_stats[i], r, (intmax_t)sc->shadow_stats[i] + r)); } } #endif static void cpsw_stats_collect(struct cpsw_softc *sc) { int i; uint32_t r; CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); sc->shadow_stats[i] += r; cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r); } } static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct cpsw_stat *stat; uint64_t result; sc = (struct cpsw_softc *)arg1; stat = &cpsw_stat_sysctls[oidp->oid_number]; result = sc->shadow_stats[oidp->oid_number]; result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); return (sysctl_handle_64(oidp, &result, 0, req)); } static int cpsw_stat_attached(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct bintime t; unsigned result; sc = (struct cpsw_softc *)arg1; getbinuptime(&t); bintime_sub(&t, &sc->attach_uptime); result = t.sec; return (sysctl_handle_int(oidp, &result, 0, req)); } static int cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS) { int error; struct cpsw_softc *sc; uint32_t ctrl, intr_per_ms; sc = (struct cpsw_softc *)arg1; error = sysctl_handle_int(oidp, &sc->coal_us, 0, req); if (error != 0 || req->newptr == NULL) return (error); ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); if (sc->coal_us == 0) { /* Disable the interrupt pace hardware. */ cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0); cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0); return (0); } if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX) sc->coal_us = CPSW_WR_C_IMAX_US_MAX; if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN) sc->coal_us = CPSW_WR_C_IMAX_US_MIN; intr_per_ms = 1000 / sc->coal_us; /* Just to make sure... */ if (intr_per_ms > CPSW_WR_C_IMAX_MAX) intr_per_ms = CPSW_WR_C_IMAX_MAX; if (intr_per_ms < CPSW_WR_C_IMAX_MIN) intr_per_ms = CPSW_WR_C_IMAX_MIN; /* Set the prescale to produce 4us pulses from the 125 Mhz clock. */ ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK; /* Enable the interrupt pace hardware. 
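The pacing limit is programmed as a maximum number of interrupts per millisecond, so intr_per_ms = 1000 / coal_us; a 100us coalescing interval, for example, programs an IMAX of 10.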
*/ cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms); cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms); ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE; cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); return (0); } static int cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *swsc; struct cpswp_softc *sc; struct bintime t; unsigned result; swsc = arg1; sc = device_get_softc(swsc->port[arg2].dev); if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { getbinuptime(&t); bintime_sub(&t, &sc->init_uptime); result = t.sec; } else result = 0; return (sysctl_handle_int(oidp, &result, 0, req)); } static void cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_queue *queue) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", CTLFLAG_RD, &queue->queue_slots, 0, "Total buffers currently assigned to this queue"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", CTLFLAG_RD, &queue->active_queue_len, 0, "Buffers currently registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", CTLFLAG_RD, &queue->max_active_queue_len, 0, "Max value of activeBuffers since last driver reset"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", CTLFLAG_RD, &queue->avail_queue_len, 0, "Buffers allocated to this queue but not currently " "registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", CTLFLAG_RD, &queue->max_avail_queue_len, 0, "Max value of availBuffers since last driver reset"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", CTLFLAG_RD, &queue->queue_adds, 0, "Total buffers added to queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", CTLFLAG_RD, &queue->queue_removes, 0, "Total buffers removed from queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart", CTLFLAG_RD, &queue->queue_restart, 0, "Total times the queue has been restarted"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", CTLFLAG_RD, &queue->longest_chain, 0, "Max buffers used for a single packet"); } static void cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_softc *sc) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", CTLFLAG_RD, &sc->watchdog.resets, 0, "Total number of watchdog resets"); } static void cpsw_add_sysctls(struct cpsw_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *stats_node, *queue_node, *node; struct sysctl_oid_list *parent, *stats_parent, *queue_parent; struct sysctl_oid_list *ports_parent, *port_parent; char port[16]; int i; ctx = device_get_sysctl_ctx(sc->dev); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, cpsw_stat_attached, "IU", "Time since driver attach"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, cpsw_intr_coalesce, "IU", "minimum time between interrupts"); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Ports Statistics"); ports_parent = SYSCTL_CHILDREN(node); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; port[0] = '0' + i; port[1] = '\0'; node = SYSCTL_ADD_NODE(ctx, ports_parent, 
OID_AUTO, port, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Port Statistics"); port_parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i, cpsw_stat_uptime, "IU", "Seconds since driver init"); } stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Statistics"); stats_parent = SYSCTL_CHILDREN(stats_node); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { SYSCTL_ADD_PROC(ctx, stats_parent, i, cpsw_stat_sysctls[i].oid, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, cpsw_stats_sysctl, "IU", cpsw_stat_sysctls[i].oid); } queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Queue Statistics"); queue_parent = SYSCTL_CHILDREN(queue_node); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->tx); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->rx); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Watchdog Statistics"); cpsw_add_watchdog_sysctls(ctx, node, sc); } #ifdef CPSW_ETHERSWITCH static etherswitch_info_t etherswitch_info = { .es_nports = CPSW_PORTS + 1, .es_nvlangroups = CPSW_VLANS, .es_name = "TI Common Platform Ethernet Switch (CPSW)", .es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q, }; static etherswitch_info_t * cpsw_getinfo(device_t dev) { return (ðerswitch_info); } static int cpsw_getport(device_t dev, etherswitch_port_t *p) { int err; struct cpsw_softc *sc; struct cpswp_softc *psc; struct ifmediareq *ifmr; uint32_t reg; if (p->es_port < 0 || p->es_port > CPSW_PORTS) return (ENXIO); err = 0; sc = device_get_softc(dev); if (p->es_port == CPSW_CPU_PORT) { p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; } else { psc = device_get_softc(sc->port[p->es_port - 1].dev); err = ifmedia_ioctl(psc->ifp, &p->es_ifr, &psc->mii->mii_media, SIOCGIFMEDIA); } reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port)); p->es_pvid = reg & ETHERSWITCH_VID_MASK; reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); if (reg & ALE_PORTCTL_DROP_UNTAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; if (reg & ALE_PORTCTL_INGRESS) p->es_flags |= ETHERSWITCH_PORT_INGRESS; return (err); } static int cpsw_setport(device_t dev, etherswitch_port_t *p) { struct cpsw_softc *sc; struct cpswp_softc *psc; struct ifmedia *ifm; uint32_t reg; if (p->es_port < 0 || p->es_port > CPSW_PORTS) return (ENXIO); sc = device_get_softc(dev); if (p->es_pvid != 0) { cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port), p->es_pvid & ETHERSWITCH_VID_MASK); } reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) reg |= ALE_PORTCTL_DROP_UNTAGGED; else reg &= ~ALE_PORTCTL_DROP_UNTAGGED; if (p->es_flags & ETHERSWITCH_PORT_INGRESS) reg |= ALE_PORTCTL_INGRESS; else reg &= ~ALE_PORTCTL_INGRESS; cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg); /* CPU port does not allow media settings. 
*/ if (p->es_port == CPSW_CPU_PORT) return (0); psc = device_get_softc(sc->port[p->es_port - 1].dev); ifm = &psc->mii->mii_media; return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); } static int cpsw_getconf(device_t dev, etherswitch_conf_t *conf) { /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; return (0); } static int cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { int i, vid; uint32_t ale_entry[3]; struct cpsw_softc *sc; sc = device_get_softc(dev); if (vg->es_vlangroup >= CPSW_VLANS) return (EINVAL); vg->es_vid = 0; vid = cpsw_vgroups[vg->es_vlangroup].vid; if (vid == -1) return (0); for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) continue; if (vid != ALE_VLAN(ale_entry)) continue; vg->es_fid = 0; vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID; vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry); vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry); } return (0); } static void cpsw_remove_vlan(struct cpsw_softc *sc, int vlan) { int i; uint32_t ale_entry[3]; for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) continue; if (vlan != ALE_VLAN(ale_entry)) continue; ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); break; } } static int cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { int i; struct cpsw_softc *sc; sc = device_get_softc(dev); for (i = 0; i < CPSW_VLANS; i++) { /* Is this Vlan ID in use by another vlangroup ? */ if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid) return (EINVAL); } if (vg->es_vid == 0) { if (cpsw_vgroups[vg->es_vlangroup].vid == -1) return (0); cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid); cpsw_vgroups[vg->es_vlangroup].vid = -1; vg->es_untagged_ports = 0; vg->es_member_ports = 0; vg->es_vid = 0; return (0); } vg->es_vid &= ETHERSWITCH_VID_MASK; vg->es_member_ports &= CPSW_PORTS_MASK; vg->es_untagged_ports &= CPSW_PORTS_MASK; if (cpsw_vgroups[vg->es_vlangroup].vid != -1 && cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid) return (EINVAL); cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid; cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports, vg->es_untagged_ports, vg->es_member_ports, 0); return (0); } static int cpsw_readreg(device_t dev, int addr) { /* Not supported. */ return (0); } static int cpsw_writereg(device_t dev, int addr, int value) { /* Not supported. */ return (0); } static int cpsw_readphy(device_t dev, int phy, int reg) { /* Not supported. */ return (0); } static int cpsw_writephy(device_t dev, int phy, int reg, int data) { /* Not supported. */ return (0); } #endif diff --git a/sys/arm/ti/ti_i2c.c b/sys/arm/ti/ti_i2c.c index a6c397a87e8b..e2f114505015 100644 --- a/sys/arm/ti/ti_i2c.c +++ b/sys/arm/ti/ti_i2c.c @@ -1,972 +1,968 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 Ben Gray . * Copyright (c) 2014 Luiz Otavio O Souza . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * Driver for the I2C module on the TI SoC. * * This driver is heavily based on the TWI driver for the AT91 (at91_twi.c). * * CAUTION: The I2Ci registers are limited to 16 bit and 8 bit data accesses, * 32 bit data access is not allowed and can corrupt register content. * * This driver currently doesn't use DMA for the transfer, although I hope to * incorporate that sometime in the future. The idea being that for transaction * larger than a certain size the DMA engine is used, for anything less the * normal interrupt/fifo driven option is used. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" /** * I2C device driver context, a pointer to this is stored in the device * driver structure. */ struct ti_i2c_softc { device_t sc_dev; struct resource* sc_irq_res; struct resource* sc_mem_res; device_t sc_iicbus; void* sc_irq_h; struct mtx sc_mtx; struct iic_msg* sc_buffer; int sc_bus_inuse; int sc_buffer_pos; int sc_error; int sc_fifo_trsh; int sc_timeout; uint16_t sc_con_reg; uint16_t sc_rev; }; struct ti_i2c_clock_config { u_int frequency; /* Bus frequency in Hz */ uint8_t psc; /* Fast/Standard mode prescale divider */ uint8_t scll; /* Fast/Standard mode SCL low time */ uint8_t sclh; /* Fast/Standard mode SCL high time */ uint8_t hsscll; /* High Speed mode SCL low time */ uint8_t hssclh; /* High Speed mode SCL high time */ }; #if defined(SOC_OMAP4) /* * OMAP4 i2c bus clock is 96MHz / ((psc + 1) * (scll + 7 + sclh + 5)). * The prescaler values for 100KHz and 400KHz modes come from the table in the * OMAP4 TRM. The table doesn't list 1MHz; these values should give that speed. */ static struct ti_i2c_clock_config ti_omap4_i2c_clock_configs[] = { { 100000, 23, 13, 15, 0, 0}, { 400000, 9, 5, 7, 0, 0}, { 1000000, 3, 5, 7, 0, 0}, /* { 3200000, 1, 113, 115, 7, 10}, - HS mode */ { 0 /* Table terminator */ } }; #endif #if defined(SOC_TI_AM335X) /* * AM335x i2c bus clock is 48MHZ / ((psc + 1) * (scll + 7 + sclh + 5)) * In all cases we prescale the clock to 24MHz as recommended in the manual. 
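 *
 * For illustration, the 100 kHz entry below (psc = 1, scll = 111,
 * sclh = 117) works out to 48 MHz / ((1 + 1) * (111 + 7 + 117 + 5)) =
 * 48 MHz / 480 = 100 kHz.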
*/ static struct ti_i2c_clock_config ti_am335x_i2c_clock_configs[] = { { 100000, 1, 111, 117, 0, 0}, { 400000, 1, 23, 25, 0, 0}, { 1000000, 1, 5, 7, 0, 0}, { 0 /* Table terminator */ } }; #endif /** * Locking macros used throughout the driver */ #define TI_I2C_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define TI_I2C_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define TI_I2C_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \ "ti_i2c", MTX_DEF) #define TI_I2C_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx) #define TI_I2C_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED) #define TI_I2C_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED) #ifdef DEBUG #define ti_i2c_dbg(_sc, fmt, args...) \ device_printf((_sc)->sc_dev, fmt, ##args) #else #define ti_i2c_dbg(_sc, fmt, args...) #endif /** * ti_i2c_read_2 - reads a 16-bit value from one of the I2C registers * @sc: I2C device context * @off: the byte offset within the register bank to read from. * * * LOCKING: * No locking required * * RETURNS: * 16-bit value read from the register. */ static inline uint16_t ti_i2c_read_2(struct ti_i2c_softc *sc, bus_size_t off) { return (bus_read_2(sc->sc_mem_res, off)); } /** * ti_i2c_write_2 - writes a 16-bit value to one of the I2C registers * @sc: I2C device context * @off: the byte offset within the register bank to read from. * @val: the value to write into the register * * LOCKING: * No locking required * * RETURNS: * 16-bit value read from the register. */ static inline void ti_i2c_write_2(struct ti_i2c_softc *sc, bus_size_t off, uint16_t val) { bus_write_2(sc->sc_mem_res, off, val); } static int ti_i2c_transfer_intr(struct ti_i2c_softc* sc, uint16_t status) { int amount, done, i; done = 0; amount = 0; /* Check for the error conditions. */ if (status & I2C_STAT_NACK) { /* No ACK from slave. */ ti_i2c_dbg(sc, "NACK\n"); ti_i2c_write_2(sc, I2C_REG_STATUS, I2C_STAT_NACK); sc->sc_error = ENXIO; } else if (status & I2C_STAT_AL) { /* Arbitration lost. */ ti_i2c_dbg(sc, "Arbitration lost\n"); ti_i2c_write_2(sc, I2C_REG_STATUS, I2C_STAT_AL); sc->sc_error = ENXIO; } /* Check if we have finished. */ if (status & I2C_STAT_ARDY) { /* Register access ready - transaction complete basically. */ ti_i2c_dbg(sc, "ARDY transaction complete\n"); if (sc->sc_error != 0 && sc->sc_buffer->flags & IIC_M_NOSTOP) { ti_i2c_write_2(sc, I2C_REG_CON, sc->sc_con_reg | I2C_CON_STP); } ti_i2c_write_2(sc, I2C_REG_STATUS, I2C_STAT_ARDY | I2C_STAT_RDR | I2C_STAT_RRDY | I2C_STAT_XDR | I2C_STAT_XRDY); return (1); } if (sc->sc_buffer->flags & IIC_M_RD) { /* Read some data. */ if (status & I2C_STAT_RDR) { /* * Receive draining interrupt - last data received. * The set FIFO threshold won't be reached to trigger * RRDY. */ ti_i2c_dbg(sc, "Receive draining interrupt\n"); /* * Drain the FIFO. Read the pending data in the FIFO. */ amount = sc->sc_buffer->len - sc->sc_buffer_pos; } else if (status & I2C_STAT_RRDY) { /* * Receive data ready interrupt - FIFO has reached the * set threshold. */ ti_i2c_dbg(sc, "Receive data ready interrupt\n"); amount = min(sc->sc_fifo_trsh, sc->sc_buffer->len - sc->sc_buffer_pos); } /* Read the bytes from the fifo. */ for (i = 0; i < amount; i++) sc->sc_buffer->buf[sc->sc_buffer_pos++] = (uint8_t)(ti_i2c_read_2(sc, I2C_REG_DATA) & 0xff); if (status & I2C_STAT_RDR) ti_i2c_write_2(sc, I2C_REG_STATUS, I2C_STAT_RDR); if (status & I2C_STAT_RRDY) ti_i2c_write_2(sc, I2C_REG_STATUS, I2C_STAT_RRDY); } else { /* Write some data. 
*/ if (status & I2C_STAT_XDR) { /* * Transmit draining interrupt - FIFO level is below * the set threshold and the amount of data still to * be transferred won't reach the set FIFO threshold. */ ti_i2c_dbg(sc, "Transmit draining interrupt\n"); /* * Drain the TX data. Write the pending data in the * FIFO. */ amount = sc->sc_buffer->len - sc->sc_buffer_pos; } else if (status & I2C_STAT_XRDY) { /* * Transmit data ready interrupt - the FIFO level * is below the set threshold. */ ti_i2c_dbg(sc, "Transmit data ready interrupt\n"); amount = min(sc->sc_fifo_trsh, sc->sc_buffer->len - sc->sc_buffer_pos); } /* Write the bytes from the fifo. */ for (i = 0; i < amount; i++) ti_i2c_write_2(sc, I2C_REG_DATA, sc->sc_buffer->buf[sc->sc_buffer_pos++]); if (status & I2C_STAT_XDR) ti_i2c_write_2(sc, I2C_REG_STATUS, I2C_STAT_XDR); if (status & I2C_STAT_XRDY) ti_i2c_write_2(sc, I2C_REG_STATUS, I2C_STAT_XRDY); } return (done); } /** * ti_i2c_intr - interrupt handler for the I2C module * @dev: i2c device handle * * * * LOCKING: * Called from timer context * * RETURNS: * EH_HANDLED or EH_NOT_HANDLED */ static void ti_i2c_intr(void *arg) { int done; struct ti_i2c_softc *sc; uint16_t events, status; sc = (struct ti_i2c_softc *)arg; TI_I2C_LOCK(sc); status = ti_i2c_read_2(sc, I2C_REG_STATUS); if (status == 0) { TI_I2C_UNLOCK(sc); return; } /* Save enabled interrupts. */ events = ti_i2c_read_2(sc, I2C_REG_IRQENABLE_SET); /* We only care about enabled interrupts. */ status &= events; done = 0; if (sc->sc_buffer != NULL) done = ti_i2c_transfer_intr(sc, status); else { ti_i2c_dbg(sc, "Transfer interrupt without buffer\n"); sc->sc_error = EINVAL; done = 1; } if (done) /* Wakeup the process that started the transaction. */ wakeup(sc); TI_I2C_UNLOCK(sc); } /** * ti_i2c_transfer - called to perform the transfer * @dev: i2c device handle * @msgs: the messages to send/receive * @nmsgs: the number of messages in the msgs array * * * LOCKING: * Internally locked * * RETURNS: * 0 on function succeeded * EINVAL if invalid message is passed as an arg */ static int ti_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { int err, i, repstart, timeout; struct ti_i2c_softc *sc; uint16_t reg; sc = device_get_softc(dev); TI_I2C_LOCK(sc); /* If the controller is busy wait until it is available. */ while (sc->sc_bus_inuse == 1) mtx_sleep(sc, &sc->sc_mtx, 0, "i2cbuswait", 0); /* Now we have control over the I2C controller. */ sc->sc_bus_inuse = 1; err = 0; repstart = 0; for (i = 0; i < nmsgs; i++) { sc->sc_buffer = &msgs[i]; sc->sc_buffer_pos = 0; sc->sc_error = 0; /* Zero byte transfers aren't allowed. */ if (sc->sc_buffer == NULL || sc->sc_buffer->buf == NULL || sc->sc_buffer->len == 0) { err = EINVAL; break; } /* Check if the i2c bus is free. */ if (repstart == 0) { /* * On repeated start we send the START condition while * the bus _is_ busy. */ timeout = 0; while (ti_i2c_read_2(sc, I2C_REG_STATUS_RAW) & I2C_STAT_BB) { if (timeout++ > 100) { err = EBUSY; goto out; } DELAY(1000); } timeout = 0; } else repstart = 0; if (sc->sc_buffer->flags & IIC_M_NOSTOP) repstart = 1; /* Set the slave address. */ ti_i2c_write_2(sc, I2C_REG_SA, msgs[i].slave >> 1); /* Write the data length. */ ti_i2c_write_2(sc, I2C_REG_CNT, sc->sc_buffer->len); /* Clear the RX and the TX FIFO. 
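 * This guards against stale bytes from an earlier, possibly aborted,
 * message lingering in the FIFOs.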
*/ reg = ti_i2c_read_2(sc, I2C_REG_BUF); reg |= I2C_BUF_RXFIFO_CLR | I2C_BUF_TXFIFO_CLR; ti_i2c_write_2(sc, I2C_REG_BUF, reg); reg = sc->sc_con_reg | I2C_CON_STT; if (repstart == 0) reg |= I2C_CON_STP; if ((sc->sc_buffer->flags & IIC_M_RD) == 0) reg |= I2C_CON_TRX; ti_i2c_write_2(sc, I2C_REG_CON, reg); /* Wait for an event. */ err = mtx_sleep(sc, &sc->sc_mtx, 0, "i2ciowait", sc->sc_timeout); if (err == 0) err = sc->sc_error; if (err) break; } out: if (timeout == 0) { while (ti_i2c_read_2(sc, I2C_REG_STATUS_RAW) & I2C_STAT_BB) { if (timeout++ > 100) break; DELAY(1000); } } /* Put the controller in master mode again. */ if ((ti_i2c_read_2(sc, I2C_REG_CON) & I2C_CON_MST) == 0) ti_i2c_write_2(sc, I2C_REG_CON, sc->sc_con_reg); sc->sc_buffer = NULL; sc->sc_bus_inuse = 0; /* Wake up the processes that are waiting for the bus. */ wakeup(sc); TI_I2C_UNLOCK(sc); return (err); } static int ti_i2c_reset(struct ti_i2c_softc *sc, u_char speed) { int timeout; struct ti_i2c_clock_config *clkcfg; u_int busfreq; uint16_t fifo_trsh, reg, scll, sclh; switch (ti_chip()) { #ifdef SOC_OMAP4 case CHIP_OMAP_4: clkcfg = ti_omap4_i2c_clock_configs; break; #endif #ifdef SOC_TI_AM335X case CHIP_AM335X: clkcfg = ti_am335x_i2c_clock_configs; break; #endif default: panic("Unknown TI SoC, unable to reset the i2c"); } /* * If we haven't attached the bus yet, just init at the default slow * speed. This lets us get the hardware initialized enough to attach * the bus which is where the real speed configuration is handled. After * the bus is attached, get the configured speed from it. Search the * configuration table for the best speed we can do that doesn't exceed * the requested speed. */ if (sc->sc_iicbus == NULL) busfreq = 100000; else busfreq = IICBUS_GET_FREQUENCY(sc->sc_iicbus, speed); for (;;) { if (clkcfg[1].frequency == 0 || clkcfg[1].frequency > busfreq) break; clkcfg++; } /* * 23.1.4.3 - HS I2C Software Reset * From OMAP4 TRM at page 4068. * * 1. Ensure that the module is disabled. */ sc->sc_con_reg = 0; ti_i2c_write_2(sc, I2C_REG_CON, sc->sc_con_reg); /* 2. Issue a softreset to the controller. */ bus_write_2(sc->sc_mem_res, I2C_REG_SYSC, I2C_REG_SYSC_SRST); /* * 3. Enable the module. * The I2Ci.I2C_SYSS[0] RDONE bit is asserted only after the module * is enabled by setting the I2Ci.I2C_CON[15] I2C_EN bit to 1. */ ti_i2c_write_2(sc, I2C_REG_CON, I2C_CON_I2C_EN); /* 4. Wait for the software reset to complete. */ timeout = 0; while ((ti_i2c_read_2(sc, I2C_REG_SYSS) & I2C_SYSS_RDONE) == 0) { if (timeout++ > 100) return (EBUSY); DELAY(100); } /* * Disable the I2C controller once again, now that the reset has * finished. */ ti_i2c_write_2(sc, I2C_REG_CON, sc->sc_con_reg); /* * The following sequence is taken from the OMAP4 TRM at page 4077. * * 1. Enable the functional and interface clocks (see Section * 23.1.5.1.1.1.1). Done at ti_i2c_activate(). * * 2. Program the prescaler to obtain an approximately 12MHz internal * sampling clock (I2Ci_INTERNAL_CLK) by programming the * corresponding value in the I2Ci.I2C_PSC[3:0] PSC field. * This value depends on the frequency of the functional clock * (I2Ci_FCLK). Because this frequency is 96MHz, the * I2Ci.I2C_PSC[7:0] PSC field value is 0x7. */ ti_i2c_write_2(sc, I2C_REG_PSC, clkcfg->psc); /* * 3. Program the I2Ci.I2C_SCLL[7:0] SCLL and I2Ci.I2C_SCLH[7:0] SCLH * bit fields to obtain a bit rate of 100 Kbps, 400 Kbps or 1Mbps. * These values depend on the internal sampling clock frequency * (see Table 23-8). 
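 *
 *    For illustration, the OMAP4 400 kHz entry used by this driver
 *    (psc = 9, scll = 5, sclh = 7) works out to
 *    96 MHz / ((9 + 1) * (5 + 7 + 7 + 5)) = 400 kHz with the bus
 *    clock formula given earlier in this file.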
*/ scll = clkcfg->scll & I2C_SCLL_MASK; sclh = clkcfg->sclh & I2C_SCLH_MASK; /* * 4. (Optional) Program the I2Ci.I2C_SCLL[15:8] HSSCLL and * I2Ci.I2C_SCLH[15:8] HSSCLH fields to obtain a bit rate of * 400K bps or 3.4M bps (for the second phase of HS mode). These * values depend on the internal sampling clock frequency (see * Table 23-8). * * 5. (Optional) If a bit rate of 3.4M bps is used and the bus line * capacitance exceeds 45 pF, (see Section 18.4.8, PAD Functional * Multiplexing and Configuration). */ switch (ti_chip()) { #ifdef SOC_OMAP4 case CHIP_OMAP_4: if ((clkcfg->hsscll + clkcfg->hssclh) > 0) { scll |= clkcfg->hsscll << I2C_HSSCLL_SHIFT; sclh |= clkcfg->hssclh << I2C_HSSCLH_SHIFT; sc->sc_con_reg |= I2C_CON_OPMODE_HS; } break; #endif } /* Write the selected bit rate. */ ti_i2c_write_2(sc, I2C_REG_SCLL, scll); ti_i2c_write_2(sc, I2C_REG_SCLH, sclh); /* * 6. Configure the Own Address of the I2C controller by storing it in * the I2Ci.I2C_OA0 register. Up to four Own Addresses can be * programmed in the I2Ci.I2C_OAi registers (where i = 0, 1, 2, 3) * for each I2C controller. * * Note: For a 10-bit address, set the corresponding expand Own Address * bit in the I2Ci.I2C_CON register. * * Driver currently always in single master mode so ignore this step. */ /* * 7. Set the TX threshold (in transmitter mode) and the RX threshold * (in receiver mode) by setting the I2Ci.I2C_BUF[5:0]XTRSH field to * (TX threshold - 1) and the I2Ci.I2C_BUF[13:8]RTRSH field to (RX * threshold - 1), where the TX and RX thresholds are greater than * or equal to 1. * * The threshold is set to 5 for now. */ fifo_trsh = (sc->sc_fifo_trsh - 1) & I2C_BUF_TRSH_MASK; reg = fifo_trsh | (fifo_trsh << I2C_BUF_RXTRSH_SHIFT); ti_i2c_write_2(sc, I2C_REG_BUF, reg); /* * 8. Take the I2C controller out of reset by setting the * I2Ci.I2C_CON[15] I2C_EN bit to 1. * * 23.1.5.1.1.1.2 - Initialize the I2C Controller * * To initialize the I2C controller, perform the following steps: * * 1. Configure the I2Ci.I2C_CON register: * . For master or slave mode, set the I2Ci.I2C_CON[10] MST bit * (0: slave, 1: master). * . For transmitter or receiver mode, set the I2Ci.I2C_CON[9] TRX * bit (0: receiver, 1: transmitter). */ /* Enable the I2C controller in master mode. */ sc->sc_con_reg |= I2C_CON_I2C_EN | I2C_CON_MST; ti_i2c_write_2(sc, I2C_REG_CON, sc->sc_con_reg); /* * 2. If using an interrupt to transmit/receive data, set the * corresponding bit in the I2Ci.I2C_IE register (the I2Ci.I2C_IE[4] * XRDY_IE bit for the transmit interrupt, the I2Ci.I2C_IE[3] RRDY * bit for the receive interrupt). */ /* Set the interrupts we want to be notified. */ reg = I2C_IE_XDR | /* Transmit draining interrupt. */ I2C_IE_XRDY | /* Transmit Data Ready interrupt. */ I2C_IE_RDR | /* Receive draining interrupt. */ I2C_IE_RRDY | /* Receive Data Ready interrupt. */ I2C_IE_ARDY | /* Register Access Ready interrupt. */ I2C_IE_NACK | /* No Acknowledgment interrupt. */ I2C_IE_AL; /* Arbitration lost interrupt. */ /* Enable the interrupts. */ ti_i2c_write_2(sc, I2C_REG_IRQENABLE_SET, reg); /* * 3. If using DMA to receive/transmit data, set to 1 the corresponding * bit in the I2Ci.I2C_BUF register (the I2Ci.I2C_BUF[15] RDMA_EN * bit for the receive DMA channel, the I2Ci.I2C_BUF[7] XDMA_EN bit * for the transmit DMA channel). * * Not using DMA for now, so ignore this. 
*/ return (0); } static int ti_i2c_iicbus_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct ti_i2c_softc *sc; int err; sc = device_get_softc(dev); TI_I2C_LOCK(sc); err = ti_i2c_reset(sc, speed); TI_I2C_UNLOCK(sc); if (err) return (err); return (IIC_ENOADDR); } static int ti_i2c_activate(device_t dev) { int err; struct ti_i2c_softc *sc; sc = (struct ti_i2c_softc*)device_get_softc(dev); /* * 1. Enable the functional and interface clocks (see Section * 23.1.5.1.1.1.1). */ err = ti_sysc_clock_enable(device_get_parent(dev)); if (err) return (err); return (ti_i2c_reset(sc, IIC_UNKNOWN)); } /** * ti_i2c_deactivate - deactivates the controller and releases resources * @dev: i2c device handle * * * * LOCKING: * Assumed called in an atomic context. * * RETURNS: * nothing */ static void ti_i2c_deactivate(device_t dev) { struct ti_i2c_softc *sc = device_get_softc(dev); /* Disable the controller - cancel all transactions. */ ti_i2c_write_2(sc, I2C_REG_IRQENABLE_CLR, 0xffff); ti_i2c_write_2(sc, I2C_REG_STATUS, 0xffff); ti_i2c_write_2(sc, I2C_REG_CON, 0); /* Release the interrupt handler. */ if (sc->sc_irq_h != NULL) { bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_h); sc->sc_irq_h = NULL; } /* Unmap the I2C controller registers. */ if (sc->sc_mem_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); sc->sc_mem_res = NULL; } /* Release the IRQ resource. */ if (sc->sc_irq_res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); sc->sc_irq_res = NULL; } /* Finally disable the functional and interface clocks. */ ti_sysc_clock_disable(device_get_parent(dev)); } static int ti_i2c_sysctl_clk(SYSCTL_HANDLER_ARGS) { int clk, psc, sclh, scll; struct ti_i2c_softc *sc; sc = arg1; TI_I2C_LOCK(sc); /* Get the system prescaler value. */ psc = (int)ti_i2c_read_2(sc, I2C_REG_PSC) + 1; /* Get the bitrate. */ scll = (int)ti_i2c_read_2(sc, I2C_REG_SCLL) & I2C_SCLL_MASK; sclh = (int)ti_i2c_read_2(sc, I2C_REG_SCLH) & I2C_SCLH_MASK; clk = I2C_CLK / psc / (scll + 7 + sclh + 5); TI_I2C_UNLOCK(sc); return (sysctl_handle_int(oidp, &clk, 0, req)); } static int ti_i2c_sysctl_timeout(SYSCTL_HANDLER_ARGS) { struct ti_i2c_softc *sc; unsigned int val; int err; sc = arg1; /* * MTX_DEF lock can't be held while doing uimove in * sysctl_handle_int */ TI_I2C_LOCK(sc); val = sc->sc_timeout; TI_I2C_UNLOCK(sc); err = sysctl_handle_int(oidp, &val, 0, req); /* Write request? */ if ((err == 0) && (req->newptr != NULL)) { TI_I2C_LOCK(sc); sc->sc_timeout = val; TI_I2C_UNLOCK(sc); } return (err); } static int ti_i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,omap4-i2c")) return (ENXIO); device_set_desc(dev, "TI I2C Controller"); return (0); } static int ti_i2c_attach(device_t dev) { int err, rid; struct ti_i2c_softc *sc; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *tree; uint16_t fifosz; sc = device_get_softc(dev); sc->sc_dev = dev; /* Get the memory resource for the register mapping. */ rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem_res == NULL) { device_printf(dev, "Cannot map registers.\n"); return (ENXIO); } /* Allocate our IRQ resource. 
*/ rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_irq_res == NULL) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "Cannot allocate interrupt.\n"); return (ENXIO); } TI_I2C_LOCK_INIT(sc); /* First of all, we _must_ activate the H/W. */ err = ti_i2c_activate(dev); if (err) { device_printf(dev, "ti_i2c_activate failed\n"); goto out; } /* Read the version number of the I2C module */ sc->sc_rev = ti_i2c_read_2(sc, I2C_REG_REVNB_HI) & 0xff; /* Get the fifo size. */ fifosz = ti_i2c_read_2(sc, I2C_REG_BUFSTAT); fifosz >>= I2C_BUFSTAT_FIFODEPTH_SHIFT; fifosz &= I2C_BUFSTAT_FIFODEPTH_MASK; device_printf(dev, "I2C revision %d.%d FIFO size: %d bytes\n", sc->sc_rev >> 4, sc->sc_rev & 0xf, 8 << fifosz); /* Set the FIFO threshold to 5 for now. */ sc->sc_fifo_trsh = 5; /* Set I2C bus timeout */ sc->sc_timeout = 5*hz; ctx = device_get_sysctl_ctx(dev); tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "i2c_clock", CTLFLAG_RD | CTLTYPE_UINT | CTLFLAG_MPSAFE, sc, 0, ti_i2c_sysctl_clk, "IU", "I2C bus clock"); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "i2c_timeout", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, sc, 0, ti_i2c_sysctl_timeout, "IU", "I2C bus timeout (in ticks)"); /* Activate the interrupt. */ err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, ti_i2c_intr, sc, &sc->sc_irq_h); if (err) goto out; /* Attach the iicbus. */ if ((sc->sc_iicbus = device_add_child(dev, "iicbus", -1)) == NULL) { device_printf(dev, "could not allocate iicbus instance\n"); err = ENXIO; goto out; } /* Probe and attach the iicbus when interrupts are available. */ bus_delayed_attach_children(dev); out: if (err) { ti_i2c_deactivate(dev); TI_I2C_LOCK_DESTROY(sc); } return (err); } static int ti_i2c_detach(device_t dev) { struct ti_i2c_softc *sc; int rv; sc = device_get_softc(dev); if ((rv = bus_generic_detach(dev)) != 0) { device_printf(dev, "cannot detach child devices\n"); return (rv); } - if (sc->sc_iicbus && - (rv = device_delete_child(dev, sc->sc_iicbus)) != 0) - return (rv); - ti_i2c_deactivate(dev); TI_I2C_LOCK_DESTROY(sc); return (0); } static phandle_t ti_i2c_get_node(device_t bus, device_t dev) { /* Share controller node with iibus device. 
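 * The iicbus child has no device tree node of its own, so returning
 * the controller's node lets I2C slaves described beneath it be
 * enumerated.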
*/ return (ofw_bus_get_node(bus)); } static device_method_t ti_i2c_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_i2c_probe), DEVMETHOD(device_attach, ti_i2c_attach), DEVMETHOD(device_detach, ti_i2c_detach), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, ti_i2c_get_node), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_reset, ti_i2c_iicbus_reset), DEVMETHOD(iicbus_transfer, ti_i2c_transfer), DEVMETHOD_END }; static driver_t ti_i2c_driver = { "iichb", ti_i2c_methods, sizeof(struct ti_i2c_softc), }; DRIVER_MODULE(ti_iic, simplebus, ti_i2c_driver, 0, 0); DRIVER_MODULE(iicbus, ti_iic, iicbus_driver, 0, 0); MODULE_DEPEND(ti_iic, ti_sysc, 1, 1, 1); MODULE_DEPEND(ti_iic, iicbus, 1, 1, 1); diff --git a/sys/arm/xilinx/zy7_qspi.c b/sys/arm/xilinx/zy7_qspi.c index c033fd29a7db..53559571f7db 100644 --- a/sys/arm/xilinx/zy7_qspi.c +++ b/sys/arm/xilinx/zy7_qspi.c @@ -1,751 +1,747 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Thomas Skibo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * This is a driver for the Quad-SPI Flash Controller in the Xilinx * Zynq-7000 SoC. 
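 *
 * Only the controller's I/O (FIFO) mode is used for transfers; the
 * linear, memory-mapped mode is disabled in zy7_qspi_init_hw().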
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" static struct ofw_compat_data compat_data[] = { {"xlnx,zy7_qspi", 1}, {"xlnx,zynq-qspi-1.0", 1}, {NULL, 0} }; struct zy7_qspi_softc { device_t dev; device_t child; struct mtx sc_mtx; struct resource *mem_res; struct resource *irq_res; void *intrhandle; uint32_t cfg_reg_shadow; uint32_t lqspi_cfg_shadow; uint32_t spi_clock; uint32_t ref_clock; unsigned int spi_clk_real_freq; unsigned int rx_overflows; unsigned int tx_underflows; unsigned int interrupts; unsigned int stray_ints; struct spi_command *cmd; int tx_bytes; /* tx_cmd_sz + tx_data_sz */ int tx_bytes_sent; int rx_bytes; /* rx_cmd_sz + rx_data_sz */ int rx_bytes_rcvd; int busy; int is_dual; int is_stacked; int is_dio; }; #define ZY7_QSPI_DEFAULT_SPI_CLOCK 50000000 #define QSPI_SC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define QSPI_SC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define QSPI_SC_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), NULL, MTX_DEF) #define QSPI_SC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) #define QSPI_SC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define RD4(sc, off) (bus_read_4((sc)->mem_res, (off))) #define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val))) /* * QSPI device registers. * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. * (v1.12.2) July 1, 2018. Xilinx doc UG585. */ #define ZY7_QSPI_CONFIG_REG 0x0000 #define ZY7_QSPI_CONFIG_IFMODE (1U << 31) #define ZY7_QSPI_CONFIG_ENDIAN (1 << 26) #define ZY7_QSPI_CONFIG_HOLDB_DR (1 << 19) #define ZY7_QSPI_CONFIG_RSVD1 (1 << 17) /* must be 1 */ #define ZY7_QSPI_CONFIG_MANSTRT (1 << 16) #define ZY7_QSPI_CONFIG_MANSTRTEN (1 << 15) #define ZY7_QSPI_CONFIG_SSFORCE (1 << 14) #define ZY7_QSPI_CONFIG_PCS (1 << 10) #define ZY7_QSPI_CONFIG_REF_CLK (1 << 8) #define ZY7_QSPI_CONFIG_FIFO_WIDTH_MASK (3 << 6) #define ZY7_QSPI_CONFIG_FIFO_WIDTH32 (3 << 6) #define ZY7_QSPI_CONFIG_BAUD_RATE_DIV_MASK (7 << 3) #define ZY7_QSPI_CONFIG_BAUD_RATE_DIV_SHIFT 3 #define ZY7_QSPI_CONFIG_BAUD_RATE_DIV(x) ((x) << 3) /* divide by 2< 0) { nvalid = MIN(4, nbytes); data = 0xffffffff; /* * A hardware bug forces us to wait until the tx fifo is * empty before writing partial words. We'll come back * next tx interrupt. */ if (nvalid < 4 && (RD4(sc, ZY7_QSPI_INTR_STAT_REG) & ZY7_QSPI_INTR_TX_FIFO_NOT_FULL) == 0) return; if (sc->tx_bytes_sent < sc->cmd->tx_cmd_sz) { /* Writing command. */ n = MIN(nvalid, sc->cmd->tx_cmd_sz - sc->tx_bytes_sent); memcpy(&data, (uint8_t *)sc->cmd->tx_cmd + sc->tx_bytes_sent, n); if (nvalid > n) { /* Writing start of data. */ memcpy((uint8_t *)&data + n, sc->cmd->tx_data, nvalid - n); } } else /* Writing data. */ memcpy(&data, (uint8_t *)sc->cmd->tx_data + (sc->tx_bytes_sent - sc->cmd->tx_cmd_sz), nvalid); switch (nvalid) { case 1: WR4(sc, ZY7_QSPI_TXD1_REG, data); break; case 2: WR4(sc, ZY7_QSPI_TXD2_REG, data); break; case 3: WR4(sc, ZY7_QSPI_TXD3_REG, data); break; case 4: WR4(sc, ZY7_QSPI_TXD0_REG, data); break; } sc->tx_bytes_sent += nvalid; nbytes -= nvalid; } } /* Read hardware fifo data into command response and data buffers. */ static void zy7_qspi_read_fifo(struct zy7_qspi_softc *sc) { int n, nbytes; uint32_t data; do { data = RD4(sc, ZY7_QSPI_RX_DATA_REG); nbytes = MIN(4, sc->rx_bytes - sc->rx_bytes_rcvd); /* * Last word in non-word-multiple transfer is packed * non-intuitively. 
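 * For example, if only two bytes of the final word are valid they
 * arrive in the upper half of the 32-bit register, so the value is
 * shifted right by 8 * (4 - nbytes) = 16 bits below before being
 * copied out.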
*/ if (nbytes < 4) data >>= 8 * (4 - nbytes); if (sc->rx_bytes_rcvd < sc->cmd->rx_cmd_sz) { /* Reading command. */ n = MIN(nbytes, sc->cmd->rx_cmd_sz - sc->rx_bytes_rcvd); memcpy((uint8_t *)sc->cmd->rx_cmd + sc->rx_bytes_rcvd, &data, n); sc->rx_bytes_rcvd += n; nbytes -= n; data >>= 8 * n; } if (nbytes > 0) { /* Reading data. */ memcpy((uint8_t *)sc->cmd->rx_data + (sc->rx_bytes_rcvd - sc->cmd->rx_cmd_sz), &data, nbytes); sc->rx_bytes_rcvd += nbytes; } } while (sc->rx_bytes_rcvd < sc->rx_bytes && (RD4(sc, ZY7_QSPI_INTR_STAT_REG) & ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY) != 0); } /* End a transfer early by draining rx fifo and disabling interrupts. */ static void zy7_qspi_abort_transfer(struct zy7_qspi_softc *sc) { /* Drain receive fifo. */ while ((RD4(sc, ZY7_QSPI_INTR_STAT_REG) & ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY) != 0) (void)RD4(sc, ZY7_QSPI_RX_DATA_REG); /* Shut down interrupts. */ WR4(sc, ZY7_QSPI_INTR_DIS_REG, ZY7_QSPI_INTR_RX_OVERFLOW | ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY | ZY7_QSPI_INTR_TX_FIFO_NOT_FULL); } static void zy7_qspi_intr(void *arg) { struct zy7_qspi_softc *sc = (struct zy7_qspi_softc *)arg; uint32_t istatus; QSPI_SC_LOCK(sc); sc->interrupts++; istatus = RD4(sc, ZY7_QSPI_INTR_STAT_REG); /* Stray interrupts can happen if a transfer gets interrupted. */ if (!sc->busy) { sc->stray_ints++; QSPI_SC_UNLOCK(sc); return; } if ((istatus & ZY7_QSPI_INTR_RX_OVERFLOW) != 0) { device_printf(sc->dev, "rx fifo overflow!\n"); sc->rx_overflows++; /* Clear status bit. */ WR4(sc, ZY7_QSPI_INTR_STAT_REG, ZY7_QSPI_INTR_RX_OVERFLOW); } /* Empty receive fifo before any more transmit data is sent. */ if (sc->rx_bytes_rcvd < sc->rx_bytes && (istatus & ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY) != 0) { zy7_qspi_read_fifo(sc); if (sc->rx_bytes_rcvd == sc->rx_bytes) /* Disable receive interrupts. */ WR4(sc, ZY7_QSPI_INTR_DIS_REG, ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY | ZY7_QSPI_INTR_RX_OVERFLOW); } /* * Transmit underflows aren't really a bug because a hardware * bug forces us to allow the tx fifo to go empty between full * and partial fifo writes. Why bother counting? */ if ((istatus & ZY7_QSPI_INTR_TX_FIFO_UNDERFLOW) != 0) { sc->tx_underflows++; /* Clear status bit. */ WR4(sc, ZY7_QSPI_INTR_STAT_REG, ZY7_QSPI_INTR_TX_FIFO_UNDERFLOW); } /* Fill transmit fifo. */ if (sc->tx_bytes_sent < sc->tx_bytes && (istatus & ZY7_QSPI_INTR_TX_FIFO_NOT_FULL) != 0) { zy7_qspi_write_fifo(sc, MIN(240, sc->tx_bytes - sc->tx_bytes_sent)); if (sc->tx_bytes_sent == sc->tx_bytes) { /* * Disable transmit FIFO interrupt, enable receive * FIFO interrupt. */ WR4(sc, ZY7_QSPI_INTR_DIS_REG, ZY7_QSPI_INTR_TX_FIFO_NOT_FULL); WR4(sc, ZY7_QSPI_INTR_EN_REG, ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY); } } /* Finished with transfer? */ if (sc->tx_bytes_sent == sc->tx_bytes && sc->rx_bytes_rcvd == sc->rx_bytes) { /* De-assert CS. */ sc->cfg_reg_shadow |= ZY7_QSPI_CONFIG_PCS; WR4(sc, ZY7_QSPI_CONFIG_REG, sc->cfg_reg_shadow); wakeup(sc->dev); } QSPI_SC_UNLOCK(sc); } /* Initialize hardware. */ static int zy7_qspi_init_hw(struct zy7_qspi_softc *sc) { uint32_t baud_div; /* Configure LQSPI Config register. Disable linear mode. */ sc->lqspi_cfg_shadow = RD4(sc, ZY7_QSPI_LQSPI_CFG_REG); sc->lqspi_cfg_shadow &= ~(ZY7_QSPI_LQSPI_CFG_LINEAR | ZY7_QSPI_LQSPI_CFG_TWO_MEM | ZY7_QSPI_LQSPI_CFG_SEP_BUS); if (sc->is_dual) { sc->lqspi_cfg_shadow |= ZY7_QSPI_LQSPI_CFG_TWO_MEM; if (sc->is_stacked) { sc->lqspi_cfg_shadow &= ~ZY7_QSPI_LQSPI_CFG_INST_CODE_MASK; sc->lqspi_cfg_shadow |= ZY7_QSPI_LQSPI_CFG_INST_CODE(sc->is_dio ? 
CMD_READ_DUAL_IO : CMD_READ_QUAD_OUTPUT); } else sc->lqspi_cfg_shadow |= ZY7_QSPI_LQSPI_CFG_SEP_BUS; } WR4(sc, ZY7_QSPI_LQSPI_CFG_REG, sc->lqspi_cfg_shadow); /* Find best clock divider. */ baud_div = 0; while ((sc->ref_clock >> (baud_div + 1)) > sc->spi_clock && baud_div < 8) baud_div++; if (baud_div >= 8) { device_printf(sc->dev, "cannot configure clock divider: ref=%d" " spi=%d.\n", sc->ref_clock, sc->spi_clock); return (EINVAL); } sc->spi_clk_real_freq = sc->ref_clock >> (baud_div + 1); /* * If divider is 2 (the max speed), use internal loopback master * clock for read data. (See section 12.3.1 in ref man.) */ if (baud_div == 0) WR4(sc, ZY7_QSPI_LPBK_DLY_ADJ_REG, ZY7_QSPI_LPBK_DLY_ADJ_USE_LPBK | ZY7_QSPI_LPBK_DLY_ADJ_DLY1(0) | ZY7_QSPI_LPBK_DLY_ADJ_DLY0(0)); else WR4(sc, ZY7_QSPI_LPBK_DLY_ADJ_REG, 0); /* Set up configuration register. */ sc->cfg_reg_shadow = ZY7_QSPI_CONFIG_IFMODE | ZY7_QSPI_CONFIG_HOLDB_DR | ZY7_QSPI_CONFIG_RSVD1 | ZY7_QSPI_CONFIG_SSFORCE | ZY7_QSPI_CONFIG_PCS | ZY7_QSPI_CONFIG_FIFO_WIDTH32 | ZY7_QSPI_CONFIG_BAUD_RATE_DIV(baud_div) | ZY7_QSPI_CONFIG_MODE_SEL; WR4(sc, ZY7_QSPI_CONFIG_REG, sc->cfg_reg_shadow); /* * Set thresholds. We must use 1 for tx threshold because there * is no fifo empty flag and we need one to implement a bug * workaround. */ WR4(sc, ZY7_QSPI_TX_THRESH_REG, 1); WR4(sc, ZY7_QSPI_RX_THRESH_REG, 1); /* Clear and disable all interrupts. */ WR4(sc, ZY7_QSPI_INTR_STAT_REG, ~0); WR4(sc, ZY7_QSPI_INTR_DIS_REG, ~0); /* Enable SPI. */ WR4(sc, ZY7_QSPI_EN_REG, ZY7_SPI_ENABLE); return (0); } static void zy7_qspi_add_sysctls(device_t dev) { struct zy7_qspi_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "spi_clk_real_freq", CTLFLAG_RD, &sc->spi_clk_real_freq, 0, "SPI clock real frequency"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overflows", CTLFLAG_RD, &sc->rx_overflows, 0, "RX FIFO overflow events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_underflows", CTLFLAG_RD, &sc->tx_underflows, 0, "TX FIFO underflow events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, &sc->interrupts, 0, "interrupt calls"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "stray_ints", CTLFLAG_RD, &sc->stray_ints, 0, "stray interrupts"); } static int zy7_qspi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Zynq Quad-SPI Flash Controller"); return (BUS_PROBE_DEFAULT); } static int zy7_qspi_attach(device_t dev) { struct zy7_qspi_softc *sc; int rid, err; phandle_t node; pcell_t cell; sc = device_get_softc(dev); sc->dev = dev; QSPI_SC_LOCK_INIT(sc); /* Get ref-clock, spi-clock, and other properties. 
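 * A hypothetical device tree node (names and numbers are for
 * illustration only, not taken from any real board) might provide
 * these properties as:
 *
 *    qspi: qspi@e000d000 {
 *            compatible = "xlnx,zy7_qspi";
 *            ref-clock = <200000000>;
 *            spi-clock = <50000000>;
 *            is-dual = <0>;
 *    };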
*/ node = ofw_bus_get_node(dev); if (OF_getprop(node, "ref-clock", &cell, sizeof(cell)) > 0) sc->ref_clock = fdt32_to_cpu(cell); else { device_printf(dev, "must have ref-clock property\n"); return (ENXIO); } if (OF_getprop(node, "spi-clock", &cell, sizeof(cell)) > 0) sc->spi_clock = fdt32_to_cpu(cell); else sc->spi_clock = ZY7_QSPI_DEFAULT_SPI_CLOCK; if (OF_getprop(node, "is-stacked", &cell, sizeof(cell)) > 0 && fdt32_to_cpu(cell) != 0) { sc->is_dual = 1; sc->is_stacked = 1; } else if (OF_getprop(node, "is-dual", &cell, sizeof(cell)) > 0 && fdt32_to_cpu(cell) != 0) sc->is_dual = 1; if (OF_getprop(node, "is-dio", &cell, sizeof(cell)) > 0 && fdt32_to_cpu(cell) != 0) sc->is_dio = 1; /* Get memory resource. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resources.\n"); zy7_qspi_detach(dev); return (ENOMEM); } /* Allocate IRQ. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "could not allocate IRQ resource.\n"); zy7_qspi_detach(dev); return (ENOMEM); } /* Activate the interrupt. */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, zy7_qspi_intr, sc, &sc->intrhandle); if (err) { device_printf(dev, "could not setup IRQ.\n"); zy7_qspi_detach(dev); return (err); } /* Configure the device. */ err = zy7_qspi_init_hw(sc); if (err) { zy7_qspi_detach(dev); return (err); } sc->child = device_add_child(dev, "spibus", DEVICE_UNIT_ANY); zy7_qspi_add_sysctls(dev); /* Attach spibus driver as a child later when interrupts work. */ bus_delayed_attach_children(dev); return (0); } static int zy7_qspi_detach(device_t dev) { struct zy7_qspi_softc *sc = device_get_softc(dev); if (device_is_attached(dev)) bus_generic_detach(dev); - /* Delete child bus. */ - if (sc->child) - device_delete_child(dev, sc->child); - /* Disable hardware. */ if (sc->mem_res != NULL) { /* Disable SPI. */ WR4(sc, ZY7_QSPI_EN_REG, 0); /* Clear and disable all interrupts. */ WR4(sc, ZY7_QSPI_INTR_STAT_REG, ~0); WR4(sc, ZY7_QSPI_INTR_DIS_REG, ~0); } /* Teardown and release interrupt. */ if (sc->irq_res != NULL) { if (sc->intrhandle) bus_teardown_intr(dev, sc->irq_res, sc->intrhandle); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); } /* Release memory resource. */ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); QSPI_SC_LOCK_DESTROY(sc); return (0); } static phandle_t zy7_qspi_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static int zy7_qspi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct zy7_qspi_softc *sc = device_get_softc(dev); int err = 0; KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz, ("TX/RX command sizes should be equal")); KASSERT(cmd->tx_data_sz == cmd->rx_data_sz, ("TX/RX data sizes should be equal")); if (sc->is_dual && cmd->tx_data_sz % 2 != 0) { device_printf(dev, "driver does not support odd byte data " "transfers in dual mode. (sz=%d)\n", cmd->tx_data_sz); return (EINVAL); } QSPI_SC_LOCK(sc); /* Wait for controller available. */ while (sc->busy != 0) { err = mtx_sleep(dev, &sc->sc_mtx, 0, "zqspi0", 0); if (err) { QSPI_SC_UNLOCK(sc); return (err); } } /* Start transfer. 
*/ sc->busy = 1; sc->cmd = cmd; sc->tx_bytes = sc->cmd->tx_cmd_sz + sc->cmd->tx_data_sz; sc->tx_bytes_sent = 0; sc->rx_bytes = sc->cmd->rx_cmd_sz + sc->cmd->rx_data_sz; sc->rx_bytes_rcvd = 0; /* Enable interrupts. zy7_qspi_intr() will handle transfer. */ WR4(sc, ZY7_QSPI_INTR_EN_REG, ZY7_QSPI_INTR_TX_FIFO_NOT_FULL | ZY7_QSPI_INTR_RX_OVERFLOW); #ifdef SPI_XFER_U_PAGE /* XXX: future support for stacked memories. */ if (sc->is_stacked) { if ((cmd->flags & SPI_XFER_U_PAGE) != 0) sc->lqspi_cfg_shadow |= ZY7_QSPI_LQSPI_CFG_U_PAGE; else sc->lqspi_cfg_shadow &= ~ZY7_QSPI_LQSPI_CFG_U_PAGE; WR4(sc, ZY7_QSPI_LQSPI_CFG_REG, sc->lqspi_cfg_shadow); } #endif /* Assert CS. */ sc->cfg_reg_shadow &= ~ZY7_QSPI_CONFIG_PCS; WR4(sc, ZY7_QSPI_CONFIG_REG, sc->cfg_reg_shadow); /* Wait for completion. */ err = mtx_sleep(dev, &sc->sc_mtx, 0, "zqspi1", hz * 2); if (err) zy7_qspi_abort_transfer(sc); /* Release controller. */ sc->busy = 0; wakeup_one(dev); QSPI_SC_UNLOCK(sc); return (err); } static device_method_t zy7_qspi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, zy7_qspi_probe), DEVMETHOD(device_attach, zy7_qspi_attach), DEVMETHOD(device_detach, zy7_qspi_detach), /* SPI interface */ DEVMETHOD(spibus_transfer, zy7_qspi_transfer), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, zy7_qspi_get_node), DEVMETHOD_END }; static driver_t zy7_qspi_driver = { "zy7_qspi", zy7_qspi_methods, sizeof(struct zy7_qspi_softc), }; DRIVER_MODULE(zy7_qspi, simplebus, zy7_qspi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, zy7_qspi, ofw_spibus_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); MODULE_DEPEND(zy7_qspi, ofw_spibus, 1, 1, 1); diff --git a/sys/arm/xilinx/zy7_spi.c b/sys/arm/xilinx/zy7_spi.c index a24ce542a860..d032deabf8b7 100644 --- a/sys/arm/xilinx/zy7_spi.c +++ b/sys/arm/xilinx/zy7_spi.c @@ -1,590 +1,586 @@ /*- * Copyright (c) 2018 Thomas Skibo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" static struct ofw_compat_data compat_data[] = { {"xlnx,zy7_spi", 1}, {"xlnx,zynq-spi-1.0", 1}, {"cdns,spi-r1p6", 1}, {NULL, 0} }; struct zy7_spi_softc { device_t dev; device_t child; struct mtx sc_mtx; struct resource *mem_res; struct resource *irq_res; void *intrhandle; uint32_t cfg_reg_shadow; uint32_t spi_clock; uint32_t ref_clock; unsigned int spi_clk_real_freq; unsigned int rx_overflows; unsigned int tx_underflows; unsigned int interrupts; unsigned int stray_ints; struct spi_command *cmd; int tx_bytes; /* tx_cmd_sz + tx_data_sz */ int tx_bytes_sent; int rx_bytes; /* rx_cmd_sz + rx_data_sz */ int rx_bytes_rcvd; int busy; }; #define ZY7_SPI_DEFAULT_SPI_CLOCK 50000000 #define SPI_SC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define SPI_SC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define SPI_SC_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), NULL, MTX_DEF) #define SPI_SC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) #define SPI_SC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define RD4(sc, off) (bus_read_4((sc)->mem_res, (off))) #define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val))) /* * SPI device registers. * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. * (v1.12.1) December 6, 2017. Xilinx doc UG585. */ #define ZY7_SPI_CONFIG_REG 0x0000 #define ZY7_SPI_CONFIG_MODEFAIL_GEN_EN (1 << 17) #define ZY7_SPI_CONFIG_MAN_STRT (1 << 16) #define ZY7_SPI_CONFIG_MAN_STRT_EN (1 << 15) #define ZY7_SPI_CONFIG_MAN_CS (1 << 14) #define ZY7_SPI_CONFIG_CS_MASK (0xf << 10) #define ZY7_SPI_CONFIG_CS(x) ((0xf ^ (1 << (x))) << 10) #define ZY7_SPI_CONFIG_PERI_SEL (1 << 9) #define ZY7_SPI_CONFIG_REF_CLK (1 << 8) #define ZY7_SPI_CONFIG_BAUD_RATE_DIV_MASK (7 << 3) #define ZY7_SPI_CONFIG_BAUD_RATE_DIV_SHIFT 3 #define ZY7_SPI_CONFIG_BAUD_RATE_DIV(x) ((x) << 3) /* divide by 2< 0) { if (sc->tx_bytes_sent < sc->cmd->tx_cmd_sz) /* Writing command. */ byte = *((uint8_t *)sc->cmd->tx_cmd + sc->tx_bytes_sent); else /* Writing data. */ byte = *((uint8_t *)sc->cmd->tx_data + (sc->tx_bytes_sent - sc->cmd->tx_cmd_sz)); WR4(sc, ZY7_SPI_TX_DATA_REG, (uint32_t)byte); sc->tx_bytes_sent++; nbytes--; } } /* Read hardware fifo data into command response and data buffers. */ static void zy7_spi_read_fifo(struct zy7_spi_softc *sc) { uint8_t byte; do { byte = RD4(sc, ZY7_SPI_RX_DATA_REG) & 0xff; if (sc->rx_bytes_rcvd < sc->cmd->rx_cmd_sz) /* Reading command. */ *((uint8_t *)sc->cmd->rx_cmd + sc->rx_bytes_rcvd) = byte; else /* Reading data. */ *((uint8_t *)sc->cmd->rx_data + (sc->rx_bytes_rcvd - sc->cmd->rx_cmd_sz)) = byte; sc->rx_bytes_rcvd++; } while (sc->rx_bytes_rcvd < sc->rx_bytes && (RD4(sc, ZY7_SPI_INTR_STAT_REG) & ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY) != 0); } /* End a transfer early by draining rx fifo and disabling interrupts. */ static void zy7_spi_abort_transfer(struct zy7_spi_softc *sc) { /* Drain receive fifo. */ while ((RD4(sc, ZY7_SPI_INTR_STAT_REG) & ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY) != 0) (void)RD4(sc, ZY7_SPI_RX_DATA_REG); /* Shut down interrupts. 
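 * A late interrupt from the aborted command will then be counted as a
 * stray interrupt by zy7_spi_intr() rather than acting on a transfer
 * that is no longer in progress.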
*/ WR4(sc, ZY7_SPI_INTR_DIS_REG, ZY7_SPI_INTR_RX_OVERFLOW | ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY | ZY7_SPI_INTR_TX_FIFO_NOT_FULL); } static void zy7_spi_intr(void *arg) { struct zy7_spi_softc *sc = (struct zy7_spi_softc *)arg; uint32_t istatus; SPI_SC_LOCK(sc); sc->interrupts++; istatus = RD4(sc, ZY7_SPI_INTR_STAT_REG); /* Stray interrupts can happen if a transfer gets interrupted. */ if (!sc->busy) { sc->stray_ints++; SPI_SC_UNLOCK(sc); return; } if ((istatus & ZY7_SPI_INTR_RX_OVERFLOW) != 0) { device_printf(sc->dev, "rx fifo overflow!\n"); sc->rx_overflows++; /* Clear status bit. */ WR4(sc, ZY7_SPI_INTR_STAT_REG, ZY7_SPI_INTR_RX_OVERFLOW); } /* Empty receive fifo before any more transmit data is sent. */ if (sc->rx_bytes_rcvd < sc->rx_bytes && (istatus & ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY) != 0) { zy7_spi_read_fifo(sc); if (sc->rx_bytes_rcvd == sc->rx_bytes) /* Disable receive interrupts. */ WR4(sc, ZY7_SPI_INTR_DIS_REG, ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY | ZY7_SPI_INTR_RX_OVERFLOW); } /* Count tx underflows. They probably shouldn't happen. */ if ((istatus & ZY7_SPI_INTR_TX_FIFO_UNDERFLOW) != 0) { sc->tx_underflows++; /* Clear status bit. */ WR4(sc, ZY7_SPI_INTR_STAT_REG, ZY7_SPI_INTR_TX_FIFO_UNDERFLOW); } /* Fill transmit fifo. */ if (sc->tx_bytes_sent < sc->tx_bytes && (istatus & ZY7_SPI_INTR_TX_FIFO_NOT_FULL) != 0) { zy7_spi_write_fifo(sc, MIN(96, sc->tx_bytes - sc->tx_bytes_sent)); if (sc->tx_bytes_sent == sc->tx_bytes) { /* Disable transmit FIFO interrupt, enable receive * FIFO interrupt. */ WR4(sc, ZY7_SPI_INTR_DIS_REG, ZY7_SPI_INTR_TX_FIFO_NOT_FULL); WR4(sc, ZY7_SPI_INTR_EN_REG, ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY); } } /* Finished with transfer? */ if (sc->tx_bytes_sent == sc->tx_bytes && sc->rx_bytes_rcvd == sc->rx_bytes) { /* De-assert CS. */ sc->cfg_reg_shadow &= ~(ZY7_SPI_CONFIG_CLK_PH | ZY7_SPI_CONFIG_CLK_POL); sc->cfg_reg_shadow |= ZY7_SPI_CONFIG_CS_MASK; WR4(sc, ZY7_SPI_CONFIG_REG, sc->cfg_reg_shadow); wakeup(sc->dev); } SPI_SC_UNLOCK(sc); } /* Initialize hardware. */ static int zy7_spi_init_hw(struct zy7_spi_softc *sc) { uint32_t baud_div; /* Find best clock divider. Divide by 2 not supported. */ baud_div = 1; while ((sc->ref_clock >> (baud_div + 1)) > sc->spi_clock && baud_div < 8) baud_div++; if (baud_div >= 8) { device_printf(sc->dev, "cannot configure clock divider: ref=%d" " spi=%d.\n", sc->ref_clock, sc->spi_clock); return (EINVAL); } sc->spi_clk_real_freq = sc->ref_clock >> (baud_div + 1); /* Set up configuration register. */ sc->cfg_reg_shadow = ZY7_SPI_CONFIG_MAN_CS | ZY7_SPI_CONFIG_CS_MASK | ZY7_SPI_CONFIG_BAUD_RATE_DIV(baud_div) | ZY7_SPI_CONFIG_MODE_SEL; WR4(sc, ZY7_SPI_CONFIG_REG, sc->cfg_reg_shadow); /* Set thresholds. */ WR4(sc, ZY7_SPI_TX_THRESH_REG, 32); WR4(sc, ZY7_SPI_RX_THRESH_REG, 1); /* Clear and disable all interrupts. */ WR4(sc, ZY7_SPI_INTR_STAT_REG, ~0); WR4(sc, ZY7_SPI_INTR_DIS_REG, ~0); /* Enable SPI. 
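 * (For reference, the divider chosen above is a power of two: with a
 * hypothetical 100 MHz ref-clock and a 25 MHz spi-clock request the
 * loop settles on baud_div = 1, i.e. divide by 4, so
 * spi_clk_real_freq ends up at 25 MHz.)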
*/ WR4(sc, ZY7_SPI_EN_REG, ZY7_SPI_ENABLE); return (0); } static void zy7_spi_add_sysctls(device_t dev) { struct zy7_spi_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "spi_clk_real_freq", CTLFLAG_RD, &sc->spi_clk_real_freq, 0, "SPI clock real frequency"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overflows", CTLFLAG_RD, &sc->rx_overflows, 0, "RX FIFO overflow events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_underflows", CTLFLAG_RD, &sc->tx_underflows, 0, "TX FIFO underflow events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, &sc->interrupts, 0, "interrupt calls"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "stray_ints", CTLFLAG_RD, &sc->stray_ints, 0, "stray interrupts"); } static int zy7_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Zynq SPI Controller"); return (BUS_PROBE_DEFAULT); } static int zy7_spi_detach(device_t); static int zy7_spi_attach(device_t dev) { struct zy7_spi_softc *sc; int rid, err; phandle_t node; pcell_t cell; sc = device_get_softc(dev); sc->dev = dev; SPI_SC_LOCK_INIT(sc); /* Get ref-clock and spi-clock properties. */ node = ofw_bus_get_node(dev); if (OF_getprop(node, "ref-clock", &cell, sizeof(cell)) > 0) sc->ref_clock = fdt32_to_cpu(cell); else { device_printf(dev, "must have ref-clock property\n"); return (ENXIO); } if (OF_getprop(node, "spi-clock", &cell, sizeof(cell)) > 0) sc->spi_clock = fdt32_to_cpu(cell); else sc->spi_clock = ZY7_SPI_DEFAULT_SPI_CLOCK; /* Get memory resource. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resources.\n"); zy7_spi_detach(dev); return (ENOMEM); } /* Allocate IRQ. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "could not allocate IRQ resource.\n"); zy7_spi_detach(dev); return (ENOMEM); } /* Activate the interrupt. */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, zy7_spi_intr, sc, &sc->intrhandle); if (err) { device_printf(dev, "could not setup IRQ.\n"); zy7_spi_detach(dev); return (err); } /* Configure the device. */ err = zy7_spi_init_hw(sc); if (err) { zy7_spi_detach(dev); return (err); } sc->child = device_add_child(dev, "spibus", DEVICE_UNIT_ANY); zy7_spi_add_sysctls(dev); /* Attach spibus driver as a child later when interrupts work. */ bus_delayed_attach_children(dev); return (0); } static int zy7_spi_detach(device_t dev) { struct zy7_spi_softc *sc = device_get_softc(dev); if (device_is_attached(dev)) bus_generic_detach(dev); - /* Delete child bus. */ - if (sc->child) - device_delete_child(dev, sc->child); - /* Disable hardware. */ if (sc->mem_res != NULL) { /* Disable SPI. */ WR4(sc, ZY7_SPI_EN_REG, 0); /* Clear and disable all interrupts. */ WR4(sc, ZY7_SPI_INTR_STAT_REG, ~0); WR4(sc, ZY7_SPI_INTR_DIS_REG, ~0); } /* Teardown and release interrupt. */ if (sc->irq_res != NULL) { if (sc->intrhandle) bus_teardown_intr(dev, sc->irq_res, sc->intrhandle); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); } /* Release memory resource. 
*/ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); SPI_SC_LOCK_DESTROY(sc); return (0); } static phandle_t zy7_spi_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static int zy7_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct zy7_spi_softc *sc = device_get_softc(dev); uint32_t cs; uint32_t mode; int err = 0; KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz, ("TX/RX command sizes should be equal")); KASSERT(cmd->tx_data_sz == cmd->rx_data_sz, ("TX/RX data sizes should be equal")); /* Get chip select and mode for this child. */ spibus_get_cs(child, &cs); cs &= ~SPIBUS_CS_HIGH; if (cs > 2) { device_printf(dev, "Invalid chip select %d requested by %s", cs, device_get_nameunit(child)); return (EINVAL); } spibus_get_mode(child, &mode); SPI_SC_LOCK(sc); /* Wait for controller available. */ while (sc->busy != 0) { err = mtx_sleep(dev, &sc->sc_mtx, 0, "zspi0", 0); if (err) { SPI_SC_UNLOCK(sc); return (err); } } /* Start transfer. */ sc->busy = 1; sc->cmd = cmd; sc->tx_bytes = sc->cmd->tx_cmd_sz + sc->cmd->tx_data_sz; sc->tx_bytes_sent = 0; sc->rx_bytes = sc->cmd->rx_cmd_sz + sc->cmd->rx_data_sz; sc->rx_bytes_rcvd = 0; /* Enable interrupts. zy7_spi_intr() will handle transfer. */ WR4(sc, ZY7_SPI_INTR_EN_REG, ZY7_SPI_INTR_TX_FIFO_NOT_FULL | ZY7_SPI_INTR_RX_OVERFLOW); /* Handle polarity and phase. */ if (mode == SPIBUS_MODE_CPHA || mode == SPIBUS_MODE_CPOL_CPHA) sc->cfg_reg_shadow |= ZY7_SPI_CONFIG_CLK_PH; if (mode == SPIBUS_MODE_CPOL || mode == SPIBUS_MODE_CPOL_CPHA) sc->cfg_reg_shadow |= ZY7_SPI_CONFIG_CLK_POL; /* Assert CS. */ sc->cfg_reg_shadow &= ~ZY7_SPI_CONFIG_CS_MASK; sc->cfg_reg_shadow |= ZY7_SPI_CONFIG_CS(cs); WR4(sc, ZY7_SPI_CONFIG_REG, sc->cfg_reg_shadow); /* Wait for completion. */ err = mtx_sleep(dev, &sc->sc_mtx, 0, "zspi1", hz * 2); if (err) zy7_spi_abort_transfer(sc); /* Release controller. */ sc->busy = 0; wakeup_one(dev); SPI_SC_UNLOCK(sc); return (err); } static device_method_t zy7_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, zy7_spi_probe), DEVMETHOD(device_attach, zy7_spi_attach), DEVMETHOD(device_detach, zy7_spi_detach), /* SPI interface */ DEVMETHOD(spibus_transfer, zy7_spi_transfer), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, zy7_spi_get_node), DEVMETHOD_END }; static driver_t zy7_spi_driver = { "zy7_spi", zy7_spi_methods, sizeof(struct zy7_spi_softc), }; DRIVER_MODULE(zy7_spi, simplebus, zy7_spi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, zy7_spi, ofw_spibus_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); MODULE_DEPEND(zy7_spi, ofw_spibus, 1, 1, 1); diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c index 226fca16ac28..73af49015e52 100644 --- a/sys/dev/bce/if_bce.c +++ b/sys/dev/bce/if_bce.c @@ -1,11393 +1,11392 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006-2014 QLogic Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include /* * The following controllers are supported by this driver: * BCM5706C A2, A3 * BCM5706S A2, A3 * BCM5708C B1, B2 * BCM5708S B1, B2 * BCM5709C A1, C0 * BCM5709S A1, C0 * BCM5716C C0 * BCM5716S C0 * * The following controllers are not supported by this driver: * BCM5706C A0, A1 (pre-production) * BCM5706S A0, A1 (pre-production) * BCM5708C A0, B0 (pre-production) * BCM5708S A0, B0 (pre-production) * BCM5709C A0 B0, B1, B2 (pre-production) * BCM5709S A0, B0, B1, B2 (pre-production) */ #include "opt_bce.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #include #include #include "miibus_if.h" #include #include /****************************************************************************/ /* BCE Debug Options */ /****************************************************************************/ #ifdef BCE_DEBUG u32 bce_debug = BCE_WARN; /* 0 = Never */ /* 1 = 1 in 2,147,483,648 */ /* 256 = 1 in 8,388,608 */ /* 2048 = 1 in 1,048,576 */ /* 65536 = 1 in 32,768 */ /* 1048576 = 1 in 2,048 */ /* 268435456 = 1 in 8 */ /* 536870912 = 1 in 4 */ /* 1073741824 = 1 in 2 */ /* Controls how often the l2_fhdr frame error check will fail. */ int l2fhdr_error_sim_control = 0; /* Controls how often the unexpected attention check will fail. */ int unexpected_attention_sim_control = 0; /* Controls how often to simulate an mbuf allocation failure. */ int mbuf_alloc_failed_sim_control = 0; /* Controls how often to simulate a DMA mapping failure. */ int dma_map_addr_failed_sim_control = 0; /* Controls how often to simulate a bootcode failure. */ int bootcode_running_failure_sim_control = 0; #endif /****************************************************************************/ /* PCI Device ID Table */ /* */ /* Used by bce_probe() to identify the devices supported by this driver. */ /****************************************************************************/ #define BCE_DEVDESC_MAX 64 static const struct bce_type bce_devs[] = { /* BCM5706C Controllers and OEM boards. 
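 * Entries carrying explicit subvendor/subdevice IDs are listed before the
 * PCI_ANY_ID catch-all so bce_probe() reports the more specific OEM
 * description when one matches.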
*/ { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101, "HP NC370T Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106, "HP NC370i Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070, "HP NC380T PCIe DP Multifunc Gig Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709, "HP NC371i Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5706 1000Base-T" }, /* BCM5706S controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102, "HP NC370F Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5706 1000Base-SX" }, /* BCM5708C controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037, "HP NC373T PCIe Multifunction Gig Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038, "HP NC373i Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045, "HP NC374m PCIe Multifunction Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5708 1000Base-T" }, /* BCM5708S controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706, "HP NC373m Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b, "HP NC373i Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d, "HP NC373F PCIe Multifunc Giga Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5708 1000Base-SX" }, /* BCM5709C controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055, "HP NC382i DP Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059, "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5709 1000Base-T" }, /* BCM5709S controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d, "HP NC382m DP 1GbE Multifunction BL-c Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056, "HP NC382i DP Multifunction Gigabit Server Adapter" }, { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5709 1000Base-SX" }, /* BCM5716 controllers and OEM boards. */ { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM5716 1000Base-T" }, { 0, 0, 0, 0, NULL } }; /****************************************************************************/ /* Supported Flash NVRAM device data. 
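 * The table mixes buffered parts (EEPROM, Atmel) and non-buffered parts
 * (Saifun, ST Micro) plus placeholder "Expansion entry" rows; BCM5709/5716
 * devices bypass it entirely and use flash_5709 below.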
*/ /****************************************************************************/ static const struct flash_spec flash_table[] = { #define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE) #define NONBUFFERED_FLAGS (BCE_NV_WREN) /* Slow EEPROM */ {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - slow"}, /* Expansion entry 0001 */ {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0001"}, /* Saifun SA25F010 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, "Non-buffered flash (128kB)"}, /* Saifun SA25F020 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, "Non-buffered flash (256kB)"}, /* Expansion entry 0100 */ {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0100"}, /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, "Entry 0101: ST M45PE10 (128kB non-buffered)"}, /* Entry 0110: ST M45PE20 (non-buffered flash)*/ {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, "Entry 0110: ST M45PE20 (256kB non-buffered)"}, /* Saifun SA25F005 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, "Non-buffered flash (64kB)"}, /* Fast EEPROM */ {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - fast"}, /* Expansion entry 1001 */ {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1001"}, /* Expansion entry 1010 */ {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1010"}, /* ATMEL AT45DB011B (buffered flash) */ {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, "Buffered flash (128kB)"}, /* Expansion entry 1100 */ {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1100"}, /* Expansion entry 1101 */ {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, 
SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1101"}, /* Ateml Expansion entry 1110 */ {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, 0, "Entry 1110 (Atmel)"}, /* ATMEL AT45DB021B (buffered flash) */ {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, "Buffered flash (256kB)"}, }; /* * The BCM5709 controllers transparently handle the * differences between Atmel 264 byte pages and all * flash devices which use 256 byte pages, so no * logical-to-physical mapping is required in the * driver. */ static const struct flash_spec flash_5709 = { .flags = BCE_NV_BUFFERED, .page_bits = BCM5709_FLASH_PAGE_BITS, .page_size = BCM5709_FLASH_PAGE_SIZE, .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2, .name = "5709/5716 buffered flash (256kB)", }; /****************************************************************************/ /* FreeBSD device entry points. */ /****************************************************************************/ static int bce_probe (device_t); static int bce_attach (device_t); static int bce_detach (device_t); static int bce_shutdown (device_t); /****************************************************************************/ /* BCE Debug Data Structure Dump Routines */ /****************************************************************************/ #ifdef BCE_DEBUG static u32 bce_reg_rd (struct bce_softc *, u32); static void bce_reg_wr (struct bce_softc *, u32, u32); static void bce_reg_wr16 (struct bce_softc *, u32, u16); static u32 bce_ctx_rd (struct bce_softc *, u32, u32); static void bce_dump_enet (struct bce_softc *, struct mbuf *); static void bce_dump_mbuf (struct bce_softc *, struct mbuf *); static void bce_dump_tx_mbuf_chain (struct bce_softc *, u16, int); static void bce_dump_rx_mbuf_chain (struct bce_softc *, u16, int); static void bce_dump_pg_mbuf_chain (struct bce_softc *, u16, int); static void bce_dump_txbd (struct bce_softc *, int, struct tx_bd *); static void bce_dump_rxbd (struct bce_softc *, int, struct rx_bd *); static void bce_dump_pgbd (struct bce_softc *, int, struct rx_bd *); static void bce_dump_l2fhdr (struct bce_softc *, int, struct l2_fhdr *); static void bce_dump_ctx (struct bce_softc *, u16); static void bce_dump_ftqs (struct bce_softc *); static void bce_dump_tx_chain (struct bce_softc *, u16, int); static void bce_dump_rx_bd_chain (struct bce_softc *, u16, int); static void bce_dump_pg_chain (struct bce_softc *, u16, int); static void bce_dump_status_block (struct bce_softc *); static void bce_dump_stats_block (struct bce_softc *); static void bce_dump_driver_state (struct bce_softc *); static void bce_dump_hw_state (struct bce_softc *); static void bce_dump_shmem_state (struct bce_softc *); static void bce_dump_mq_regs (struct bce_softc *); static void bce_dump_bc_state (struct bce_softc *); static void bce_dump_txp_state (struct bce_softc *, int); static void bce_dump_rxp_state (struct bce_softc *, int); static void bce_dump_tpat_state (struct bce_softc *, int); static void bce_dump_cp_state (struct bce_softc *, int); static void bce_dump_com_state (struct bce_softc *, int); static void bce_dump_rv2p_state (struct bce_softc *); static void bce_breakpoint (struct bce_softc *); #endif /*BCE_DEBUG */ /****************************************************************************/ 
/* BCE Register/Memory Access Routines */ /****************************************************************************/ static u32 bce_reg_rd_ind (struct bce_softc *, u32); static void bce_reg_wr_ind (struct bce_softc *, u32, u32); static void bce_shmem_wr (struct bce_softc *, u32, u32); static u32 bce_shmem_rd (struct bce_softc *, u32); static void bce_ctx_wr (struct bce_softc *, u32, u32, u32); static int bce_miibus_read_reg (device_t, int, int); static int bce_miibus_write_reg (device_t, int, int, int); static void bce_miibus_statchg (device_t); #ifdef BCE_DEBUG static int bce_sysctl_nvram_dump(SYSCTL_HANDLER_ARGS); #ifdef BCE_NVRAM_WRITE_SUPPORT static int bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS); #endif #endif /****************************************************************************/ /* BCE NVRAM Access Routines */ /****************************************************************************/ static int bce_acquire_nvram_lock (struct bce_softc *); static int bce_release_nvram_lock (struct bce_softc *); static void bce_enable_nvram_access(struct bce_softc *); static void bce_disable_nvram_access(struct bce_softc *); static int bce_nvram_read_dword (struct bce_softc *, u32, u8 *, u32); static int bce_init_nvram (struct bce_softc *); static int bce_nvram_read (struct bce_softc *, u32, u8 *, int); static int bce_nvram_test (struct bce_softc *); #ifdef BCE_NVRAM_WRITE_SUPPORT static int bce_enable_nvram_write (struct bce_softc *); static void bce_disable_nvram_write(struct bce_softc *); static int bce_nvram_erase_page (struct bce_softc *, u32); static int bce_nvram_write_dword (struct bce_softc *, u32, u8 *, u32); static int bce_nvram_write (struct bce_softc *, u32, u8 *, int); #endif /****************************************************************************/ /* */ /****************************************************************************/ static void bce_get_rx_buffer_sizes(struct bce_softc *, int); static void bce_get_media (struct bce_softc *); static void bce_init_media (struct bce_softc *); static u32 bce_get_rphy_link (struct bce_softc *); static void bce_dma_map_addr (void *, bus_dma_segment_t *, int, int); static int bce_dma_alloc (device_t); static void bce_dma_free (struct bce_softc *); static void bce_release_resources (struct bce_softc *); /****************************************************************************/ /* BCE Firmware Synchronization and Load */ /****************************************************************************/ static void bce_fw_cap_init (struct bce_softc *); static int bce_fw_sync (struct bce_softc *, u32); static void bce_load_rv2p_fw (struct bce_softc *, const u32 *, u32, u32); static void bce_load_cpu_fw (struct bce_softc *, struct cpu_reg *, struct fw_info *); static void bce_start_cpu (struct bce_softc *, struct cpu_reg *); static void bce_halt_cpu (struct bce_softc *, struct cpu_reg *); static void bce_start_rxp_cpu (struct bce_softc *); static void bce_init_rxp_cpu (struct bce_softc *); static void bce_init_txp_cpu (struct bce_softc *); static void bce_init_tpat_cpu (struct bce_softc *); static void bce_init_cp_cpu (struct bce_softc *); static void bce_init_com_cpu (struct bce_softc *); static void bce_init_cpus (struct bce_softc *); static void bce_print_adapter_info (struct bce_softc *); static void bce_probe_pci_caps (device_t, struct bce_softc *); static void bce_stop (struct bce_softc *); static int bce_reset (struct bce_softc *, u32); static int bce_chipinit (struct bce_softc *); static int bce_blockinit (struct bce_softc 
*); static int bce_init_tx_chain (struct bce_softc *); static void bce_free_tx_chain (struct bce_softc *); static int bce_get_rx_buf (struct bce_softc *, u16, u16, u32 *); static int bce_init_rx_chain (struct bce_softc *); static void bce_fill_rx_chain (struct bce_softc *); static void bce_free_rx_chain (struct bce_softc *); static int bce_get_pg_buf (struct bce_softc *, u16, u16); static int bce_init_pg_chain (struct bce_softc *); static void bce_fill_pg_chain (struct bce_softc *); static void bce_free_pg_chain (struct bce_softc *); static struct mbuf *bce_tso_setup (struct bce_softc *, struct mbuf **, u16 *); static int bce_tx_encap (struct bce_softc *, struct mbuf **); static void bce_start_locked (if_t); static void bce_start (if_t); static int bce_ioctl (if_t, u_long, caddr_t); static uint64_t bce_get_counter (if_t, ift_counter); static void bce_watchdog (struct bce_softc *); static int bce_ifmedia_upd (if_t); static int bce_ifmedia_upd_locked (if_t); static void bce_ifmedia_sts (if_t, struct ifmediareq *); static void bce_ifmedia_sts_rphy (struct bce_softc *, struct ifmediareq *); static void bce_init_locked (struct bce_softc *); static void bce_init (void *); static void bce_mgmt_init_locked (struct bce_softc *sc); static int bce_init_ctx (struct bce_softc *); static void bce_get_mac_addr (struct bce_softc *); static void bce_set_mac_addr (struct bce_softc *); static void bce_phy_intr (struct bce_softc *); static inline u16 bce_get_hw_rx_cons (struct bce_softc *); static void bce_rx_intr (struct bce_softc *); static void bce_tx_intr (struct bce_softc *); static void bce_disable_intr (struct bce_softc *); static void bce_enable_intr (struct bce_softc *, int); static void bce_intr (void *); static void bce_set_rx_mode (struct bce_softc *); static void bce_stats_update (struct bce_softc *); static void bce_tick (void *); static void bce_pulse (void *); static void bce_add_sysctls (struct bce_softc *); /****************************************************************************/ /* FreeBSD device dispatch table. */ /****************************************************************************/ static device_method_t bce_methods[] = { /* Device interface (device_if.h) */ DEVMETHOD(device_probe, bce_probe), DEVMETHOD(device_attach, bce_attach), DEVMETHOD(device_detach, bce_detach), DEVMETHOD(device_shutdown, bce_shutdown), /* Supported by device interface but not used here. */ /* DEVMETHOD(device_identify, bce_identify), */ /* DEVMETHOD(device_suspend, bce_suspend), */ /* DEVMETHOD(device_resume, bce_resume), */ /* DEVMETHOD(device_quiesce, bce_quiesce), */ /* MII interface (miibus_if.h) */ DEVMETHOD(miibus_readreg, bce_miibus_read_reg), DEVMETHOD(miibus_writereg, bce_miibus_write_reg), DEVMETHOD(miibus_statchg, bce_miibus_statchg), /* Supported by MII interface but not used here. 
*/ /* DEVMETHOD(miibus_linkchg, bce_miibus_linkchg), */ /* DEVMETHOD(miibus_mediainit, bce_miibus_mediainit), */ DEVMETHOD_END }; static driver_t bce_driver = { "bce", bce_methods, sizeof(struct bce_softc) }; MODULE_DEPEND(bce, pci, 1, 1, 1); MODULE_DEPEND(bce, ether, 1, 1, 1); MODULE_DEPEND(bce, miibus, 1, 1, 1); DRIVER_MODULE(bce, pci, bce_driver, NULL, NULL); DRIVER_MODULE(miibus, bce, miibus_driver, NULL, NULL); MODULE_PNP_INFO("U16:vendor;U16:device;U16:#;U16:#;D:#", pci, bce, bce_devs, nitems(bce_devs) - 1); /****************************************************************************/ /* Tunable device values */ /****************************************************************************/ static SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "bce driver parameters"); /* Allowable values are TRUE or FALSE */ static int bce_verbose = TRUE; SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0, "Verbose output enable/disable"); /* Allowable values are TRUE or FALSE */ static int bce_tso_enable = TRUE; SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0, "TSO Enable/Disable"); /* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ /* ToDo: Add MSI-X support. */ static int bce_msi_enable = 1; SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0, "MSI-X|MSI|INTx selector"); /* Allowable values are 1, 2, 4, 8. */ static int bce_rx_pages = DEFAULT_RX_PAGES; SYSCTL_UINT(_hw_bce, OID_AUTO, rx_pages, CTLFLAG_RDTUN, &bce_rx_pages, 0, "Receive buffer descriptor pages (1 page = 255 buffer descriptors)"); /* Allowable values are 1, 2, 4, 8. */ static int bce_tx_pages = DEFAULT_TX_PAGES; SYSCTL_UINT(_hw_bce, OID_AUTO, tx_pages, CTLFLAG_RDTUN, &bce_tx_pages, 0, "Transmit buffer descriptor pages (1 page = 255 buffer descriptors)"); /* Allowable values are TRUE or FALSE. */ static int bce_hdr_split = TRUE; SYSCTL_UINT(_hw_bce, OID_AUTO, hdr_split, CTLFLAG_RDTUN, &bce_hdr_split, 0, "Frame header/payload splitting Enable/Disable"); /* Allowable values are TRUE or FALSE. */ static int bce_strict_rx_mtu = FALSE; SYSCTL_UINT(_hw_bce, OID_AUTO, strict_rx_mtu, CTLFLAG_RDTUN, &bce_strict_rx_mtu, 0, "Enable/Disable strict RX frame size checking"); /* Allowable values are 0 ... 100 */ #ifdef BCE_DEBUG /* Generate 1 interrupt for every transmit completion. */ static int bce_tx_quick_cons_trip_int = 1; #else /* Generate 1 interrupt for every 20 transmit completions. */ static int bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip_int, CTLFLAG_RDTUN, &bce_tx_quick_cons_trip_int, 0, "Transmit BD trip point during interrupts"); /* Allowable values are 0 ... 100 */ /* Generate 1 interrupt for every transmit completion. */ #ifdef BCE_DEBUG static int bce_tx_quick_cons_trip = 1; #else /* Generate 1 interrupt for every 20 transmit completions. */ static int bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip, CTLFLAG_RDTUN, &bce_tx_quick_cons_trip, 0, "Transmit BD trip point"); /* Allowable values are 0 ... 100 */ #ifdef BCE_DEBUG /* Generate an interrupt if 0us have elapsed since the last TX completion. */ static int bce_tx_ticks_int = 0; #else /* Generate an interrupt if 80us have elapsed since the last TX completion. 
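 * The default can be overridden with the hw.bce.tx_ticks_int loader
 * tunable declared just below (CTLFLAG_RDTUN).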
*/ static int bce_tx_ticks_int = DEFAULT_TX_TICKS_INT; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks_int, CTLFLAG_RDTUN, &bce_tx_ticks_int, 0, "Transmit ticks count during interrupt"); /* Allowable values are 0 ... 100 */ #ifdef BCE_DEBUG /* Generate an interrupt if 0us have elapsed since the last TX completion. */ static int bce_tx_ticks = 0; #else /* Generate an interrupt if 80us have elapsed since the last TX completion. */ static int bce_tx_ticks = DEFAULT_TX_TICKS; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks, CTLFLAG_RDTUN, &bce_tx_ticks, 0, "Transmit ticks count"); /* Allowable values are 1 ... 100 */ #ifdef BCE_DEBUG /* Generate 1 interrupt for every received frame. */ static int bce_rx_quick_cons_trip_int = 1; #else /* Generate 1 interrupt for every 6 received frames. */ static int bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip_int, CTLFLAG_RDTUN, &bce_rx_quick_cons_trip_int, 0, "Receive BD trip point during interrupts"); /* Allowable values are 1 ... 100 */ #ifdef BCE_DEBUG /* Generate 1 interrupt for every received frame. */ static int bce_rx_quick_cons_trip = 1; #else /* Generate 1 interrupt for every 6 received frames. */ static int bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip, CTLFLAG_RDTUN, &bce_rx_quick_cons_trip, 0, "Receive BD trip point"); /* Allowable values are 0 ... 100 */ #ifdef BCE_DEBUG /* Generate an int. if 0us have elapsed since the last received frame. */ static int bce_rx_ticks_int = 0; #else /* Generate an int. if 18us have elapsed since the last received frame. */ static int bce_rx_ticks_int = DEFAULT_RX_TICKS_INT; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks_int, CTLFLAG_RDTUN, &bce_rx_ticks_int, 0, "Receive ticks count during interrupt"); /* Allowable values are 0 ... 100 */ #ifdef BCE_DEBUG /* Generate an int. if 0us have elapsed since the last received frame. */ static int bce_rx_ticks = 0; #else /* Generate an int. if 18us have elapsed since the last received frame. */ static int bce_rx_ticks = DEFAULT_RX_TICKS; #endif SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks, CTLFLAG_RDTUN, &bce_rx_ticks, 0, "Receive ticks count"); /****************************************************************************/ /* Device probe function. */ /* */ /* Compares the device to the driver's list of supported devices and */ /* reports back to the OS whether this is the right driver for the device. */ /* */ /* Returns: */ /* BUS_PROBE_DEFAULT on success, positive value on failure. */ /****************************************************************************/ static int bce_probe(device_t dev) { const struct bce_type *t; struct bce_softc *sc; u16 vid = 0, did = 0, svid = 0, sdid = 0; t = bce_devs; sc = device_get_softc(dev); sc->bce_unit = device_get_unit(dev); sc->bce_dev = dev; /* Get the data for the device to be probed. */ vid = pci_get_vendor(dev); did = pci_get_device(dev); svid = pci_get_subvendor(dev); sdid = pci_get_subdevice(dev); DBPRINT(sc, BCE_EXTREME_LOAD, "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); /* Look through the list of known devices for a match. 
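 * Vendor and device IDs must match exactly; subvendor/subdevice entries
 * of PCI_ANY_ID act as wildcards.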
*/ while(t->bce_name != NULL) { if ((vid == t->bce_vid) && (did == t->bce_did) && ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) && ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) { device_set_descf(dev, "%s (%c%d)", t->bce_name, (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); return(BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /****************************************************************************/ /* PCI Capabilities Probe Function. */ /* */ /* Walks the PCI capabiites list for the device to find what features are */ /* supported. */ /* */ /* Returns: */ /* None. */ /****************************************************************************/ static void bce_print_adapter_info(struct bce_softc *sc) { int i = 0; DBENTER(BCE_VERBOSE_LOAD); if (bce_verbose || bootverbose) { BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid); printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4)); /* Bus info. */ if (sc->bce_flags & BCE_PCIE_FLAG) { printf("Bus (PCIe x%d, ", sc->link_width); switch (sc->link_speed) { case 1: printf("2.5Gbps); "); break; case 2: printf("5Gbps); "); break; default: printf("Unknown link speed); "); } } else { printf("Bus (PCI%s, %s, %dMHz); ", ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""), ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"), sc->bus_speed_mhz); } /* Firmware version and device features. */ printf("B/C (%s); Bufs (RX:%d;TX:%d;PG:%d); Flags (", sc->bce_bc_ver, sc->rx_pages, sc->tx_pages, (bce_hdr_split == TRUE ? sc->pg_pages: 0)); if (bce_hdr_split == TRUE) { printf("SPLT"); i++; } if (sc->bce_flags & BCE_USING_MSI_FLAG) { if (i > 0) printf("|"); printf("MSI"); i++; } if (sc->bce_flags & BCE_USING_MSIX_FLAG) { if (i > 0) printf("|"); printf("MSI-X"); i++; } if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) { if (i > 0) printf("|"); printf("2.5G"); i++; } if (sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) { if (i > 0) printf("|"); printf("Remote PHY(%s)", sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG ? "FIBER" : "TP"); i++; } if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { if (i > 0) printf("|"); printf("MFW); MFW (%s)\n", sc->bce_mfw_ver); } else { printf(")\n"); } printf("Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n", sc->bce_rx_quick_cons_trip_int, sc->bce_rx_quick_cons_trip, sc->bce_rx_ticks_int, sc->bce_rx_ticks, sc->bce_tx_quick_cons_trip_int, sc->bce_tx_quick_cons_trip, sc->bce_tx_ticks_int, sc->bce_tx_ticks); } DBEXIT(BCE_VERBOSE_LOAD); } /****************************************************************************/ /* PCI Capabilities Probe Function. */ /* */ /* Walks the PCI capabiites list for the device to find what features are */ /* supported. */ /* */ /* Returns: */ /* None. */ /****************************************************************************/ static void bce_probe_pci_caps(device_t dev, struct bce_softc *sc) { u32 reg; DBENTER(BCE_VERBOSE_LOAD); /* Check if PCI-X capability is enabled. */ if (pci_find_cap(dev, PCIY_PCIX, ®) == 0) { if (reg != 0) sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG; } /* Check if PCIe capability is enabled. 
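 * When present, the link status word at capability offset 0x12 is decoded
 * into link_speed and link_width for the adapter info banner.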
*/ if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { if (reg != 0) { u16 link_status = pci_read_config(dev, reg + 0x12, 2); DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = " "0x%08X\n", link_status); sc->link_speed = link_status & 0xf; sc->link_width = (link_status >> 4) & 0x3f; sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG; sc->bce_flags |= BCE_PCIE_FLAG; } } /* Check if MSI capability is enabled. */ if (pci_find_cap(dev, PCIY_MSI, ®) == 0) { if (reg != 0) sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG; } /* Check if MSI-X capability is enabled. */ if (pci_find_cap(dev, PCIY_MSIX, ®) == 0) { if (reg != 0) sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG; } DBEXIT(BCE_VERBOSE_LOAD); } /****************************************************************************/ /* Load and validate user tunable settings. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_set_tunables(struct bce_softc *sc) { /* Set sysctl values for RX page count. */ switch (bce_rx_pages) { case 1: /* fall-through */ case 2: /* fall-through */ case 4: /* fall-through */ case 8: sc->rx_pages = bce_rx_pages; break; default: sc->rx_pages = DEFAULT_RX_PAGES; BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_pages! Setting default of %d.\n", __FILE__, __LINE__, bce_rx_pages, DEFAULT_RX_PAGES); } /* ToDo: Consider allowing user setting for pg_pages. */ sc->pg_pages = min((sc->rx_pages * 4), MAX_PG_PAGES); /* Set sysctl values for TX page count. */ switch (bce_tx_pages) { case 1: /* fall-through */ case 2: /* fall-through */ case 4: /* fall-through */ case 8: sc->tx_pages = bce_tx_pages; break; default: sc->tx_pages = DEFAULT_TX_PAGES; BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_pages! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_pages, DEFAULT_TX_PAGES); } /* * Validate the TX trip point (i.e. the number of * TX completions before a status block update is * generated and an interrupt is asserted. */ if (bce_tx_quick_cons_trip_int <= 100) { sc->bce_tx_quick_cons_trip_int = bce_tx_quick_cons_trip_int; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_quick_cons_trip_int! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_quick_cons_trip_int, DEFAULT_TX_QUICK_CONS_TRIP_INT); sc->bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT; } if (bce_tx_quick_cons_trip <= 100) { sc->bce_tx_quick_cons_trip = bce_tx_quick_cons_trip; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_quick_cons_trip! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_quick_cons_trip, DEFAULT_TX_QUICK_CONS_TRIP); sc->bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP; } /* * Validate the TX ticks count (i.e. the maximum amount * of time to wait after the last TX completion has * occurred before a status block update is generated * and an interrupt is asserted. */ if (bce_tx_ticks_int <= 100) { sc->bce_tx_ticks_int = bce_tx_ticks_int; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_ticks_int! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_ticks_int, DEFAULT_TX_TICKS_INT); sc->bce_tx_ticks_int = DEFAULT_TX_TICKS_INT; } if (bce_tx_ticks <= 100) { sc->bce_tx_ticks = bce_tx_ticks; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.tx_ticks! Setting default of %d.\n", __FILE__, __LINE__, bce_tx_ticks, DEFAULT_TX_TICKS); sc->bce_tx_ticks = DEFAULT_TX_TICKS; } /* * Validate the RX trip point (i.e. 
the number of * RX frames received before a status block update is * generated and an interrupt is asserted. */ if (bce_rx_quick_cons_trip_int <= 100) { sc->bce_rx_quick_cons_trip_int = bce_rx_quick_cons_trip_int; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_quick_cons_trip_int! Setting default of %d.\n", __FILE__, __LINE__, bce_rx_quick_cons_trip_int, DEFAULT_RX_QUICK_CONS_TRIP_INT); sc->bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT; } if (bce_rx_quick_cons_trip <= 100) { sc->bce_rx_quick_cons_trip = bce_rx_quick_cons_trip; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_quick_cons_trip! Setting default of %d.\n", __FILE__, __LINE__, bce_rx_quick_cons_trip, DEFAULT_RX_QUICK_CONS_TRIP); sc->bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP; } /* * Validate the RX ticks count (i.e. the maximum amount * of time to wait after the last RX frame has been * received before a status block update is generated * and an interrupt is asserted. */ if (bce_rx_ticks_int <= 100) { sc->bce_rx_ticks_int = bce_rx_ticks_int; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_ticks_int! Setting default of %d.\n", __FILE__, __LINE__, bce_rx_ticks_int, DEFAULT_RX_TICKS_INT); sc->bce_rx_ticks_int = DEFAULT_RX_TICKS_INT; } if (bce_rx_ticks <= 100) { sc->bce_rx_ticks = bce_rx_ticks; } else { BCE_PRINTF("%s(%d): Illegal value (%d) specified for " "hw.bce.rx_ticks! Setting default of %d.\n", __FILE__, __LINE__, bce_rx_ticks, DEFAULT_RX_TICKS); sc->bce_rx_ticks = DEFAULT_RX_TICKS; } /* Disabling both RX ticks and RX trips will prevent interrupts. */ if ((bce_rx_quick_cons_trip == 0) && (bce_rx_ticks == 0)) { BCE_PRINTF("%s(%d): Cannot set both hw.bce.rx_ticks and " "hw.bce.rx_quick_cons_trip to 0. Setting default values.\n", __FILE__, __LINE__); sc->bce_rx_ticks = DEFAULT_RX_TICKS; sc->bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP; } /* Disabling both TX ticks and TX trips will prevent interrupts. */ if ((bce_tx_quick_cons_trip == 0) && (bce_tx_ticks == 0)) { BCE_PRINTF("%s(%d): Cannot set both hw.bce.tx_ticks and " "hw.bce.tx_quick_cons_trip to 0. Setting default values.\n", __FILE__, __LINE__); sc->bce_tx_ticks = DEFAULT_TX_TICKS; sc->bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP; } } /****************************************************************************/ /* Device attach function. */ /* */ /* Allocates device resources, performs secondary chip identification, */ /* resets and initializes the hardware, and initializes driver instance */ /* variables. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_attach(device_t dev) { struct bce_softc *sc; if_t ifp; u32 val; int count, error, rc = 0, rid; sc = device_get_softc(dev); sc->bce_dev = dev; DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); sc->bce_unit = device_get_unit(dev); /* Set initial device and PHY flags */ sc->bce_flags = 0; sc->bce_phy_flags = 0; bce_set_tunables(sc); pci_enable_busmaster(dev); /* Allocate PCI memory resources. */ rid = PCIR_BAR(0); sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bce_res_mem == NULL) { BCE_PRINTF("%s(%d): PCI memory allocation failed\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Get various resource handles. 
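 * The bus tag/handle pair taken from the BAR(0) mapping is what the
 * REG_RD()/REG_WR() register access macros operate on.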
*/ sc->bce_btag = rman_get_bustag(sc->bce_res_mem); sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem); sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem); bce_probe_pci_caps(dev, sc); rid = 1; count = 0; #if 0 /* Try allocating MSI-X interrupts. */ if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) && (bce_msi_enable >= 2) && ((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE)) != NULL)) { msi_needed = count = 1; if (((error = pci_alloc_msix(dev, &count)) != 0) || (count != msi_needed)) { BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d," "Received = %d, error = %d\n", __FILE__, __LINE__, msi_needed, count, error); count = 0; pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->bce_res_irq); sc->bce_res_irq = NULL; } else { DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n", __FUNCTION__); sc->bce_flags |= BCE_USING_MSIX_FLAG; } } #endif /* Try allocating a MSI interrupt. */ if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) && (bce_msi_enable >= 1) && (count == 0)) { count = 1; if ((error = pci_alloc_msi(dev, &count)) != 0) { BCE_PRINTF("%s(%d): MSI allocation failed! " "error = %d\n", __FILE__, __LINE__, error); count = 0; pci_release_msi(dev); } else { DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI " "interrupt.\n", __FUNCTION__); sc->bce_flags |= BCE_USING_MSI_FLAG; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG; rid = 1; } } /* Try allocating a legacy interrupt. */ if (count == 0) { DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n", __FUNCTION__); rid = 0; } sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (count != 0 ? 0 : RF_SHAREABLE)); /* Report any IRQ allocation errors. */ if (sc->bce_res_irq == NULL) { BCE_PRINTF("%s(%d): PCI map interrupt failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Initialize mutex for the current device instance. */ BCE_LOCK_INIT(sc, device_get_nameunit(dev)); /* * Configure byte swap and enable indirect register access. * Rely on CPU to do target byte swapping on big endian systems. * Access to registers outside of PCI configurtion space are not * valid until this is done. */ pci_write_config(dev, BCE_PCICFG_MISC_CONFIG, BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4); /* Save ASIC revsion info. */ sc->bce_chipid = REG_RD(sc, BCE_MISC_ID); /* Weed out any non-production controller revisions. */ switch(BCE_CHIP_ID(sc)) { case BCE_CHIP_ID_5706_A0: case BCE_CHIP_ID_5706_A1: case BCE_CHIP_ID_5708_A0: case BCE_CHIP_ID_5708_B0: case BCE_CHIP_ID_5709_A0: case BCE_CHIP_ID_5709_B0: case BCE_CHIP_ID_5709_B1: case BCE_CHIP_ID_5709_B2: BCE_PRINTF("%s(%d): Unsupported controller " "revision (%c%d)!\n", __FILE__, __LINE__, (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), (pci_read_config(dev, PCIR_REVID, 4) & 0xf)); rc = ENODEV; goto bce_attach_fail; } /* * The embedded PCIe to PCI-X bridge (EPB) * in the 5708 cannot address memory above * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR; else sc->max_bus_addr = BUS_SPACE_MAXADDR; /* * Find the base address for shared memory access. * Newer versions of bootcode use a signature and offset * while older versions use a fixed address. */ val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE); if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG) /* Multi-port devices use different offsets in shared memory. 
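 * Each PCI function reads its own base from BCE_SHM_HDR_ADDR_0 plus
 * four bytes per function number.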
*/ sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2)); else sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE; DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n", __FUNCTION__, sc->bce_shmem_base); /* Fetch the bootcode revision. */ val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV); for (int i = 0, j = 0; i < 3; i++) { u8 num; num = (u8) (val >> (24 - (i * 8))); for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { if (num >= k || !skip0 || k == 1) { sc->bce_bc_ver[j++] = (num / k) + '0'; skip0 = 0; } } if (i != 2) sc->bce_bc_ver[j++] = '.'; } /* Check if any management firwmare is enabled. */ val = bce_shmem_rd(sc, BCE_PORT_FEATURE); if (val & BCE_PORT_FEATURE_ASF_ENABLED) { sc->bce_flags |= BCE_MFW_ENABLE_FLAG; /* Allow time for firmware to enter the running state. */ for (int i = 0; i < 30; i++) { val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); if (val & BCE_CONDITION_MFW_RUN_MASK) break; DELAY(10000); } /* Check if management firmware is running. */ val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); val &= BCE_CONDITION_MFW_RUN_MASK; if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) && (val != BCE_CONDITION_MFW_RUN_NONE)) { u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR); int i = 0; /* Read the management firmware version string. */ for (int j = 0; j < 3; j++) { val = bce_reg_rd_ind(sc, addr + j * 4); val = bswap32(val); memcpy(&sc->bce_mfw_ver[i], &val, 4); i += 4; } } else { /* May cause firmware synchronization timeouts. */ BCE_PRINTF("%s(%d): Management firmware enabled " "but not running!\n", __FILE__, __LINE__); strcpy(sc->bce_mfw_ver, "NOT RUNNING!"); /* ToDo: Any action the driver should take? */ } } /* Get PCI bus information (speed and type). */ val = REG_RD(sc, BCE_PCICFG_MISC_STATUS); if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) { u32 clkreg; sc->bce_flags |= BCE_PCIX_FLAG; clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS); clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; switch (clkreg) { case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: sc->bus_speed_mhz = 133; break; case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: sc->bus_speed_mhz = 100; break; case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: sc->bus_speed_mhz = 66; break; case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: sc->bus_speed_mhz = 50; break; case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: sc->bus_speed_mhz = 33; break; } } else { if (val & BCE_PCICFG_MISC_STATUS_M66EN) sc->bus_speed_mhz = 66; else sc->bus_speed_mhz = 33; } if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET) sc->bce_flags |= BCE_PCI_32BIT_FLAG; /* Find the media type for the adapter. */ bce_get_media(sc); /* Reset controller and announce to bootcode that driver is present. */ if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) { BCE_PRINTF("%s(%d): Controller reset failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Initialize the controller. */ if (bce_chipinit(sc)) { BCE_PRINTF("%s(%d): Controller initialization failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Perform NVRAM test. */ if (bce_nvram_test(sc)) { BCE_PRINTF("%s(%d): NVRAM test failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Fetch the permanent Ethernet MAC address. 
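 * bce_get_mac_addr() reads the permanent address from the bootcode's
 * shared memory area into sc->eaddr, which ether_ifattach() uses below.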
*/ bce_get_mac_addr(sc); /* Update statistics once every second. */ sc->bce_stats_ticks = 1000000 & 0xffff00; /* Store data needed by PHY driver for backplane applications */ sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); sc->bce_port_hw_cfg = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG); /* Allocate DMA memory resources. */ if (bce_dma_alloc(dev)) { BCE_PRINTF("%s(%d): DMA resource allocation failed!\n", __FILE__, __LINE__); rc = ENXIO; goto bce_attach_fail; } /* Allocate an ifnet structure. */ ifp = sc->bce_ifp = if_alloc(IFT_ETHER); /* Initialize the ifnet interface. */ if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setioctlfn(ifp, bce_ioctl); if_setstartfn(ifp, bce_start); if_setgetcounterfn(ifp, bce_get_counter); if_setinitfn(ifp, bce_init); if_setmtu(ifp, ETHERMTU); if (bce_tso_enable) { if_sethwassist(ifp, BCE_IF_HWASSIST | CSUM_TSO); if_setcapabilities(ifp, BCE_IF_CAPABILITIES | IFCAP_TSO4 | IFCAP_VLAN_HWTSO); } else { if_sethwassist(ifp, BCE_IF_HWASSIST); if_setcapabilities(ifp, BCE_IF_CAPABILITIES); } if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0); if_setcapenable(ifp, if_getcapabilities(ifp)); /* * Assume standard mbuf sizes for buffer allocation. * This may change later if the MTU size is set to * something other than 1500. */ bce_get_rx_buffer_sizes(sc, (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN)); /* Recalculate our buffer allocation sizes. */ if_setsendqlen(ifp, USABLE_TX_BD_ALLOC); if_setsendqready(ifp); if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) if_setbaudrate(ifp, IF_Mbps(2500ULL)); else if_setbaudrate(ifp, IF_Mbps(1000)); /* Handle any special PHY initialization for SerDes PHYs. */ bce_init_media(sc); if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { ifmedia_init(&sc->bce_ifmedia, IFM_IMASK, bce_ifmedia_upd, bce_ifmedia_sts); /* * We can't manually override remote PHY's link and assume * PHY port configuration(Fiber or TP) is not changed after * device attach. This may not be correct though. */ if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) != 0) { if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) { ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_2500_SX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_2500_SX | IFM_FDX, 0, NULL); } ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); } else { ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); } ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO); sc->bce_ifmedia.ifm_media = sc->bce_ifmedia.ifm_cur->ifm_media; } else { /* MII child bus by attaching the PHY. */ rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd, bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (rc != 0) { BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__, __LINE__); goto bce_attach_fail; } } /* Attach to the Ethernet interface list. 
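 * The tick and pulse callouts are initialized with the driver mutex, so
 * their handlers run with BCE_LOCK held; the IRQ is hooked up last.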
*/ ether_ifattach(ifp, sc->eaddr); callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0); callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0); /* Hookup IRQ last. */ rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, bce_intr, sc, &sc->bce_intrhand); if (rc) { BCE_PRINTF("%s(%d): Failed to setup IRQ!\n", __FILE__, __LINE__); bce_detach(dev); goto bce_attach_exit; } /* * At this point we've acquired all the resources * we need to run so there's no turning back, we're * cleared for launch. */ /* Print some important debugging info. */ DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc)); /* Add the supported sysctls to the kernel. */ bce_add_sysctls(sc); BCE_LOCK(sc); /* * The chip reset earlier notified the bootcode that * a driver is present. We now need to start our pulse * routine so that the bootcode is reminded that we're * still running. */ bce_pulse(sc); bce_mgmt_init_locked(sc); BCE_UNLOCK(sc); /* Finally, print some useful adapter info */ bce_print_adapter_info(sc); DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n", __FUNCTION__, sc); goto bce_attach_exit; bce_attach_fail: bce_release_resources(sc); bce_attach_exit: DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); return(rc); } /****************************************************************************/ /* Device detach function. */ /* */ /* Stops the controller, resets the controller, and releases resources. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_detach(device_t dev) { struct bce_softc *sc = device_get_softc(dev); if_t ifp; u32 msg; DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET); ifp = sc->bce_ifp; /* Stop and reset the controller. */ BCE_LOCK(sc); /* Stop the pulse so the bootcode can go to driver absent state. */ callout_stop(&sc->bce_pulse_callout); bce_stop(sc); if (sc->bce_flags & BCE_NO_WOL_FLAG) msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; else msg = BCE_DRV_MSG_CODE_UNLOAD; bce_reset(sc, msg); BCE_UNLOCK(sc); ether_ifdetach(ifp); /* If we have a child device on the MII bus remove it too. */ if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) ifmedia_removeall(&sc->bce_ifmedia); else { bus_generic_detach(dev); - device_delete_child(dev, sc->bce_miibus); } /* Release all remaining resources. */ bce_release_resources(sc); DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET); return(0); } /****************************************************************************/ /* Device shutdown function. */ /* */ /* Stops and resets the controller. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_shutdown(device_t dev) { struct bce_softc *sc = device_get_softc(dev); u32 msg; DBENTER(BCE_VERBOSE); BCE_LOCK(sc); bce_stop(sc); if (sc->bce_flags & BCE_NO_WOL_FLAG) msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN; else msg = BCE_DRV_MSG_CODE_UNLOAD; bce_reset(sc, msg); BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE); return (0); } #ifdef BCE_DEBUG /****************************************************************************/ /* Register read. */ /* */ /* Returns: */ /* The value of the register. 
*/ /****************************************************************************/ static u32 bce_reg_rd(struct bce_softc *sc, u32 offset) { u32 val = REG_RD(sc, offset); DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", __FUNCTION__, offset, val); return val; } /****************************************************************************/ /* Register write (16 bit). */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val) { DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n", __FUNCTION__, offset, val); REG_WR16(sc, offset, val); } /****************************************************************************/ /* Register write. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val) { DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", __FUNCTION__, offset, val); REG_WR(sc, offset, val); } #endif /****************************************************************************/ /* Indirect register read. */ /* */ /* Reads NetXtreme II registers using an index/data register pair in PCI */ /* configuration space. Using this mechanism avoids issues with posted */ /* reads but is much slower than memory-mapped I/O. */ /* */ /* Returns: */ /* The value of the register. */ /****************************************************************************/ static u32 bce_reg_rd_ind(struct bce_softc *sc, u32 offset) { device_t dev; dev = sc->bce_dev; pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); #ifdef BCE_DEBUG { u32 val; val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", __FUNCTION__, offset, val); return val; } #else return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4); #endif } /****************************************************************************/ /* Indirect register write. */ /* */ /* Writes NetXtreme II registers using an index/data register pair in PCI */ /* configuration space. Using this mechanism avoids issues with posted */ /* writes but is muchh slower than memory-mapped I/O. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val) { device_t dev; dev = sc->bce_dev; DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n", __FUNCTION__, offset, val); pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4); pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4); } /****************************************************************************/ /* Shared memory write. */ /* */ /* Writes NetXtreme II shared memory region. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val) { DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to " "0x%08X\n", __FUNCTION__, val, offset); bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val); } /****************************************************************************/ /* Shared memory read. */ /* */ /* Reads NetXtreme II shared memory region. */ /* */ /* Returns: */ /* The 32 bit value read. 
*/ /****************************************************************************/ static u32 bce_shmem_rd(struct bce_softc *sc, u32 offset) { u32 val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset); DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from " "0x%08X\n", __FUNCTION__, val, offset); return val; } #ifdef BCE_DEBUG /****************************************************************************/ /* Context memory read. */ /* */ /* The NetXtreme II controller uses context memory to track connection */ /* information for L2 and higher network protocols. */ /* */ /* Returns: */ /* The requested 32 bit value of context memory. */ /****************************************************************************/ static u32 bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset) { u32 idx, offset, retry_cnt = 5, val; DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID " "address: 0x%08X.\n", __FUNCTION__, cid_addr)); offset = ctx_offset + cid_addr; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ)); for (idx = 0; idx < retry_cnt; idx++) { val = REG_RD(sc, BCE_CTX_CTX_CTRL); if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0) break; DELAY(5); } if (val & BCE_CTX_CTX_CTRL_READ_REQ) BCE_PRINTF("%s(%d); Unable to read CTX memory: " "cid_addr = 0x%08X, offset = 0x%08X!\n", __FILE__, __LINE__, cid_addr, ctx_offset); val = REG_RD(sc, BCE_CTX_CTX_DATA); } else { REG_WR(sc, BCE_CTX_DATA_ADR, offset); val = REG_RD(sc, BCE_CTX_DATA); } DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, " "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val); return(val); } #endif /****************************************************************************/ /* Context memory write. */ /* */ /* The NetXtreme II controller uses context memory to track connection */ /* information for L2 and higher network protocols. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val) { u32 idx, offset = ctx_offset + cid_addr; u32 val, retry_cnt = 5; DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, " "val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val); DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n", __FUNCTION__, cid_addr)); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val); REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ)); for (idx = 0; idx < retry_cnt; idx++) { val = REG_RD(sc, BCE_CTX_CTX_CTRL); if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0) break; DELAY(5); } if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) BCE_PRINTF("%s(%d); Unable to write CTX memory: " "cid_addr = 0x%08X, offset = 0x%08X!\n", __FILE__, __LINE__, cid_addr, ctx_offset); } else { REG_WR(sc, BCE_CTX_DATA_ADR, offset); REG_WR(sc, BCE_CTX_DATA, ctx_val); } } /****************************************************************************/ /* PHY register read. */ /* */ /* Implements register reads on the MII bus. */ /* */ /* Returns: */ /* The value of the register. 
*/ /****************************************************************************/ static int bce_miibus_read_reg(device_t dev, int phy, int reg) { struct bce_softc *sc; u32 val; int i; sc = device_get_softc(dev); /* * The 5709S PHY is an IEEE Clause 45 PHY * with special mappings to work with IEEE * Clause 22 register accesses. */ if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) { if (reg >= MII_BMCR && reg <= MII_ANLPRNP) reg += 0x10; } if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { val = REG_RD(sc, BCE_EMAC_MDIO_MODE); val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; REG_WR(sc, BCE_EMAC_MDIO_MODE, val); REG_RD(sc, BCE_EMAC_MDIO_MODE); DELAY(40); } val = BCE_MIPHY(phy) | BCE_MIREG(reg) | BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT | BCE_EMAC_MDIO_COMM_START_BUSY; REG_WR(sc, BCE_EMAC_MDIO_COMM, val); for (i = 0; i < BCE_PHY_TIMEOUT; i++) { DELAY(10); val = REG_RD(sc, BCE_EMAC_MDIO_COMM); if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) { DELAY(5); val = REG_RD(sc, BCE_EMAC_MDIO_COMM); val &= BCE_EMAC_MDIO_COMM_DATA; break; } } if (val & BCE_EMAC_MDIO_COMM_START_BUSY) { BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, " "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg); val = 0x0; } else { val = REG_RD(sc, BCE_EMAC_MDIO_COMM); } if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { val = REG_RD(sc, BCE_EMAC_MDIO_MODE); val |= BCE_EMAC_MDIO_MODE_AUTO_POLL; REG_WR(sc, BCE_EMAC_MDIO_MODE, val); REG_RD(sc, BCE_EMAC_MDIO_MODE); DELAY(40); } DB_PRINT_PHY_REG(reg, val); return (val & 0xffff); } /****************************************************************************/ /* PHY register write. */ /* */ /* Implements register writes on the MII bus. */ /* */ /* Returns: */ /* The value of the register. */ /****************************************************************************/ static int bce_miibus_write_reg(device_t dev, int phy, int reg, int val) { struct bce_softc *sc; u32 val1; int i; sc = device_get_softc(dev); DB_PRINT_PHY_REG(reg, val); /* * The 5709S PHY is an IEEE Clause 45 PHY * with special mappings to work with IEEE * Clause 22 register accesses. */ if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) { if (reg >= MII_BMCR && reg <= MII_ANLPRNP) reg += 0x10; } if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL; REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); REG_RD(sc, BCE_EMAC_MDIO_MODE); DELAY(40); } val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val | BCE_EMAC_MDIO_COMM_COMMAND_WRITE | BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT; REG_WR(sc, BCE_EMAC_MDIO_COMM, val1); for (i = 0; i < BCE_PHY_TIMEOUT; i++) { DELAY(10); val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM); if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) { DELAY(5); break; } } if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY) BCE_PRINTF("%s(%d): PHY write timeout!\n", __FILE__, __LINE__); if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) { val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE); val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL; REG_WR(sc, BCE_EMAC_MDIO_MODE, val1); REG_RD(sc, BCE_EMAC_MDIO_MODE); DELAY(40); } return 0; } /****************************************************************************/ /* MII bus status change. */ /* */ /* Called by the MII bus driver when the PHY establishes link to set the */ /* MAC interface registers. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_miibus_statchg(device_t dev) { struct bce_softc *sc; struct mii_data *mii; struct ifmediareq ifmr; int media_active, media_status, val; sc = device_get_softc(dev); DBENTER(BCE_VERBOSE_PHY); if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { bzero(&ifmr, sizeof(ifmr)); bce_ifmedia_sts_rphy(sc, &ifmr); media_active = ifmr.ifm_active; media_status = ifmr.ifm_status; } else { mii = device_get_softc(sc->bce_miibus); media_active = mii->mii_media_active; media_status = mii->mii_media_status; } /* Ignore invalid media status. */ if ((media_status & (IFM_ACTIVE | IFM_AVALID)) != (IFM_ACTIVE | IFM_AVALID)) goto bce_miibus_statchg_exit; val = REG_RD(sc, BCE_EMAC_MODE); val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX | BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK | BCE_EMAC_MODE_25G); /* Set MII or GMII interface based on the PHY speed. */ switch (IFM_SUBTYPE(media_active)) { case IFM_10_T: if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { DBPRINT(sc, BCE_INFO_PHY, "Enabling 10Mb interface.\n"); val |= BCE_EMAC_MODE_PORT_MII_10; break; } /* fall-through */ case IFM_100_TX: DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n"); val |= BCE_EMAC_MODE_PORT_MII; break; case IFM_2500_SX: DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n"); val |= BCE_EMAC_MODE_25G; /* fall-through */ case IFM_1000_T: case IFM_1000_SX: DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n"); val |= BCE_EMAC_MODE_PORT_GMII; break; default: DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling " "default GMII interface.\n"); val |= BCE_EMAC_MODE_PORT_GMII; } /* Set half or full duplex based on PHY settings. */ if ((IFM_OPTIONS(media_active) & IFM_FDX) == 0) { DBPRINT(sc, BCE_INFO_PHY, "Setting Half-Duplex interface.\n"); val |= BCE_EMAC_MODE_HALF_DUPLEX; } else DBPRINT(sc, BCE_INFO_PHY, "Setting Full-Duplex interface.\n"); REG_WR(sc, BCE_EMAC_MODE, val); if ((IFM_OPTIONS(media_active) & IFM_ETH_RXPAUSE) != 0) { DBPRINT(sc, BCE_INFO_PHY, "%s(): Enabling RX flow control.\n", __FUNCTION__); BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN); sc->bce_flags |= BCE_USING_RX_FLOW_CONTROL; } else { DBPRINT(sc, BCE_INFO_PHY, "%s(): Disabling RX flow control.\n", __FUNCTION__); BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN); sc->bce_flags &= ~BCE_USING_RX_FLOW_CONTROL; } if ((IFM_OPTIONS(media_active) & IFM_ETH_TXPAUSE) != 0) { DBPRINT(sc, BCE_INFO_PHY, "%s(): Enabling TX flow control.\n", __FUNCTION__); BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN); sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL; } else { DBPRINT(sc, BCE_INFO_PHY, "%s(): Disabling TX flow control.\n", __FUNCTION__); BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN); sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL; } /* ToDo: Update watermarks in bce_init_rx_context(). */ bce_miibus_statchg_exit: DBEXIT(BCE_VERBOSE_PHY); } /****************************************************************************/ /* Acquire NVRAM lock. */ /* */ /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */ /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ /* for use by the driver. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_acquire_nvram_lock(struct bce_softc *sc) { u32 val; int j, rc = 0; DBENTER(BCE_VERBOSE_NVRAM); /* Request access to the flash interface. 
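The driver owns software arbitration lock 2: it sets the ARB_REQ_SET2 bit and then polls BCE_NVM_SW_ARB until the ARB_ARB2 grant bit is observed, giving up with EBUSY after NVRAM_TIMEOUT_COUNT polls of 5us each.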
*/ REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2); for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { val = REG_RD(sc, BCE_NVM_SW_ARB); if (val & BCE_NVM_SW_ARB_ARB_ARB2) break; DELAY(5); } if (j >= NVRAM_TIMEOUT_COUNT) { DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n"); rc = EBUSY; } DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } /****************************************************************************/ /* Release NVRAM lock. */ /* */ /* When the caller is finished accessing NVRAM the lock must be released. */ /* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */ /* for use by the driver. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_release_nvram_lock(struct bce_softc *sc) { u32 val; int j, rc = 0; DBENTER(BCE_VERBOSE_NVRAM); /* * Relinquish nvram interface. */ REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2); for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { val = REG_RD(sc, BCE_NVM_SW_ARB); if (!(val & BCE_NVM_SW_ARB_ARB_ARB2)) break; DELAY(5); } if (j >= NVRAM_TIMEOUT_COUNT) { DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n"); rc = EBUSY; } DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Enable NVRAM write access. */ /* */ /* Before writing to NVRAM the caller must enable NVRAM writes. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_enable_nvram_write(struct bce_softc *sc) { u32 val; int rc = 0; DBENTER(BCE_VERBOSE_NVRAM); val = REG_RD(sc, BCE_MISC_CFG); REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI); if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { int j; REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT); for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { DELAY(5); val = REG_RD(sc, BCE_NVM_COMMAND); if (val & BCE_NVM_COMMAND_DONE) break; } if (j >= NVRAM_TIMEOUT_COUNT) { DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n"); rc = EBUSY; } } DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } /****************************************************************************/ /* Disable NVRAM write access. */ /* */ /* When the caller is finished writing to NVRAM write access must be */ /* disabled. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_disable_nvram_write(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_NVRAM); val = REG_RD(sc, BCE_MISC_CFG); REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN); DBEXIT(BCE_VERBOSE_NVRAM); } #endif /****************************************************************************/ /* Enable NVRAM access. */ /* */ /* Before accessing NVRAM for read or write operations the caller must */ /* first enable NVRAM access. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_enable_nvram_access(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_NVRAM); val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); /* Enable both bits, even on read.
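That is, BCE_NVM_ACCESS_ENABLE_EN and BCE_NVM_ACCESS_ENABLE_WR_EN are both set here regardless of the intended direction; bce_disable_nvram_access() below clears both bits again once the caller is finished.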
*/ REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN); DBEXIT(BCE_VERBOSE_NVRAM); } /****************************************************************************/ /* Disable NVRAM access. */ /* */ /* When the caller is finished accessing NVRAM access must be disabled. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_disable_nvram_access(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_NVRAM); val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE); /* Disable both bits, even after read. */ REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN)); DBEXIT(BCE_VERBOSE_NVRAM); } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Erase NVRAM page before writing. */ /* */ /* Non-buffered flash parts require that a page be erased before it is */ /* written. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_nvram_erase_page(struct bce_softc *sc, u32 offset) { u32 cmd; int j, rc = 0; DBENTER(BCE_VERBOSE_NVRAM); /* Buffered flash doesn't require an erase. */ if (sc->bce_flash_info->flags & BCE_NV_BUFFERED) goto bce_nvram_erase_page_exit; /* Build an erase command. */ cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR | BCE_NVM_COMMAND_DOIT; /* * Clear the DONE bit separately, set the NVRAM address to erase, * and issue the erase command. */ REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); REG_WR(sc, BCE_NVM_COMMAND, cmd); /* Wait for completion. */ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { u32 val; DELAY(5); val = REG_RD(sc, BCE_NVM_COMMAND); if (val & BCE_NVM_COMMAND_DONE) break; } if (j >= NVRAM_TIMEOUT_COUNT) { DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n"); rc = EBUSY; } bce_nvram_erase_page_exit: DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } #endif /* BCE_NVRAM_WRITE_SUPPORT */ /****************************************************************************/ /* Read a dword (32 bits) from NVRAM. */ /* */ /* Read a 32 bit word from NVRAM. The caller is assumed to have already */ /* obtained the NVRAM lock and enabled the controller for NVRAM access. */ /* */ /* Returns: */ /* 0 on success and the 32 bit value read, positive value on failure. */ /****************************************************************************/ static int bce_nvram_read_dword(struct bce_softc *sc, u32 offset, u8 *ret_val, u32 cmd_flags) { u32 cmd; int i, rc = 0; DBENTER(BCE_EXTREME_NVRAM); /* Build the command word. */ cmd = BCE_NVM_COMMAND_DOIT | cmd_flags; /* Calculate the offset for buffered flash if translation is used. */ if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) { offset = ((offset / sc->bce_flash_info->page_size) << sc->bce_flash_info->page_bits) + (offset % sc->bce_flash_info->page_size); } /* * Clear the DONE bit separately, set the address to read, * and issue the read. */ REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); REG_WR(sc, BCE_NVM_COMMAND, cmd); /* Wait for completion. */ for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) { u32 val; DELAY(5); val = REG_RD(sc, BCE_NVM_COMMAND); if (val & BCE_NVM_COMMAND_DONE) { val = REG_RD(sc, BCE_NVM_READ); val = bce_be32toh(val); memcpy(ret_val, &val, 4); break; } } /* Check for errors. 
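On success the dword has already been converted from the controller's big-endian representation and copied into ret_val inside the polling loop above; if the DONE bit never asserted within NVRAM_TIMEOUT_COUNT polls the read is reported as EBUSY.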
*/ if (i >= NVRAM_TIMEOUT_COUNT) { BCE_PRINTF("%s(%d): Timeout error reading NVRAM at " "offset 0x%08X!\n", __FILE__, __LINE__, offset); rc = EBUSY; } DBEXIT(BCE_EXTREME_NVRAM); return(rc); } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Write a dword (32 bits) to NVRAM. */ /* */ /* Write a 32 bit word to NVRAM. The caller is assumed to have already */ /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */ /* enabled NVRAM write access. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val, u32 cmd_flags) { u32 cmd, val32; int j, rc = 0; DBENTER(BCE_VERBOSE_NVRAM); /* Build the command word. */ cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags; /* Calculate the offset for buffered flash if translation is used. */ if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) { offset = ((offset / sc->bce_flash_info->page_size) << sc->bce_flash_info->page_bits) + (offset % sc->bce_flash_info->page_size); } /* * Clear the DONE bit separately, convert NVRAM data to big-endian, * set the NVRAM address to write, and issue the write command */ REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE); memcpy(&val32, val, 4); val32 = htobe32(val32); REG_WR(sc, BCE_NVM_WRITE, val32); REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE); REG_WR(sc, BCE_NVM_COMMAND, cmd); /* Wait for completion. */ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { DELAY(5); if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE) break; } if (j >= NVRAM_TIMEOUT_COUNT) { BCE_PRINTF("%s(%d): Timeout error writing NVRAM at " "offset 0x%08X\n", __FILE__, __LINE__, offset); rc = EBUSY; } DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } #endif /* BCE_NVRAM_WRITE_SUPPORT */ /****************************************************************************/ /* Initialize NVRAM access. */ /* */ /* Identify the NVRAM device in use and prepare the NVRAM interface to */ /* access that device. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_init_nvram(struct bce_softc *sc) { u32 val; int j, entry_count, rc = 0; const struct flash_spec *flash; DBENTER(BCE_VERBOSE_NVRAM); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { sc->bce_flash_info = &flash_5709; goto bce_init_nvram_get_flash_size; } /* Determine the selected interface. */ val = REG_RD(sc, BCE_NVM_CFG1); entry_count = sizeof(flash_table) / sizeof(struct flash_spec); /* * Flash reconfiguration is required to support additional * NVRAM devices not directly supported in hardware. * Check if the flash interface was reconfigured * by the bootcode. */ if (val & 0x40000000) { /* Flash interface reconfigured by bootcode. */ DBPRINT(sc,BCE_INFO_LOAD, "bce_init_nvram(): Flash WAS reconfigured.\n"); for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) { if ((val & FLASH_BACKUP_STRAP_MASK) == (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { sc->bce_flash_info = flash; break; } } } else { /* Flash interface not yet reconfigured. */ u32 mask; DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n", __FUNCTION__); if (val & (1 << 23)) mask = FLASH_BACKUP_STRAP_MASK; else mask = FLASH_STRAP_MASK; /* Look for the matching NVRAM device configuration data. 
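Each flash_table entry carries a strapping signature; the entry whose strapping bits match BCE_NVM_CFG1 under the mask chosen above identifies the attached part, and its config1/config2/config3/write1 values are then programmed into the NVM configuration registers.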
*/ for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) { /* Check if the device matches any of the known devices. */ if ((val & mask) == (flash->strapping & mask)) { /* Found a device match. */ sc->bce_flash_info = flash; /* Request access to the flash interface. */ if ((rc = bce_acquire_nvram_lock(sc)) != 0) return rc; /* Reconfigure the flash interface. */ bce_enable_nvram_access(sc); REG_WR(sc, BCE_NVM_CFG1, flash->config1); REG_WR(sc, BCE_NVM_CFG2, flash->config2); REG_WR(sc, BCE_NVM_CFG3, flash->config3); REG_WR(sc, BCE_NVM_WRITE1, flash->write1); bce_disable_nvram_access(sc); bce_release_nvram_lock(sc); break; } } } /* Check if a matching device was found. */ if (j == entry_count) { sc->bce_flash_info = NULL; BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n", __FILE__, __LINE__); DBEXIT(BCE_VERBOSE_NVRAM); return (ENODEV); } bce_init_nvram_get_flash_size: /* Write the flash config data to the shared memory interface. */ val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2); val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK; if (val) sc->bce_flash_size = val; else sc->bce_flash_size = sc->bce_flash_info->total_size; DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n", __FUNCTION__, sc->bce_flash_info->name, sc->bce_flash_info->total_size); DBEXIT(BCE_VERBOSE_NVRAM); return rc; } /****************************************************************************/ /* Read an arbitrary range of data from NVRAM. */ /* */ /* Prepares the NVRAM interface for access and reads the requested data */ /* into the supplied buffer. */ /* */ /* Returns: */ /* 0 on success and the data read, positive value on failure. */ /****************************************************************************/ static int bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf, int buf_size) { int rc = 0; u32 cmd_flags, offset32, len32, extra; DBENTER(BCE_VERBOSE_NVRAM); if (buf_size == 0) goto bce_nvram_read_exit; /* Request access to the flash interface. */ if ((rc = bce_acquire_nvram_lock(sc)) != 0) goto bce_nvram_read_exit; /* Enable access to flash interface */ bce_enable_nvram_access(sc); len32 = buf_size; offset32 = offset; extra = 0; cmd_flags = 0; if (offset32 & 3) { u8 buf[4]; u32 pre_len; offset32 &= ~3; pre_len = 4 - (offset & 3); if (pre_len >= len32) { pre_len = len32; cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; } else { cmd_flags = BCE_NVM_COMMAND_FIRST; } rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); if (rc) return rc; memcpy(ret_buf, buf + (offset & 3), pre_len); offset32 += 4; ret_buf += pre_len; len32 -= pre_len; } if (len32 & 3) { extra = 4 - (len32 & 3); len32 = (len32 + 4) & ~3; } if (len32 == 4) { u8 buf[4]; if (cmd_flags) cmd_flags = BCE_NVM_COMMAND_LAST; else cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST; rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); memcpy(ret_buf, buf, 4 - extra); } else if (len32 > 0) { u8 buf[4]; /* Read the first word. */ if (cmd_flags) cmd_flags = 0; else cmd_flags = BCE_NVM_COMMAND_FIRST; rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags); /* Advance to the next dword. */ offset32 += 4; ret_buf += 4; len32 -= 4; while (len32 > 4 && rc == 0) { rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0); /* Advance to the next dword. 
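Only the first and last dwords of a multi-dword burst carry the BCE_NVM_COMMAND_FIRST and BCE_NVM_COMMAND_LAST flags; the intermediate reads issued by this loop pass a cmd_flags value of zero.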
*/ offset32 += 4; ret_buf += 4; len32 -= 4; } if (rc) goto bce_nvram_read_locked_exit; cmd_flags = BCE_NVM_COMMAND_LAST; rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags); memcpy(ret_buf, buf, 4 - extra); } bce_nvram_read_locked_exit: /* Disable access to flash interface and release the lock. */ bce_disable_nvram_access(sc); bce_release_nvram_lock(sc); bce_nvram_read_exit: DBEXIT(BCE_VERBOSE_NVRAM); return rc; } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Write an arbitrary range of data from NVRAM. */ /* */ /* Prepares the NVRAM interface for write access and writes the requested */ /* data from the supplied buffer. The caller is responsible for */ /* calculating any appropriate CRCs. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf, int buf_size) { u32 written, offset32, len32; u8 *buf, start[4], end[4]; int rc = 0; int align_start, align_end; DBENTER(BCE_VERBOSE_NVRAM); buf = data_buf; offset32 = offset; len32 = buf_size; align_start = align_end = 0; if ((align_start = (offset32 & 3))) { offset32 &= ~3; len32 += align_start; if ((rc = bce_nvram_read(sc, offset32, start, 4))) goto bce_nvram_write_exit; } if (len32 & 3) { if ((len32 > 4) || !align_start) { align_end = 4 - (len32 & 3); len32 += align_end; if ((rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4))) { goto bce_nvram_write_exit; } } } if (align_start || align_end) { buf = malloc(len32, M_DEVBUF, M_NOWAIT); if (buf == NULL) { rc = ENOMEM; goto bce_nvram_write_exit; } if (align_start) { memcpy(buf, start, 4); } if (align_end) { memcpy(buf + len32 - 4, end, 4); } memcpy(buf + align_start, data_buf, buf_size); } written = 0; while ((written < len32) && (rc == 0)) { u32 page_start, page_end, data_start, data_end; u32 addr, cmd_flags; int i; u8 flash_buffer[264]; /* Find the page_start addr */ page_start = offset32 + written; page_start -= (page_start % sc->bce_flash_info->page_size); /* Find the page_end addr */ page_end = page_start + sc->bce_flash_info->page_size; /* Find the data_start addr */ data_start = (written == 0) ? offset32 : page_start; /* Find the data_end addr */ data_end = (page_end > offset32 + len32) ? (offset32 + len32) : page_end; /* Request access to the flash interface. 
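The acquire/enable/erase/program/release sequence below is repeated once for every flash page touched by the request. For non-buffered parts the whole page is first read back into flash_buffer[] so that the bytes outside the data_start..data_end window can be rewritten after the page erase.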
*/ if ((rc = bce_acquire_nvram_lock(sc)) != 0) goto bce_nvram_write_exit; /* Enable access to flash interface */ bce_enable_nvram_access(sc); cmd_flags = BCE_NVM_COMMAND_FIRST; if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { int j; /* Read the whole page into the buffer * (non-buffer flash only) */ for (j = 0; j < sc->bce_flash_info->page_size; j += 4) { if (j == (sc->bce_flash_info->page_size - 4)) { cmd_flags |= BCE_NVM_COMMAND_LAST; } rc = bce_nvram_read_dword(sc, page_start + j, &flash_buffer[j], cmd_flags); if (rc) goto bce_nvram_write_locked_exit; cmd_flags = 0; } } /* Enable writes to flash interface (unlock write-protect) */ if ((rc = bce_enable_nvram_write(sc)) != 0) goto bce_nvram_write_locked_exit; /* Erase the page */ if ((rc = bce_nvram_erase_page(sc, page_start)) != 0) goto bce_nvram_write_locked_exit; /* Re-enable the write again for the actual write */ bce_enable_nvram_write(sc); /* Loop to write back the buffer data from page_start to * data_start */ i = 0; if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { for (addr = page_start; addr < data_start; addr += 4, i += 4) { rc = bce_nvram_write_dword(sc, addr, &flash_buffer[i], cmd_flags); if (rc != 0) goto bce_nvram_write_locked_exit; cmd_flags = 0; } } /* Loop to write the new data from data_start to data_end */ for (addr = data_start; addr < data_end; addr += 4, i++) { if ((addr == page_end - 4) || ((sc->bce_flash_info->flags & BCE_NV_BUFFERED) && (addr == data_end - 4))) { cmd_flags |= BCE_NVM_COMMAND_LAST; } rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags); if (rc != 0) goto bce_nvram_write_locked_exit; cmd_flags = 0; buf += 4; } /* Loop to write back the buffer data from data_end * to page_end */ if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) { for (addr = data_end; addr < page_end; addr += 4, i += 4) { if (addr == page_end-4) { cmd_flags = BCE_NVM_COMMAND_LAST; } rc = bce_nvram_write_dword(sc, addr, &flash_buffer[i], cmd_flags); if (rc != 0) goto bce_nvram_write_locked_exit; cmd_flags = 0; } } /* Disable writes to flash interface (lock write-protect) */ bce_disable_nvram_write(sc); /* Disable access to flash interface */ bce_disable_nvram_access(sc); bce_release_nvram_lock(sc); /* Increment written */ written += data_end - data_start; } goto bce_nvram_write_exit; bce_nvram_write_locked_exit: bce_disable_nvram_write(sc); bce_disable_nvram_access(sc); bce_release_nvram_lock(sc); bce_nvram_write_exit: if (align_start || align_end) free(buf, M_DEVBUF); DBEXIT(BCE_VERBOSE_NVRAM); return (rc); } #endif /* BCE_NVRAM_WRITE_SUPPORT */ /****************************************************************************/ /* Verifies that NVRAM is accessible and contains valid data. */ /* */ /* Reads the configuration data from NVRAM and verifies that the CRC is */ /* correct. */ /* */ /* Returns: */ /* 0 on success, positive value on failure. */ /****************************************************************************/ static int bce_nvram_test(struct bce_softc *sc) { u32 buf[BCE_NVRAM_SIZE / 4]; u8 *data = (u8 *) buf; int rc = 0; u32 magic, csum; DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); /* * Check that the device NVRAM is valid by reading * the magic value at offset 0. */ if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) { BCE_PRINTF("%s(%d): Unable to read NVRAM!\n", __FILE__, __LINE__); goto bce_nvram_test_exit; } /* * Verify that offset 0 of the NVRAM contains * a valid magic number. 
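(BCE_NVRAM_MAGIC, compared against the big-endian dword just read). The manufacturing and feature configuration regions that follow at offset 0x100 are then each checked by verifying that their little-endian CRC-32 leaves the expected residual BCE_CRC32_RESIDUAL.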
*/ magic = bce_be32toh(buf[0]); if (magic != BCE_NVRAM_MAGIC) { rc = ENODEV; BCE_PRINTF("%s(%d): Invalid NVRAM magic value! " "Expected: 0x%08X, Found: 0x%08X\n", __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic); goto bce_nvram_test_exit; } /* * Verify that the device NVRAM includes valid * configuration data. */ if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) { BCE_PRINTF("%s(%d): Unable to read manufacturing " "information from NVRAM!\n", __FILE__, __LINE__); goto bce_nvram_test_exit; } csum = ether_crc32_le(data, 0x100); if (csum != BCE_CRC32_RESIDUAL) { rc = ENODEV; BCE_PRINTF("%s(%d): Invalid manufacturing information " "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n", __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum); goto bce_nvram_test_exit; } csum = ether_crc32_le(data + 0x100, 0x100); if (csum != BCE_CRC32_RESIDUAL) { rc = ENODEV; BCE_PRINTF("%s(%d): Invalid feature configuration " "information NVRAM CRC! Expected: 0x%08X, " "Found: 0x%08X\n", __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum); } bce_nvram_test_exit: DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET); return rc; } /****************************************************************************/ /* Calculates the size of the buffers to allocate based on the MTU. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_get_rx_buffer_sizes(struct bce_softc *sc, int mtu) { DBENTER(BCE_VERBOSE_LOAD); /* Use a single allocation type when header splitting is enabled. */ if (bce_hdr_split == TRUE) { sc->rx_bd_mbuf_alloc_size = MHLEN; /* Make sure offset is 16 byte aligned for hardware. */ sc->rx_bd_mbuf_align_pad = roundup2(MSIZE - MHLEN, 16) - (MSIZE - MHLEN); sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - sc->rx_bd_mbuf_align_pad; } else { if ((mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN) > MCLBYTES) { /* Setup for jumbo RX buffer allocations. */ sc->rx_bd_mbuf_alloc_size = MJUM9BYTES; sc->rx_bd_mbuf_align_pad = roundup2(MJUM9BYTES, 16) - MJUM9BYTES; sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - sc->rx_bd_mbuf_align_pad; } else { /* Setup for standard RX buffer allocations. */ sc->rx_bd_mbuf_alloc_size = MCLBYTES; sc->rx_bd_mbuf_align_pad = roundup2(MCLBYTES, 16) - MCLBYTES; sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size - sc->rx_bd_mbuf_align_pad; } } // DBPRINT(sc, BCE_INFO_LOAD,
DBPRINT(sc, BCE_WARN, "%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, " "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__, sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len, sc->rx_bd_mbuf_align_pad); DBEXIT(BCE_VERBOSE_LOAD); } /****************************************************************************/ /* Identifies the current media type of the controller and sets the PHY */ /* address. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_get_media(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_PHY); /* Assume PHY address for copper controllers. */ sc->bce_phy_addr = 1; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { u32 val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL); u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID; u32 strap; /* * The BCM5709S is software configurable * for Copper or SerDes operation.
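If the bond ID read from BCE_MISC_DUAL_MEDIA_CTRL marks the part as bonded for copper or for dual media the media type is known immediately; otherwise the strap value (optionally overridden by software) together with the PCI function number is used to choose between copper and SerDes below.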
*/ if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) { DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded " "for copper.\n"); goto bce_get_media_exit; } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded " "for dual media.\n"); sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; goto bce_get_media_exit; } if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; else strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; if (pci_get_function(sc->bce_dev) == 0) { switch (strap) { case 0x4: case 0x5: case 0x6: DBPRINT(sc, BCE_INFO_LOAD, "BCM5709 s/w configured for SerDes.\n"); sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; break; default: DBPRINT(sc, BCE_INFO_LOAD, "BCM5709 s/w configured for Copper.\n"); break; } } else { switch (strap) { case 0x1: case 0x2: case 0x4: DBPRINT(sc, BCE_INFO_LOAD, "BCM5709 s/w configured for SerDes.\n"); sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; break; default: DBPRINT(sc, BCE_INFO_LOAD, "BCM5709 s/w configured for Copper.\n"); break; } } } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG; if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) { sc->bce_flags |= BCE_NO_WOL_FLAG; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG; if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) { /* 5708S/09S/16S use a separate PHY for SerDes. */ sc->bce_phy_addr = 2; val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG); if (val & BCE_SHARED_HW_CFG_PHY_2_5G) { sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG; DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb " "capable adapter\n"); } } } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) || (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG; bce_get_media_exit: DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY), "Using PHY address %d.\n", sc->bce_phy_addr); DBEXIT(BCE_VERBOSE_PHY); } /****************************************************************************/ /* Performs PHY initialization required before MII drivers access the */ /* device. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_init_media(struct bce_softc *sc) { if ((sc->bce_phy_flags & (BCE_PHY_IEEE_CLAUSE_45_FLAG | BCE_PHY_REMOTE_CAP_FLAG)) == BCE_PHY_IEEE_CLAUSE_45_FLAG) { /* * Configure 5709S/5716S PHYs to use traditional IEEE * Clause 22 method. Otherwise we have no way to attach * the PHY in mii(4) layer. PHY specific configuration * is done in mii layer. */ /* Select auto-negotiation MMD of the PHY. */ bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT); bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD); /* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */ bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0); } } /****************************************************************************/ /* Free any DMA memory owned by the driver. */ /* */ /* Scans through each data structure that requires DMA memory and frees */ /* the memory if allocated. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_dma_free(struct bce_softc *sc) { int i; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX); /* Free, unmap, and destroy the status block. 
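Each DMA resource below is torn down in three steps, mirroring its allocation: the map is unloaded, the memory is freed, and the tag is destroyed, with every pointer and bus address cleared along the way so that a partially completed bce_dma_alloc() can be unwound safely.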
*/ if (sc->status_block_paddr != 0) { bus_dmamap_unload( sc->status_tag, sc->status_map); sc->status_block_paddr = 0; } if (sc->status_block != NULL) { bus_dmamem_free( sc->status_tag, sc->status_block, sc->status_map); sc->status_block = NULL; } if (sc->status_tag != NULL) { bus_dma_tag_destroy(sc->status_tag); sc->status_tag = NULL; } /* Free, unmap, and destroy the statistics block. */ if (sc->stats_block_paddr != 0) { bus_dmamap_unload( sc->stats_tag, sc->stats_map); sc->stats_block_paddr = 0; } if (sc->stats_block != NULL) { bus_dmamem_free( sc->stats_tag, sc->stats_block, sc->stats_map); sc->stats_block = NULL; } if (sc->stats_tag != NULL) { bus_dma_tag_destroy(sc->stats_tag); sc->stats_tag = NULL; } /* Free, unmap and destroy all context memory pages. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { for (i = 0; i < sc->ctx_pages; i++ ) { if (sc->ctx_paddr[i] != 0) { bus_dmamap_unload( sc->ctx_tag, sc->ctx_map[i]); sc->ctx_paddr[i] = 0; } if (sc->ctx_block[i] != NULL) { bus_dmamem_free( sc->ctx_tag, sc->ctx_block[i], sc->ctx_map[i]); sc->ctx_block[i] = NULL; } } /* Destroy the context memory tag. */ if (sc->ctx_tag != NULL) { bus_dma_tag_destroy(sc->ctx_tag); sc->ctx_tag = NULL; } } /* Free, unmap and destroy all TX buffer descriptor chain pages. */ for (i = 0; i < sc->tx_pages; i++ ) { if (sc->tx_bd_chain_paddr[i] != 0) { bus_dmamap_unload( sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i]); sc->tx_bd_chain_paddr[i] = 0; } if (sc->tx_bd_chain[i] != NULL) { bus_dmamem_free( sc->tx_bd_chain_tag, sc->tx_bd_chain[i], sc->tx_bd_chain_map[i]); sc->tx_bd_chain[i] = NULL; } } /* Destroy the TX buffer descriptor tag. */ if (sc->tx_bd_chain_tag != NULL) { bus_dma_tag_destroy(sc->tx_bd_chain_tag); sc->tx_bd_chain_tag = NULL; } /* Free, unmap and destroy all RX buffer descriptor chain pages. */ for (i = 0; i < sc->rx_pages; i++ ) { if (sc->rx_bd_chain_paddr[i] != 0) { bus_dmamap_unload( sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i]); sc->rx_bd_chain_paddr[i] = 0; } if (sc->rx_bd_chain[i] != NULL) { bus_dmamem_free( sc->rx_bd_chain_tag, sc->rx_bd_chain[i], sc->rx_bd_chain_map[i]); sc->rx_bd_chain[i] = NULL; } } /* Destroy the RX buffer descriptor tag. */ if (sc->rx_bd_chain_tag != NULL) { bus_dma_tag_destroy(sc->rx_bd_chain_tag); sc->rx_bd_chain_tag = NULL; } /* Free, unmap and destroy all page buffer descriptor chain pages. */ if (bce_hdr_split == TRUE) { for (i = 0; i < sc->pg_pages; i++ ) { if (sc->pg_bd_chain_paddr[i] != 0) { bus_dmamap_unload( sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i]); sc->pg_bd_chain_paddr[i] = 0; } if (sc->pg_bd_chain[i] != NULL) { bus_dmamem_free( sc->pg_bd_chain_tag, sc->pg_bd_chain[i], sc->pg_bd_chain_map[i]); sc->pg_bd_chain[i] = NULL; } } /* Destroy the page buffer descriptor tag. */ if (sc->pg_bd_chain_tag != NULL) { bus_dma_tag_destroy(sc->pg_bd_chain_tag); sc->pg_bd_chain_tag = NULL; } } /* Unload and destroy the TX mbuf maps. */ for (i = 0; i < MAX_TX_BD_AVAIL; i++) { if (sc->tx_mbuf_map[i] != NULL) { bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]); bus_dmamap_destroy(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]); sc->tx_mbuf_map[i] = NULL; } } /* Destroy the TX mbuf tag. */ if (sc->tx_mbuf_tag != NULL) { bus_dma_tag_destroy(sc->tx_mbuf_tag); sc->tx_mbuf_tag = NULL; } /* Unload and destroy the RX mbuf maps. */ for (i = 0; i < MAX_RX_BD_AVAIL; i++) { if (sc->rx_mbuf_map[i] != NULL) { bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]); bus_dmamap_destroy(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]); sc->rx_mbuf_map[i] = NULL; } } /* Destroy the RX mbuf tag. 
*/ if (sc->rx_mbuf_tag != NULL) { bus_dma_tag_destroy(sc->rx_mbuf_tag); sc->rx_mbuf_tag = NULL; } /* Unload and destroy the page mbuf maps. */ if (bce_hdr_split == TRUE) { for (i = 0; i < MAX_PG_BD_AVAIL; i++) { if (sc->pg_mbuf_map[i] != NULL) { bus_dmamap_unload(sc->pg_mbuf_tag, sc->pg_mbuf_map[i]); bus_dmamap_destroy(sc->pg_mbuf_tag, sc->pg_mbuf_map[i]); sc->pg_mbuf_map[i] = NULL; } } /* Destroy the page mbuf tag. */ if (sc->pg_mbuf_tag != NULL) { bus_dma_tag_destroy(sc->pg_mbuf_tag); sc->pg_mbuf_tag = NULL; } } /* Destroy the parent tag */ if (sc->parent_tag != NULL) { bus_dma_tag_destroy(sc->parent_tag); sc->parent_tag = NULL; } DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Get DMA memory from the OS. */ /* */ /* Validates that the OS has provided DMA buffers in response to a */ /* bus_dmamap_load() call and saves the physical address of those buffers. */ /* When the callback is used the OS will return 0 for the mapping function */ /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */ /* failures back to the caller. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *busaddr = arg; KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!", __FUNCTION__, nseg)); /* Simulate a mapping failure. */ DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control), error = ENOMEM); /* ToDo: How to increment debug sim_count variable here? */ /* Check for an error and signal the caller that an error occurred. */ if (error) { *busaddr = 0; } else { *busaddr = segs->ds_addr; } } /****************************************************************************/ /* Allocate any DMA memory needed by the driver. */ /* */ /* Allocates DMA memory needed for the various global structures needed by */ /* hardware. */ /* */ /* Memory alignment requirements: */ /* +-----------------+----------+----------+----------+----------+ */ /* | | 5706 | 5708 | 5709 | 5716 | */ /* +-----------------+----------+----------+----------+----------+ */ /* |Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ /* |Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */ /* |RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */ /* |PG Buffers | none | none | none | none | */ /* |TX Buffers | none | none | none | none | */ /* |Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */ /* |Context Memory | | | | | */ /* +-----------------+----------+----------+----------+----------+ */ /* */ /* (1) Must align with CPU page size (BCM_PAGE_SZIE). */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_dma_alloc(device_t dev) { struct bce_softc *sc; int i, error, rc = 0; bus_size_t max_size, max_seg_size; int max_segments; sc = device_get_softc(dev); DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); /* * Allocate the parent bus DMA tag appropriate for PCI. 
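Every other tag created below derives from this parent tag, so the sc->max_bus_addr address limit and the BCE_DMA_BOUNDARY crossing restriction applied here are inherited by all of the driver's DMA allocations.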
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->parent_tag)) { BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } /* * Create a DMA tag for the status block, allocate and clear the * memory, map the memory into DMA space, and fetch the physical * address of the block. */ if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ, 0, NULL, NULL, &sc->status_tag)) { BCE_PRINTF("%s(%d): Could not allocate status block " "DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } if(bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->status_map)) { BCE_PRINTF("%s(%d): Could not allocate status block " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->status_tag, sc->status_map, sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr, &sc->status_block_paddr, BUS_DMA_NOWAIT); if (error || sc->status_block_paddr == 0) { BCE_PRINTF("%s(%d): Could not map status block " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n", __FUNCTION__, (uintmax_t) sc->status_block_paddr); /* * Create a DMA tag for the statistics block, allocate and clear the * memory, map the memory into DMA space, and fetch the physical * address of the block. */ if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ, 0, NULL, NULL, &sc->stats_tag)) { BCE_PRINTF("%s(%d): Could not allocate statistics block " "DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) { BCE_PRINTF("%s(%d): Could not allocate statistics block " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->stats_tag, sc->stats_map, sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr, &sc->stats_block_paddr, BUS_DMA_NOWAIT); if (error || sc->stats_block_paddr == 0) { BCE_PRINTF("%s(%d): Could not map statistics block " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n", __FUNCTION__, (uintmax_t) sc->stats_block_paddr); /* BCM5709 uses host memory as cache for context memory. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE; if (sc->ctx_pages == 0) sc->ctx_pages = 1; DBRUNIF((sc->ctx_pages > 512), BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n", __FILE__, __LINE__, sc->ctx_pages)); /* * Create a DMA tag for the context pages, * allocate and clear the memory, map the * memory into DMA space, and fetch the * physical address of the block. 
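One host page is allocated per context page (sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE), and each page's bus address is captured through the bce_dma_map_addr() callback into ctx_paddr[] for later use.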
*/ if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE, 0, NULL, NULL, &sc->ctx_tag)) { BCE_PRINTF("%s(%d): Could not allocate CTX " "DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } for (i = 0; i < sc->ctx_pages; i++) { if(bus_dmamem_alloc(sc->ctx_tag, (void **)&sc->ctx_block[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ctx_map[i])) { BCE_PRINTF("%s(%d): Could not allocate CTX " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i], sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr, &sc->ctx_paddr[i], BUS_DMA_NOWAIT); if (error || sc->ctx_paddr[i] == 0) { BCE_PRINTF("%s(%d): Could not map CTX " "DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] " "= 0x%jX\n", __FUNCTION__, i, (uintmax_t) sc->ctx_paddr[i]); } } /* * Create a DMA tag for the TX buffer descriptor chain, * allocate and clear the memory, and fetch the * physical address of the block. */ if(bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0, NULL, NULL, &sc->tx_bd_chain_tag)) { BCE_PRINTF("%s(%d): Could not allocate TX descriptor " "chain DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } for (i = 0; i < sc->tx_pages; i++) { if(bus_dmamem_alloc(sc->tx_bd_chain_tag, (void **)&sc->tx_bd_chain[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->tx_bd_chain_map[i])) { BCE_PRINTF("%s(%d): Could not allocate TX descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->tx_bd_chain_tag, sc->tx_bd_chain_map[i], sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr, &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT); if (error || sc->tx_bd_chain_paddr[i] == 0) { BCE_PRINTF("%s(%d): Could not map TX descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = " "0x%jX\n", __FUNCTION__, i, (uintmax_t) sc->tx_bd_chain_paddr[i]); } /* Check the required size before mapping to conserve resources. */ if (bce_tso_enable) { max_size = BCE_TSO_MAX_SIZE; max_segments = BCE_MAX_SEGMENTS; max_seg_size = BCE_TSO_MAX_SEG_SIZE; } else { max_size = MCLBYTES * BCE_MAX_SEGMENTS; max_segments = BCE_MAX_SEGMENTS; max_seg_size = MCLBYTES; } /* Create a DMA tag for TX mbufs. */ if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size, max_segments, max_seg_size, 0, NULL, NULL, &sc->tx_mbuf_tag)) { BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } /* Create DMA maps for the TX mbufs clusters. */ for (i = 0; i < TOTAL_TX_BD_ALLOC; i++) { if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT, &sc->tx_mbuf_map[i])) { BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA " "map!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } } /* * Create a DMA tag for the RX buffer descriptor chain, * allocate and clear the memory, and fetch the physical * address of the blocks. 
*/ if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr, NULL, NULL, BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ, 0, NULL, NULL, &sc->rx_bd_chain_tag)) { BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain " "DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } for (i = 0; i < sc->rx_pages; i++) { if (bus_dmamem_alloc(sc->rx_bd_chain_tag, (void **)&sc->rx_bd_chain[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->rx_bd_chain_map[i])) { BCE_PRINTF("%s(%d): Could not allocate RX descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr, &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT); if (error || sc->rx_bd_chain_paddr[i] == 0) { BCE_PRINTF("%s(%d): Could not map RX descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = " "0x%jX\n", __FUNCTION__, i, (uintmax_t) sc->rx_bd_chain_paddr[i]); } /* * Create a DMA tag for RX mbufs. */ if (bce_hdr_split == TRUE) max_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ? MCLBYTES : sc->rx_bd_mbuf_alloc_size); else max_size = MJUM9BYTES; DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag " "(max size = 0x%jX)\n", __FUNCTION__, (uintmax_t)max_size); if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size, 1, max_size, 0, NULL, NULL, &sc->rx_mbuf_tag)) { BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } /* Create DMA maps for the RX mbuf clusters. */ for (i = 0; i < TOTAL_RX_BD_ALLOC; i++) { if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT, &sc->rx_mbuf_map[i])) { BCE_PRINTF("%s(%d): Unable to create RX mbuf " "DMA map!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } } if (bce_hdr_split == TRUE) { /* * Create a DMA tag for the page buffer descriptor chain, * allocate and clear the memory, and fetch the physical * address of the blocks. */ if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr, NULL, NULL, BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ, 0, NULL, NULL, &sc->pg_bd_chain_tag)) { BCE_PRINTF("%s(%d): Could not allocate page descriptor " "chain DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } for (i = 0; i < sc->pg_pages; i++) { if (bus_dmamem_alloc(sc->pg_bd_chain_tag, (void **)&sc->pg_bd_chain[i], BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->pg_bd_chain_map[i])) { BCE_PRINTF("%s(%d): Could not allocate page " "descriptor chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } error = bus_dmamap_load(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr, &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT); if (error || sc->pg_bd_chain_paddr[i] == 0) { BCE_PRINTF("%s(%d): Could not map page descriptor " "chain DMA memory!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = " "0x%jX\n", __FUNCTION__, i, (uintmax_t) sc->pg_bd_chain_paddr[i]); } /* * Create a DMA tag for page mbufs. 
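Page buffers are only used when header splitting is enabled; each page mbuf is a single MCLBYTES cluster, so the tag below allows exactly one segment of at most MCLBYTES.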
*/ if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->pg_mbuf_tag)) { BCE_PRINTF("%s(%d): Could not allocate page mbuf " "DMA tag!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } /* Create DMA maps for the page mbuf clusters. */ for (i = 0; i < TOTAL_PG_BD_ALLOC; i++) { if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT, &sc->pg_mbuf_map[i])) { BCE_PRINTF("%s(%d): Unable to create page mbuf " "DMA map!\n", __FILE__, __LINE__); rc = ENOMEM; goto bce_dma_alloc_exit; } } } bce_dma_alloc_exit: DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); return(rc); } /****************************************************************************/ /* Release all resources used by the driver. */ /* */ /* Releases all resources acquired by the driver including interrupts, */ /* interrupt handler, interfaces, mutexes, and DMA memory. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_release_resources(struct bce_softc *sc) { device_t dev; DBENTER(BCE_VERBOSE_RESET); dev = sc->bce_dev; bce_dma_free(sc); if (sc->bce_intrhand != NULL) { DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n"); bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand); } if (sc->bce_res_irq != NULL) { DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n"); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->bce_res_irq), sc->bce_res_irq); } if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) { DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n"); pci_release_msi(dev); } if (sc->bce_res_mem != NULL) { DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n"); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bce_res_mem); } if (sc->bce_ifp != NULL) { DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n"); if_free(sc->bce_ifp); } if (mtx_initialized(&sc->bce_mtx)) BCE_LOCK_DESTROY(sc); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Firmware synchronization. */ /* */ /* Before performing certain events such as a chip reset, synchronize with */ /* the firmware first. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_fw_sync(struct bce_softc *sc, u32 msg_data) { int i, rc = 0; u32 val; DBENTER(BCE_VERBOSE_RESET); /* Don't waste any time if we've timed out before. */ if (sc->bce_fw_timed_out == TRUE) { rc = EBUSY; goto bce_fw_sync_exit; } /* Increment the message sequence number. */ sc->bce_fw_wr_seq++; msg_data |= sc->bce_fw_wr_seq; DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = " "0x%08X\n", msg_data); /* Send the message to the bootcode driver mailbox. */ bce_shmem_wr(sc, BCE_DRV_MB, msg_data); /* Wait for the bootcode to acknowledge the message. */ for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) { /* Check for a response in the bootcode firmware mailbox. */ val = bce_shmem_rd(sc, BCE_FW_MB); if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ)) break; DELAY(1000); } /* If we've timed out, tell bootcode that we've stopped waiting. */ if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) && ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) { BCE_PRINTF("%s(%d): Firmware synchronization timeout! 
" "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data); msg_data &= ~BCE_DRV_MSG_CODE; msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT; bce_shmem_wr(sc, BCE_DRV_MB, msg_data); sc->bce_fw_timed_out = TRUE; rc = EBUSY; } bce_fw_sync_exit: DBEXIT(BCE_VERBOSE_RESET); return (rc); } /****************************************************************************/ /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_load_rv2p_fw(struct bce_softc *sc, const u32 *rv2p_code, u32 rv2p_code_len, u32 rv2p_proc) { int i; u32 val; DBENTER(BCE_VERBOSE_RESET); /* Set the page size used by RV2P. */ if (rv2p_proc == RV2P_PROC2) { BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE); } for (i = 0; i < rv2p_code_len; i += 8) { REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code); rv2p_code++; REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code); rv2p_code++; if (rv2p_proc == RV2P_PROC1) { val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR; REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val); } else { val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR; REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val); } } /* Reset the processor, un-stall is done later. */ if (rv2p_proc == RV2P_PROC1) { REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET); } else { REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET); } DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Load RISC processor firmware. */ /* */ /* Loads firmware from the file if_bcefw.h into the scratchpad memory */ /* associated with a particular processor. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg, struct fw_info *fw) { u32 offset; DBENTER(BCE_VERBOSE_RESET); bce_halt_cpu(sc, cpu_reg); /* Load the Text area. */ offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); if (fw->text) { int j; for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->text[j]); } } /* Load the Data area. */ offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base); if (fw->data) { int j; for (j = 0; j < (fw->data_len / 4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->data[j]); } } /* Load the SBSS area. */ offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); if (fw->sbss) { int j; for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->sbss[j]); } } /* Load the BSS area. */ offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); if (fw->bss) { int j; for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->bss[j]); } } /* Load the Read-Only area. */ offset = cpu_reg->spad_base + (fw->rodata_addr - cpu_reg->mips_view_base); if (fw->rodata) { int j; for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) { REG_WR_IND(sc, offset, fw->rodata[j]); } } /* Clear the pre-fetch instruction and set the FW start address. */ REG_WR_IND(sc, cpu_reg->inst, 0); REG_WR_IND(sc, cpu_reg->pc, fw->start_addr); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Starts the RISC processor. */ /* */ /* Assumes the CPU starting address has already been set. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) { u32 val; DBENTER(BCE_VERBOSE_RESET); /* Start the CPU. */ val = REG_RD_IND(sc, cpu_reg->mode); val &= ~cpu_reg->mode_value_halt; REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); REG_WR_IND(sc, cpu_reg->mode, val); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Halts the RISC processor. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg) { u32 val; DBENTER(BCE_VERBOSE_RESET); /* Halt the CPU. */ val = REG_RD_IND(sc, cpu_reg->mode); val |= cpu_reg->mode_value_halt; REG_WR_IND(sc, cpu_reg->mode, val); REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the RX CPU. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_start_rxp_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_RXP_CPU_MODE; cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_RXP_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_RXP_SCRATCH; cpu_reg.mips_view_base = 0x8000000; DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n"); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the RX CPU. */ /* */ /* Returns: */ /* Nothing. 
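The firmware image loaded below comes from if_bcefw.h: the b09 variants are used on the BCM5709 family and the b06 variants on the earlier controllers. Note that the RX processor itself is not started here; its start is deferred until initialization is complete.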
*/ /****************************************************************************/ static void bce_init_rxp_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_RXP_CPU_MODE; cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_RXP_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE; cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK; cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION; cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_RXP_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_RXP_b09FwReleaseMajor; fw.ver_minor = bce_RXP_b09FwReleaseMinor; fw.ver_fix = bce_RXP_b09FwReleaseFix; fw.start_addr = bce_RXP_b09FwStartAddr; fw.text_addr = bce_RXP_b09FwTextAddr; fw.text_len = bce_RXP_b09FwTextLen; fw.text_index = 0; fw.text = bce_RXP_b09FwText; fw.data_addr = bce_RXP_b09FwDataAddr; fw.data_len = bce_RXP_b09FwDataLen; fw.data_index = 0; fw.data = bce_RXP_b09FwData; fw.sbss_addr = bce_RXP_b09FwSbssAddr; fw.sbss_len = bce_RXP_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_RXP_b09FwSbss; fw.bss_addr = bce_RXP_b09FwBssAddr; fw.bss_len = bce_RXP_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_RXP_b09FwBss; fw.rodata_addr = bce_RXP_b09FwRodataAddr; fw.rodata_len = bce_RXP_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_RXP_b09FwRodata; } else { fw.ver_major = bce_RXP_b06FwReleaseMajor; fw.ver_minor = bce_RXP_b06FwReleaseMinor; fw.ver_fix = bce_RXP_b06FwReleaseFix; fw.start_addr = bce_RXP_b06FwStartAddr; fw.text_addr = bce_RXP_b06FwTextAddr; fw.text_len = bce_RXP_b06FwTextLen; fw.text_index = 0; fw.text = bce_RXP_b06FwText; fw.data_addr = bce_RXP_b06FwDataAddr; fw.data_len = bce_RXP_b06FwDataLen; fw.data_index = 0; fw.data = bce_RXP_b06FwData; fw.sbss_addr = bce_RXP_b06FwSbssAddr; fw.sbss_len = bce_RXP_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_RXP_b06FwSbss; fw.bss_addr = bce_RXP_b06FwBssAddr; fw.bss_len = bce_RXP_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_RXP_b06FwBss; fw.rodata_addr = bce_RXP_b06FwRodataAddr; fw.rodata_len = bce_RXP_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_RXP_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); /* Delay RXP start until initialization is complete. */ DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the TX CPU. */ /* */ /* Returns: */ /* Nothing. 
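Unlike the RX processor above, the TX processor is started immediately after its firmware image has been loaded.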
*/ /****************************************************************************/ static void bce_init_txp_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_TXP_CPU_MODE; cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_TXP_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE; cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK; cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION; cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_TXP_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_TXP_b09FwReleaseMajor; fw.ver_minor = bce_TXP_b09FwReleaseMinor; fw.ver_fix = bce_TXP_b09FwReleaseFix; fw.start_addr = bce_TXP_b09FwStartAddr; fw.text_addr = bce_TXP_b09FwTextAddr; fw.text_len = bce_TXP_b09FwTextLen; fw.text_index = 0; fw.text = bce_TXP_b09FwText; fw.data_addr = bce_TXP_b09FwDataAddr; fw.data_len = bce_TXP_b09FwDataLen; fw.data_index = 0; fw.data = bce_TXP_b09FwData; fw.sbss_addr = bce_TXP_b09FwSbssAddr; fw.sbss_len = bce_TXP_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_TXP_b09FwSbss; fw.bss_addr = bce_TXP_b09FwBssAddr; fw.bss_len = bce_TXP_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_TXP_b09FwBss; fw.rodata_addr = bce_TXP_b09FwRodataAddr; fw.rodata_len = bce_TXP_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_TXP_b09FwRodata; } else { fw.ver_major = bce_TXP_b06FwReleaseMajor; fw.ver_minor = bce_TXP_b06FwReleaseMinor; fw.ver_fix = bce_TXP_b06FwReleaseFix; fw.start_addr = bce_TXP_b06FwStartAddr; fw.text_addr = bce_TXP_b06FwTextAddr; fw.text_len = bce_TXP_b06FwTextLen; fw.text_index = 0; fw.text = bce_TXP_b06FwText; fw.data_addr = bce_TXP_b06FwDataAddr; fw.data_len = bce_TXP_b06FwDataLen; fw.data_index = 0; fw.data = bce_TXP_b06FwData; fw.sbss_addr = bce_TXP_b06FwSbssAddr; fw.sbss_len = bce_TXP_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_TXP_b06FwSbss; fw.bss_addr = bce_TXP_b06FwBssAddr; fw.bss_len = bce_TXP_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_TXP_b06FwBss; fw.rodata_addr = bce_TXP_b06FwRodataAddr; fw.rodata_len = bce_TXP_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_TXP_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the TPAT CPU. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_init_tpat_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_TPAT_CPU_MODE; cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_TPAT_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE; cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK; cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION; cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_TPAT_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_TPAT_b09FwReleaseMajor; fw.ver_minor = bce_TPAT_b09FwReleaseMinor; fw.ver_fix = bce_TPAT_b09FwReleaseFix; fw.start_addr = bce_TPAT_b09FwStartAddr; fw.text_addr = bce_TPAT_b09FwTextAddr; fw.text_len = bce_TPAT_b09FwTextLen; fw.text_index = 0; fw.text = bce_TPAT_b09FwText; fw.data_addr = bce_TPAT_b09FwDataAddr; fw.data_len = bce_TPAT_b09FwDataLen; fw.data_index = 0; fw.data = bce_TPAT_b09FwData; fw.sbss_addr = bce_TPAT_b09FwSbssAddr; fw.sbss_len = bce_TPAT_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_TPAT_b09FwSbss; fw.bss_addr = bce_TPAT_b09FwBssAddr; fw.bss_len = bce_TPAT_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_TPAT_b09FwBss; fw.rodata_addr = bce_TPAT_b09FwRodataAddr; fw.rodata_len = bce_TPAT_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_TPAT_b09FwRodata; } else { fw.ver_major = bce_TPAT_b06FwReleaseMajor; fw.ver_minor = bce_TPAT_b06FwReleaseMinor; fw.ver_fix = bce_TPAT_b06FwReleaseFix; fw.start_addr = bce_TPAT_b06FwStartAddr; fw.text_addr = bce_TPAT_b06FwTextAddr; fw.text_len = bce_TPAT_b06FwTextLen; fw.text_index = 0; fw.text = bce_TPAT_b06FwText; fw.data_addr = bce_TPAT_b06FwDataAddr; fw.data_len = bce_TPAT_b06FwDataLen; fw.data_index = 0; fw.data = bce_TPAT_b06FwData; fw.sbss_addr = bce_TPAT_b06FwSbssAddr; fw.sbss_len = bce_TPAT_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_TPAT_b06FwSbss; fw.bss_addr = bce_TPAT_b06FwBssAddr; fw.bss_len = bce_TPAT_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_TPAT_b06FwBss; fw.rodata_addr = bce_TPAT_b06FwRodataAddr; fw.rodata_len = bce_TPAT_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_TPAT_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the CP CPU. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_init_cp_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_CP_CPU_MODE; cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_CP_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE; cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK; cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_CP_CPU_INSTRUCTION; cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_CP_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_CP_b09FwReleaseMajor; fw.ver_minor = bce_CP_b09FwReleaseMinor; fw.ver_fix = bce_CP_b09FwReleaseFix; fw.start_addr = bce_CP_b09FwStartAddr; fw.text_addr = bce_CP_b09FwTextAddr; fw.text_len = bce_CP_b09FwTextLen; fw.text_index = 0; fw.text = bce_CP_b09FwText; fw.data_addr = bce_CP_b09FwDataAddr; fw.data_len = bce_CP_b09FwDataLen; fw.data_index = 0; fw.data = bce_CP_b09FwData; fw.sbss_addr = bce_CP_b09FwSbssAddr; fw.sbss_len = bce_CP_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_CP_b09FwSbss; fw.bss_addr = bce_CP_b09FwBssAddr; fw.bss_len = bce_CP_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_CP_b09FwBss; fw.rodata_addr = bce_CP_b09FwRodataAddr; fw.rodata_len = bce_CP_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_CP_b09FwRodata; } else { fw.ver_major = bce_CP_b06FwReleaseMajor; fw.ver_minor = bce_CP_b06FwReleaseMinor; fw.ver_fix = bce_CP_b06FwReleaseFix; fw.start_addr = bce_CP_b06FwStartAddr; fw.text_addr = bce_CP_b06FwTextAddr; fw.text_len = bce_CP_b06FwTextLen; fw.text_index = 0; fw.text = bce_CP_b06FwText; fw.data_addr = bce_CP_b06FwDataAddr; fw.data_len = bce_CP_b06FwDataLen; fw.data_index = 0; fw.data = bce_CP_b06FwData; fw.sbss_addr = bce_CP_b06FwSbssAddr; fw.sbss_len = bce_CP_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_CP_b06FwSbss; fw.bss_addr = bce_CP_b06FwBssAddr; fw.bss_len = bce_CP_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_CP_b06FwBss; fw.rodata_addr = bce_CP_b06FwRodataAddr; fw.rodata_len = bce_CP_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_CP_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the COM CPU. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_init_com_cpu(struct bce_softc *sc) { struct cpu_reg cpu_reg; struct fw_info fw; DBENTER(BCE_VERBOSE_RESET); cpu_reg.mode = BCE_COM_CPU_MODE; cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT; cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA; cpu_reg.state = BCE_COM_CPU_STATE; cpu_reg.state_value_clear = 0xffffff; cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE; cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK; cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER; cpu_reg.inst = BCE_COM_CPU_INSTRUCTION; cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT; cpu_reg.spad_base = BCE_COM_SCRATCH; cpu_reg.mips_view_base = 0x8000000; if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { fw.ver_major = bce_COM_b09FwReleaseMajor; fw.ver_minor = bce_COM_b09FwReleaseMinor; fw.ver_fix = bce_COM_b09FwReleaseFix; fw.start_addr = bce_COM_b09FwStartAddr; fw.text_addr = bce_COM_b09FwTextAddr; fw.text_len = bce_COM_b09FwTextLen; fw.text_index = 0; fw.text = bce_COM_b09FwText; fw.data_addr = bce_COM_b09FwDataAddr; fw.data_len = bce_COM_b09FwDataLen; fw.data_index = 0; fw.data = bce_COM_b09FwData; fw.sbss_addr = bce_COM_b09FwSbssAddr; fw.sbss_len = bce_COM_b09FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_COM_b09FwSbss; fw.bss_addr = bce_COM_b09FwBssAddr; fw.bss_len = bce_COM_b09FwBssLen; fw.bss_index = 0; fw.bss = bce_COM_b09FwBss; fw.rodata_addr = bce_COM_b09FwRodataAddr; fw.rodata_len = bce_COM_b09FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_COM_b09FwRodata; } else { fw.ver_major = bce_COM_b06FwReleaseMajor; fw.ver_minor = bce_COM_b06FwReleaseMinor; fw.ver_fix = bce_COM_b06FwReleaseFix; fw.start_addr = bce_COM_b06FwStartAddr; fw.text_addr = bce_COM_b06FwTextAddr; fw.text_len = bce_COM_b06FwTextLen; fw.text_index = 0; fw.text = bce_COM_b06FwText; fw.data_addr = bce_COM_b06FwDataAddr; fw.data_len = bce_COM_b06FwDataLen; fw.data_index = 0; fw.data = bce_COM_b06FwData; fw.sbss_addr = bce_COM_b06FwSbssAddr; fw.sbss_len = bce_COM_b06FwSbssLen; fw.sbss_index = 0; fw.sbss = bce_COM_b06FwSbss; fw.bss_addr = bce_COM_b06FwBssAddr; fw.bss_len = bce_COM_b06FwBssLen; fw.bss_index = 0; fw.bss = bce_COM_b06FwBss; fw.rodata_addr = bce_COM_b06FwRodataAddr; fw.rodata_len = bce_COM_b06FwRodataLen; fw.rodata_index = 0; fw.rodata = bce_COM_b06FwRodata; } DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n"); bce_load_cpu_fw(sc, &cpu_reg, &fw); bce_start_cpu(sc, &cpu_reg); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */ /* */ /* Loads the firmware for each CPU and starts the CPU. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_init_cpus(struct bce_softc *sc) { DBENTER(BCE_VERBOSE_RESET); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { if ((BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax)) { bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1, sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1); bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2, sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2); } else { bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1, sizeof(bce_xi_rv2p_proc1), RV2P_PROC1); bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2, sizeof(bce_xi_rv2p_proc2), RV2P_PROC2); } } else { bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1); bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2); } bce_init_rxp_cpu(sc); bce_init_txp_cpu(sc); bce_init_tpat_cpu(sc); bce_init_com_cpu(sc); bce_init_cp_cpu(sc); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize context memory. */ /* */ /* Clears the memory associated with each Context ID (CID). */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static int bce_init_ctx(struct bce_softc *sc) { u32 offset, val, vcid_addr; int i, j, rc, retry_cnt; rc = 0; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { retry_cnt = CTX_INIT_RETRY_COUNT; DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n"); /* * BCM5709 context memory may be cached * in host memory so prepare the host memory * for access. */ val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT | (1 << 12); val |= (BCM_PAGE_BITS - 8) << 16; REG_WR(sc, BCE_CTX_COMMAND, val); /* Wait for mem init command to complete. */ for (i = 0; i < retry_cnt; i++) { val = REG_RD(sc, BCE_CTX_COMMAND); if (!(val & BCE_CTX_COMMAND_MEM_INIT)) break; DELAY(2); } if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) { BCE_PRINTF("%s(): Context memory initialization failed!\n", __FUNCTION__); rc = EBUSY; goto init_ctx_fail; } for (i = 0; i < sc->ctx_pages; i++) { /* Set the physical address of the context memory. */ REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0, BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) | BCE_CTX_HOST_PAGE_TBL_DATA0_VALID); REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1, BCE_ADDR_HI(sc->ctx_paddr[i])); REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); /* Verify the context memory write was successful. */ for (j = 0; j < retry_cnt; j++) { val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL); if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0) break; DELAY(5); } if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) { BCE_PRINTF("%s(): Failed to initialize " "context page %d!\n", __FUNCTION__, i); rc = EBUSY; goto init_ctx_fail; } } } else { DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n"); /* * For the 5706/5708, context memory is local to * the controller, so initialize the controller * context memory. */ vcid_addr = GET_CID_ADDR(96); while (vcid_addr) { vcid_addr -= PHY_CTX_SIZE; REG_WR(sc, BCE_CTX_VIRT_ADDR, 0); REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); for(offset = 0; offset < PHY_CTX_SIZE; offset += 4) { CTX_WR(sc, 0x00, offset, 0); } REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr); REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr); } } init_ctx_fail: DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX); return (rc); } /****************************************************************************/ /* Fetch the permanent MAC address of the controller. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_get_mac_addr(struct bce_softc *sc) { u32 mac_lo = 0, mac_hi = 0; DBENTER(BCE_VERBOSE_RESET); /* * The NetXtreme II bootcode populates various NIC * power-on and runtime configuration items in a * shared memory area. The factory configured MAC * address is available from both NVRAM and the * shared memory area so we'll read the value from * shared memory for speed. */ mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER); mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER); if ((mac_lo == 0) && (mac_hi == 0)) { BCE_PRINTF("%s(%d): Invalid Ethernet address!\n", __FILE__, __LINE__); } else { sc->eaddr[0] = (u_char)(mac_hi >> 8); sc->eaddr[1] = (u_char)(mac_hi >> 0); sc->eaddr[2] = (u_char)(mac_lo >> 24); sc->eaddr[3] = (u_char)(mac_lo >> 16); sc->eaddr[4] = (u_char)(mac_lo >> 8); sc->eaddr[5] = (u_char)(mac_lo >> 0); } DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet " "address = %6D\n", sc->eaddr, ":"); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Program the MAC address. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_set_mac_addr(struct bce_softc *sc) { u32 val; u8 *mac_addr = sc->eaddr; /* ToDo: Add support for setting multiple MAC addresses. */ DBENTER(BCE_VERBOSE_RESET); DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = " "%6D\n", sc->eaddr, ":"); val = (mac_addr[0] << 8) | mac_addr[1]; REG_WR(sc, BCE_EMAC_MAC_MATCH0, val); val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) | mac_addr[5]; REG_WR(sc, BCE_EMAC_MAC_MATCH1, val); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Stop the controller. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_stop(struct bce_softc *sc) { if_t ifp; DBENTER(BCE_VERBOSE_RESET); BCE_LOCK_ASSERT(sc); ifp = sc->bce_ifp; callout_stop(&sc->bce_tick_callout); /* Disable the transmit/receive blocks. */ REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT); REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); DELAY(20); bce_disable_intr(sc); /* Free RX buffers. */ if (bce_hdr_split == TRUE) { bce_free_pg_chain(sc); } bce_free_rx_chain(sc); /* Free TX buffers. */ bce_free_tx_chain(sc); sc->watchdog_timer = 0; sc->bce_link_up = FALSE; if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); DBEXIT(BCE_VERBOSE_RESET); } static int bce_reset(struct bce_softc *sc, u32 reset_code) { u32 emac_mode_save, val; int i, rc = 0; static const u32 emac_mode_mask = BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX | BCE_EMAC_MODE_25G; DBENTER(BCE_VERBOSE_RESET); DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n", __FUNCTION__, reset_code); /* * If ASF/IPMI is operational, then the EMAC Mode register already * contains appropriate values for the link settings that have * been auto-negotiated. Resetting the chip will clobber those * values. Save the important bits so we can restore them after * the reset. */ emac_mode_save = REG_RD(sc, BCE_EMAC_MODE) & emac_mode_mask; /* Wait for pending PCI transactions to complete. 
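	   This is done by clearing the TX DMA, DMA engine, RX DMA, and host
	   coalescing enable bits and then delaying briefly so that any
	   in-flight transactions can drain before the core reset below is
	   issued.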
*/ REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS); DELAY(5); /* Disable DMA */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); } /* Assume bootcode is running. */ sc->bce_fw_timed_out = FALSE; sc->bce_drv_cardiac_arrest = FALSE; /* Give the firmware a chance to prepare for the reset. */ rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code); if (rc) goto bce_reset_exit; /* Set a firmware reminder that this is a soft reset. */ bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC); /* Dummy read to force the chip to complete all current transactions. */ val = REG_RD(sc, BCE_MISC_ID); /* Chip reset. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET); REG_RD(sc, BCE_MISC_COMMAND); DELAY(5); val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4); } else { val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val); /* Allow up to 30us for reset to complete. */ for (i = 0; i < 10; i++) { val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG); if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) { break; } DELAY(10); } /* Check that reset completed successfully. */ if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ | BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { BCE_PRINTF("%s(%d): Reset failed!\n", __FILE__, __LINE__); rc = EBUSY; goto bce_reset_exit; } } /* Make sure byte swapping is properly configured. */ val = REG_RD(sc, BCE_PCI_SWAP_DIAG0); if (val != 0x01020304) { BCE_PRINTF("%s(%d): Byte swap is incorrect!\n", __FILE__, __LINE__); rc = ENODEV; goto bce_reset_exit; } /* Just completed a reset, assume that firmware is running again. */ sc->bce_fw_timed_out = FALSE; sc->bce_drv_cardiac_arrest = FALSE; /* Wait for the firmware to finish its initialization. */ rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code); if (rc) BCE_PRINTF("%s(%d): Firmware did not complete " "initialization!\n", __FILE__, __LINE__); /* Get firmware capabilities. */ bce_fw_cap_init(sc); bce_reset_exit: /* Restore EMAC Mode bits needed to keep ASF/IPMI running. */ if (reset_code == BCE_DRV_MSG_CODE_RESET) { val = REG_RD(sc, BCE_EMAC_MODE); val = (val & ~emac_mode_mask) | emac_mode_save; REG_WR(sc, BCE_EMAC_MODE, val); } DBEXIT(BCE_VERBOSE_RESET); return (rc); } static int bce_chipinit(struct bce_softc *sc) { u32 val; int rc = 0; DBENTER(BCE_VERBOSE_RESET); bce_disable_intr(sc); /* * Initialize DMA byte/word swapping, configure the number of DMA * channels and PCI clock compensation delay. */ val = BCE_DMA_CONFIG_DATA_BYTE_SWAP | BCE_DMA_CONFIG_DATA_WORD_SWAP | #if BYTE_ORDER == BIG_ENDIAN BCE_DMA_CONFIG_CNTL_BYTE_SWAP | #endif BCE_DMA_CONFIG_CNTL_WORD_SWAP | DMA_READ_CHANS << 12 | DMA_WRITE_CHANS << 16; val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY; if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133)) val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP; /* * This setting resolves a problem observed on certain Intel PCI * chipsets that cannot handle multiple outstanding DMA operations. 
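 * The workaround applied below is BCE_DMA_CONFIG_CNTL_PING_PONG_DMA, which
 * appears to serialize the controller's DMA requests on the affected
 * configurations.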
* See errata E9_5706A1_65. */ if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) && !(sc->bce_flags & BCE_PCIX_FLAG)) val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA; REG_WR(sc, BCE_DMA_CONFIG, val); /* Enable the RX_V2P and Context state machines before access. */ REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); /* Initialize context mapping and zero out the quick contexts. */ if ((rc = bce_init_ctx(sc)) != 0) goto bce_chipinit_exit; /* Initialize the on-boards CPUs */ bce_init_cpus(sc); /* Enable management frames (NC-SI) to flow to the MCP. */ if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); } /* Prepare NVRAM for access. */ if ((rc = bce_init_nvram(sc)) != 0) goto bce_chipinit_exit; /* Set the kernel bypass block size */ val = REG_RD(sc, BCE_MQ_CONFIG); val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE; val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; /* Enable bins used on the 5709. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { val |= BCE_MQ_CONFIG_BIN_MQ_MODE; if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1) val |= BCE_MQ_CONFIG_HALT_DIS; } REG_WR(sc, BCE_MQ_CONFIG, val); val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val); REG_WR(sc, BCE_MQ_KNL_WIND_END, val); /* Set the page size and clear the RV2P processor stall bits. */ val = (BCM_PAGE_BITS - 8) << 24; REG_WR(sc, BCE_RV2P_CONFIG, val); /* Configure page size. */ val = REG_RD(sc, BCE_TBDR_CONFIG); val &= ~BCE_TBDR_CONFIG_PAGE_SIZE; val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; REG_WR(sc, BCE_TBDR_CONFIG, val); /* Set the perfect match control register to default. */ REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0); bce_chipinit_exit: DBEXIT(BCE_VERBOSE_RESET); return(rc); } /****************************************************************************/ /* Initialize the controller in preparation to send/receive traffic. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_blockinit(struct bce_softc *sc) { u32 reg, val; int rc = 0; DBENTER(BCE_VERBOSE_RESET); /* Load the hardware default MAC address. */ bce_set_mac_addr(sc); /* Set the Ethernet backoff seed value */ val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) + (sc->eaddr[3] ) + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16); REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val); sc->last_status_idx = 0; sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE; /* Set up link change interrupt generation. */ REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK); /* Program the physical address of the status block. */ REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr)); REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr)); /* Program the physical address of the statistics block. */ REG_WR(sc, BCE_HC_STATISTICS_ADDR_L, BCE_ADDR_LO(sc->stats_block_paddr)); REG_WR(sc, BCE_HC_STATISTICS_ADDR_H, BCE_ADDR_HI(sc->stats_block_paddr)); /* * Program various host coalescing parameters. * Trip points control how many BDs should be ready before generating * an interrupt while ticks control how long a BD can sit in the chain * before generating an interrupt. 
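 * As an illustrative example (not the driver defaults), a TX trip point of
 * 20 with a TX ticks value of 80 would generate an interrupt once 20 TX BDs
 * have completed, or once a completed BD has been waiting 80 tick units,
 * whichever occurs first.  The values actually programmed come from the
 * bce_*_quick_cons_trip* and bce_*_ticks* softc fields written below.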
*/ REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP, (sc->bce_tx_quick_cons_trip_int << 16) | sc->bce_tx_quick_cons_trip); REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP, (sc->bce_rx_quick_cons_trip_int << 16) | sc->bce_rx_quick_cons_trip); REG_WR(sc, BCE_HC_TX_TICKS, (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks); REG_WR(sc, BCE_HC_RX_TICKS, (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks); REG_WR(sc, BCE_HC_STATS_TICKS, sc->bce_stats_ticks & 0xffff00); REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ /* Not used for L2. */ REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 0); REG_WR(sc, BCE_HC_COM_TICKS, 0); REG_WR(sc, BCE_HC_CMD_TICKS, 0); /* Configure the Host Coalescing block. */ val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS; #if 0 /* ToDo: Add MSI-X support. */ if (sc->bce_flags & BCE_USING_MSIX_FLAG) { u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1; REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL); REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE | BCE_HC_SB_CONFIG_1_ONE_SHOT); REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF, (sc->tx_quick_cons_trip_int << 16) | sc->tx_quick_cons_trip); REG_WR(sc, base + BCE_HC_TX_TICKS_OFF, (sc->tx_ticks_int << 16) | sc->tx_ticks); val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; } /* * Tell the HC block to automatically set the * INT_MASK bit after an MSI/MSI-X interrupt * is generated so the driver doesn't have to. */ if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG) val |= BCE_HC_CONFIG_ONE_SHOT; /* Set the MSI-X status blocks to 128 byte boundaries. */ if (sc->bce_flags & BCE_USING_MSIX_FLAG) val |= BCE_HC_CONFIG_SB_ADDR_INC_128B; #endif REG_WR(sc, BCE_HC_CONFIG, val); /* Clear the internal statistics counters. */ REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); /* Verify that bootcode is running. */ reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE); DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control), BCE_PRINTF("%s(%d): Simulating bootcode failure.\n", __FILE__, __LINE__); reg = 0); if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) != BCE_DEV_INFO_SIGNATURE_MAGIC) { BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, " "Expected: 08%08X\n", __FILE__, __LINE__, (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK), BCE_DEV_INFO_SIGNATURE_MAGIC); rc = ENODEV; goto bce_blockinit_exit; } /* Enable DMA */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL); val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE; REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val); } /* Allow bootcode to apply additional fixes before enabling MAC. */ rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET); /* Enable link state change interrupt generation. */ REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE); /* Enable the RXP. */ bce_start_rxp_cpu(sc); /* Disable management frames (NC-SI) from flowing to the MCP. */ if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) & ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN; REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val); } /* Enable all remaining blocks in the MAC. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT_XI); else REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); DELAY(20); /* Save the current host coalescing block settings. 
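	   The BCE_HC_COMMAND value read below is cached in sc->hc_command,
	   presumably so later code can rewrite the register without having
	   to re-read it.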
*/ sc->hc_command = REG_RD(sc, BCE_HC_COMMAND); bce_blockinit_exit: DBEXIT(BCE_VERBOSE_RESET); return (rc); } /****************************************************************************/ /* Encapsulate an mbuf into the rx_bd chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_get_rx_buf(struct bce_softc *sc, u16 prod, u16 chain_prod, u32 *prod_bseq) { bus_dma_segment_t segs[1]; struct mbuf *m_new = NULL; struct rx_bd *rxbd; int nsegs, error, rc = 0; #ifdef BCE_DEBUG u16 debug_chain_prod = chain_prod; #endif DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); /* Make sure the inputs are valid. */ DBRUNIF((chain_prod > MAX_RX_BD_ALLOC), BCE_PRINTF("%s(%d): RX producer out of range: " "0x%04X > 0x%04X\n", __FILE__, __LINE__, chain_prod, (u16)MAX_RX_BD_ALLOC)); DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, prod, chain_prod, *prod_bseq); /* Update some debug statistic counters */ DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), sc->rx_low_watermark = sc->free_rx_bd); DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++); /* Simulate an mbuf allocation failure. */ DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control), sc->mbuf_alloc_failed_count++; sc->mbuf_alloc_failed_sim_count++; rc = ENOBUFS; goto bce_get_rx_buf_exit); /* This is a new mbuf allocation. */ if (bce_hdr_split == TRUE) MGETHDR(m_new, M_NOWAIT, MT_DATA); else m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_bd_mbuf_alloc_size); if (m_new == NULL) { sc->mbuf_alloc_failed_count++; rc = ENOBUFS; goto bce_get_rx_buf_exit; } DBRUN(sc->debug_rx_mbuf_alloc++); /* Make sure we have a valid packet header. */ M_ASSERTPKTHDR(m_new); /* Initialize the mbuf size and pad if necessary for alignment. */ m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size; m_adj(m_new, sc->rx_bd_mbuf_align_pad); /* ToDo: Consider calling m_fragment() to test error handling. */ /* Map the mbuf cluster into device memory. */ error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag, sc->rx_mbuf_map[chain_prod], m_new, segs, &nsegs, BUS_DMA_NOWAIT); /* Handle any mapping errors. */ if (error) { BCE_PRINTF("%s(%d): Error mapping mbuf into RX " "chain (%d)!\n", __FILE__, __LINE__, error); sc->dma_map_addr_rx_failed_count++; m_freem(m_new); DBRUN(sc->debug_rx_mbuf_alloc--); rc = ENOBUFS; goto bce_get_rx_buf_exit; } /* All mbufs must map to a single segment. */ KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!", __FUNCTION__, nsegs)); /* Setup the rx_bd for the segment. */ rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)]; rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr)); rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr)); rxbd->rx_bd_len = htole32(segs[0].ds_len); rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END); *prod_bseq += segs[0].ds_len; /* Save the mbuf and update our counter. 
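	   The mbuf pointer is stored in rx_mbuf_ptr[] so that
	   bce_free_rx_chain() can later sync, unload, and free it, and
	   free_rx_bd is reduced by the number of segments just consumed.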
*/ sc->rx_mbuf_ptr[chain_prod] = m_new; sc->free_rx_bd -= nsegs; DBRUNMSG(BCE_INSANE_RECV, bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs)); DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, " "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, prod, chain_prod, *prod_bseq); bce_get_rx_buf_exit: DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); return(rc); } /****************************************************************************/ /* Encapsulate an mbuf cluster into the page chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_get_pg_buf(struct bce_softc *sc, u16 prod, u16 prod_idx) { bus_dma_segment_t segs[1]; struct mbuf *m_new = NULL; struct rx_bd *pgbd; int error, nsegs, rc = 0; #ifdef BCE_DEBUG u16 debug_prod_idx = prod_idx; #endif DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); /* Make sure the inputs are valid. */ DBRUNIF((prod_idx > MAX_PG_BD_ALLOC), BCE_PRINTF("%s(%d): page producer out of range: " "0x%04X > 0x%04X\n", __FILE__, __LINE__, prod_idx, (u16)MAX_PG_BD_ALLOC)); DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, " "chain_prod = 0x%04X\n", __FUNCTION__, prod, prod_idx); /* Update counters if we've hit a new low or run out of pages. */ DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark), sc->pg_low_watermark = sc->free_pg_bd); DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++); /* Simulate an mbuf allocation failure. */ DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control), sc->mbuf_alloc_failed_count++; sc->mbuf_alloc_failed_sim_count++; rc = ENOBUFS; goto bce_get_pg_buf_exit); /* This is a new mbuf allocation. */ m_new = m_getcl(M_NOWAIT, MT_DATA, 0); if (m_new == NULL) { sc->mbuf_alloc_failed_count++; rc = ENOBUFS; goto bce_get_pg_buf_exit; } DBRUN(sc->debug_pg_mbuf_alloc++); m_new->m_len = MCLBYTES; /* ToDo: Consider calling m_fragment() to test error handling. */ /* Map the mbuf cluster into device memory. */ error = bus_dmamap_load_mbuf_sg(sc->pg_mbuf_tag, sc->pg_mbuf_map[prod_idx], m_new, segs, &nsegs, BUS_DMA_NOWAIT); /* Handle any mapping errors. */ if (error) { BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n", __FILE__, __LINE__); m_freem(m_new); DBRUN(sc->debug_pg_mbuf_alloc--); rc = ENOBUFS; goto bce_get_pg_buf_exit; } /* All mbufs must map to a single segment. */ KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!", __FUNCTION__, nsegs)); /* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */ /* * The page chain uses the same rx_bd data structure * as the receive chain but doesn't require a byte sequence (bseq). */ pgbd = &sc->pg_bd_chain[PG_PAGE(prod_idx)][PG_IDX(prod_idx)]; pgbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[0].ds_addr)); pgbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[0].ds_addr)); pgbd->rx_bd_len = htole32(MCLBYTES); pgbd->rx_bd_flags = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END); /* Save the mbuf and update our counter. */ sc->pg_mbuf_ptr[prod_idx] = m_new; sc->free_pg_bd--; DBRUNMSG(BCE_INSANE_RECV, bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1)); DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, " "prod_idx = 0x%04X\n", __FUNCTION__, prod, prod_idx); bce_get_pg_buf_exit: DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD); return(rc); } /****************************************************************************/ /* Initialize the TX context memory. 
*/ /* */ /* Returns: */ /* Nothing */ /****************************************************************************/ static void bce_init_tx_context(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); /* Initialize the context ID for an L2 TX chain. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { /* Set the CID type to support an L2 connection. */ val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI | BCE_L2CTX_TX_TYPE_SIZE_L2_XI; CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val); val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE_XI, val); /* Point the hardware to the first page in the chain. */ val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val); val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val); } else { /* Set the CID type to support an L2 connection. */ val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2; CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val); val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val); /* Point the hardware to the first page in the chain. */ val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_HI, val); val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TBDR_BHADDR_LO, val); } DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Allocate memory and initialize the TX data structures. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_init_tx_chain(struct bce_softc *sc) { struct tx_bd *txbd; int i, rc = 0; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD); /* Set the initial TX producer/consumer indices. */ sc->tx_prod = 0; sc->tx_cons = 0; sc->tx_prod_bseq = 0; sc->used_tx_bd = 0; sc->max_tx_bd = USABLE_TX_BD_ALLOC; DBRUN(sc->tx_hi_watermark = 0); DBRUN(sc->tx_full_count = 0); /* * The NetXtreme II supports a linked-list structure called * a Buffer Descriptor Chain (or BD chain). A BD chain * consists of a series of 1 or more chain pages, each of which * consists of a fixed number of BD entries. * The last BD entry on each page is a pointer to the next page * in the chain, and the last pointer in the BD chain * points back to the beginning of the chain. */ /* Set the TX next pointer chain entries. */ for (i = 0; i < sc->tx_pages; i++) { int j; txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE]; /* Check if we've reached the last page. */ if (i == (sc->tx_pages - 1)) j = 0; else j = i + 1; txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j])); txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j])); } bce_init_tx_context(sc); DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD); return(rc); } /****************************************************************************/ /* Free memory and clear the TX data structures. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_free_tx_chain(struct bce_softc *sc) { int i; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD); /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */ for (i = 0; i < MAX_TX_BD_AVAIL; i++) { if (sc->tx_mbuf_ptr[i] != NULL) { if (sc->tx_mbuf_map[i] != NULL) bus_dmamap_sync(sc->tx_mbuf_tag, sc->tx_mbuf_map[i], BUS_DMASYNC_POSTWRITE); m_freem(sc->tx_mbuf_ptr[i]); sc->tx_mbuf_ptr[i] = NULL; DBRUN(sc->debug_tx_mbuf_alloc--); } } /* Clear each TX chain page. */ for (i = 0; i < sc->tx_pages; i++) bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ); sc->used_tx_bd = 0; /* Check if we lost any mbufs in the process. */ DBRUNIF((sc->debug_tx_mbuf_alloc), BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs " "from tx chain!\n", __FILE__, __LINE__, sc->debug_tx_mbuf_alloc)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD); } /****************************************************************************/ /* Initialize the RX context memory. */ /* */ /* Returns: */ /* Nothing */ /****************************************************************************/ static void bce_init_rx_context(struct bce_softc *sc) { u32 val; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX); /* Init the type, size, and BD cache levels for the RX context. */ val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT); /* * Set the level for generating pause frames * when the number of available rx_bd's gets * too low (the low watermark) and the level * when pause frames can be stopped (the high * watermark). */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { u32 lo_water, hi_water; if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) { lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT; } else { lo_water = 0; } if (lo_water >= USABLE_RX_BD_ALLOC) { lo_water = 0; } hi_water = USABLE_RX_BD_ALLOC / 4; if (hi_water <= lo_water) { lo_water = 0; } lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE; hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE; if (hi_water > 0xf) hi_water = 0xf; else if (hi_water == 0) lo_water = 0; val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) | (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT); } CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val); /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */ if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { val = REG_RD(sc, BCE_MQ_MAP_L2_5); REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM); } /* Point the hardware to the first page in the chain. */ val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val); val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]); CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Allocate memory and initialize the RX data structures. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_init_rx_chain(struct bce_softc *sc) { struct rx_bd *rxbd; int i, rc = 0; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); /* Initialize the RX producer and consumer indices. 
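	   The RX chain uses the same linked-page BD layout described above
	   for the TX chain: the last BD of each page points to the next
	   page, and the final page points back to the start of the chain.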
*/ sc->rx_prod = 0; sc->rx_cons = 0; sc->rx_prod_bseq = 0; sc->free_rx_bd = USABLE_RX_BD_ALLOC; sc->max_rx_bd = USABLE_RX_BD_ALLOC; /* Initialize the RX next pointer chain entries. */ for (i = 0; i < sc->rx_pages; i++) { int j; rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE]; /* Check if we've reached the last page. */ if (i == (sc->rx_pages - 1)) j = 0; else j = i + 1; /* Setup the chain page pointers. */ rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j])); rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j])); } /* Fill up the RX chain. */ bce_fill_rx_chain(sc); DBRUN(sc->rx_low_watermark = USABLE_RX_BD_ALLOC); DBRUN(sc->rx_empty_count = 0); for (i = 0; i < sc->rx_pages; i++) { bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } bce_init_rx_context(sc); DBRUNMSG(BCE_EXTREME_RECV, bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC)); DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); /* ToDo: Are there possible failure modes here? */ return(rc); } /****************************************************************************/ /* Add mbufs to the RX chain until its full or an mbuf allocation error */ /* occurs. */ /* */ /* Returns: */ /* Nothing */ /****************************************************************************/ static void bce_fill_rx_chain(struct bce_softc *sc) { u16 prod, prod_idx; u32 prod_bseq; DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); /* Get the RX chain producer indices. */ prod = sc->rx_prod; prod_bseq = sc->rx_prod_bseq; /* Keep filling the RX chain until it's full. */ while (sc->free_rx_bd > 0) { prod_idx = RX_CHAIN_IDX(prod); if (bce_get_rx_buf(sc, prod, prod_idx, &prod_bseq)) { /* Bail out if we can't add an mbuf to the chain. */ break; } prod = NEXT_RX_BD(prod); } /* Save the RX chain producer indices. */ sc->rx_prod = prod; sc->rx_prod_bseq = prod_bseq; /* We should never end up pointing to a next page pointer. */ DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE), BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n", __FUNCTION__, rx_prod)); /* Write the mailbox and tell the chip about the waiting rx_bd's. */ REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX, prod); REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ, prod_bseq); DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Free memory and clear the RX data structures. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_free_rx_chain(struct bce_softc *sc) { int i; DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD); /* Free any mbufs still in the RX mbuf chain. */ for (i = 0; i < MAX_RX_BD_AVAIL; i++) { if (sc->rx_mbuf_ptr[i] != NULL) { if (sc->rx_mbuf_map[i] != NULL) bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[i], BUS_DMASYNC_POSTREAD); m_freem(sc->rx_mbuf_ptr[i]); sc->rx_mbuf_ptr[i] = NULL; DBRUN(sc->debug_rx_mbuf_alloc--); } } /* Clear each RX chain page. */ for (i = 0; i < sc->rx_pages; i++) if (sc->rx_bd_chain[i] != NULL) bzero((char *)sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ); sc->free_rx_bd = sc->max_rx_bd; /* Check if we lost any mbufs in the process. */ DBRUNIF((sc->debug_rx_mbuf_alloc), BCE_PRINTF("%s(): Memory leak! 
Lost %d mbufs from rx chain!\n", __FUNCTION__, sc->debug_rx_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
}

/****************************************************************************/
/* Allocate memory and initialize the page data structures. */
/* Assumes that bce_init_rx_chain() has not already been called. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_init_pg_chain(struct bce_softc *sc)
{
	struct rx_bd *pgbd;
	int i, rc = 0;
	u32 val;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Initialize the page producer and consumer indices. */
	sc->pg_prod = 0;
	sc->pg_cons = 0;
	sc->free_pg_bd = USABLE_PG_BD_ALLOC;
	sc->max_pg_bd = USABLE_PG_BD_ALLOC;
	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
	DBRUN(sc->pg_empty_count = 0);

	/* Initialize the page next pointer chain entries. */
	for (i = 0; i < sc->pg_pages; i++) {
		int j;

		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (sc->pg_pages - 1))
			j = 0;
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		pgbd->rx_bd_haddr_hi =
		    htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
		pgbd->rx_bd_haddr_lo =
		    htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
	}

	/* Setup the MQ BIN mapping for host_pg_bidx. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
		REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);

	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);

	/* Configure the rx_bd and page chain mbuf cluster size. */
	val = (sc->rx_bd_mbuf_data_len << 16) | MCLBYTES;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);

	/* Configure the context reserved for jumbo support. */
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
	    BCE_L2CTX_RX_RBDC_JUMBO_KEY);

	/* Point the hardware to the first page in the page chain. */
	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);

	/* Fill up the page chain. */
	bce_fill_pg_chain(sc);

	for (i = 0; i < sc->pg_pages; i++) {
		bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	DBRUNMSG(BCE_EXTREME_RECV,
	    bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC));
	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);
	return(rc);
}

/****************************************************************************/
/* Add mbufs to the page chain until it's full or an mbuf allocation error */
/* occurs. */
/* */
/* Returns: */
/* Nothing */
/****************************************************************************/
static void
bce_fill_pg_chain(struct bce_softc *sc)
{
	u16 prod, prod_idx;

	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);

	/* Get the page chain producer index. */
	prod = sc->pg_prod;

	/* Keep filling the page chain until it's full. */
	while (sc->free_pg_bd > 0) {
		prod_idx = PG_CHAIN_IDX(prod);
		if (bce_get_pg_buf(sc, prod, prod_idx)) {
			/* Bail out if we can't add an mbuf to the chain. */
			break;
		}
		prod = NEXT_PG_BD(prod);
	}

	/* Save the page chain producer index.
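	 * Unlike the RX chain, the page chain does not maintain a byte
	 * sequence (bseq), so only the producer index mailbox is written
	 * below.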
	 */
	sc->pg_prod = prod;

	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
	    BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
	    __FUNCTION__, prod));

	/*
	 * Write the mailbox and tell the chip about
	 * the new rx_bd's in the page chain.
	 */
	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX,
	    prod);

	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
	    BCE_VERBOSE_CTX);
}

/****************************************************************************/
/* Free memory and clear the page data structures. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_free_pg_chain(struct bce_softc *sc)
{
	int i;

	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);

	/* Free any mbufs still in the mbuf page chain. */
	for (i = 0; i < MAX_PG_BD_AVAIL; i++) {
		if (sc->pg_mbuf_ptr[i] != NULL) {
			if (sc->pg_mbuf_map[i] != NULL)
				bus_dmamap_sync(sc->pg_mbuf_tag,
				    sc->pg_mbuf_map[i], BUS_DMASYNC_POSTREAD);
			m_freem(sc->pg_mbuf_ptr[i]);
			sc->pg_mbuf_ptr[i] = NULL;
			DBRUN(sc->debug_pg_mbuf_alloc--);
		}
	}

	/* Clear each page chain page. */
	for (i = 0; i < sc->pg_pages; i++)
		bzero((char *)sc->pg_bd_chain[i], BCE_PG_CHAIN_PAGE_SZ);

	sc->free_pg_bd = sc->max_pg_bd;

	/* Check if we lost any mbufs in the process. */
	DBRUNIF((sc->debug_pg_mbuf_alloc),
	    BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from page chain!\n",
	    __FUNCTION__, sc->debug_pg_mbuf_alloc));

	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
}

static u32
bce_get_rphy_link(struct bce_softc *sc)
{
	u32 advertise, link;
	int fdpx;

	advertise = 0;
	fdpx = 0;
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) != 0)
		link = bce_shmem_rd(sc, BCE_RPHY_SERDES_LINK);
	else
		link = bce_shmem_rd(sc, BCE_RPHY_COPPER_LINK);
	if (link & BCE_NETLINK_ANEG_ENB)
		advertise |= BCE_NETLINK_ANEG_ENB;
	if (link & BCE_NETLINK_SPEED_10HALF)
		advertise |= BCE_NETLINK_SPEED_10HALF;
	if (link & BCE_NETLINK_SPEED_10FULL) {
		advertise |= BCE_NETLINK_SPEED_10FULL;
		fdpx++;
	}
	if (link & BCE_NETLINK_SPEED_100HALF)
		advertise |= BCE_NETLINK_SPEED_100HALF;
	if (link & BCE_NETLINK_SPEED_100FULL) {
		advertise |= BCE_NETLINK_SPEED_100FULL;
		fdpx++;
	}
	if (link & BCE_NETLINK_SPEED_1000HALF)
		advertise |= BCE_NETLINK_SPEED_1000HALF;
	if (link & BCE_NETLINK_SPEED_1000FULL) {
		advertise |= BCE_NETLINK_SPEED_1000FULL;
		fdpx++;
	}
	if (link & BCE_NETLINK_SPEED_2500HALF)
		advertise |= BCE_NETLINK_SPEED_2500HALF;
	if (link & BCE_NETLINK_SPEED_2500FULL) {
		advertise |= BCE_NETLINK_SPEED_2500FULL;
		fdpx++;
	}
	if (fdpx)
		advertise |= BCE_NETLINK_FC_PAUSE_SYM |
		    BCE_NETLINK_FC_PAUSE_ASYM;
	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0)
		advertise |= BCE_NETLINK_PHY_APP_REMOTE |
		    BCE_NETLINK_ETH_AT_WIRESPEED;

	return (advertise);
}

/****************************************************************************/
/* Set media options. */
/* */
/* Returns: */
/* 0 for success, positive value for failure. */
/****************************************************************************/
static int
bce_ifmedia_upd(if_t ifp)
{
	struct bce_softc *sc = if_getsoftc(ifp);
	int error;

	DBENTER(BCE_VERBOSE);

	BCE_LOCK(sc);
	error = bce_ifmedia_upd_locked(ifp);
	BCE_UNLOCK(sc);

	DBEXIT(BCE_VERBOSE);

	return (error);
}

/****************************************************************************/
/* Set media options. */
/* */
/* Returns: */
/* 0 for success, positive value for failure.
*/ /****************************************************************************/ static int bce_ifmedia_upd_locked(if_t ifp) { struct bce_softc *sc = if_getsoftc(ifp); struct mii_data *mii; struct mii_softc *miisc; struct ifmedia *ifm; u32 link; int error, fdx; DBENTER(BCE_VERBOSE_PHY); error = 0; BCE_LOCK_ASSERT(sc); sc->bce_link_up = FALSE; if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { ifm = &sc->bce_ifmedia; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); link = 0; fdx = IFM_OPTIONS(ifm->ifm_media) & IFM_FDX; switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: /* * Check advertised link of remote PHY by reading * BCE_RPHY_SERDES_LINK or BCE_RPHY_COPPER_LINK. * Always use the same link type of remote PHY. */ link = bce_get_rphy_link(sc); break; case IFM_2500_SX: if ((sc->bce_phy_flags & (BCE_PHY_REMOTE_PORT_FIBER_FLAG | BCE_PHY_2_5G_CAPABLE_FLAG)) == 0) return (EINVAL); /* * XXX * Have to enable forced 2.5Gbps configuration. */ if (fdx != 0) link |= BCE_NETLINK_SPEED_2500FULL; else link |= BCE_NETLINK_SPEED_2500HALF; break; case IFM_1000_SX: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) return (EINVAL); /* * XXX * Have to disable 2.5Gbps configuration. */ if (fdx != 0) link = BCE_NETLINK_SPEED_1000FULL; else link = BCE_NETLINK_SPEED_1000HALF; break; case IFM_1000_T: if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) return (EINVAL); if (fdx != 0) link = BCE_NETLINK_SPEED_1000FULL; else link = BCE_NETLINK_SPEED_1000HALF; break; case IFM_100_TX: if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) return (EINVAL); if (fdx != 0) link = BCE_NETLINK_SPEED_100FULL; else link = BCE_NETLINK_SPEED_100HALF; break; case IFM_10_T: if (sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) return (EINVAL); if (fdx != 0) link = BCE_NETLINK_SPEED_10FULL; else link = BCE_NETLINK_SPEED_10HALF; break; default: return (EINVAL); } if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) { /* * XXX * Advertise pause capability for full-duplex media. */ if (fdx != 0) link |= BCE_NETLINK_FC_PAUSE_SYM | BCE_NETLINK_FC_PAUSE_ASYM; if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) link |= BCE_NETLINK_PHY_APP_REMOTE | BCE_NETLINK_ETH_AT_WIRESPEED; } bce_shmem_wr(sc, BCE_MB_ARGS_0, link); error = bce_fw_sync(sc, BCE_DRV_MSG_CODE_CMD_SET_LINK); } else { mii = device_get_softc(sc->bce_miibus); /* Make sure the MII bus has been enumerated. */ if (mii) { LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); } } DBEXIT(BCE_VERBOSE_PHY); return (error); } static void bce_ifmedia_sts_rphy(struct bce_softc *sc, struct ifmediareq *ifmr) { if_t ifp; u32 link; ifp = sc->bce_ifp; BCE_LOCK_ASSERT(sc); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; link = bce_shmem_rd(sc, BCE_LINK_STATUS); /* XXX Handle heart beat status? 
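	   The link status word read from shared memory above encodes the
	   link-up bit, a speed/duplex field, and the RX/TX flow control
	   bits that are decoded below.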
*/ if ((link & BCE_LINK_STATUS_LINK_UP) != 0) ifmr->ifm_status |= IFM_ACTIVE; else { ifmr->ifm_active |= IFM_NONE; if_setbaudrate(ifp, 0); return; } switch (link & BCE_LINK_STATUS_SPEED_MASK) { case BCE_LINK_STATUS_10HALF: ifmr->ifm_active |= IFM_10_T | IFM_HDX; if_setbaudrate(ifp, IF_Mbps(10UL)); break; case BCE_LINK_STATUS_10FULL: ifmr->ifm_active |= IFM_10_T | IFM_FDX; if_setbaudrate(ifp, IF_Mbps(10UL)); break; case BCE_LINK_STATUS_100HALF: ifmr->ifm_active |= IFM_100_TX | IFM_HDX; if_setbaudrate(ifp, IF_Mbps(100UL)); break; case BCE_LINK_STATUS_100FULL: ifmr->ifm_active |= IFM_100_TX | IFM_FDX; if_setbaudrate(ifp, IF_Mbps(100UL)); break; case BCE_LINK_STATUS_1000HALF: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) ifmr->ifm_active |= IFM_1000_T | IFM_HDX; else ifmr->ifm_active |= IFM_1000_SX | IFM_HDX; if_setbaudrate(ifp, IF_Mbps(1000UL)); break; case BCE_LINK_STATUS_1000FULL: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) ifmr->ifm_active |= IFM_1000_T | IFM_FDX; else ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; if_setbaudrate(ifp, IF_Mbps(1000UL)); break; case BCE_LINK_STATUS_2500HALF: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) { ifmr->ifm_active |= IFM_NONE; return; } else ifmr->ifm_active |= IFM_2500_SX | IFM_HDX; if_setbaudrate(ifp, IF_Mbps(2500UL)); break; case BCE_LINK_STATUS_2500FULL: if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) == 0) { ifmr->ifm_active |= IFM_NONE; return; } else ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; if_setbaudrate(ifp, IF_Mbps(2500UL)); break; default: ifmr->ifm_active |= IFM_NONE; return; } if ((link & BCE_LINK_STATUS_RX_FC_ENABLED) != 0) ifmr->ifm_active |= IFM_ETH_RXPAUSE; if ((link & BCE_LINK_STATUS_TX_FC_ENABLED) != 0) ifmr->ifm_active |= IFM_ETH_TXPAUSE; } /****************************************************************************/ /* Reports current media status. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct bce_softc *sc = if_getsoftc(ifp); struct mii_data *mii; DBENTER(BCE_VERBOSE_PHY); BCE_LOCK(sc); if ((if_getflags(ifp) & IFF_UP) == 0) { BCE_UNLOCK(sc); return; } if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) bce_ifmedia_sts_rphy(sc, ifmr); else { mii = device_get_softc(sc->bce_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE_PHY); } /****************************************************************************/ /* Handles PHY generated interrupt events. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_phy_intr(struct bce_softc *sc) { u32 new_link_state, old_link_state; DBENTER(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR); DBRUN(sc->phy_interrupts++); new_link_state = sc->status_block->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE; old_link_state = sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE; /* Handle any changes if the link state has changed. */ if (new_link_state != old_link_state) { /* Update the status_attn_bits_ack field. 
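	   Writing STATUS_ATTN_BITS_LINK_STATE to the status bit set/clear
	   command registers brings the acknowledged state in line with the
	   new attention state, which acknowledges the link change to the
	   controller.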
*/ if (new_link_state) { REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD, STATUS_ATTN_BITS_LINK_STATE); DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now UP.\n", __FUNCTION__); } else { REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD, STATUS_ATTN_BITS_LINK_STATE); DBPRINT(sc, BCE_INFO_PHY, "%s(): Link is now DOWN.\n", __FUNCTION__); } if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { if (new_link_state) { if (bootverbose) if_printf(sc->bce_ifp, "link UP\n"); if_link_state_change(sc->bce_ifp, LINK_STATE_UP); } else { if (bootverbose) if_printf(sc->bce_ifp, "link DOWN\n"); if_link_state_change(sc->bce_ifp, LINK_STATE_DOWN); } } /* * Assume link is down and allow * tick routine to update the state * based on the actual media state. */ sc->bce_link_up = FALSE; callout_stop(&sc->bce_tick_callout); bce_tick(sc); } /* Acknowledge the link change interrupt. */ REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE); DBEXIT(BCE_VERBOSE_PHY | BCE_VERBOSE_INTR); } /****************************************************************************/ /* Reads the receive consumer value from the status block (skipping over */ /* chain page pointer if necessary). */ /* */ /* Returns: */ /* hw_cons */ /****************************************************************************/ static inline u16 bce_get_hw_rx_cons(struct bce_softc *sc) { u16 hw_cons; rmb(); hw_cons = sc->status_block->status_rx_quick_consumer_index0; if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) hw_cons++; return hw_cons; } /****************************************************************************/ /* Handles received frame interrupt events. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_rx_intr(struct bce_softc *sc) { if_t ifp = sc->bce_ifp; struct l2_fhdr *l2fhdr; struct ether_vlan_header *vh; unsigned int pkt_len; u16 sw_rx_cons, sw_rx_cons_idx, hw_rx_cons; u32 status; unsigned int rem_len; u16 sw_pg_cons, sw_pg_cons_idx; DBENTER(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); DBRUN(sc->interrupts_rx++); DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): rx_prod = 0x%04X, " "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); /* Prepare the RX chain pages to be accessed by the host CPU. */ for (int i = 0; i < sc->rx_pages; i++) bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], BUS_DMASYNC_POSTREAD); /* Prepare the page chain pages to be accessed by the host CPU. */ if (bce_hdr_split == TRUE) { for (int i = 0; i < sc->pg_pages; i++) bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], BUS_DMASYNC_POSTREAD); } /* Get the hardware's view of the RX consumer index. */ hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); /* Get working copies of the driver's view of the consumer indices. */ sw_rx_cons = sc->rx_cons; sw_pg_cons = sc->pg_cons; /* Update some debug statistics counters */ DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark), sc->rx_low_watermark = sc->free_rx_bd); DBRUNIF((sc->free_rx_bd == sc->max_rx_bd), sc->rx_empty_count++); /* Scan through the receive chain as long as there is work to do */ /* ToDo: Consider setting a limit on the number of packets processed. */ rmb(); while (sw_rx_cons != hw_rx_cons) { struct mbuf *m0; /* Convert the producer/consumer indices to an actual rx_bd index. */ sw_rx_cons_idx = RX_CHAIN_IDX(sw_rx_cons); /* Unmap the mbuf from DMA space. 
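Sync the map for the CPU (POSTREAD) before unloading it so the DMA'd packet data is visible to the host.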
*/ bus_dmamap_sync(sc->rx_mbuf_tag, sc->rx_mbuf_map[sw_rx_cons_idx], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[sw_rx_cons_idx]); /* Remove the mbuf from the RX chain. */ m0 = sc->rx_mbuf_ptr[sw_rx_cons_idx]; sc->rx_mbuf_ptr[sw_rx_cons_idx] = NULL; DBRUN(sc->debug_rx_mbuf_alloc--); sc->free_rx_bd++; /* * Frames received on the NetXteme II are prepended * with an l2_fhdr structure which provides status * information about the received frame (including * VLAN tags and checksum info). The frames are * also automatically adjusted to word align the IP * header (i.e. two null bytes are inserted before * the Ethernet header). As a result the data * DMA'd by the controller into the mbuf looks * like this: * * +---------+-----+---------------------+-----+ * | l2_fhdr | pad | packet data | FCS | * +---------+-----+---------------------+-----+ * * The l2_fhdr needs to be checked and skipped and * the FCS needs to be stripped before sending the * packet up the stack. */ l2fhdr = mtod(m0, struct l2_fhdr *); /* Get the packet data + FCS length and the status. */ pkt_len = l2fhdr->l2_fhdr_pkt_len; status = l2fhdr->l2_fhdr_status; /* * Skip over the l2_fhdr and pad, resulting in the * following data in the mbuf: * +---------------------+-----+ * | packet data | FCS | * +---------------------+-----+ */ m_adj(m0, sizeof(struct l2_fhdr) + ETHER_ALIGN); /* * When split header mode is used, an ethernet frame * may be split across the receive chain and the * page chain. If that occurs an mbuf cluster must be * reassembled from the individual mbuf pieces. */ if (bce_hdr_split == TRUE) { /* * Check whether the received frame fits in a single * mbuf or not (i.e. packet data + FCS <= * sc->rx_bd_mbuf_data_len bytes). */ if (pkt_len > m0->m_len) { /* * The received frame is larger than a single mbuf. * If the frame was a TCP frame then only the TCP * header is placed in the mbuf, the remaining * payload (including FCS) is placed in the page * chain, the SPLIT flag is set, and the header * length is placed in the IP checksum field. * If the frame is not a TCP frame then the mbuf * is filled and the remaining bytes are placed * in the page chain. */ DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a large " "packet.\n", __FUNCTION__); DBRUN(sc->split_header_frames_rcvd++); /* * When the page chain is enabled and the TCP * header has been split from the TCP payload, * the ip_xsum structure will reflect the length * of the TCP header, not the IP checksum. Set * the packet length of the mbuf accordingly. */ if (status & L2_FHDR_STATUS_SPLIT) { m0->m_len = l2fhdr->l2_fhdr_ip_xsum; DBRUN(sc->split_header_tcp_frames_rcvd++); } rem_len = pkt_len - m0->m_len; /* Pull mbufs off the page chain for any remaining data. */ while (rem_len > 0) { struct mbuf *m_pg; sw_pg_cons_idx = PG_CHAIN_IDX(sw_pg_cons); /* Remove the mbuf from the page chain. */ m_pg = sc->pg_mbuf_ptr[sw_pg_cons_idx]; sc->pg_mbuf_ptr[sw_pg_cons_idx] = NULL; DBRUN(sc->debug_pg_mbuf_alloc--); sc->free_pg_bd++; /* Unmap the page chain mbuf from DMA space. */ bus_dmamap_sync(sc->pg_mbuf_tag, sc->pg_mbuf_map[sw_pg_cons_idx], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->pg_mbuf_tag, sc->pg_mbuf_map[sw_pg_cons_idx]); /* Adjust the mbuf length. */ if (rem_len < m_pg->m_len) { /* The mbuf chain is complete. */ m_pg->m_len = rem_len; rem_len = 0; } else { /* More packet data is waiting. */ rem_len -= m_pg->m_len; } /* Concatenate the mbuf cluster to the mbuf. */ m_cat(m0, m_pg); sw_pg_cons = NEXT_PG_BD(sw_pg_cons); } /* Set the total packet length. 
*/ m0->m_pkthdr.len = pkt_len; } else { /* * The received packet is small and fits in a * single mbuf (i.e. the l2_fhdr + pad + packet + * FCS <= MHLEN). In other words, the packet is * 154 bytes or less in size. */ DBPRINT(sc, BCE_INFO_RECV, "%s(): Found a small " "packet.\n", __FUNCTION__); /* Set the total packet length. */ m0->m_pkthdr.len = m0->m_len = pkt_len; } } else /* Set the total packet length. */ m0->m_pkthdr.len = m0->m_len = pkt_len; /* Remove the trailing Ethernet FCS. */ m_adj(m0, -ETHER_CRC_LEN); /* Check that the resulting mbuf chain is valid. */ DBRUN(m_sanity(m0, FALSE)); DBRUNIF(((m0->m_len < ETHER_HDR_LEN) | (m0->m_pkthdr.len > BCE_MAX_JUMBO_ETHER_MTU_VLAN)), BCE_PRINTF("Invalid Ethernet frame size!\n"); m_print(m0, 128)); DBRUNIF(DB_RANDOMTRUE(l2fhdr_error_sim_control), sc->l2fhdr_error_sim_count++; status = status | L2_FHDR_ERRORS_PHY_DECODE); /* Check the received frame for errors. */ if (status & (L2_FHDR_ERRORS_BAD_CRC | L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT | L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME)) { /* Log the error and release the mbuf. */ sc->l2fhdr_error_count++; m_freem(m0); m0 = NULL; goto bce_rx_intr_next_rx; } /* Send the packet to the appropriate interface. */ m0->m_pkthdr.rcvif = ifp; /* Assume no hardware checksum. */ m0->m_pkthdr.csum_flags = 0; /* Validate the checksum if offload enabled. */ if (if_getcapenable(ifp) & IFCAP_RXCSUM) { /* Check for an IP datagram. */ if (!(status & L2_FHDR_STATUS_SPLIT) && (status & L2_FHDR_STATUS_IP_DATAGRAM)) { m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; DBRUN(sc->csum_offload_ip++); /* Check if the IP checksum is valid. */ if ((l2fhdr->l2_fhdr_ip_xsum ^ 0xffff) == 0) m0->m_pkthdr.csum_flags |= CSUM_IP_VALID; } /* Check for a valid TCP/UDP frame. */ if (status & (L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_UDP_DATAGRAM)) { /* Check for a good TCP/UDP checksum. */ if ((status & (L2_FHDR_ERRORS_TCP_XSUM | L2_FHDR_ERRORS_UDP_XSUM)) == 0) { DBRUN(sc->csum_offload_tcp_udp++); m0->m_pkthdr.csum_data = l2fhdr->l2_fhdr_tcp_udp_xsum; m0->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); } } } /* Attach the VLAN tag. */ if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && !(sc->rx_mode & BCE_EMAC_RX_MODE_KEEP_VLAN_TAG)) { DBRUN(sc->vlan_tagged_frames_rcvd++); if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) { DBRUN(sc->vlan_tagged_frames_stripped++); m0->m_pkthdr.ether_vtag = l2fhdr->l2_fhdr_vlan_tag; m0->m_flags |= M_VLANTAG; } else { /* * bce(4) controllers can't disable VLAN * tag stripping if management firmware * (ASF/IPMI/UMP) is running. So we always * strip VLAN tag and manually reconstruct * the VLAN frame by appending stripped * VLAN tag in driver if VLAN tag stripping * was disabled. * * TODO: LLC SNAP handling. */ bcopy(mtod(m0, uint8_t *), mtod(m0, uint8_t *) - ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN * 2); m0->m_data -= ETHER_VLAN_ENCAP_LEN; vh = mtod(m0, struct ether_vlan_header *); vh->evl_encap_proto = htons(ETHERTYPE_VLAN); vh->evl_tag = htons(l2fhdr->l2_fhdr_vlan_tag); m0->m_pkthdr.len += ETHER_VLAN_ENCAP_LEN; m0->m_len += ETHER_VLAN_ENCAP_LEN; } } /* Increment received packet statistics. */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); bce_rx_intr_next_rx: sw_rx_cons = NEXT_RX_BD(sw_rx_cons); /* If we have a packet, pass it up the stack */ if (m0) { /* Make sure we don't lose our place when we release the lock. */ sc->rx_cons = sw_rx_cons; sc->pg_cons = sw_pg_cons; BCE_UNLOCK(sc); if_input(ifp, m0); BCE_LOCK(sc); /* Recover our place. 
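Reload the working copies of the consumer indices from the softc now that the lock has been reacquired around if_input().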
*/ sw_rx_cons = sc->rx_cons; sw_pg_cons = sc->pg_cons; } /* Refresh hw_cons to see if there's new work */ if (sw_rx_cons == hw_rx_cons) hw_rx_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc); } /* No new packets. Refill the page chain. */ if (bce_hdr_split == TRUE) { sc->pg_cons = sw_pg_cons; bce_fill_pg_chain(sc); } /* No new packets. Refill the RX chain. */ sc->rx_cons = sw_rx_cons; bce_fill_rx_chain(sc); /* Prepare the page chain pages to be accessed by the NIC. */ for (int i = 0; i < sc->rx_pages; i++) bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i], BUS_DMASYNC_PREWRITE); if (bce_hdr_split == TRUE) { for (int i = 0; i < sc->pg_pages; i++) bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i], BUS_DMASYNC_PREWRITE); } DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): rx_prod = 0x%04X, " "rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq); DBEXIT(BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); } /****************************************************************************/ /* Reads the transmit consumer value from the status block (skipping over */ /* chain page pointer if necessary). */ /* */ /* Returns: */ /* hw_cons */ /****************************************************************************/ static inline u16 bce_get_hw_tx_cons(struct bce_softc *sc) { u16 hw_cons; mb(); hw_cons = sc->status_block->status_tx_quick_consumer_index0; if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) hw_cons++; return hw_cons; } /****************************************************************************/ /* Handles transmit completion interrupt events. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_tx_intr(struct bce_softc *sc) { if_t ifp = sc->bce_ifp; u16 hw_tx_cons, sw_tx_cons, sw_tx_chain_cons; DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR); DBRUN(sc->interrupts_tx++); DBPRINT(sc, BCE_EXTREME_SEND, "%s(enter): tx_prod = 0x%04X, " "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq); BCE_LOCK_ASSERT(sc); /* Get the hardware's view of the TX consumer index. */ hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); sw_tx_cons = sc->tx_cons; /* Prevent speculative reads of the status block. */ bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, BUS_SPACE_BARRIER_READ); /* Cycle through any completed TX chain page entries. */ while (sw_tx_cons != hw_tx_cons) { #ifdef BCE_DEBUG struct tx_bd *txbd = NULL; #endif sw_tx_chain_cons = TX_CHAIN_IDX(sw_tx_cons); DBPRINT(sc, BCE_INFO_SEND, "%s(): hw_tx_cons = 0x%04X, sw_tx_cons = 0x%04X, " "sw_tx_chain_cons = 0x%04X\n", __FUNCTION__, hw_tx_cons, sw_tx_cons, sw_tx_chain_cons); DBRUNIF((sw_tx_chain_cons > MAX_TX_BD_ALLOC), BCE_PRINTF("%s(%d): TX chain consumer out of range! " " 0x%04X > 0x%04X\n", __FILE__, __LINE__, sw_tx_chain_cons, (int) MAX_TX_BD_ALLOC); bce_breakpoint(sc)); DBRUN(txbd = &sc->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)] [TX_IDX(sw_tx_chain_cons)]); DBRUNIF((txbd == NULL), BCE_PRINTF("%s(%d): Unexpected NULL tx_bd[0x%04X]!\n", __FILE__, __LINE__, sw_tx_chain_cons); bce_breakpoint(sc)); DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): ", __FUNCTION__); bce_dump_txbd(sc, sw_tx_chain_cons, txbd)); /* * Free the associated mbuf. Remember * that only the last tx_bd of a packet * has an mbuf pointer and DMA map. */ if (sc->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) { /* Validate that this is the last tx_bd. 
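Only the final descriptor of a frame carries an mbuf pointer, so its END flag should already be set; the debug check below flags a violation.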
*/ DBRUNIF((!(txbd->tx_bd_flags & TX_BD_FLAGS_END)), BCE_PRINTF("%s(%d): tx_bd END flag not set but " "txmbuf == NULL!\n", __FILE__, __LINE__); bce_breakpoint(sc)); DBRUNMSG(BCE_INFO_SEND, BCE_PRINTF("%s(): Unloading map/freeing mbuf " "from tx_bd[0x%04X]\n", __FUNCTION__, sw_tx_chain_cons)); /* Unmap the mbuf. */ bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[sw_tx_chain_cons]); /* Free the mbuf. */ m_freem(sc->tx_mbuf_ptr[sw_tx_chain_cons]); sc->tx_mbuf_ptr[sw_tx_chain_cons] = NULL; DBRUN(sc->debug_tx_mbuf_alloc--); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } sc->used_tx_bd--; sw_tx_cons = NEXT_TX_BD(sw_tx_cons); /* Refresh hw_cons to see if there's new work. */ hw_tx_cons = sc->hw_tx_cons = bce_get_hw_tx_cons(sc); /* Prevent speculative reads of the status block. */ bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, BUS_SPACE_BARRIER_READ); } /* Clear the TX timeout timer. */ sc->watchdog_timer = 0; /* Clear the tx hardware queue full flag. */ if (sc->used_tx_bd < sc->max_tx_bd) { DBRUNIF((if_getdrvflags(ifp) & IFF_DRV_OACTIVE), DBPRINT(sc, BCE_INFO_SEND, "%s(): Open TX chain! %d/%d (used/total)\n", __FUNCTION__, sc->used_tx_bd, sc->max_tx_bd)); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } sc->tx_cons = sw_tx_cons; DBPRINT(sc, BCE_EXTREME_SEND, "%s(exit): tx_prod = 0x%04X, " "tx_cons = 0x%04X, tx_prod_bseq = 0x%08X\n", __FUNCTION__, sc->tx_prod, sc->tx_cons, sc->tx_prod_bseq); DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_INTR); } /****************************************************************************/ /* Disables interrupt generation. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_disable_intr(struct bce_softc *sc) { DBENTER(BCE_VERBOSE_INTR); REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT); REG_RD(sc, BCE_PCICFG_INT_ACK_CMD); DBEXIT(BCE_VERBOSE_INTR); } /****************************************************************************/ /* Enables interrupt generation. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_enable_intr(struct bce_softc *sc, int coal_now) { DBENTER(BCE_VERBOSE_INTR); REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx); REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx); /* Force an immediate interrupt (whether there is new data or not). */ if (coal_now) REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW); DBEXIT(BCE_VERBOSE_INTR); } /****************************************************************************/ /* Handles controller initialization. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_init_locked(struct bce_softc *sc) { if_t ifp; u32 ether_mtu = 0; DBENTER(BCE_VERBOSE_RESET); BCE_LOCK_ASSERT(sc); ifp = sc->bce_ifp; /* Check if the driver is still running and bail out if it is. 
*/ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) goto bce_init_locked_exit; bce_stop(sc); if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) { BCE_PRINTF("%s(%d): Controller reset failed!\n", __FILE__, __LINE__); goto bce_init_locked_exit; } if (bce_chipinit(sc)) { BCE_PRINTF("%s(%d): Controller initialization failed!\n", __FILE__, __LINE__); goto bce_init_locked_exit; } if (bce_blockinit(sc)) { BCE_PRINTF("%s(%d): Block initialization failed!\n", __FILE__, __LINE__); goto bce_init_locked_exit; } /* Load our MAC address. */ bcopy(if_getlladdr(sc->bce_ifp), sc->eaddr, ETHER_ADDR_LEN); bce_set_mac_addr(sc); if (bce_hdr_split == FALSE) bce_get_rx_buffer_sizes(sc, if_getmtu(ifp)); /* * Calculate and program the hardware Ethernet MTU * size. Be generous on the receive if we have room * and allowed by the user. */ if (bce_strict_rx_mtu == TRUE) ether_mtu = if_getmtu(ifp); else { if (bce_hdr_split == TRUE) { if (if_getmtu(ifp) <= sc->rx_bd_mbuf_data_len + MCLBYTES) ether_mtu = sc->rx_bd_mbuf_data_len + MCLBYTES; else ether_mtu = if_getmtu(ifp); } else { if (if_getmtu(ifp) <= sc->rx_bd_mbuf_data_len) ether_mtu = sc->rx_bd_mbuf_data_len; else ether_mtu = if_getmtu(ifp); } } ether_mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; DBPRINT(sc, BCE_INFO_MISC, "%s(): setting h/w mtu = %d\n", __FUNCTION__, ether_mtu); /* Program the mtu, enabling jumbo frame support if necessary. */ if (ether_mtu > (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)) REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) | BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA); else REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu); /* Program appropriate promiscuous/multicast filtering. */ bce_set_rx_mode(sc); if (bce_hdr_split == TRUE) { /* Init page buffer descriptor chain. */ bce_init_pg_chain(sc); } /* Init RX buffer descriptor chain. */ bce_init_rx_chain(sc); /* Init TX buffer descriptor chain. */ bce_init_tx_chain(sc); /* Enable host interrupts. */ bce_enable_intr(sc, 1); bce_ifmedia_upd_locked(ifp); /* Let the OS know the driver is up and running. */ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc); bce_init_locked_exit: DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Initialize the controller just enough so that any management firmware */ /* running on the device will continue to operate correctly. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_mgmt_init_locked(struct bce_softc *sc) { if_t ifp; DBENTER(BCE_VERBOSE_RESET); BCE_LOCK_ASSERT(sc); /* Bail out if management firmware is not running. */ if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG)) { DBPRINT(sc, BCE_VERBOSE_SPECIAL, "No management firmware running...\n"); goto bce_mgmt_init_locked_exit; } ifp = sc->bce_ifp; /* Enable all critical blocks in the MAC. */ REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT); REG_RD(sc, BCE_MISC_ENABLE_SET_BITS); DELAY(20); bce_ifmedia_upd_locked(ifp); bce_mgmt_init_locked_exit: DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Handles controller initialization when called from an unlocked routine. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_init(void *xsc) { struct bce_softc *sc = xsc; DBENTER(BCE_VERBOSE_RESET); BCE_LOCK(sc); bce_init_locked(sc); BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE_RESET); } /****************************************************************************/ /* Modifies an mbuf for TSO on the hardware. */ /* */ /* Returns: */ /* Pointer to a modified mbuf. */ /****************************************************************************/ static struct mbuf * bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags) { struct mbuf *m; struct ether_header *eh; struct ip *ip; struct tcphdr *th; u16 etype; int hdr_len __unused, ip_len __unused, ip_hlen = 0, tcp_hlen = 0; DBRUN(sc->tso_frames_requested++); ip_len = 0; /* Controller may modify mbuf chains. */ if (M_WRITABLE(*m_head) == 0) { m = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); if (m == NULL) { sc->mbuf_alloc_failed_count++; *m_head = NULL; return (NULL); } *m_head = m; } /* * For TSO the controller needs two pieces of info, * the MSS and the IP+TCP options length. */ m = m_pullup(*m_head, sizeof(struct ether_header) + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (NULL); } eh = mtod(m, struct ether_header *); etype = ntohs(eh->ether_type); /* Check for supported TSO Ethernet types (only IPv4 for now) */ switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); /* TSO only supported for TCP protocol. */ if (ip->ip_p != IPPROTO_TCP) { BCE_PRINTF("%s(%d): TSO enabled for non-TCP frame!.\n", __FILE__, __LINE__); m_freem(*m_head); *m_head = NULL; return (NULL); } /* Get IP header length in bytes (min 20) */ ip_hlen = ip->ip_hl << 2; m = m_pullup(*m_head, sizeof(struct ether_header) + ip_hlen + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (NULL); } /* Get the TCP header length in bytes (min 20) */ ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); th = (struct tcphdr *)((caddr_t)ip + ip_hlen); tcp_hlen = (th->th_off << 2); /* Make sure all IP/TCP options live in the same buffer. */ m = m_pullup(*m_head, sizeof(struct ether_header)+ ip_hlen + tcp_hlen); if (m == NULL) { *m_head = NULL; return (NULL); } /* Clear IP header length and checksum, will be calc'd by h/w. */ ip = (struct ip *)(m->m_data + sizeof(struct ether_header)); ip_len = ip->ip_len; ip->ip_len = 0; ip->ip_sum = 0; break; case ETHERTYPE_IPV6: BCE_PRINTF("%s(%d): TSO over IPv6 not supported!.\n", __FILE__, __LINE__); m_freem(*m_head); *m_head = NULL; return (NULL); /* NOT REACHED */ default: BCE_PRINTF("%s(%d): TSO enabled for unsupported protocol!.\n", __FILE__, __LINE__); m_freem(*m_head); *m_head = NULL; return (NULL); } hdr_len = sizeof(struct ether_header) + ip_hlen + tcp_hlen; DBPRINT(sc, BCE_EXTREME_SEND, "%s(): hdr_len = %d, e_hlen = %d, " "ip_hlen = %d, tcp_hlen = %d, ip_len = %d\n", __FUNCTION__, hdr_len, (int) sizeof(struct ether_header), ip_hlen, tcp_hlen, ip_len); /* Set the LSO flag in the TX BD */ *flags |= TX_BD_FLAGS_SW_LSO; /* Set the length of IP + TCP options (in 32 bit words) */ *flags |= (((ip_hlen + tcp_hlen - sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8); DBRUN(sc->tso_frames_completed++); return (*m_head); } /****************************************************************************/ /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */ /* memory visible to the controller. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
*/ /* Modified: */ /* m_head: May be set to NULL if MBUF is excessively fragmented. */ /****************************************************************************/ static int bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head) { bus_dma_segment_t segs[BCE_MAX_SEGMENTS]; bus_dmamap_t map; struct tx_bd *txbd = NULL; struct mbuf *m0; u16 prod, chain_prod, mss = 0, vlan_tag = 0, flags = 0; u32 prod_bseq; #ifdef BCE_DEBUG u16 debug_prod; #endif int i, error, nsegs, rc = 0; DBENTER(BCE_VERBOSE_SEND); /* Make sure we have room in the TX chain. */ if (sc->used_tx_bd >= sc->max_tx_bd) goto bce_tx_encap_exit; /* Transfer any checksum offload flags to the bd. */ m0 = *m_head; if (m0->m_pkthdr.csum_flags) { if (m0->m_pkthdr.csum_flags & CSUM_TSO) { m0 = bce_tso_setup(sc, m_head, &flags); if (m0 == NULL) { DBRUN(sc->tso_frames_failed++); goto bce_tx_encap_exit; } mss = htole16(m0->m_pkthdr.tso_segsz); } else { if (m0->m_pkthdr.csum_flags & CSUM_IP) flags |= TX_BD_FLAGS_IP_CKSUM; if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; } } /* Transfer any VLAN tags to the bd. */ if (m0->m_flags & M_VLANTAG) { flags |= TX_BD_FLAGS_VLAN_TAG; vlan_tag = m0->m_pkthdr.ether_vtag; } /* Map the mbuf into DMAable memory. */ prod = sc->tx_prod; chain_prod = TX_CHAIN_IDX(prod); map = sc->tx_mbuf_map[chain_prod]; /* Map the mbuf into our DMA address space. */ error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0, segs, &nsegs, BUS_DMA_NOWAIT); /* Check if the DMA mapping was successful */ if (error == EFBIG) { sc->mbuf_frag_count++; /* Try to defrag the mbuf. */ m0 = m_collapse(*m_head, M_NOWAIT, BCE_MAX_SEGMENTS); if (m0 == NULL) { /* Defrag was unsuccessful */ m_freem(*m_head); *m_head = NULL; sc->mbuf_alloc_failed_count++; rc = ENOBUFS; goto bce_tx_encap_exit; } /* Defrag was successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(sc->tx_mbuf_tag, map, m0, segs, &nsegs, BUS_DMA_NOWAIT); /* Still getting an error after a defrag. */ if (error == ENOMEM) { /* Insufficient DMA buffers available. */ sc->dma_map_addr_tx_failed_count++; rc = error; goto bce_tx_encap_exit; } else if (error != 0) { /* Release it and return an error. */ BCE_PRINTF("%s(%d): Unknown error mapping mbuf into " "TX chain!\n", __FILE__, __LINE__); m_freem(m0); *m_head = NULL; sc->dma_map_addr_tx_failed_count++; rc = ENOBUFS; goto bce_tx_encap_exit; } } else if (error == ENOMEM) { /* Insufficient DMA buffers available. */ sc->dma_map_addr_tx_failed_count++; rc = error; goto bce_tx_encap_exit; } else if (error != 0) { m_freem(m0); *m_head = NULL; sc->dma_map_addr_tx_failed_count++; rc = error; goto bce_tx_encap_exit; } /* Make sure there's room in the chain */ if (nsegs > (sc->max_tx_bd - sc->used_tx_bd)) { bus_dmamap_unload(sc->tx_mbuf_tag, map); rc = ENOBUFS; goto bce_tx_encap_exit; } /* prod points to an empty tx_bd at this point. */ prod_bseq = sc->tx_prod_bseq; #ifdef BCE_DEBUG debug_prod = chain_prod; #endif DBPRINT(sc, BCE_INFO_SEND, "%s(start): prod = 0x%04X, chain_prod = 0x%04X, " "prod_bseq = 0x%08X\n", __FUNCTION__, prod, chain_prod, prod_bseq); /* * Cycle through each mbuf segment that makes up * the outgoing frame, gathering the mapping info * for that segment and creating a tx_bd for * the mbuf. 
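* The first descriptor of the frame is tagged with TX_BD_FLAGS_START inside the loop and the last one with TX_BD_FLAGS_END once the loop completes.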
*/ for (i = 0; i < nsegs ; i++) { chain_prod = TX_CHAIN_IDX(prod); txbd= &sc->tx_bd_chain[TX_PAGE(chain_prod)] [TX_IDX(chain_prod)]; txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr)); txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr)); txbd->tx_bd_mss_nbytes = htole32(mss << 16) | htole16(segs[i].ds_len); txbd->tx_bd_vlan_tag = htole16(vlan_tag); txbd->tx_bd_flags = htole16(flags); prod_bseq += segs[i].ds_len; if (i == 0) txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START); prod = NEXT_TX_BD(prod); } /* Set the END flag on the last TX buffer descriptor. */ txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END); DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_chain(sc, debug_prod, nsegs)); /* * Ensure that the mbuf pointer for this transmission * is placed at the array index of the last * descriptor in this chain. This is done * because a single map is used for all * segments of the mbuf and we don't want to * unload the map before all of the segments * have been freed. */ sc->tx_mbuf_ptr[chain_prod] = m0; sc->used_tx_bd += nsegs; /* Update some debug statistic counters */ DBRUNIF((sc->used_tx_bd > sc->tx_hi_watermark), sc->tx_hi_watermark = sc->used_tx_bd); DBRUNIF((sc->used_tx_bd == sc->max_tx_bd), sc->tx_full_count++); DBRUNIF(sc->debug_tx_mbuf_alloc++); DBRUNMSG(BCE_EXTREME_SEND, bce_dump_tx_mbuf_chain(sc, chain_prod, 1)); /* prod points to the next free tx_bd at this point. */ sc->tx_prod = prod; sc->tx_prod_bseq = prod_bseq; /* Tell the chip about the waiting TX frames. */ REG_WR16(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BIDX, sc->tx_prod); REG_WR(sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2MQ_TX_HOST_BSEQ, sc->tx_prod_bseq); bce_tx_encap_exit: DBEXIT(BCE_VERBOSE_SEND); return(rc); } /****************************************************************************/ /* Main transmit routine when called from another routine with a lock. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_start_locked(if_t ifp) { struct bce_softc *sc = if_getsoftc(ifp); struct mbuf *m_head = NULL; int count = 0; u16 tx_prod, tx_chain_prod __unused; DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); BCE_LOCK_ASSERT(sc); /* prod points to the next free tx_bd. */ tx_prod = sc->tx_prod; tx_chain_prod = TX_CHAIN_IDX(tx_prod); DBPRINT(sc, BCE_INFO_SEND, "%s(enter): tx_prod = 0x%04X, tx_chain_prod = 0x%04X, " "tx_prod_bseq = 0x%08X\n", __FUNCTION__, tx_prod, tx_chain_prod, sc->tx_prod_bseq); /* If there's no link or the transmit queue is empty then just exit. */ if (sc->bce_link_up == FALSE) { DBPRINT(sc, BCE_INFO_SEND, "%s(): No link.\n", __FUNCTION__); goto bce_start_locked_exit; } if (if_sendq_empty(ifp)) { DBPRINT(sc, BCE_INFO_SEND, "%s(): Transmit queue empty.\n", __FUNCTION__); goto bce_start_locked_exit; } /* * Keep adding entries while there is space in the ring. */ while (sc->used_tx_bd < sc->max_tx_bd) { /* Check for any frames to send. */ m_head = if_dequeue(ifp); /* Stop when the transmit queue is empty. */ if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, place the mbuf back at the * head of the queue and set the OACTIVE flag * to wait for the NIC to drain the chain. */ if (bce_tx_encap(sc, &m_head)) { if (m_head != NULL) if_sendq_prepend(ifp, m_head); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); DBPRINT(sc, BCE_INFO_SEND, "TX chain is closed for business! Total " "tx_bd used = %d\n", sc->used_tx_bd); break; } count++; /* Send a copy of the frame to any BPF listeners. 
*/ ETHER_BPF_MTAP(ifp, m_head); } /* Exit if no packets were dequeued. */ if (count == 0) { DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): No packets were " "dequeued\n", __FUNCTION__); goto bce_start_locked_exit; } DBPRINT(sc, BCE_VERBOSE_SEND, "%s(): Inserted %d frames into " "send queue.\n", __FUNCTION__, count); /* Set the tx timeout. */ sc->watchdog_timer = BCE_TX_TIMEOUT; DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_ctx(sc, TX_CID)); DBRUNMSG(BCE_VERBOSE_SEND, bce_dump_mq_regs(sc)); bce_start_locked_exit: DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_CTX); } /****************************************************************************/ /* Main transmit routine when called from another routine without a lock. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_start(if_t ifp) { struct bce_softc *sc = if_getsoftc(ifp); DBENTER(BCE_VERBOSE_SEND); BCE_LOCK(sc); bce_start_locked(ifp); BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE_SEND); } /****************************************************************************/ /* Handles any IOCTL calls from the operating system. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_ioctl(if_t ifp, u_long command, caddr_t data) { struct bce_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int mask, error = 0; DBENTER(BCE_VERBOSE_MISC); switch(command) { /* Set the interface MTU. */ case SIOCSIFMTU: /* Check that the MTU setting is supported. */ if ((ifr->ifr_mtu < BCE_MIN_MTU) || (ifr->ifr_mtu > BCE_MAX_JUMBO_MTU)) { error = EINVAL; break; } DBPRINT(sc, BCE_INFO_MISC, "SIOCSIFMTU: Changing MTU from %d to %d\n", (int) if_getmtu(ifp), (int) ifr->ifr_mtu); BCE_LOCK(sc); if_setmtu(ifp, ifr->ifr_mtu); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bce_init_locked(sc); } BCE_UNLOCK(sc); break; /* Set interface flags. */ case SIOCSIFFLAGS: DBPRINT(sc, BCE_VERBOSE_SPECIAL, "Received SIOCSIFFLAGS\n"); BCE_LOCK(sc); /* Check if the interface is up. */ if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* Change promiscuous/multicast flags as necessary. */ bce_set_rx_mode(sc); } else { /* Start the HW */ bce_init_locked(sc); } } else { /* The interface is down, check if driver is running. */ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { bce_stop(sc); /* If MFW is running, restart the controller a bit. */ if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) { bce_reset(sc, BCE_DRV_MSG_CODE_RESET); bce_chipinit(sc); bce_mgmt_init_locked(sc); } } } BCE_UNLOCK(sc); break; /* Add/Delete multicast address */ case SIOCADDMULTI: case SIOCDELMULTI: DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCADDMULTI/SIOCDELMULTI\n"); BCE_LOCK(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) bce_set_rx_mode(sc); BCE_UNLOCK(sc); break; /* Set/Get Interface media */ case SIOCSIFMEDIA: case SIOCGIFMEDIA: DBPRINT(sc, BCE_VERBOSE_MISC, "Received SIOCSIFMEDIA/SIOCGIFMEDIA\n"); if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) error = ifmedia_ioctl(ifp, ifr, &sc->bce_ifmedia, command); else { mii = device_get_softc(sc->bce_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; /* Set interface capability */ case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); DBPRINT(sc, BCE_INFO_MISC, "Received SIOCSIFCAP = 0x%08X\n", (u32) mask); /* Toggle the TX checksum capabilities enable flag. 
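The hardware-assist bits are kept in step with the capability so the stack only requests checksum offload when it is enabled.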
*/ if (mask & IFCAP_TXCSUM && if_getcapabilities(ifp) & IFCAP_TXCSUM) { if_togglecapenable(ifp, IFCAP_TXCSUM); if (IFCAP_TXCSUM & if_getcapenable(ifp)) if_sethwassistbits(ifp, BCE_IF_HWASSIST, 0); else if_sethwassistbits(ifp, 0, BCE_IF_HWASSIST); } /* Toggle the RX checksum capabilities enable flag. */ if (mask & IFCAP_RXCSUM && if_getcapabilities(ifp) & IFCAP_RXCSUM) if_togglecapenable(ifp, IFCAP_RXCSUM); /* Toggle the TSO capabilities enable flag. */ if (bce_tso_enable && (mask & IFCAP_TSO4) && if_getcapabilities(ifp) & IFCAP_TSO4) { if_togglecapenable(ifp, IFCAP_TSO4); if (IFCAP_TSO4 & if_getcapenable(ifp)) if_sethwassistbits(ifp, CSUM_TSO, 0); else if_sethwassistbits(ifp, 0, CSUM_TSO); } if (mask & IFCAP_VLAN_HWCSUM && if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); if ((mask & IFCAP_VLAN_HWTSO) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); /* * Don't actually disable VLAN tag stripping as * management firmware (ASF/IPMI/UMP) requires the * feature. If VLAN tag stripping is disabled driver * will manually reconstruct the VLAN frame by * appending stripped VLAN tag. */ if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING)) { if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO); } VLAN_CAPABILITIES(ifp); break; default: /* We don't know how to handle the IOCTL, pass it on. */ error = ether_ioctl(ifp, command, data); break; } DBEXIT(BCE_VERBOSE_MISC); return(error); } /****************************************************************************/ /* Transmit timeout handler. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_watchdog(struct bce_softc *sc) { uint32_t status; DBENTER(BCE_EXTREME_SEND); BCE_LOCK_ASSERT(sc); status = 0; /* If the watchdog timer hasn't expired then just exit. */ if (sc->watchdog_timer == 0 || --sc->watchdog_timer) goto bce_watchdog_exit; status = REG_RD(sc, BCE_EMAC_RX_STATUS); /* If pause frames are active then don't reset the hardware. */ if ((sc->bce_flags & BCE_USING_RX_FLOW_CONTROL) != 0) { if ((status & BCE_EMAC_RX_STATUS_FFED) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. */ sc->watchdog_timer = BCE_TX_TIMEOUT; goto bce_watchdog_exit; } else if ((status & BCE_EMAC_RX_STATUS_FF_RECEIVED) != 0 && (status & BCE_EMAC_RX_STATUS_N_RECEIVED) != 0) { /* * If we're not currently XOFF'ed but have recently * been XOFF'd/XON'd then assume that's delaying TX * this time around. */ sc->watchdog_timer = BCE_TX_TIMEOUT; goto bce_watchdog_exit; } /* * Any other condition is unexpected and the controller * should be reset. */ } BCE_PRINTF("%s(%d): Watchdog timeout occurred, resetting!\n", __FILE__, __LINE__); DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc); bce_dump_status_block(sc); bce_dump_stats_block(sc); bce_dump_ftqs(sc); bce_dump_txp_state(sc, 0); bce_dump_rxp_state(sc, 0); bce_dump_tpat_state(sc, 0); bce_dump_cp_state(sc, 0); bce_dump_com_state(sc, 0)); DBRUN(bce_breakpoint(sc)); if_setdrvflagbits(sc->bce_ifp, 0, IFF_DRV_RUNNING); bce_init_locked(sc); sc->watchdog_timeouts++; bce_watchdog_exit: REG_WR(sc, BCE_EMAC_RX_STATUS, status); DBEXIT(BCE_EXTREME_SEND); } /* * Interrupt handler. */ /****************************************************************************/ /* Main interrupt entry point. 
Verifies that the controller generated the */ /* interrupt and then calls a separate routine to handle the various */ /* interrupt causes (PHY, TX, RX). */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_intr(void *xsc) { struct bce_softc *sc; if_t ifp; u32 status_attn_bits; u16 hw_rx_cons, hw_tx_cons; sc = xsc; ifp = sc->bce_ifp; DBENTER(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_status_block(sc)); DBRUNMSG(BCE_VERBOSE_INTR, bce_dump_stats_block(sc)); BCE_LOCK(sc); DBRUN(sc->interrupts_generated++); /* Synchronize before we read from the interface's status block */ bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD); /* * If the hardware status block index matches the last value read * by the driver and we haven't asserted our interrupt then there's * nothing to do. This may only happen in case of INTx due to the * interrupt arriving at the CPU before the status block is updated. */ if ((sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) == 0 && sc->status_block->status_idx == sc->last_status_idx && (REG_RD(sc, BCE_PCICFG_MISC_STATUS) & BCE_PCICFG_MISC_STATUS_INTA_VALUE)) { DBPRINT(sc, BCE_VERBOSE_INTR, "%s(): Spurious interrupt.\n", __FUNCTION__); goto bce_intr_exit; } /* Ack the interrupt and stop others from occurring. */ REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | BCE_PCICFG_INT_ACK_CMD_MASK_INT); /* Check if the hardware has finished any work. */ hw_rx_cons = bce_get_hw_rx_cons(sc); hw_tx_cons = bce_get_hw_tx_cons(sc); /* Keep processing data as long as there is work to do. */ for (;;) { status_attn_bits = sc->status_block->status_attn_bits; DBRUNIF(DB_RANDOMTRUE(unexpected_attention_sim_control), BCE_PRINTF("Simulating unexpected status attention " "bit set."); sc->unexpected_attention_sim_count++; status_attn_bits = status_attn_bits | STATUS_ATTN_BITS_PARITY_ERROR); /* Was it a link change interrupt? */ if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != (sc->status_block->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) { bce_phy_intr(sc); /* Clear transient updates during link state change. */ REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT); REG_RD(sc, BCE_HC_COMMAND); } /* If any other attention is asserted, the chip is toast. */ if (((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) != (sc->status_block->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE))) { sc->unexpected_attention_count++; BCE_PRINTF("%s(%d): Fatal attention detected: " "0x%08X\n", __FILE__, __LINE__, sc->status_block->status_attn_bits); DBRUNMSG(BCE_FATAL, if (unexpected_attention_sim_control == 0) bce_breakpoint(sc)); bce_init_locked(sc); goto bce_intr_exit; } /* Check for any completed RX frames. */ if (hw_rx_cons != sc->hw_rx_cons) bce_rx_intr(sc); /* Check for any completed TX frames. */ if (hw_tx_cons != sc->hw_tx_cons) bce_tx_intr(sc); /* Save status block index value for the next interrupt. */ sc->last_status_idx = sc->status_block->status_idx; /* * Prevent speculative reads from getting * ahead of the status block. */ bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0, BUS_SPACE_BARRIER_READ); /* * If there's no work left then exit the * interrupt service routine. 
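* The hardware consumer indices are re-read so that completions which arrived while earlier work was being serviced are not missed before interrupts are re-enabled.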
*/ hw_rx_cons = bce_get_hw_rx_cons(sc); hw_tx_cons = bce_get_hw_tx_cons(sc); if ((hw_rx_cons == sc->hw_rx_cons) && (hw_tx_cons == sc->hw_tx_cons)) break; } bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_PREREAD); /* Re-enable interrupts. */ bce_enable_intr(sc, 0); /* Handle any frames that arrived while handling the interrupt. */ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING && !if_sendq_empty(ifp)) bce_start_locked(ifp); bce_intr_exit: BCE_UNLOCK(sc); DBEXIT(BCE_VERBOSE_SEND | BCE_VERBOSE_RECV | BCE_VERBOSE_INTR); } /****************************************************************************/ /* Programs the various packet receive modes (broadcast and multicast). */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static u_int bce_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { u32 *hashes = arg; int h; h = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0xFF; hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F); return (1); } static void bce_set_rx_mode(struct bce_softc *sc) { if_t ifp; u32 hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 }; u32 rx_mode, sort_mode; int i; DBENTER(BCE_VERBOSE_MISC); BCE_LOCK_ASSERT(sc); ifp = sc->bce_ifp; /* Initialize receive mode default settings. */ rx_mode = sc->rx_mode & ~(BCE_EMAC_RX_MODE_PROMISCUOUS | BCE_EMAC_RX_MODE_KEEP_VLAN_TAG); sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN; /* * ASF/IPMI/UMP firmware requires that VLAN tag stripping * be enabled. */ if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) && (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))) rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG; /* * Check for promiscuous, all multicast, or selected * multicast address filtering. */ if (if_getflags(ifp) & IFF_PROMISC) { DBPRINT(sc, BCE_INFO_MISC, "Enabling promiscuous mode.\n"); /* Enable promiscuous mode. */ rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS; sort_mode |= BCE_RPM_SORT_USER0_PROM_EN; } else if (if_getflags(ifp) & IFF_ALLMULTI) { DBPRINT(sc, BCE_INFO_MISC, "Enabling all multicast mode.\n"); /* Enable all multicast addresses. */ for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff); } sort_mode |= BCE_RPM_SORT_USER0_MC_EN; } else { /* Accept one or more multicast(s). */ DBPRINT(sc, BCE_INFO_MISC, "Enabling selective multicast mode.\n"); if_foreach_llmaddr(ifp, bce_hash_maddr, hashes); for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4), hashes[i]); sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN; } /* Only make changes if the receive mode has actually changed. */ if (rx_mode != sc->rx_mode) { DBPRINT(sc, BCE_VERBOSE_MISC, "Enabling new receive mode: " "0x%08X\n", rx_mode); sc->rx_mode = rx_mode; REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode); } /* Disable and clear the existing sort before enabling a new sort. */ REG_WR(sc, BCE_RPM_SORT_USER0, 0x0); REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode); REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA); DBEXIT(BCE_VERBOSE_MISC); } /****************************************************************************/ /* Called periodically to update statistics from the controller's */ /* statistics block. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static void bce_stats_update(struct bce_softc *sc) { struct statistics_block *stats; DBENTER(BCE_EXTREME_MISC); bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD); stats = (struct statistics_block *) sc->stats_block; /* * Update the sysctl statistics from the * hardware statistics. */ sc->stat_IfHCInOctets = ((u64) stats->stat_IfHCInOctets_hi << 32) + (u64) stats->stat_IfHCInOctets_lo; sc->stat_IfHCInBadOctets = ((u64) stats->stat_IfHCInBadOctets_hi << 32) + (u64) stats->stat_IfHCInBadOctets_lo; sc->stat_IfHCOutOctets = ((u64) stats->stat_IfHCOutOctets_hi << 32) + (u64) stats->stat_IfHCOutOctets_lo; sc->stat_IfHCOutBadOctets = ((u64) stats->stat_IfHCOutBadOctets_hi << 32) + (u64) stats->stat_IfHCOutBadOctets_lo; sc->stat_IfHCInUcastPkts = ((u64) stats->stat_IfHCInUcastPkts_hi << 32) + (u64) stats->stat_IfHCInUcastPkts_lo; sc->stat_IfHCInMulticastPkts = ((u64) stats->stat_IfHCInMulticastPkts_hi << 32) + (u64) stats->stat_IfHCInMulticastPkts_lo; sc->stat_IfHCInBroadcastPkts = ((u64) stats->stat_IfHCInBroadcastPkts_hi << 32) + (u64) stats->stat_IfHCInBroadcastPkts_lo; sc->stat_IfHCOutUcastPkts = ((u64) stats->stat_IfHCOutUcastPkts_hi << 32) + (u64) stats->stat_IfHCOutUcastPkts_lo; sc->stat_IfHCOutMulticastPkts = ((u64) stats->stat_IfHCOutMulticastPkts_hi << 32) + (u64) stats->stat_IfHCOutMulticastPkts_lo; sc->stat_IfHCOutBroadcastPkts = ((u64) stats->stat_IfHCOutBroadcastPkts_hi << 32) + (u64) stats->stat_IfHCOutBroadcastPkts_lo; /* ToDo: Preserve counters beyond 32 bits? */ /* ToDo: Read the statistics from auto-clear regs? */ sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors = stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors; sc->stat_Dot3StatsCarrierSenseErrors = stats->stat_Dot3StatsCarrierSenseErrors; sc->stat_Dot3StatsFCSErrors = stats->stat_Dot3StatsFCSErrors; sc->stat_Dot3StatsAlignmentErrors = stats->stat_Dot3StatsAlignmentErrors; sc->stat_Dot3StatsSingleCollisionFrames = stats->stat_Dot3StatsSingleCollisionFrames; sc->stat_Dot3StatsMultipleCollisionFrames = stats->stat_Dot3StatsMultipleCollisionFrames; sc->stat_Dot3StatsDeferredTransmissions = stats->stat_Dot3StatsDeferredTransmissions; sc->stat_Dot3StatsExcessiveCollisions = stats->stat_Dot3StatsExcessiveCollisions; sc->stat_Dot3StatsLateCollisions = stats->stat_Dot3StatsLateCollisions; sc->stat_EtherStatsCollisions = stats->stat_EtherStatsCollisions; sc->stat_EtherStatsFragments = stats->stat_EtherStatsFragments; sc->stat_EtherStatsJabbers = stats->stat_EtherStatsJabbers; sc->stat_EtherStatsUndersizePkts = stats->stat_EtherStatsUndersizePkts; sc->stat_EtherStatsOversizePkts = stats->stat_EtherStatsOversizePkts; sc->stat_EtherStatsPktsRx64Octets = stats->stat_EtherStatsPktsRx64Octets; sc->stat_EtherStatsPktsRx65Octetsto127Octets = stats->stat_EtherStatsPktsRx65Octetsto127Octets; sc->stat_EtherStatsPktsRx128Octetsto255Octets = stats->stat_EtherStatsPktsRx128Octetsto255Octets; sc->stat_EtherStatsPktsRx256Octetsto511Octets = stats->stat_EtherStatsPktsRx256Octetsto511Octets; sc->stat_EtherStatsPktsRx512Octetsto1023Octets = stats->stat_EtherStatsPktsRx512Octetsto1023Octets; sc->stat_EtherStatsPktsRx1024Octetsto1522Octets = stats->stat_EtherStatsPktsRx1024Octetsto1522Octets; sc->stat_EtherStatsPktsRx1523Octetsto9022Octets = stats->stat_EtherStatsPktsRx1523Octetsto9022Octets; sc->stat_EtherStatsPktsTx64Octets = stats->stat_EtherStatsPktsTx64Octets; sc->stat_EtherStatsPktsTx65Octetsto127Octets = 
stats->stat_EtherStatsPktsTx65Octetsto127Octets; sc->stat_EtherStatsPktsTx128Octetsto255Octets = stats->stat_EtherStatsPktsTx128Octetsto255Octets; sc->stat_EtherStatsPktsTx256Octetsto511Octets = stats->stat_EtherStatsPktsTx256Octetsto511Octets; sc->stat_EtherStatsPktsTx512Octetsto1023Octets = stats->stat_EtherStatsPktsTx512Octetsto1023Octets; sc->stat_EtherStatsPktsTx1024Octetsto1522Octets = stats->stat_EtherStatsPktsTx1024Octetsto1522Octets; sc->stat_EtherStatsPktsTx1523Octetsto9022Octets = stats->stat_EtherStatsPktsTx1523Octetsto9022Octets; sc->stat_XonPauseFramesReceived = stats->stat_XonPauseFramesReceived; sc->stat_XoffPauseFramesReceived = stats->stat_XoffPauseFramesReceived; sc->stat_OutXonSent = stats->stat_OutXonSent; sc->stat_OutXoffSent = stats->stat_OutXoffSent; sc->stat_FlowControlDone = stats->stat_FlowControlDone; sc->stat_MacControlFramesReceived = stats->stat_MacControlFramesReceived; sc->stat_XoffStateEntered = stats->stat_XoffStateEntered; sc->stat_IfInFramesL2FilterDiscards = stats->stat_IfInFramesL2FilterDiscards; sc->stat_IfInRuleCheckerDiscards = stats->stat_IfInRuleCheckerDiscards; sc->stat_IfInFTQDiscards = stats->stat_IfInFTQDiscards; sc->stat_IfInMBUFDiscards = stats->stat_IfInMBUFDiscards; sc->stat_IfInRuleCheckerP4Hit = stats->stat_IfInRuleCheckerP4Hit; sc->stat_CatchupInRuleCheckerDiscards = stats->stat_CatchupInRuleCheckerDiscards; sc->stat_CatchupInFTQDiscards = stats->stat_CatchupInFTQDiscards; sc->stat_CatchupInMBUFDiscards = stats->stat_CatchupInMBUFDiscards; sc->stat_CatchupInRuleCheckerP4Hit = stats->stat_CatchupInRuleCheckerP4Hit; sc->com_no_buffers = REG_RD_IND(sc, 0x120084); /* ToDo: Add additional statistics? */ DBEXIT(BCE_EXTREME_MISC); } static uint64_t bce_get_counter(if_t ifp, ift_counter cnt) { struct bce_softc *sc; uint64_t rv; sc = if_getsoftc(ifp); switch (cnt) { case IFCOUNTER_COLLISIONS: return (sc->stat_EtherStatsCollisions); case IFCOUNTER_IERRORS: return (sc->stat_EtherStatsUndersizePkts + sc->stat_EtherStatsOversizePkts + sc->stat_IfInMBUFDiscards + sc->stat_Dot3StatsAlignmentErrors + sc->stat_Dot3StatsFCSErrors + sc->stat_IfInRuleCheckerDiscards + sc->stat_IfInFTQDiscards + sc->l2fhdr_error_count + sc->com_no_buffers); case IFCOUNTER_OERRORS: rv = sc->stat_Dot3StatsExcessiveCollisions + sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors + sc->stat_Dot3StatsLateCollisions + sc->watchdog_timeouts; /* * Certain controllers don't report * carrier sense errors correctly. * See errata E11_5708CA0_1165. */ if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) && !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) rv += sc->stat_Dot3StatsCarrierSenseErrors; return (rv); default: return (if_get_counter_default(ifp, cnt)); } } /****************************************************************************/ /* Periodic function to notify the bootcode that the driver is still */ /* present. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_pulse(void *xsc) { struct bce_softc *sc = xsc; u32 msg; DBENTER(BCE_EXTREME_MISC); BCE_LOCK_ASSERT(sc); /* Tell the firmware that the driver is still running. */ msg = (u32) ++sc->bce_fw_drv_pulse_wr_seq; bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg); /* Update the bootcode condition. */ sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); /* Report whether the bootcode still knows the driver is running. 
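When verbose output is enabled, a message is logged on each transition: once when the bootcode stops seeing the driver pulse and again when it picks it back up.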
*/ if (bce_verbose || bootverbose) { if (sc->bce_drv_cardiac_arrest == FALSE) { if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) { sc->bce_drv_cardiac_arrest = TRUE; BCE_PRINTF("%s(): Warning: bootcode " "thinks driver is absent! " "(bc_state = 0x%08X)\n", __FUNCTION__, sc->bc_state); } } else { /* * Not supported by all bootcode versions. * (v5.0.11+ and v5.2.1+) Older bootcode * will require the driver to reset the * controller to clear this condition. */ if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) { sc->bce_drv_cardiac_arrest = FALSE; BCE_PRINTF("%s(): Bootcode found the " "driver pulse! (bc_state = 0x%08X)\n", __FUNCTION__, sc->bc_state); } } } /* Schedule the next pulse. */ callout_reset(&sc->bce_pulse_callout, hz, bce_pulse, sc); DBEXIT(BCE_EXTREME_MISC); } /****************************************************************************/ /* Periodic function to perform maintenance tasks. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static void bce_tick(void *xsc) { struct bce_softc *sc = xsc; struct mii_data *mii; if_t ifp; struct ifmediareq ifmr; ifp = sc->bce_ifp; DBENTER(BCE_EXTREME_MISC); BCE_LOCK_ASSERT(sc); /* Schedule the next tick. */ callout_reset(&sc->bce_tick_callout, hz, bce_tick, sc); /* Update the statistics from the hardware statistics block. */ bce_stats_update(sc); /* Ensure page and RX chains get refilled in low-memory situations. */ if (bce_hdr_split == TRUE) bce_fill_pg_chain(sc); bce_fill_rx_chain(sc); /* Check that the chip hasn't hung. */ bce_watchdog(sc); /* If the link is already up then we're done. */ if (sc->bce_link_up == TRUE) goto bce_tick_exit; /* Link is down. Check what the PHY's doing. */ if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) { bzero(&ifmr, sizeof(ifmr)); bce_ifmedia_sts_rphy(sc, &ifmr); if ((ifmr.ifm_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { sc->bce_link_up = TRUE; bce_miibus_statchg(sc->bce_dev); } } else { mii = device_get_softc(sc->bce_miibus); mii_tick(mii); /* Check if the link has come up. */ if ((mii->mii_media_status & IFM_ACTIVE) && (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)) { DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Link up!\n", __FUNCTION__); sc->bce_link_up = TRUE; if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX || IFM_SUBTYPE(mii->mii_media_active) == IFM_2500_SX) && (bce_verbose || bootverbose)) BCE_PRINTF("Gigabit link up!\n"); } } if (sc->bce_link_up == TRUE) { /* Now that link is up, handle any outstanding TX traffic. 
*/ if (!if_sendq_empty(ifp)) { DBPRINT(sc, BCE_VERBOSE_MISC, "%s(): Found " "pending TX traffic.\n", __FUNCTION__); bce_start_locked(ifp); } } bce_tick_exit: DBEXIT(BCE_EXTREME_MISC); } static void bce_fw_cap_init(struct bce_softc *sc) { u32 ack, cap, link; ack = 0; cap = bce_shmem_rd(sc, BCE_FW_CAP_MB); if ((cap & BCE_FW_CAP_SIGNATURE_MAGIC_MASK) != BCE_FW_CAP_SIGNATURE_MAGIC) return; if ((cap & (BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN)) == (BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN)) ack |= BCE_DRV_ACK_CAP_SIGNATURE_MAGIC | BCE_FW_CAP_MFW_KEEP_VLAN | BCE_FW_CAP_BC_KEEP_VLAN; if ((sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) != 0 && (cap & BCE_FW_CAP_REMOTE_PHY_CAP) != 0) { sc->bce_phy_flags &= ~BCE_PHY_REMOTE_PORT_FIBER_FLAG; sc->bce_phy_flags |= BCE_PHY_REMOTE_CAP_FLAG; link = bce_shmem_rd(sc, BCE_LINK_STATUS); if ((link & BCE_LINK_STATUS_SERDES_LINK) != 0) sc->bce_phy_flags |= BCE_PHY_REMOTE_PORT_FIBER_FLAG; ack |= BCE_DRV_ACK_CAP_SIGNATURE_MAGIC | BCE_FW_CAP_REMOTE_PHY_CAP; } if (ack != 0) bce_shmem_wr(sc, BCE_DRV_ACK_CAP_MB, ack); } #ifdef BCE_DEBUG /****************************************************************************/ /* Allows the driver state to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_driver_state(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_driver_state(sc); } return error; } /****************************************************************************/ /* Allows the hardware state to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_hw_state(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_hw_state(sc); } return error; } /****************************************************************************/ /* Allows the status block to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_status_block(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_status_block(sc); } return error; } /****************************************************************************/ /* Allows the stats block to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
*/ /****************************************************************************/ static int bce_sysctl_stats_block(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_stats_block(sc); } return error; } /****************************************************************************/ /* Allows the stat counters to be cleared without unloading/reloading the */ /* driver. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_stats_clear(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; struct statistics_block *stats; stats = (struct statistics_block *) sc->stats_block; bzero(stats, sizeof(struct statistics_block)); bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Clear the internal H/W statistics counters. */ REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW); /* Reset the driver maintained statistics. */ sc->interrupts_rx = sc->interrupts_tx = 0; sc->tso_frames_requested = sc->tso_frames_completed = sc->tso_frames_failed = 0; sc->rx_empty_count = sc->tx_full_count = 0; sc->rx_low_watermark = USABLE_RX_BD_ALLOC; sc->tx_hi_watermark = 0; sc->l2fhdr_error_count = sc->l2fhdr_error_sim_count = 0; sc->mbuf_alloc_failed_count = sc->mbuf_alloc_failed_sim_count = 0; sc->dma_map_addr_rx_failed_count = sc->dma_map_addr_tx_failed_count = 0; sc->mbuf_frag_count = 0; sc->csum_offload_tcp_udp = sc->csum_offload_ip = 0; sc->vlan_tagged_frames_rcvd = sc->vlan_tagged_frames_stripped = 0; sc->split_header_frames_rcvd = sc->split_header_tcp_frames_rcvd = 0; /* Clear firmware maintained statistics. */ REG_WR_IND(sc, 0x120084, 0); } return error; } /****************************************************************************/ /* Allows the shared memory contents to be dumped through the sysctl . */ /* interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_shmem_state(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_shmem_state(sc); } return error; } /****************************************************************************/ /* Allows the bootcode state to be dumped through the sysctl interface. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_bc_state(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_bc_state(sc); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow dumping the RX BD chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
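Writing 1 dumps the entire chain (TOTAL_RX_BD_ALLOC entries) to the console.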
*/ /****************************************************************************/ static int bce_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow dumping the RX MBUF chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_dump_rx_mbuf_chain(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow dumping the TX chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow dumping the page chain. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_dump_pg_chain(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC); } return error; } /****************************************************************************/ /* Provides a sysctl interface to allow reading arbitrary NVRAM offsets in */ /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_nvram_read(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc = (struct bce_softc *)arg1; int error; u32 result; u32 val[1]; u8 *data = (u8 *) val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); error = bce_nvram_read(sc, result, data, 4); BCE_PRINTF("offset 0x%08X = 0x%08X\n", result, bce_be32toh(val[0])); return (error); } /****************************************************************************/ /* Provides a sysctl interface to allow reading arbitrary registers in the */ /* device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
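The value written is taken as the register offset; offsets below 0x8000 are read directly and offsets below 0x280000 through the indirect register interface.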
*/ /****************************************************************************/ static int bce_sysctl_reg_read(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc = (struct bce_softc *)arg1; int error; u32 val, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); /* Make sure the register is accessible. */ if (result < 0x8000) { val = REG_RD(sc, result); BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); } else if (result < 0x0280000) { val = REG_RD_IND(sc, result); BCE_PRINTF("reg 0x%08X = 0x%08X\n", result, val); } return (error); } /****************************************************************************/ /* Provides a sysctl interface to allow reading arbitrary PHY registers in */ /* the device. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_phy_read(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc; device_t dev; int error, result; u16 val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); /* Make sure the register is accessible. */ if (result < 0x20) { sc = (struct bce_softc *)arg1; dev = sc->bce_dev; val = bce_miibus_read_reg(dev, sc->bce_phy_addr, result); BCE_PRINTF("phy 0x%02X = 0x%04X\n", result, val); } return (error); } /****************************************************************************/ /* Provides a sysctl interface for dumping the nvram contents. */ /* DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive errno for failure. */ /****************************************************************************/ static int bce_sysctl_nvram_dump(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc = (struct bce_softc *)arg1; int error, i; if (sc->nvram_buf == NULL) sc->nvram_buf = malloc(sc->bce_flash_size, M_TEMP, M_ZERO | M_WAITOK); error = 0; if (req->oldlen == sc->bce_flash_size) { for (i = 0; i < sc->bce_flash_size && error == 0; i++) error = bce_nvram_read(sc, i, &sc->nvram_buf[i], 1); } if (error == 0) error = SYSCTL_OUT(req, sc->nvram_buf, sc->bce_flash_size); return error; } #ifdef BCE_NVRAM_WRITE_SUPPORT /****************************************************************************/ /* Provides a sysctl interface for writing to nvram. */ /* DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive errno for failure. */ /****************************************************************************/ static int bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc = (struct bce_softc *)arg1; int error; if (sc->nvram_buf == NULL) sc->nvram_buf = malloc(sc->bce_flash_size, M_TEMP, M_ZERO | M_WAITOK); else bzero(sc->nvram_buf, sc->bce_flash_size); error = SYSCTL_IN(req, sc->nvram_buf, sc->bce_flash_size); if (error == 0) return (error); if (req->newlen == sc->bce_flash_size) error = bce_nvram_write(sc, 0, sc->nvram_buf, sc->bce_flash_size); return error; } #endif /****************************************************************************/ /* Provides a sysctl interface to allow reading a CID. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. 
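The CID to dump is supplied as the value written; only CIDs up to TX_CID are accepted.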
*/ /****************************************************************************/ static int bce_sysctl_dump_ctx(SYSCTL_HANDLER_ARGS) { struct bce_softc *sc; int error, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); /* Make sure the register is accessible. */ if (result <= TX_CID) { sc = (struct bce_softc *)arg1; bce_dump_ctx(sc, result); } return (error); } /****************************************************************************/ /* Provides a sysctl interface to forcing the driver to dump state and */ /* enter the debugger. DO NOT ENABLE ON PRODUCTION SYSTEMS! */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static int bce_sysctl_breakpoint(SYSCTL_HANDLER_ARGS) { int error; int result; struct bce_softc *sc; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { sc = (struct bce_softc *)arg1; bce_breakpoint(sc); } return error; } #endif /****************************************************************************/ /* Adds any sysctl parameters for tuning or debugging purposes. */ /* */ /* Returns: */ /* 0 for success, positive value for failure. */ /****************************************************************************/ static void bce_add_sysctls(struct bce_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; DBENTER(BCE_VERBOSE_MISC); ctx = device_get_sysctl_ctx(sc->bce_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev)); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "l2fhdr_error_sim_control", CTLFLAG_RW, &l2fhdr_error_sim_control, 0, "Debug control to force l2fhdr errors"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "l2fhdr_error_sim_count", CTLFLAG_RD, &sc->l2fhdr_error_sim_count, 0, "Number of simulated l2_fhdr errors"); #endif SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "l2fhdr_error_count", CTLFLAG_RD, &sc->l2fhdr_error_count, 0, "Number of l2_fhdr errors"); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mbuf_alloc_failed_sim_control", CTLFLAG_RW, &mbuf_alloc_failed_sim_control, 0, "Debug control to force mbuf allocation failures"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mbuf_alloc_failed_sim_count", CTLFLAG_RD, &sc->mbuf_alloc_failed_sim_count, 0, "Number of simulated mbuf cluster allocation failures"); #endif SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mbuf_alloc_failed_count", CTLFLAG_RD, &sc->mbuf_alloc_failed_count, 0, "Number of mbuf allocation failures"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mbuf_frag_count", CTLFLAG_RD, &sc->mbuf_frag_count, 0, "Number of fragmented mbufs"); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dma_map_addr_failed_sim_control", CTLFLAG_RW, &dma_map_addr_failed_sim_control, 0, "Debug control to force DMA mapping failures"); /* ToDo: Figure out how to update this value in bce_dma_map_addr(). 
*/ SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dma_map_addr_failed_sim_count", CTLFLAG_RD, &sc->dma_map_addr_failed_sim_count, 0, "Number of simulated DMA mapping failures"); #endif SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dma_map_addr_rx_failed_count", CTLFLAG_RD, &sc->dma_map_addr_rx_failed_count, 0, "Number of RX DMA mapping failures"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "dma_map_addr_tx_failed_count", CTLFLAG_RD, &sc->dma_map_addr_tx_failed_count, 0, "Number of TX DMA mapping failures"); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "unexpected_attention_sim_control", CTLFLAG_RW, &unexpected_attention_sim_control, 0, "Debug control to simulate unexpected attentions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unexpected_attention_sim_count", CTLFLAG_RW, &sc->unexpected_attention_sim_count, 0, "Number of simulated unexpected attentions"); #endif SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unexpected_attention_count", CTLFLAG_RW, &sc->unexpected_attention_count, 0, "Number of unexpected attentions"); #ifdef BCE_DEBUG SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_bootcode_running_failure", CTLFLAG_RW, &bootcode_running_failure_sim_control, 0, "Debug control to force bootcode running failures"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_low_watermark", CTLFLAG_RD, &sc->rx_low_watermark, 0, "Lowest level of free rx_bd's"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rx_empty_count", CTLFLAG_RD, &sc->rx_empty_count, "Number of times the RX chain was empty"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_hi_watermark", CTLFLAG_RD, &sc->tx_hi_watermark, 0, "Highest level of used tx_bd's"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tx_full_count", CTLFLAG_RD, &sc->tx_full_count, "Number of times the TX chain was full"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tso_frames_requested", CTLFLAG_RD, &sc->tso_frames_requested, "Number of TSO frames requested"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tso_frames_completed", CTLFLAG_RD, &sc->tso_frames_completed, "Number of TSO frames completed"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "tso_frames_failed", CTLFLAG_RD, &sc->tso_frames_failed, "Number of TSO frames failed"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "csum_offload_ip", CTLFLAG_RD, &sc->csum_offload_ip, "Number of IP checksum offload frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "csum_offload_tcp_udp", CTLFLAG_RD, &sc->csum_offload_tcp_udp, "Number of TCP/UDP checksum offload frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "vlan_tagged_frames_rcvd", CTLFLAG_RD, &sc->vlan_tagged_frames_rcvd, "Number of VLAN tagged frames received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "vlan_tagged_frames_stripped", CTLFLAG_RD, &sc->vlan_tagged_frames_stripped, "Number of VLAN tagged frames stripped"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "interrupts_rx", CTLFLAG_RD, &sc->interrupts_rx, "Number of RX interrupts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "interrupts_tx", CTLFLAG_RD, &sc->interrupts_tx, "Number of TX interrupts"); if (bce_hdr_split == TRUE) { SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "split_header_frames_rcvd", CTLFLAG_RD, &sc->split_header_frames_rcvd, "Number of split header frames received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "split_header_tcp_frames_rcvd", CTLFLAG_RD, &sc->split_header_tcp_frames_rcvd, "Number of split header TCP frames received"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "nvram_dump", CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_nvram_dump, "S", ""); #ifdef BCE_NVRAM_WRITE_SUPPORT 
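/*
 * Write-side counterpart to nvram_dump above; bce_sysctl_nvram_write()
 * only programs the flash when the new image is exactly bce_flash_size
 * bytes long.
 */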
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "nvram_write", CTLTYPE_OPAQUE | CTLFLAG_WR | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_nvram_write, "S", ""); #endif #endif /* BCE_DEBUG */ SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHcInOctets", CTLFLAG_RD, &sc->stat_IfHCInOctets, "Bytes received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCInBadOctets", CTLFLAG_RD, &sc->stat_IfHCInBadOctets, "Bad bytes received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutOctets", CTLFLAG_RD, &sc->stat_IfHCOutOctets, "Bytes sent"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutBadOctets", CTLFLAG_RD, &sc->stat_IfHCOutBadOctets, "Bad bytes sent"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCInUcastPkts", CTLFLAG_RD, &sc->stat_IfHCInUcastPkts, "Unicast packets received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCInMulticastPkts", CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts, "Multicast packets received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCInBroadcastPkts", CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts, "Broadcast packets received"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutUcastPkts", CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts, "Unicast packets sent"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutMulticastPkts", CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts, "Multicast packets sent"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "stat_IfHCOutBroadcastPkts", CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts, "Broadcast packets sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_emac_tx_stat_dot3statsinternalmactransmiterrors", CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors, 0, "Internal MAC transmit errors"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsCarrierSenseErrors", CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors, 0, "Carrier sense errors"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsFCSErrors", CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors, 0, "Frame check sequence errors"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsAlignmentErrors", CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors, 0, "Alignment errors"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsSingleCollisionFrames", CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames, 0, "Single Collision Frames"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsMultipleCollisionFrames", CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames, 0, "Multiple Collision Frames"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsDeferredTransmissions", CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions, 0, "Deferred Transmissions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsExcessiveCollisions", CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions, 0, "Excessive Collisions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_Dot3StatsLateCollisions", CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions, 0, "Late Collisions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsCollisions", CTLFLAG_RD, &sc->stat_EtherStatsCollisions, 0, "Collisions"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsFragments", CTLFLAG_RD, &sc->stat_EtherStatsFragments, 0, "Fragments"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsJabbers", CTLFLAG_RD, &sc->stat_EtherStatsJabbers, 0, "Jabbers"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsUndersizePkts", CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts, 0, "Undersize packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsOversizePkts", 
CTLFLAG_RD, &sc->stat_EtherStatsOversizePkts, 0, "stat_EtherStatsOversizePkts"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx64Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets, 0, "Bytes received in 64 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx65Octetsto127Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets, 0, "Bytes received in 65 to 127 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx128Octetsto255Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets, 0, "Bytes received in 128 to 255 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx256Octetsto511Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets, 0, "Bytes received in 256 to 511 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx512Octetsto1023Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets, 0, "Bytes received in 512 to 1023 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx1024Octetsto1522Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets, 0, "Bytes received in 1024 t0 1522 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsRx1523Octetsto9022Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets, 0, "Bytes received in 1523 to 9022 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx64Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets, 0, "Bytes sent in 64 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx65Octetsto127Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets, 0, "Bytes sent in 65 to 127 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx128Octetsto255Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets, 0, "Bytes sent in 128 to 255 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx256Octetsto511Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets, 0, "Bytes sent in 256 to 511 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx512Octetsto1023Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets, 0, "Bytes sent in 512 to 1023 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx1024Octetsto1522Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets, 0, "Bytes sent in 1024 to 1522 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_EtherStatsPktsTx1523Octetsto9022Octets", CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets, 0, "Bytes sent in 1523 to 9022 byte packets"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_XonPauseFramesReceived", CTLFLAG_RD, &sc->stat_XonPauseFramesReceived, 0, "XON pause frames receved"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_XoffPauseFramesReceived", CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived, 0, "XOFF pause frames received"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_OutXonSent", CTLFLAG_RD, &sc->stat_OutXonSent, 0, "XON pause frames sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_OutXoffSent", CTLFLAG_RD, &sc->stat_OutXoffSent, 0, "XOFF pause frames sent"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_FlowControlDone", CTLFLAG_RD, &sc->stat_FlowControlDone, 0, "Flow control done"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_MacControlFramesReceived", CTLFLAG_RD, &sc->stat_MacControlFramesReceived, 0, "MAC 
control frames received"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_XoffStateEntered", CTLFLAG_RD, &sc->stat_XoffStateEntered, 0, "XOFF state entered"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInFramesL2FilterDiscards", CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards, 0, "Received L2 packets discarded"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInRuleCheckerDiscards", CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards, 0, "Received packets discarded by rule"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInFTQDiscards", CTLFLAG_RD, &sc->stat_IfInFTQDiscards, 0, "Received packet FTQ discards"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInMBUFDiscards", CTLFLAG_RD, &sc->stat_IfInMBUFDiscards, 0, "Received packets discarded due to lack " "of controller buffer memory"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_IfInRuleCheckerP4Hit", CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit, 0, "Received packets rule checker hits"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_CatchupInRuleCheckerDiscards", CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards, 0, "Received packets discarded in Catchup path"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_CatchupInFTQDiscards", CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards, 0, "Received packets discarded in FTQ in Catchup path"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_CatchupInMBUFDiscards", CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards, 0, "Received packets discarded in controller " "buffer memory in Catchup path"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "stat_CatchupInRuleCheckerP4Hit", CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit, 0, "Received packets rule checker hits in Catchup path"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "com_no_buffers", CTLFLAG_RD, &sc->com_no_buffers, 0, "Valid packets received but no RX buffers available"); #ifdef BCE_DEBUG SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "driver_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_driver_state, "I", "Drive state information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_hw_state, "I", "Hardware state information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "status_block", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_status_block, "I", "Dump status block"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats_block", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_stats_block, "I", "Dump statistics block"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats_clear", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_stats_clear, "I", "Clear statistics block"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "shmem_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_shmem_state, "I", "Shared memory state information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "bc_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_bc_state, "I", "Bootcode state information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_rx_bd_chain", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_dump_rx_bd_chain, "I", "Dump RX BD chain"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_rx_mbuf_chain", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_dump_rx_mbuf_chain, "I", "Dump RX MBUF chain"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_tx_chain", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, 
bce_sysctl_dump_tx_chain, "I", "Dump tx_bd chain"); if (bce_hdr_split == TRUE) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_pg_chain", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_dump_pg_chain, "I", "Dump page chain"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dump_ctx", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_dump_ctx, "I", "Dump context memory"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "breakpoint", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_breakpoint, "I", "Driver breakpoint"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read", CTLTYPE_INT | CTLFLAG_RW| CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_reg_read, "I", "Register read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "nvram_read", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_nvram_read, "I", "NVRAM read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "phy_read", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)sc, 0, bce_sysctl_phy_read, "I", "PHY register read"); #endif DBEXIT(BCE_VERBOSE_MISC); } /****************************************************************************/ /* BCE Debug Routines */ /****************************************************************************/ #ifdef BCE_DEBUG /****************************************************************************/ /* Freezes the controller to allow for a cohesive state dump. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_freeze_controller(struct bce_softc *sc) { u32 val; val = REG_RD(sc, BCE_MISC_COMMAND); val |= BCE_MISC_COMMAND_DISABLE_ALL; REG_WR(sc, BCE_MISC_COMMAND, val); } /****************************************************************************/ /* Unfreezes the controller after a freeze operation. This may not always */ /* work and the controller will require a reset! */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_unfreeze_controller(struct bce_softc *sc) { u32 val; val = REG_RD(sc, BCE_MISC_COMMAND); val |= BCE_MISC_COMMAND_ENABLE_ALL; REG_WR(sc, BCE_MISC_COMMAND, val); } /****************************************************************************/ /* Prints out Ethernet frame information from an mbuf. */ /* */ /* Partially decode an Ethernet frame to look at some important headers. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_enet(struct bce_softc *sc, struct mbuf *m) { struct ether_vlan_header *eh; u16 etype; int ehlen; struct ip *ip; struct tcphdr *th; struct udphdr *uh; struct arphdr *ah; BCE_PRINTF( "-----------------------------" " Frame Decode " "-----------------------------\n"); eh = mtod(m, struct ether_vlan_header *); /* Handle VLAN encapsulation if present. */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehlen = ETHER_HDR_LEN; } /* ToDo: Add VLAN output. 
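(For example, when the frame carries an 802.1Q header the tag is available as ntohs(eh->evl_tag) and could be printed alongside the type field.)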
*/ BCE_PRINTF("enet: dest = %6D, src = %6D, type = 0x%04X, hlen = %d\n", eh->evl_dhost, ":", eh->evl_shost, ":", etype, ehlen); switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(m->m_data + ehlen); BCE_PRINTF("--ip: dest = 0x%08X , src = 0x%08X, " "len = %d bytes, protocol = 0x%02X, xsum = 0x%04X\n", ntohl(ip->ip_dst.s_addr), ntohl(ip->ip_src.s_addr), ntohs(ip->ip_len), ip->ip_p, ntohs(ip->ip_sum)); switch (ip->ip_p) { case IPPROTO_TCP: th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); BCE_PRINTF("-tcp: dest = %d, src = %d, hlen = " "%d bytes, flags = 0x%b, csum = 0x%04X\n", ntohs(th->th_dport), ntohs(th->th_sport), (th->th_off << 2), th->th_flags, "\20\10CWR\07ECE\06URG\05ACK\04PSH\03RST" "\02SYN\01FIN", ntohs(th->th_sum)); break; case IPPROTO_UDP: uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2)); BCE_PRINTF("-udp: dest = %d, src = %d, len = %d " "bytes, csum = 0x%04X\n", ntohs(uh->uh_dport), ntohs(uh->uh_sport), ntohs(uh->uh_ulen), ntohs(uh->uh_sum)); break; case IPPROTO_ICMP: BCE_PRINTF("icmp:\n"); break; default: BCE_PRINTF("----: Other IP protocol.\n"); } break; case ETHERTYPE_IPV6: BCE_PRINTF("ipv6: No decode supported.\n"); break; case ETHERTYPE_ARP: BCE_PRINTF("-arp: "); ah = (struct arphdr *) (m->m_data + ehlen); switch (ntohs(ah->ar_op)) { case ARPOP_REVREQUEST: printf("reverse ARP request\n"); break; case ARPOP_REVREPLY: printf("reverse ARP reply\n"); break; case ARPOP_REQUEST: printf("ARP request\n"); break; case ARPOP_REPLY: printf("ARP reply\n"); break; default: printf("other ARP operation\n"); } break; default: BCE_PRINTF("----: Other protocol.\n"); } BCE_PRINTF( "-----------------------------" "--------------" "-----------------------------\n"); } /****************************************************************************/ /* Prints out information about an mbuf. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_mbuf(struct bce_softc *sc, struct mbuf *m) { struct mbuf *mp = m; if (m == NULL) { BCE_PRINTF("mbuf: null pointer\n"); return; } while (mp) { BCE_PRINTF("mbuf: %p, m_len = %d, m_flags = 0x%b, " "m_data = %p\n", mp, mp->m_len, mp->m_flags, "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", mp->m_data); if (mp->m_flags & M_PKTHDR) { BCE_PRINTF("- m_pkthdr: len = %d, flags = 0x%b, " "csum_flags = %b\n", mp->m_pkthdr.len, mp->m_flags, M_FLAG_PRINTF, mp->m_pkthdr.csum_flags, CSUM_BITS); } if (mp->m_flags & M_EXT) { BCE_PRINTF("- m_ext: %p, ext_size = %d, type = ", mp->m_ext.ext_buf, mp->m_ext.ext_size); switch (mp->m_ext.ext_type) { case EXT_CLUSTER: printf("EXT_CLUSTER\n"); break; case EXT_SFBUF: printf("EXT_SFBUF\n"); break; case EXT_JUMBO9: printf("EXT_JUMBO9\n"); break; case EXT_JUMBO16: printf("EXT_JUMBO16\n"); break; case EXT_PACKET: printf("EXT_PACKET\n"); break; case EXT_MBUF: printf("EXT_MBUF\n"); break; case EXT_NET_DRV: printf("EXT_NET_DRV\n"); break; case EXT_MOD_TYPE: printf("EXT_MDD_TYPE\n"); break; case EXT_DISPOSABLE: printf("EXT_DISPOSABLE\n"); break; case EXT_EXTREF: printf("EXT_EXTREF\n"); break; default: printf("UNKNOWN\n"); } } mp = mp->m_next; } } /****************************************************************************/ /* Prints out the mbufs in the TX mbuf chain. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_tx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) { struct mbuf *m; BCE_PRINTF( "----------------------------" " tx mbuf data " "----------------------------\n"); for (int i = 0; i < count; i++) { m = sc->tx_mbuf_ptr[chain_prod]; BCE_PRINTF("txmbuf[0x%04X]\n", chain_prod); bce_dump_mbuf(sc, m); chain_prod = TX_CHAIN_IDX(NEXT_TX_BD(chain_prod)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the mbufs in the RX mbuf chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rx_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) { struct mbuf *m; BCE_PRINTF( "----------------------------" " rx mbuf data " "----------------------------\n"); for (int i = 0; i < count; i++) { m = sc->rx_mbuf_ptr[chain_prod]; BCE_PRINTF("rxmbuf[0x%04X]\n", chain_prod); bce_dump_mbuf(sc, m); chain_prod = RX_CHAIN_IDX(NEXT_RX_BD(chain_prod)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the mbufs in the mbuf page chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_pg_mbuf_chain(struct bce_softc *sc, u16 chain_prod, int count) { struct mbuf *m; BCE_PRINTF( "----------------------------" " pg mbuf data " "----------------------------\n"); for (int i = 0; i < count; i++) { m = sc->pg_mbuf_ptr[chain_prod]; BCE_PRINTF("pgmbuf[0x%04X]\n", chain_prod); bce_dump_mbuf(sc, m); chain_prod = PG_CHAIN_IDX(NEXT_PG_BD(chain_prod)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out a tx_bd structure. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_txbd(struct bce_softc *sc, int idx, struct tx_bd *txbd) { int i = 0; if (idx > MAX_TX_BD_ALLOC) /* Index out of range. */ BCE_PRINTF("tx_bd[0x%04X]: Invalid tx_bd index!\n", idx); else if ((idx & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE) /* TX Chain page pointer. */ BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " "pointer\n", idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo); else { /* Normal tx_bd entry. 
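(The flags word is decoded into a '|'-separated list of symbolic names below.)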
*/ BCE_PRINTF("tx_bd[0x%04X]: haddr = 0x%08X:%08X, " "mss_nbytes = 0x%08X, vlan tag = 0x%04X, flags = " "0x%04X (", idx, txbd->tx_bd_haddr_hi, txbd->tx_bd_haddr_lo, txbd->tx_bd_mss_nbytes, txbd->tx_bd_vlan_tag, txbd->tx_bd_flags); if (txbd->tx_bd_flags & TX_BD_FLAGS_CONN_FAULT) { if (i>0) printf("|"); printf("CONN_FAULT"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_TCP_UDP_CKSUM) { if (i>0) printf("|"); printf("TCP_UDP_CKSUM"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_IP_CKSUM) { if (i>0) printf("|"); printf("IP_CKSUM"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_VLAN_TAG) { if (i>0) printf("|"); printf("VLAN"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_COAL_NOW) { if (i>0) printf("|"); printf("COAL_NOW"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_DONT_GEN_CRC) { if (i>0) printf("|"); printf("DONT_GEN_CRC"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_START) { if (i>0) printf("|"); printf("START"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_END) { if (i>0) printf("|"); printf("END"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_LSO) { if (i>0) printf("|"); printf("LSO"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) { if (i>0) printf("|"); printf("SW_OPTION=%d", ((txbd->tx_bd_flags & TX_BD_FLAGS_SW_OPTION_WORD) >> 8)); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_FLAGS) { if (i>0) printf("|"); printf("SW_FLAGS"); i++; } if (txbd->tx_bd_flags & TX_BD_FLAGS_SW_SNAP) { if (i>0) printf("|"); printf("SNAP)"); } else { printf(")\n"); } } } /****************************************************************************/ /* Prints out a rx_bd structure. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rxbd(struct bce_softc *sc, int idx, struct rx_bd *rxbd) { if (idx > MAX_RX_BD_ALLOC) /* Index out of range. */ BCE_PRINTF("rx_bd[0x%04X]: Invalid rx_bd index!\n", idx); else if ((idx & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE) /* RX Chain page pointer. */ BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, chain page " "pointer\n", idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo); else /* Normal rx_bd entry. */ BCE_PRINTF("rx_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = " "0x%08X, flags = 0x%08X\n", idx, rxbd->rx_bd_haddr_hi, rxbd->rx_bd_haddr_lo, rxbd->rx_bd_len, rxbd->rx_bd_flags); } /****************************************************************************/ /* Prints out a rx_bd structure in the page chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_pgbd(struct bce_softc *sc, int idx, struct rx_bd *pgbd) { if (idx > MAX_PG_BD_ALLOC) /* Index out of range. */ BCE_PRINTF("pg_bd[0x%04X]: Invalid pg_bd index!\n", idx); else if ((idx & USABLE_PG_BD_PER_PAGE) == USABLE_PG_BD_PER_PAGE) /* Page Chain page pointer. */ BCE_PRINTF("px_bd[0x%04X]: haddr = 0x%08X:%08X, chain page pointer\n", idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo); else /* Normal rx_bd entry. */ BCE_PRINTF("pg_bd[0x%04X]: haddr = 0x%08X:%08X, nbytes = 0x%08X, " "flags = 0x%08X\n", idx, pgbd->rx_bd_haddr_hi, pgbd->rx_bd_haddr_lo, pgbd->rx_bd_len, pgbd->rx_bd_flags); } /****************************************************************************/ /* Prints out a l2_fhdr structure. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_l2fhdr(struct bce_softc *sc, int idx, struct l2_fhdr *l2fhdr) { BCE_PRINTF("l2_fhdr[0x%04X]: status = 0x%b, " "pkt_len = %d, vlan = 0x%04x, ip_xsum/hdr_len = 0x%04X, " "tcp_udp_xsum = 0x%04X\n", idx, l2fhdr->l2_fhdr_status, BCE_L2FHDR_PRINTFB, l2fhdr->l2_fhdr_pkt_len, l2fhdr->l2_fhdr_vlan_tag, l2fhdr->l2_fhdr_ip_xsum, l2fhdr->l2_fhdr_tcp_udp_xsum); } /****************************************************************************/ /* Prints out context memory info. (Only useful for CID 0 to 16.) */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_ctx(struct bce_softc *sc, u16 cid) { if (cid > TX_CID) { BCE_PRINTF(" Unknown CID\n"); return; } BCE_PRINTF( "----------------------------" " CTX Data " "----------------------------\n"); BCE_PRINTF(" 0x%04X - (CID) Context ID\n", cid); if (cid == RX_CID) { BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BDIDX) host rx " "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BDIDX)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_BSEQ) host " "byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_BSEQ)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BSEQ) h/w byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BSEQ)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_HI) h/w buffer " "descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_HI)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDHADDR_LO) h/w buffer " "descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDHADDR_LO)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_BDIDX) h/w rx consumer " "index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_BDIDX)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_HOST_PG_BDIDX) host page " "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_HOST_PG_BDIDX)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_PG_BUF_SIZE) host rx_bd/page " "buffer size\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_PG_BUF_SIZE)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_HI) h/w page " "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_HI)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDHADDR_LO) h/w page " "chain address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDHADDR_LO)); BCE_PRINTF(" 0x%08X - (L2CTX_RX_NX_PG_BDIDX) h/w page " "consumer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_RX_NX_PG_BDIDX)); } else if (cid == TX_CID) { if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE_XI) ctx type\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_CMD_TX_TYPE_XI) ctx " "cmd\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI_XI) " "h/w buffer descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_HI_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO_XI) " "h/w buffer descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_LO_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX_XI) " "host producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_HOST_BIDX_XI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ_XI) " "host byte sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_HOST_BSEQ_XI)); } else { BCE_PRINTF(" 0x%08X - (L2CTX_TX_TYPE) ctx type\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TYPE)); BCE_PRINTF(" 0x%08X - 
(L2CTX_TX_CMD_TYPE) ctx cmd\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_CMD_TYPE)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BDHADDR_HI) " "h/w buffer descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_HI)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_TBDR_BHADDR_LO) " "h/w buffer descriptor address\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_TBDR_BHADDR_LO)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BIDX) host " "producer index\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_HOST_BIDX)); BCE_PRINTF(" 0x%08X - (L2CTX_TX_HOST_BSEQ) host byte " "sequence\n", CTX_RD(sc, GET_CID_ADDR(cid), BCE_L2CTX_TX_HOST_BSEQ)); } } BCE_PRINTF( "----------------------------" " Raw CTX " "----------------------------\n"); for (int i = 0x0; i < 0x300; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, CTX_RD(sc, GET_CID_ADDR(cid), i), CTX_RD(sc, GET_CID_ADDR(cid), i + 0x4), CTX_RD(sc, GET_CID_ADDR(cid), i + 0x8), CTX_RD(sc, GET_CID_ADDR(cid), i + 0xc)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the FTQ data. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_ftqs(struct bce_softc *sc) { u32 cmd, ctl, cur_depth, max_depth, valid_cnt, val; BCE_PRINTF( "----------------------------" " FTQ Data " "----------------------------\n"); BCE_PRINTF(" FTQ Command Control Depth_Now " "Max_Depth Valid_Cnt \n"); BCE_PRINTF(" ------- ---------- ---------- ---------- " "---------- ----------\n"); /* Setup the generic statistic counters for the FTQ valid count. */ val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT << 24) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT); REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT << 24) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT); REG_WR(sc, BCE_HC_STAT_GEN_SEL_1, val); val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT << 24) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT); REG_WR(sc, BCE_HC_STAT_GEN_SEL_2, val); val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT << 24) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT); REG_WR(sc, BCE_HC_STAT_GEN_SEL_3, val); /* Input queue to the Receive Lookup state machine */ cmd = REG_RD(sc, BCE_RLUP_FTQ_CMD); ctl = REG_RD(sc, BCE_RLUP_FTQ_CTL); cur_depth = (ctl & BCE_RLUP_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RLUP_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); BCE_PRINTF(" RLUP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive Processor */ cmd = REG_RD_IND(sc, BCE_RXP_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_RXP_FTQ_CTL); cur_depth = (ctl & BCE_RXP_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RXP_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); BCE_PRINTF(" RXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, 
cur_depth, max_depth, valid_cnt); /* Input queue to the Recevie Processor */ cmd = REG_RD_IND(sc, BCE_RXP_CFTQ_CMD); ctl = REG_RD_IND(sc, BCE_RXP_CFTQ_CTL); cur_depth = (ctl & BCE_RXP_CFTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RXP_CFTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); BCE_PRINTF(" RXPC 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive Virtual to Physical state machine */ cmd = REG_RD(sc, BCE_RV2P_PFTQ_CMD); ctl = REG_RD(sc, BCE_RV2P_PFTQ_CTL); cur_depth = (ctl & BCE_RV2P_PFTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RV2P_PFTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); BCE_PRINTF(" RV2PP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Recevie Virtual to Physical state machine */ cmd = REG_RD(sc, BCE_RV2P_MFTQ_CMD); ctl = REG_RD(sc, BCE_RV2P_MFTQ_CTL); cur_depth = (ctl & BCE_RV2P_MFTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RV2P_MFTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT4); BCE_PRINTF(" RV2PM 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive Virtual to Physical state machine */ cmd = REG_RD(sc, BCE_RV2P_TFTQ_CMD); ctl = REG_RD(sc, BCE_RV2P_TFTQ_CTL); cur_depth = (ctl & BCE_RV2P_TFTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RV2P_TFTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT5); BCE_PRINTF(" RV2PT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Receive DMA state machine */ cmd = REG_RD(sc, BCE_RDMA_FTQ_CMD); ctl = REG_RD(sc, BCE_RDMA_FTQ_CTL); cur_depth = (ctl & BCE_RDMA_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_RDMA_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT6); BCE_PRINTF(" RDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit Scheduler state machine */ cmd = REG_RD(sc, BCE_TSCH_FTQ_CMD); ctl = REG_RD(sc, BCE_TSCH_FTQ_CTL); cur_depth = (ctl & BCE_TSCH_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TSCH_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT7); BCE_PRINTF(" TSCH 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit Buffer Descriptor state machine */ cmd = REG_RD(sc, BCE_TBDR_FTQ_CMD); ctl = REG_RD(sc, BCE_TBDR_FTQ_CTL); cur_depth = (ctl & BCE_TBDR_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TBDR_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT8); BCE_PRINTF(" TBDR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit Processor */ cmd = REG_RD_IND(sc, BCE_TXP_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_TXP_FTQ_CTL); cur_depth = (ctl & BCE_TXP_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TXP_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT9); BCE_PRINTF(" TXP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit DMA state machine */ cmd = REG_RD(sc, BCE_TDMA_FTQ_CMD); ctl = REG_RD(sc, BCE_TDMA_FTQ_CTL); cur_depth = (ctl & BCE_TDMA_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TDMA_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT10); BCE_PRINTF(" TDMA 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the 
Transmit Patch-Up Processor */ cmd = REG_RD_IND(sc, BCE_TPAT_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_TPAT_FTQ_CTL); cur_depth = (ctl & BCE_TPAT_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TPAT_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT11); BCE_PRINTF(" TPAT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Transmit Assembler state machine */ cmd = REG_RD_IND(sc, BCE_TAS_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_TAS_FTQ_CTL); cur_depth = (ctl & BCE_TAS_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_TAS_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT12); BCE_PRINTF(" TAS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Completion Processor */ cmd = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_COM_COMXQ_FTQ_CTL); cur_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_COM_COMXQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT13); BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Completion Processor */ cmd = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_COM_COMTQ_FTQ_CTL); cur_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_COM_COMTQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT14); BCE_PRINTF(" COMT 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Completion Processor */ cmd = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_COM_COMQ_FTQ_CTL); cur_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_COM_COMQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT15); BCE_PRINTF(" COMX 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Setup the generic statistic counters for the FTQ valid count. 
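(The HC_STAT_GEN_SEL_0 selector is reprogrammed here so that GEN_STAT0..3 report the MGMQ, CPQ and CSQ valid counts, plus RV2PCSQ on the 5709, instead of the receive-side queues selected earlier.)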
*/ val = (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT << 16) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT << 8) | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) val = val | (BCE_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI << 24); REG_WR(sc, BCE_HC_STAT_GEN_SEL_0, val); /* Input queue to the Management Control Processor */ cmd = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_MCP_MCPQ_FTQ_CTL); cur_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_MCP_MCPQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT0); BCE_PRINTF(" MCP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Command Processor */ cmd = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CMD); ctl = REG_RD_IND(sc, BCE_CP_CPQ_FTQ_CTL); cur_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_CP_CPQ_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT1); BCE_PRINTF(" CP 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); /* Input queue to the Completion Scheduler state machine */ cmd = REG_RD(sc, BCE_CSCH_CH_FTQ_CMD); ctl = REG_RD(sc, BCE_CSCH_CH_FTQ_CTL); cur_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_CUR_DEPTH) >> 22; max_depth = (ctl & BCE_CSCH_CH_FTQ_CTL_MAX_DEPTH) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT2); BCE_PRINTF(" CS 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) { /* Input queue to the RV2P Command Scheduler */ cmd = REG_RD(sc, BCE_RV2PCSR_FTQ_CMD); ctl = REG_RD(sc, BCE_RV2PCSR_FTQ_CTL); cur_depth = (ctl & 0xFFC00000) >> 22; max_depth = (ctl & 0x003FF000) >> 12; valid_cnt = REG_RD(sc, BCE_HC_STAT_GEN_STAT3); BCE_PRINTF(" RV2PCSR 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd, ctl, cur_depth, max_depth, valid_cnt); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the TX chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_tx_chain(struct bce_softc *sc, u16 tx_prod, int count) { struct tx_bd *txbd; /* First some info about the tx_bd chain structure. */ BCE_PRINTF( "----------------------------" " tx_bd chain " "----------------------------\n"); BCE_PRINTF("page size = 0x%08X, tx chain pages = 0x%08X\n", (u32) BCM_PAGE_SIZE, (u32) sc->tx_pages); BCE_PRINTF("tx_bd per page = 0x%08X, usable tx_bd per page = 0x%08X\n", (u32) TOTAL_TX_BD_PER_PAGE, (u32) USABLE_TX_BD_PER_PAGE); BCE_PRINTF("total tx_bd = 0x%08X\n", (u32) TOTAL_TX_BD_ALLOC); BCE_PRINTF( "----------------------------" " tx_bd data " "----------------------------\n"); /* Now print out a decoded list of TX buffer descriptors. */ for (int i = 0; i < count; i++) { txbd = &sc->tx_bd_chain[TX_PAGE(tx_prod)][TX_IDX(tx_prod)]; bce_dump_txbd(sc, tx_prod, txbd); tx_prod++; } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the RX chain. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rx_bd_chain(struct bce_softc *sc, u16 rx_prod, int count) { struct rx_bd *rxbd; /* First some info about the rx_bd chain structure. */ BCE_PRINTF( "----------------------------" " rx_bd chain " "----------------------------\n"); BCE_PRINTF("page size = 0x%08X, rx chain pages = 0x%08X\n", (u32) BCM_PAGE_SIZE, (u32) sc->rx_pages); BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", (u32) TOTAL_RX_BD_PER_PAGE, (u32) USABLE_RX_BD_PER_PAGE); BCE_PRINTF("total rx_bd = 0x%08X\n", (u32) TOTAL_RX_BD_ALLOC); BCE_PRINTF( "----------------------------" " rx_bd data " "----------------------------\n"); /* Now print out the rx_bd's themselves. */ for (int i = 0; i < count; i++) { rxbd = &sc->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]; bce_dump_rxbd(sc, rx_prod, rxbd); rx_prod = RX_CHAIN_IDX(rx_prod + 1); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the page chain. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_pg_chain(struct bce_softc *sc, u16 pg_prod, int count) { struct rx_bd *pgbd; /* First some info about the page chain structure. */ BCE_PRINTF( "----------------------------" " page chain " "----------------------------\n"); BCE_PRINTF("page size = 0x%08X, pg chain pages = 0x%08X\n", (u32) BCM_PAGE_SIZE, (u32) sc->pg_pages); BCE_PRINTF("rx_bd per page = 0x%08X, usable rx_bd per page = 0x%08X\n", (u32) TOTAL_PG_BD_PER_PAGE, (u32) USABLE_PG_BD_PER_PAGE); BCE_PRINTF("total pg_bd = 0x%08X\n", (u32) TOTAL_PG_BD_ALLOC); BCE_PRINTF( "----------------------------" " page data " "----------------------------\n"); /* Now print out the rx_bd's themselves. */ for (int i = 0; i < count; i++) { pgbd = &sc->pg_bd_chain[PG_PAGE(pg_prod)][PG_IDX(pg_prod)]; bce_dump_pgbd(sc, pg_prod, pgbd); pg_prod = PG_CHAIN_IDX(pg_prod + 1); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } #define BCE_PRINT_RX_CONS(arg) \ if (sblk->status_rx_quick_consumer_index##arg) \ BCE_PRINTF("0x%04X(0x%04X) - rx_quick_consumer_index%d\n", \ sblk->status_rx_quick_consumer_index##arg, (u16) \ RX_CHAIN_IDX(sblk->status_rx_quick_consumer_index##arg), \ arg); #define BCE_PRINT_TX_CONS(arg) \ if (sblk->status_tx_quick_consumer_index##arg) \ BCE_PRINTF("0x%04X(0x%04X) - tx_quick_consumer_index%d\n", \ sblk->status_tx_quick_consumer_index##arg, (u16) \ TX_CHAIN_IDX(sblk->status_tx_quick_consumer_index##arg), \ arg); /****************************************************************************/ /* Prints out the status block from host memory. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_status_block(struct bce_softc *sc) { struct status_block *sblk; bus_dmamap_sync(sc->status_tag, sc->status_map, BUS_DMASYNC_POSTREAD); sblk = sc->status_block; BCE_PRINTF( "----------------------------" " Status Block " "----------------------------\n"); /* Theses indices are used for normal L2 drivers. 
*/ BCE_PRINTF(" 0x%08X - attn_bits\n", sblk->status_attn_bits); BCE_PRINTF(" 0x%08X - attn_bits_ack\n", sblk->status_attn_bits_ack); BCE_PRINT_RX_CONS(0); BCE_PRINT_TX_CONS(0) BCE_PRINTF(" 0x%04X - status_idx\n", sblk->status_idx); /* Theses indices are not used for normal L2 drivers. */ BCE_PRINT_RX_CONS(1); BCE_PRINT_RX_CONS(2); BCE_PRINT_RX_CONS(3); BCE_PRINT_RX_CONS(4); BCE_PRINT_RX_CONS(5); BCE_PRINT_RX_CONS(6); BCE_PRINT_RX_CONS(7); BCE_PRINT_RX_CONS(8); BCE_PRINT_RX_CONS(9); BCE_PRINT_RX_CONS(10); BCE_PRINT_RX_CONS(11); BCE_PRINT_RX_CONS(12); BCE_PRINT_RX_CONS(13); BCE_PRINT_RX_CONS(14); BCE_PRINT_RX_CONS(15); BCE_PRINT_TX_CONS(1); BCE_PRINT_TX_CONS(2); BCE_PRINT_TX_CONS(3); if (sblk->status_completion_producer_index || sblk->status_cmd_consumer_index) BCE_PRINTF("com_prod = 0x%08X, cmd_cons = 0x%08X\n", sblk->status_completion_producer_index, sblk->status_cmd_consumer_index); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } #define BCE_PRINT_64BIT_STAT(arg) \ if (sblk->arg##_lo || sblk->arg##_hi) \ BCE_PRINTF("0x%08X:%08X : %s\n", sblk->arg##_hi, \ sblk->arg##_lo, #arg); #define BCE_PRINT_32BIT_STAT(arg) \ if (sblk->arg) \ BCE_PRINTF(" 0x%08X : %s\n", \ sblk->arg, #arg); /****************************************************************************/ /* Prints out the statistics block from host memory. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_stats_block(struct bce_softc *sc) { struct statistics_block *sblk; bus_dmamap_sync(sc->stats_tag, sc->stats_map, BUS_DMASYNC_POSTREAD); sblk = sc->stats_block; BCE_PRINTF( "---------------" " Stats Block (All Stats Not Shown Are 0) " "---------------\n"); BCE_PRINT_64BIT_STAT(stat_IfHCInOctets); BCE_PRINT_64BIT_STAT(stat_IfHCInBadOctets); BCE_PRINT_64BIT_STAT(stat_IfHCOutOctets); BCE_PRINT_64BIT_STAT(stat_IfHCOutBadOctets); BCE_PRINT_64BIT_STAT(stat_IfHCInUcastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCInBroadcastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCInMulticastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCOutUcastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCOutBroadcastPkts); BCE_PRINT_64BIT_STAT(stat_IfHCOutMulticastPkts); BCE_PRINT_32BIT_STAT( stat_emac_tx_stat_dot3statsinternalmactransmiterrors); BCE_PRINT_32BIT_STAT(stat_Dot3StatsCarrierSenseErrors); BCE_PRINT_32BIT_STAT(stat_Dot3StatsFCSErrors); BCE_PRINT_32BIT_STAT(stat_Dot3StatsAlignmentErrors); BCE_PRINT_32BIT_STAT(stat_Dot3StatsSingleCollisionFrames); BCE_PRINT_32BIT_STAT(stat_Dot3StatsMultipleCollisionFrames); BCE_PRINT_32BIT_STAT(stat_Dot3StatsDeferredTransmissions); BCE_PRINT_32BIT_STAT(stat_Dot3StatsExcessiveCollisions); BCE_PRINT_32BIT_STAT(stat_Dot3StatsLateCollisions); BCE_PRINT_32BIT_STAT(stat_EtherStatsCollisions); BCE_PRINT_32BIT_STAT(stat_EtherStatsFragments); BCE_PRINT_32BIT_STAT(stat_EtherStatsJabbers); BCE_PRINT_32BIT_STAT(stat_EtherStatsUndersizePkts); BCE_PRINT_32BIT_STAT(stat_EtherStatsOversizePkts); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx64Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx65Octetsto127Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx128Octetsto255Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx256Octetsto511Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx512Octetsto1023Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1024Octetsto1522Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsRx1523Octetsto9022Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx64Octets); 
BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx65Octetsto127Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx128Octetsto255Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx256Octetsto511Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx512Octetsto1023Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1024Octetsto1522Octets); BCE_PRINT_32BIT_STAT(stat_EtherStatsPktsTx1523Octetsto9022Octets); BCE_PRINT_32BIT_STAT(stat_XonPauseFramesReceived); BCE_PRINT_32BIT_STAT(stat_XoffPauseFramesReceived); BCE_PRINT_32BIT_STAT(stat_OutXonSent); BCE_PRINT_32BIT_STAT(stat_OutXoffSent); BCE_PRINT_32BIT_STAT(stat_FlowControlDone); BCE_PRINT_32BIT_STAT(stat_MacControlFramesReceived); BCE_PRINT_32BIT_STAT(stat_XoffStateEntered); BCE_PRINT_32BIT_STAT(stat_IfInFramesL2FilterDiscards); BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerDiscards); BCE_PRINT_32BIT_STAT(stat_IfInFTQDiscards); BCE_PRINT_32BIT_STAT(stat_IfInMBUFDiscards); BCE_PRINT_32BIT_STAT(stat_IfInRuleCheckerP4Hit); BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerDiscards); BCE_PRINT_32BIT_STAT(stat_CatchupInFTQDiscards); BCE_PRINT_32BIT_STAT(stat_CatchupInMBUFDiscards); BCE_PRINT_32BIT_STAT(stat_CatchupInRuleCheckerP4Hit); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out a summary of the driver state. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_driver_state(struct bce_softc *sc) { u32 val_hi, val_lo; BCE_PRINTF( "-----------------------------" " Driver State " "-----------------------------\n"); val_hi = BCE_ADDR_HI(sc); val_lo = BCE_ADDR_LO(sc); BCE_PRINTF("0x%08X:%08X - (sc) driver softc structure virtual " "address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->bce_vhandle); val_lo = BCE_ADDR_LO(sc->bce_vhandle); BCE_PRINTF("0x%08X:%08X - (sc->bce_vhandle) PCI BAR virtual " "address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->status_block); val_lo = BCE_ADDR_LO(sc->status_block); BCE_PRINTF("0x%08X:%08X - (sc->status_block) status block " "virtual address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->stats_block); val_lo = BCE_ADDR_LO(sc->stats_block); BCE_PRINTF("0x%08X:%08X - (sc->stats_block) statistics block " "virtual address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->tx_bd_chain); val_lo = BCE_ADDR_LO(sc->tx_bd_chain); BCE_PRINTF("0x%08X:%08X - (sc->tx_bd_chain) tx_bd chain " "virtual address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->rx_bd_chain); val_lo = BCE_ADDR_LO(sc->rx_bd_chain); BCE_PRINTF("0x%08X:%08X - (sc->rx_bd_chain) rx_bd chain " "virtual address\n", val_hi, val_lo); if (bce_hdr_split == TRUE) { val_hi = BCE_ADDR_HI(sc->pg_bd_chain); val_lo = BCE_ADDR_LO(sc->pg_bd_chain); BCE_PRINTF("0x%08X:%08X - (sc->pg_bd_chain) page chain " "virtual address\n", val_hi, val_lo); } val_hi = BCE_ADDR_HI(sc->tx_mbuf_ptr); val_lo = BCE_ADDR_LO(sc->tx_mbuf_ptr); BCE_PRINTF("0x%08X:%08X - (sc->tx_mbuf_ptr) tx mbuf chain " "virtual address\n", val_hi, val_lo); val_hi = BCE_ADDR_HI(sc->rx_mbuf_ptr); val_lo = BCE_ADDR_LO(sc->rx_mbuf_ptr); BCE_PRINTF("0x%08X:%08X - (sc->rx_mbuf_ptr) rx mbuf chain " "virtual address\n", val_hi, val_lo); if (bce_hdr_split == TRUE) { val_hi = BCE_ADDR_HI(sc->pg_mbuf_ptr); val_lo = BCE_ADDR_LO(sc->pg_mbuf_ptr); BCE_PRINTF("0x%08X:%08X - (sc->pg_mbuf_ptr) page mbuf chain " "virtual address\n", val_hi, val_lo); } BCE_PRINTF(" 0x%016llX - (sc->interrupts_generated) 
" "h/w intrs\n", (long long unsigned int) sc->interrupts_generated); BCE_PRINTF(" 0x%016llX - (sc->interrupts_rx) " "rx interrupts handled\n", (long long unsigned int) sc->interrupts_rx); BCE_PRINTF(" 0x%016llX - (sc->interrupts_tx) " "tx interrupts handled\n", (long long unsigned int) sc->interrupts_tx); BCE_PRINTF(" 0x%016llX - (sc->phy_interrupts) " "phy interrupts handled\n", (long long unsigned int) sc->phy_interrupts); BCE_PRINTF(" 0x%08X - (sc->last_status_idx) " "status block index\n", sc->last_status_idx); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_prod) tx producer " "index\n", sc->tx_prod, (u16) TX_CHAIN_IDX(sc->tx_prod)); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->tx_cons) tx consumer " "index\n", sc->tx_cons, (u16) TX_CHAIN_IDX(sc->tx_cons)); BCE_PRINTF(" 0x%08X - (sc->tx_prod_bseq) tx producer " "byte seq index\n", sc->tx_prod_bseq); BCE_PRINTF(" 0x%08X - (sc->debug_tx_mbuf_alloc) tx " "mbufs allocated\n", sc->debug_tx_mbuf_alloc); BCE_PRINTF(" 0x%08X - (sc->used_tx_bd) used " "tx_bd's\n", sc->used_tx_bd); BCE_PRINTF(" 0x%04X/0x%04X - (sc->tx_hi_watermark)/" "(sc->max_tx_bd)\n", sc->tx_hi_watermark, sc->max_tx_bd); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_prod) rx producer " "index\n", sc->rx_prod, (u16) RX_CHAIN_IDX(sc->rx_prod)); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->rx_cons) rx consumer " "index\n", sc->rx_cons, (u16) RX_CHAIN_IDX(sc->rx_cons)); BCE_PRINTF(" 0x%08X - (sc->rx_prod_bseq) rx producer " "byte seq index\n", sc->rx_prod_bseq); BCE_PRINTF(" 0x%04X/0x%04X - (sc->rx_low_watermark)/" "(sc->max_rx_bd)\n", sc->rx_low_watermark, sc->max_rx_bd); BCE_PRINTF(" 0x%08X - (sc->debug_rx_mbuf_alloc) rx " "mbufs allocated\n", sc->debug_rx_mbuf_alloc); BCE_PRINTF(" 0x%08X - (sc->free_rx_bd) free " "rx_bd's\n", sc->free_rx_bd); if (bce_hdr_split == TRUE) { BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_prod) page producer " "index\n", sc->pg_prod, (u16) PG_CHAIN_IDX(sc->pg_prod)); BCE_PRINTF(" 0x%04X(0x%04X) - (sc->pg_cons) page consumer " "index\n", sc->pg_cons, (u16) PG_CHAIN_IDX(sc->pg_cons)); BCE_PRINTF(" 0x%08X - (sc->debug_pg_mbuf_alloc) page " "mbufs allocated\n", sc->debug_pg_mbuf_alloc); } BCE_PRINTF(" 0x%08X - (sc->free_pg_bd) free page " "rx_bd's\n", sc->free_pg_bd); BCE_PRINTF(" 0x%04X/0x%04X - (sc->pg_low_watermark)/" "(sc->max_pg_bd)\n", sc->pg_low_watermark, sc->max_pg_bd); BCE_PRINTF(" 0x%08X - (sc->mbuf_alloc_failed_count) " "mbuf alloc failures\n", sc->mbuf_alloc_failed_count); BCE_PRINTF(" 0x%08X - (sc->bce_flags) " "bce mac flags\n", sc->bce_flags); BCE_PRINTF(" 0x%08X - (sc->bce_phy_flags) " "bce phy flags\n", sc->bce_phy_flags); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the hardware state through a summary of important register, */ /* followed by a complete register dump. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_hw_state(struct bce_softc *sc) { u32 val; BCE_PRINTF( "----------------------------" " Hardware State " "----------------------------\n"); BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); val = REG_RD(sc, BCE_MISC_ENABLE_STATUS_BITS); BCE_PRINTF("0x%08X - (0x%06X) misc_enable_status_bits\n", val, BCE_MISC_ENABLE_STATUS_BITS); val = REG_RD(sc, BCE_DMA_STATUS); BCE_PRINTF("0x%08X - (0x%06X) dma_status\n", val, BCE_DMA_STATUS); val = REG_RD(sc, BCE_CTX_STATUS); BCE_PRINTF("0x%08X - (0x%06X) ctx_status\n", val, BCE_CTX_STATUS); val = REG_RD(sc, BCE_EMAC_STATUS); BCE_PRINTF("0x%08X - (0x%06X) emac_status\n", val, BCE_EMAC_STATUS); val = REG_RD(sc, BCE_RPM_STATUS); BCE_PRINTF("0x%08X - (0x%06X) rpm_status\n", val, BCE_RPM_STATUS); /* ToDo: Create a #define for this constant. */ val = REG_RD(sc, 0x2004); BCE_PRINTF("0x%08X - (0x%06X) rlup_status\n", val, 0x2004); val = REG_RD(sc, BCE_RV2P_STATUS); BCE_PRINTF("0x%08X - (0x%06X) rv2p_status\n", val, BCE_RV2P_STATUS); /* ToDo: Create a #define for this constant. */ val = REG_RD(sc, 0x2c04); BCE_PRINTF("0x%08X - (0x%06X) rdma_status\n", val, 0x2c04); val = REG_RD(sc, BCE_TBDR_STATUS); BCE_PRINTF("0x%08X - (0x%06X) tbdr_status\n", val, BCE_TBDR_STATUS); val = REG_RD(sc, BCE_TDMA_STATUS); BCE_PRINTF("0x%08X - (0x%06X) tdma_status\n", val, BCE_TDMA_STATUS); val = REG_RD(sc, BCE_HC_STATUS); BCE_PRINTF("0x%08X - (0x%06X) hc_status\n", val, BCE_HC_STATUS); val = REG_RD_IND(sc, BCE_TXP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE); val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE); val = REG_RD_IND(sc, BCE_RXP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE); val = REG_RD_IND(sc, BCE_COM_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE); val = REG_RD_IND(sc, BCE_MCP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) mcp_cpu_state\n", val, BCE_MCP_CPU_STATE); val = REG_RD_IND(sc, BCE_CP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = 0x400; i < 0x8000; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, REG_RD(sc, i), REG_RD(sc, i + 0x4), REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the contentst of shared memory which is used for host driver */ /* to bootcode firmware communication. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_shmem_state(struct bce_softc *sc) { BCE_PRINTF( "----------------------------" " Hardware State " "----------------------------\n"); BCE_PRINTF("0x%08X - Shared memory base address\n", sc->bce_shmem_base); BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); BCE_PRINTF( "----------------------------" " Shared Mem " "----------------------------\n"); for (int i = 0x0; i < 0x200; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, bce_shmem_rd(sc, i), bce_shmem_rd(sc, i + 0x4), bce_shmem_rd(sc, i + 0x8), bce_shmem_rd(sc, i + 0xC)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the mailbox queue registers. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_mq_regs(struct bce_softc *sc) { BCE_PRINTF( "----------------------------" " MQ Regs " "----------------------------\n"); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); for (int i = 0x3c00; i < 0x4000; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, REG_RD(sc, i), REG_RD(sc, i + 0x4), REG_RD(sc, i + 0x8), REG_RD(sc, i + 0xC)); } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the bootcode state. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_bc_state(struct bce_softc *sc) { u32 val; BCE_PRINTF( "----------------------------" " Bootcode State " "----------------------------\n"); BCE_PRINTF("%s - bootcode version\n", sc->bce_bc_ver); val = bce_shmem_rd(sc, BCE_BC_RESET_TYPE); BCE_PRINTF("0x%08X - (0x%06X) reset_type\n", val, BCE_BC_RESET_TYPE); val = bce_shmem_rd(sc, BCE_BC_STATE); BCE_PRINTF("0x%08X - (0x%06X) state\n", val, BCE_BC_STATE); val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION); BCE_PRINTF("0x%08X - (0x%06X) condition\n", val, BCE_BC_STATE_CONDITION); val = bce_shmem_rd(sc, BCE_BC_STATE_DEBUG_CMD); BCE_PRINTF("0x%08X - (0x%06X) debug_cmd\n", val, BCE_BC_STATE_DEBUG_CMD); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the TXP processor state. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_txp_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[3]; BCE_PRINTF( "----------------------------" " TXP State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_TXP_SCRATCH + 0x10 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_TXP_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_mode\n", val, BCE_TXP_CPU_MODE); val = REG_RD_IND(sc, BCE_TXP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_state\n", val, BCE_TXP_CPU_STATE); val = REG_RD_IND(sc, BCE_TXP_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) txp_cpu_event_mask\n", val, BCE_TXP_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_TXP_CPU_MODE; i < 0x68000; i += 0x10) { /* Skip the big blank spaces */ if (i < 0x454000 && i > 0x5ffff) BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the RXP processor state. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rxp_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[3]; BCE_PRINTF( "----------------------------" " RXP State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_RXP_SCRATCH + 0x10 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_RXP_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_mode\n", val, BCE_RXP_CPU_MODE); val = REG_RD_IND(sc, BCE_RXP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_state\n", val, BCE_RXP_CPU_STATE); val = REG_RD_IND(sc, BCE_RXP_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) rxp_cpu_event_mask\n", val, BCE_RXP_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) { /* Skip the big blank sapces */ if (i < 0xc5400 && i > 0xdffff) BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the TPAT processor state. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_tpat_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[3]; BCE_PRINTF( "----------------------------" " TPAT State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_TPAT_SCRATCH + 0x410 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_TPAT_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_mode\n", val, BCE_TPAT_CPU_MODE); val = REG_RD_IND(sc, BCE_TPAT_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_state\n", val, BCE_TPAT_CPU_STATE); val = REG_RD_IND(sc, BCE_TPAT_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) tpat_cpu_event_mask\n", val, BCE_TPAT_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_TPAT_CPU_MODE; i < 0xa3fff; i += 0x10) { /* Skip the big blank spaces */ if (i < 0x854000 && i > 0x9ffff) BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the Command Procesor (CP) state. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_cp_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[3]; BCE_PRINTF( "----------------------------" " CP State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_CP_SCRATCH + 0x10 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_CP_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_mode\n", val, BCE_CP_CPU_MODE); val = REG_RD_IND(sc, BCE_CP_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_state\n", val, BCE_CP_CPU_STATE); val = REG_RD_IND(sc, BCE_CP_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) cp_cpu_event_mask\n", val, BCE_CP_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) { /* Skip the big blank spaces */ if (i < 0x185400 && i > 0x19ffff) BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the Completion Procesor (COM) state. */ /* */ /* Returns: */ /* Nothing. 
*/ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_com_state(struct bce_softc *sc, int regs) { u32 val; u32 fw_version[4]; BCE_PRINTF( "----------------------------" " COM State " "----------------------------\n"); for (int i = 0; i < 3; i++) fw_version[i] = htonl(REG_RD_IND(sc, (BCE_COM_SCRATCH + 0x10 + i * 4))); BCE_PRINTF("Firmware version - %s\n", (char *) fw_version); val = REG_RD_IND(sc, BCE_COM_CPU_MODE); BCE_PRINTF("0x%08X - (0x%06X) com_cpu_mode\n", val, BCE_COM_CPU_MODE); val = REG_RD_IND(sc, BCE_COM_CPU_STATE); BCE_PRINTF("0x%08X - (0x%06X) com_cpu_state\n", val, BCE_COM_CPU_STATE); val = REG_RD_IND(sc, BCE_COM_CPU_EVENT_MASK); BCE_PRINTF("0x%08X - (0x%06X) com_cpu_event_mask\n", val, BCE_COM_CPU_EVENT_MASK); if (regs) { BCE_PRINTF( "----------------------------" " Register Dump " "----------------------------\n"); for (int i = BCE_COM_CPU_MODE; i < 0x1053e8; i += 0x10) { BCE_PRINTF("0x%04X: 0x%08X 0x%08X " "0x%08X 0x%08X\n", i, REG_RD_IND(sc, i), REG_RD_IND(sc, i + 0x4), REG_RD_IND(sc, i + 0x8), REG_RD_IND(sc, i + 0xC)); } } BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the Receive Virtual 2 Physical (RV2P) state. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_dump_rv2p_state(struct bce_softc *sc) { u32 val, pc1, pc2, fw_ver_high, fw_ver_low; BCE_PRINTF( "----------------------------" " RV2P State " "----------------------------\n"); /* Stall the RV2P processors. */ val = REG_RD_IND(sc, BCE_RV2P_CONFIG); val |= BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2; REG_WR_IND(sc, BCE_RV2P_CONFIG, val); /* Read the firmware version. */ val = 0x00000001; REG_WR_IND(sc, BCE_RV2P_PROC1_ADDR_CMD, val); fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW); fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) & BCE_RV2P_INSTR_HIGH_HIGH; BCE_PRINTF("RV2P1 Firmware version - 0x%08X:0x%08X\n", fw_ver_high, fw_ver_low); val = 0x00000001; REG_WR_IND(sc, BCE_RV2P_PROC2_ADDR_CMD, val); fw_ver_low = REG_RD_IND(sc, BCE_RV2P_INSTR_LOW); fw_ver_high = REG_RD_IND(sc, BCE_RV2P_INSTR_HIGH) & BCE_RV2P_INSTR_HIGH_HIGH; BCE_PRINTF("RV2P2 Firmware version - 0x%08X:0x%08X\n", fw_ver_high, fw_ver_low); /* Resume the RV2P processors. */ val = REG_RD_IND(sc, BCE_RV2P_CONFIG); val &= ~(BCE_RV2P_CONFIG_STALL_PROC1 | BCE_RV2P_CONFIG_STALL_PROC2); REG_WR_IND(sc, BCE_RV2P_CONFIG, val); /* Fetch the program counter value. */ val = 0x68007800; REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val); val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK); pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE); pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16; BCE_PRINTF("0x%08X - RV2P1 program counter (1st read)\n", pc1); BCE_PRINTF("0x%08X - RV2P2 program counter (1st read)\n", pc2); /* Fetch the program counter value again to see if it is advancing. 
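 * Matching values across the two reads suggest the RV2P processors are
 * hung rather than making forward progress.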
*/ val = 0x68007800; REG_WR_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK, val); val = REG_RD_IND(sc, BCE_RV2P_DEBUG_VECT_PEEK); pc1 = (val & BCE_RV2P_DEBUG_VECT_PEEK_1_VALUE); pc2 = (val & BCE_RV2P_DEBUG_VECT_PEEK_2_VALUE) >> 16; BCE_PRINTF("0x%08X - RV2P1 program counter (2nd read)\n", pc1); BCE_PRINTF("0x%08X - RV2P2 program counter (2nd read)\n", pc2); BCE_PRINTF( "----------------------------" "----------------" "----------------------------\n"); } /****************************************************************************/ /* Prints out the driver state and then enters the debugger. */ /* */ /* Returns: */ /* Nothing. */ /****************************************************************************/ static __attribute__ ((noinline)) void bce_breakpoint(struct bce_softc *sc) { /* * Unreachable code to silence compiler warnings * about unused functions. */ if (0) { bce_freeze_controller(sc); bce_unfreeze_controller(sc); bce_dump_enet(sc, NULL); bce_dump_txbd(sc, 0, NULL); bce_dump_rxbd(sc, 0, NULL); bce_dump_tx_mbuf_chain(sc, 0, USABLE_TX_BD_ALLOC); bce_dump_rx_mbuf_chain(sc, 0, USABLE_RX_BD_ALLOC); bce_dump_pg_mbuf_chain(sc, 0, USABLE_PG_BD_ALLOC); bce_dump_l2fhdr(sc, 0, NULL); bce_dump_ctx(sc, RX_CID); bce_dump_ftqs(sc); bce_dump_tx_chain(sc, 0, USABLE_TX_BD_ALLOC); bce_dump_rx_bd_chain(sc, 0, USABLE_RX_BD_ALLOC); bce_dump_pg_chain(sc, 0, USABLE_PG_BD_ALLOC); bce_dump_status_block(sc); bce_dump_stats_block(sc); bce_dump_driver_state(sc); bce_dump_hw_state(sc); bce_dump_bc_state(sc); bce_dump_txp_state(sc, 0); bce_dump_rxp_state(sc, 0); bce_dump_tpat_state(sc, 0); bce_dump_cp_state(sc, 0); bce_dump_com_state(sc, 0); bce_dump_rv2p_state(sc); bce_dump_pgbd(sc, 0, NULL); } bce_dump_status_block(sc); bce_dump_driver_state(sc); /* Call the debugger. */ breakpoint(); } #endif diff --git a/sys/dev/bfe/if_bfe.c b/sys/dev/bfe/if_bfe.c index 817c867862d0..2fb6938fbdc5 100644 --- a/sys/dev/bfe/if_bfe.c +++ b/sys/dev/bfe/if_bfe.c @@ -1,1958 +1,1956 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2003 Stuart Walsh * and Duncan Barclay * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(bfe, pci, 1, 1, 1); MODULE_DEPEND(bfe, ether, 1, 1, 1); MODULE_DEPEND(bfe, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #define BFE_DEVDESC_MAX 64 /* Maximum device description length */ static struct bfe_type bfe_devs[] = { { BCOM_VENDORID, BCOM_DEVICEID_BCM4401, "Broadcom BCM4401 Fast Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM4401B0, "Broadcom BCM4401-B0 Fast Ethernet" }, { 0, 0, NULL } }; static int bfe_probe (device_t); static int bfe_attach (device_t); static int bfe_detach (device_t); static int bfe_suspend (device_t); static int bfe_resume (device_t); static void bfe_release_resources (struct bfe_softc *); static void bfe_intr (void *); static int bfe_encap (struct bfe_softc *, struct mbuf **); static void bfe_start (if_t); static void bfe_start_locked (if_t); static int bfe_ioctl (if_t, u_long, caddr_t); static void bfe_init (void *); static void bfe_init_locked (void *); static void bfe_stop (struct bfe_softc *); static void bfe_watchdog (struct bfe_softc *); static int bfe_shutdown (device_t); static void bfe_tick (void *); static void bfe_txeof (struct bfe_softc *); static void bfe_rxeof (struct bfe_softc *); static void bfe_set_rx_mode (struct bfe_softc *); static int bfe_list_rx_init (struct bfe_softc *); static void bfe_list_tx_init (struct bfe_softc *); static void bfe_discard_buf (struct bfe_softc *, int); static int bfe_list_newbuf (struct bfe_softc *, int); static void bfe_rx_ring_free (struct bfe_softc *); static void bfe_pci_setup (struct bfe_softc *, u_int32_t); static int bfe_ifmedia_upd (if_t); static void bfe_ifmedia_sts (if_t, struct ifmediareq *); static int bfe_miibus_readreg (device_t, int, int); static int bfe_miibus_writereg (device_t, int, int, int); static void bfe_miibus_statchg (device_t); static int bfe_wait_bit (struct bfe_softc *, u_int32_t, u_int32_t, u_long, const int); static void bfe_get_config (struct bfe_softc *sc); static void bfe_read_eeprom (struct bfe_softc *, u_int8_t *); static void bfe_stats_update (struct bfe_softc *); static void bfe_clear_stats (struct bfe_softc *); static int bfe_readphy (struct bfe_softc *, u_int32_t, u_int32_t*); static int bfe_writephy (struct bfe_softc *, u_int32_t, u_int32_t); static int bfe_resetphy (struct bfe_softc *); static int bfe_setupphy (struct bfe_softc *); static void bfe_chip_reset (struct bfe_softc *); static void bfe_chip_halt (struct bfe_softc *); static void bfe_core_reset (struct bfe_softc *); static void bfe_core_disable (struct bfe_softc *); static int bfe_dma_alloc (struct bfe_softc *); static void bfe_dma_free (struct bfe_softc *sc); static void bfe_dma_map (void *, bus_dma_segment_t *, int, int); static void bfe_cam_write (struct bfe_softc *, u_char *, int); static int sysctl_bfe_stats (SYSCTL_HANDLER_ARGS); static device_method_t bfe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bfe_probe), DEVMETHOD(device_attach, bfe_attach), DEVMETHOD(device_detach, bfe_detach), DEVMETHOD(device_shutdown, bfe_shutdown), DEVMETHOD(device_suspend, bfe_suspend), DEVMETHOD(device_resume, bfe_resume), /* MII interface */ DEVMETHOD(miibus_readreg, bfe_miibus_readreg), DEVMETHOD(miibus_writereg, bfe_miibus_writereg), DEVMETHOD(miibus_statchg, bfe_miibus_statchg), DEVMETHOD_END }; 
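/*
 * Newbus glue: the method table above is wrapped in a driver_t and
 * registered on the pci bus below.  MODULE_PNP_INFO exports the
 * bfe_devs ID table so devmatch(8) can autoload the module, and miibus
 * is declared as a child driver to handle the PHY.
 */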
static driver_t bfe_driver = { "bfe", bfe_methods, sizeof(struct bfe_softc) }; DRIVER_MODULE(bfe, pci, bfe_driver, 0, 0); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, bfe, bfe_devs, nitems(bfe_devs) - 1); DRIVER_MODULE(miibus, bfe, miibus_driver, 0, 0); /* * Probe for a Broadcom 4401 chip. */ static int bfe_probe(device_t dev) { struct bfe_type *t; t = bfe_devs; while (t->bfe_name != NULL) { if (pci_get_vendor(dev) == t->bfe_vid && pci_get_device(dev) == t->bfe_did) { device_set_desc(dev, t->bfe_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } struct bfe_dmamap_arg { bus_addr_t bfe_busaddr; }; static int bfe_dma_alloc(struct bfe_softc *sc) { struct bfe_dmamap_arg ctx; struct bfe_rx_data *rd; struct bfe_tx_data *td; int error, i; /* * parent tag. Apparently the chip cannot handle any DMA address * greater than 1GB. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->bfe_dev), /* parent */ 1, 0, /* alignment, boundary */ BFE_DMA_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->bfe_parent_tag); if (error != 0) { device_printf(sc->bfe_dev, "cannot create parent DMA tag.\n"); goto fail; } /* Create tag for Tx ring. */ error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */ BFE_TX_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BFE_TX_LIST_SIZE, /* maxsize */ 1, /* nsegments */ BFE_TX_LIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->bfe_tx_tag); if (error != 0) { device_printf(sc->bfe_dev, "cannot create Tx ring DMA tag.\n"); goto fail; } /* Create tag for Rx ring. */ error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */ BFE_RX_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BFE_RX_LIST_SIZE, /* maxsize */ 1, /* nsegments */ BFE_RX_LIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->bfe_rx_tag); if (error != 0) { device_printf(sc->bfe_dev, "cannot create Rx ring DMA tag.\n"); goto fail; } /* Create tag for Tx buffers. */ error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * BFE_MAXTXSEGS, /* maxsize */ BFE_MAXTXSEGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->bfe_txmbuf_tag); if (error != 0) { device_printf(sc->bfe_dev, "cannot create Tx buffer DMA tag.\n"); goto fail; } /* Create tag for Rx buffers. */ error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->bfe_rxmbuf_tag); if (error != 0) { device_printf(sc->bfe_dev, "cannot create Rx buffer DMA tag.\n"); goto fail; } /* Allocate DMA'able memory and load DMA map. 
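 * The ring memory is allocated zeroed and coherent; bfe_dma_map() is the
 * load callback that records the single segment's bus address so the
 * ring base can later be programmed into the chip.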
*/ error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_tx_map); if (error != 0) { device_printf(sc->bfe_dev, "cannot allocate DMA'able memory for Tx ring.\n"); goto fail; } ctx.bfe_busaddr = 0; error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map, sc->bfe_tx_list, BFE_TX_LIST_SIZE, bfe_dma_map, &ctx, BUS_DMA_NOWAIT); if (error != 0 || ctx.bfe_busaddr == 0) { device_printf(sc->bfe_dev, "cannot load DMA'able memory for Tx ring.\n"); goto fail; } sc->bfe_tx_dma = BFE_ADDR_LO(ctx.bfe_busaddr); error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_rx_map); if (error != 0) { device_printf(sc->bfe_dev, "cannot allocate DMA'able memory for Rx ring.\n"); goto fail; } ctx.bfe_busaddr = 0; error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map, sc->bfe_rx_list, BFE_RX_LIST_SIZE, bfe_dma_map, &ctx, BUS_DMA_NOWAIT); if (error != 0 || ctx.bfe_busaddr == 0) { device_printf(sc->bfe_dev, "cannot load DMA'able memory for Rx ring.\n"); goto fail; } sc->bfe_rx_dma = BFE_ADDR_LO(ctx.bfe_busaddr); /* Create DMA maps for Tx buffers. */ for (i = 0; i < BFE_TX_LIST_CNT; i++) { td = &sc->bfe_tx_ring[i]; td->bfe_mbuf = NULL; td->bfe_map = NULL; error = bus_dmamap_create(sc->bfe_txmbuf_tag, 0, &td->bfe_map); if (error != 0) { device_printf(sc->bfe_dev, "cannot create DMA map for Tx.\n"); goto fail; } } /* Create spare DMA map for Rx buffers. */ error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &sc->bfe_rx_sparemap); if (error != 0) { device_printf(sc->bfe_dev, "cannot create spare DMA map for Rx.\n"); goto fail; } /* Create DMA maps for Rx buffers. */ for (i = 0; i < BFE_RX_LIST_CNT; i++) { rd = &sc->bfe_rx_ring[i]; rd->bfe_mbuf = NULL; rd->bfe_map = NULL; rd->bfe_ctrl = 0; error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &rd->bfe_map); if (error != 0) { device_printf(sc->bfe_dev, "cannot create DMA map for Rx.\n"); goto fail; } } fail: return (error); } static void bfe_dma_free(struct bfe_softc *sc) { struct bfe_tx_data *td; struct bfe_rx_data *rd; int i; /* Tx ring. */ if (sc->bfe_tx_tag != NULL) { if (sc->bfe_tx_dma != 0) bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map); if (sc->bfe_tx_list != NULL) bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list, sc->bfe_tx_map); sc->bfe_tx_dma = 0; sc->bfe_tx_list = NULL; bus_dma_tag_destroy(sc->bfe_tx_tag); sc->bfe_tx_tag = NULL; } /* Rx ring. */ if (sc->bfe_rx_tag != NULL) { if (sc->bfe_rx_dma != 0) bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map); if (sc->bfe_rx_list != NULL) bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list, sc->bfe_rx_map); sc->bfe_rx_dma = 0; sc->bfe_rx_list = NULL; bus_dma_tag_destroy(sc->bfe_rx_tag); sc->bfe_rx_tag = NULL; } /* Tx buffers. */ if (sc->bfe_txmbuf_tag != NULL) { for (i = 0; i < BFE_TX_LIST_CNT; i++) { td = &sc->bfe_tx_ring[i]; if (td->bfe_map != NULL) { bus_dmamap_destroy(sc->bfe_txmbuf_tag, td->bfe_map); td->bfe_map = NULL; } } bus_dma_tag_destroy(sc->bfe_txmbuf_tag); sc->bfe_txmbuf_tag = NULL; } /* Rx buffers. 
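 * Destroy the per-descriptor maps and the spare map before the tag; the
 * mbufs themselves are released separately by bfe_rx_ring_free().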
*/ if (sc->bfe_rxmbuf_tag != NULL) { for (i = 0; i < BFE_RX_LIST_CNT; i++) { rd = &sc->bfe_rx_ring[i]; if (rd->bfe_map != NULL) { bus_dmamap_destroy(sc->bfe_rxmbuf_tag, rd->bfe_map); rd->bfe_map = NULL; } } if (sc->bfe_rx_sparemap != NULL) { bus_dmamap_destroy(sc->bfe_rxmbuf_tag, sc->bfe_rx_sparemap); sc->bfe_rx_sparemap = NULL; } bus_dma_tag_destroy(sc->bfe_rxmbuf_tag); sc->bfe_rxmbuf_tag = NULL; } if (sc->bfe_parent_tag != NULL) { bus_dma_tag_destroy(sc->bfe_parent_tag); sc->bfe_parent_tag = NULL; } } static int bfe_attach(device_t dev) { if_t ifp = NULL; struct bfe_softc *sc; int error = 0, rid; sc = device_get_softc(dev); mtx_init(&sc->bfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->bfe_stat_co, &sc->bfe_mtx, 0); sc->bfe_dev = dev; /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bfe_res == NULL) { device_printf(dev, "couldn't map memory\n"); error = ENXIO; goto fail; } /* Allocate interrupt */ rid = 0; sc->bfe_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->bfe_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } if (bfe_dma_alloc(sc) != 0) { device_printf(dev, "failed to allocate DMA resources\n"); error = ENXIO; goto fail; } SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, sysctl_bfe_stats, "I", "Statistics"); /* Set up ifnet structure */ ifp = sc->bfe_ifp = if_alloc(IFT_ETHER); if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setioctlfn(ifp, bfe_ioctl); if_setstartfn(ifp, bfe_start); if_setinitfn(ifp, bfe_init); if_setsendqlen(ifp, BFE_TX_QLEN); if_setsendqready(ifp); bfe_get_config(sc); /* Reset the chip and turn on the PHY */ BFE_LOCK(sc); bfe_chip_reset(sc); BFE_UNLOCK(sc); error = mii_attach(dev, &sc->bfe_miibus, ifp, bfe_ifmedia_upd, bfe_ifmedia_sts, BMSR_DEFCAPMASK, sc->bfe_phyaddr, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } ether_ifattach(ifp, sc->bfe_enaddr); /* * Tell the upper layer(s) we support long frames. */ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0); /* * Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->bfe_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, bfe_intr, sc, &sc->bfe_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); goto fail; } fail: if (error != 0) bfe_detach(dev); return (error); } static int bfe_detach(device_t dev) { struct bfe_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->bfe_ifp; if (device_is_attached(dev)) { BFE_LOCK(sc); sc->bfe_flags |= BFE_FLAG_DETACH; bfe_stop(sc); BFE_UNLOCK(sc); callout_drain(&sc->bfe_stat_co); if (ifp != NULL) ether_ifdetach(ifp); } BFE_LOCK(sc); bfe_chip_reset(sc); BFE_UNLOCK(sc); bus_generic_detach(dev); - if (sc->bfe_miibus != NULL) - device_delete_child(dev, sc->bfe_miibus); bfe_release_resources(sc); bfe_dma_free(sc); mtx_destroy(&sc->bfe_mtx); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static int bfe_shutdown(device_t dev) { struct bfe_softc *sc; sc = device_get_softc(dev); BFE_LOCK(sc); bfe_stop(sc); BFE_UNLOCK(sc); return (0); } static int bfe_suspend(device_t dev) { struct bfe_softc *sc; sc = device_get_softc(dev); BFE_LOCK(sc); bfe_stop(sc); BFE_UNLOCK(sc); return (0); } static int bfe_resume(device_t dev) { struct bfe_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->bfe_ifp; BFE_LOCK(sc); bfe_chip_reset(sc); if (if_getflags(ifp) & IFF_UP) { bfe_init_locked(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING && !if_sendq_empty(ifp)) bfe_start_locked(ifp); } BFE_UNLOCK(sc); return (0); } static int bfe_miibus_readreg(device_t dev, int phy, int reg) { struct bfe_softc *sc; u_int32_t ret; sc = device_get_softc(dev); bfe_readphy(sc, reg, &ret); return (ret); } static int bfe_miibus_writereg(device_t dev, int phy, int reg, int val) { struct bfe_softc *sc; sc = device_get_softc(dev); bfe_writephy(sc, reg, val); return (0); } static void bfe_miibus_statchg(device_t dev) { struct bfe_softc *sc; struct mii_data *mii; u_int32_t val; #ifdef notyet u_int32_t flow; #endif sc = device_get_softc(dev); mii = device_get_softc(sc->bfe_miibus); sc->bfe_flags &= ~BFE_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->bfe_flags |= BFE_FLAG_LINK; break; default: break; } } /* XXX Should stop Rx/Tx engine prior to touching MAC. */ val = CSR_READ_4(sc, BFE_TX_CTRL); val &= ~BFE_TX_DUPLEX; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { val |= BFE_TX_DUPLEX; #ifdef notyet flow = CSR_READ_4(sc, BFE_RXCONF); flow &= ~BFE_RXCONF_FLOW; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) flow |= BFE_RXCONF_FLOW; CSR_WRITE_4(sc, BFE_RXCONF, flow); /* * It seems that the hardware has Tx pause issues * so enable only Rx pause. 
*/ flow = CSR_READ_4(sc, BFE_MAC_FLOW); flow &= ~BFE_FLOW_PAUSE_ENAB; CSR_WRITE_4(sc, BFE_MAC_FLOW, flow); #endif } CSR_WRITE_4(sc, BFE_TX_CTRL, val); } static void bfe_tx_ring_free(struct bfe_softc *sc) { int i; for(i = 0; i < BFE_TX_LIST_CNT; i++) { if (sc->bfe_tx_ring[i].bfe_mbuf != NULL) { bus_dmamap_sync(sc->bfe_txmbuf_tag, sc->bfe_tx_ring[i].bfe_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bfe_txmbuf_tag, sc->bfe_tx_ring[i].bfe_map); m_freem(sc->bfe_tx_ring[i].bfe_mbuf); sc->bfe_tx_ring[i].bfe_mbuf = NULL; } } bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE); bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void bfe_rx_ring_free(struct bfe_softc *sc) { int i; for (i = 0; i < BFE_RX_LIST_CNT; i++) { if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) { bus_dmamap_sync(sc->bfe_rxmbuf_tag, sc->bfe_rx_ring[i].bfe_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bfe_rxmbuf_tag, sc->bfe_rx_ring[i].bfe_map); m_freem(sc->bfe_rx_ring[i].bfe_mbuf); sc->bfe_rx_ring[i].bfe_mbuf = NULL; } } bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE); bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static int bfe_list_rx_init(struct bfe_softc *sc) { struct bfe_rx_data *rd; int i; sc->bfe_rx_prod = sc->bfe_rx_cons = 0; bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE); for (i = 0; i < BFE_RX_LIST_CNT; i++) { rd = &sc->bfe_rx_ring[i]; rd->bfe_mbuf = NULL; rd->bfe_ctrl = 0; if (bfe_list_newbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, BFE_DMARX_PTR, (i * sizeof(struct bfe_desc))); return (0); } static void bfe_list_tx_init(struct bfe_softc *sc) { int i; sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0; bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE); for (i = 0; i < BFE_TX_LIST_CNT; i++) sc->bfe_tx_ring[i].bfe_mbuf = NULL; bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void bfe_discard_buf(struct bfe_softc *sc, int c) { struct bfe_rx_data *r; struct bfe_desc *d; r = &sc->bfe_rx_ring[c]; d = &sc->bfe_rx_list[c]; d->bfe_ctrl = htole32(r->bfe_ctrl); } static int bfe_list_newbuf(struct bfe_softc *sc, int c) { struct bfe_rxheader *rx_header; struct bfe_desc *d; struct bfe_rx_data *r; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; u_int32_t ctrl; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; if (bus_dmamap_load_mbuf_sg(sc->bfe_rxmbuf_tag, sc->bfe_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); r = &sc->bfe_rx_ring[c]; if (r->bfe_mbuf != NULL) { bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bfe_rxmbuf_tag, r->bfe_map); } map = r->bfe_map; r->bfe_map = sc->bfe_rx_sparemap; sc->bfe_rx_sparemap = map; r->bfe_mbuf = m; rx_header = mtod(m, struct bfe_rxheader *); rx_header->len = 0; rx_header->flags = 0; bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map, BUS_DMASYNC_PREREAD); ctrl = segs[0].ds_len & BFE_DESC_LEN; KASSERT(ctrl > ETHER_MAX_LEN + 32, ("%s: buffer size too small(%d)!", __func__, ctrl)); if (c == BFE_RX_LIST_CNT - 1) ctrl |= BFE_DESC_EOT; r->bfe_ctrl = ctrl; d = &sc->bfe_rx_list[c]; d->bfe_ctrl = htole32(ctrl); /* The chip needs all addresses to be added to BFE_PCI_DMA. 
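 * BFE_PCI_DMA is the base of the address window through which the
 * core's DMA engine reaches host memory over PCI, so ring and buffer
 * addresses are offset by it before being handed to the hardware.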
*/ d->bfe_addr = htole32(BFE_ADDR_LO(segs[0].ds_addr) + BFE_PCI_DMA); return (0); } static void bfe_get_config(struct bfe_softc *sc) { u_int8_t eeprom[128]; bfe_read_eeprom(sc, eeprom); sc->bfe_enaddr[0] = eeprom[79]; sc->bfe_enaddr[1] = eeprom[78]; sc->bfe_enaddr[2] = eeprom[81]; sc->bfe_enaddr[3] = eeprom[80]; sc->bfe_enaddr[4] = eeprom[83]; sc->bfe_enaddr[5] = eeprom[82]; sc->bfe_phyaddr = eeprom[90] & 0x1f; sc->bfe_mdc_port = (eeprom[90] >> 14) & 0x1; sc->bfe_core_unit = 0; sc->bfe_dma_offset = BFE_PCI_DMA; } static void bfe_pci_setup(struct bfe_softc *sc, u_int32_t cores) { u_int32_t bar_orig, val; bar_orig = pci_read_config(sc->bfe_dev, BFE_BAR0_WIN, 4); pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, BFE_REG_PCI, 4); val = CSR_READ_4(sc, BFE_SBINTVEC); val |= cores; CSR_WRITE_4(sc, BFE_SBINTVEC, val); val = CSR_READ_4(sc, BFE_SSB_PCI_TRANS_2); val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST; CSR_WRITE_4(sc, BFE_SSB_PCI_TRANS_2, val); pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, bar_orig, 4); } static void bfe_clear_stats(struct bfe_softc *sc) { uint32_t reg; BFE_LOCK_ASSERT(sc); CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ); for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4) CSR_READ_4(sc, reg); for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4) CSR_READ_4(sc, reg); } static int bfe_resetphy(struct bfe_softc *sc) { u_int32_t val; bfe_writephy(sc, 0, BMCR_RESET); DELAY(100); bfe_readphy(sc, 0, &val); if (val & BMCR_RESET) { device_printf(sc->bfe_dev, "PHY Reset would not complete.\n"); return (ENXIO); } return (0); } static void bfe_chip_halt(struct bfe_softc *sc) { BFE_LOCK_ASSERT(sc); /* disable interrupts - not that it actually does..*/ CSR_WRITE_4(sc, BFE_IMASK, 0); CSR_READ_4(sc, BFE_IMASK); CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE); bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1); CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0); CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0); DELAY(10); } static void bfe_chip_reset(struct bfe_softc *sc) { u_int32_t val; BFE_LOCK_ASSERT(sc); /* Set the interrupt vector for the enet core */ bfe_pci_setup(sc, BFE_INTVEC_ENET0); /* is core up? */ val = CSR_READ_4(sc, BFE_SBTMSLOW) & (BFE_RESET | BFE_REJECT | BFE_CLOCK); if (val == BFE_CLOCK) { /* It is, so shut it down */ CSR_WRITE_4(sc, BFE_RCV_LAZY, 0); CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE); bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1); CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0); if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK) bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE, 100, 0); CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0); } bfe_core_reset(sc); bfe_clear_stats(sc); /* * We want the phy registers to be accessible even when * the driver is "downed" so initialize MDC preamble, frequency, * and whether internal or external phy here. */ /* 4402 has 62.5Mhz SB clock and internal phy */ CSR_WRITE_4(sc, BFE_MDIO_CTRL, 0x8d); /* Internal or external PHY? 
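 * If DEVCTRL reports no internal PHY, switch the MAC to the external
 * PHY interface; otherwise make sure the internal PHY is taken out of
 * reset.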
*/ val = CSR_READ_4(sc, BFE_DEVCTRL); if (!(val & BFE_IPP)) CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_EPSEL); else if (CSR_READ_4(sc, BFE_DEVCTRL) & BFE_EPR) { BFE_AND(sc, BFE_DEVCTRL, ~BFE_EPR); DELAY(100); } /* Enable CRC32 generation and set proper LED modes */ BFE_OR(sc, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED); /* Reset or clear powerdown control bit */ BFE_AND(sc, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN); CSR_WRITE_4(sc, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) & BFE_LAZY_FC_MASK)); /* * We don't want lazy interrupts, so just send them at * the end of a frame, please */ BFE_OR(sc, BFE_RCV_LAZY, 0); /* Set max lengths, accounting for VLAN tags */ CSR_WRITE_4(sc, BFE_RXMAXLEN, ETHER_MAX_LEN+32); CSR_WRITE_4(sc, BFE_TXMAXLEN, ETHER_MAX_LEN+32); /* Set watermark XXX - magic */ CSR_WRITE_4(sc, BFE_TX_WMARK, 56); /* * Initialise DMA channels * - not forgetting dma addresses need to be added to BFE_PCI_DMA */ CSR_WRITE_4(sc, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE); CSR_WRITE_4(sc, BFE_DMATX_ADDR, sc->bfe_tx_dma + BFE_PCI_DMA); CSR_WRITE_4(sc, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT) | BFE_RX_CTRL_ENABLE); CSR_WRITE_4(sc, BFE_DMARX_ADDR, sc->bfe_rx_dma + BFE_PCI_DMA); bfe_resetphy(sc); bfe_setupphy(sc); } static void bfe_core_disable(struct bfe_softc *sc) { if ((CSR_READ_4(sc, BFE_SBTMSLOW)) & BFE_RESET) return; /* * Set reject, wait for it set, then wait for the core to stop * being busy, then set reset and reject and enable the clocks. */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK)); bfe_wait_bit(sc, BFE_SBTMSLOW, BFE_REJECT, 1000, 0); bfe_wait_bit(sc, BFE_SBTMSHIGH, BFE_BUSY, 1000, 1); CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | BFE_RESET)); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); /* Leave reset and reject set */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET)); DELAY(10); } static void bfe_core_reset(struct bfe_softc *sc) { u_int32_t val; /* Disable the core */ bfe_core_disable(sc); /* and bring it back up */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC)); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); /* Chip bug, clear SERR, IB and TO if they are set. 
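 * (SERR lives in SBTMSHIGH; the inband-error and timeout bits are in
 * SBIMSTATE.)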
*/ if (CSR_READ_4(sc, BFE_SBTMSHIGH) & BFE_SERR) CSR_WRITE_4(sc, BFE_SBTMSHIGH, 0); val = CSR_READ_4(sc, BFE_SBIMSTATE); if (val & (BFE_IBE | BFE_TO)) CSR_WRITE_4(sc, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO)); /* Clear reset and allow it to move through the core */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC)); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); /* Leave the clock set */ CSR_WRITE_4(sc, BFE_SBTMSLOW, BFE_CLOCK); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); } static void bfe_cam_write(struct bfe_softc *sc, u_char *data, int index) { u_int32_t val; val = ((u_int32_t) data[2]) << 24; val |= ((u_int32_t) data[3]) << 16; val |= ((u_int32_t) data[4]) << 8; val |= ((u_int32_t) data[5]); CSR_WRITE_4(sc, BFE_CAM_DATA_LO, val); val = (BFE_CAM_HI_VALID | (((u_int32_t) data[0]) << 8) | (((u_int32_t) data[1]))); CSR_WRITE_4(sc, BFE_CAM_DATA_HI, val); CSR_WRITE_4(sc, BFE_CAM_CTRL, (BFE_CAM_WRITE | ((u_int32_t) index << BFE_CAM_INDEX_SHIFT))); bfe_wait_bit(sc, BFE_CAM_CTRL, BFE_CAM_BUSY, 10000, 1); } static u_int bfe_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct bfe_softc *sc = arg; bfe_cam_write(sc, LLADDR(sdl), cnt + 1); return (1); } static void bfe_set_rx_mode(struct bfe_softc *sc) { if_t ifp = sc->bfe_ifp; u_int32_t val; BFE_LOCK_ASSERT(sc); val = CSR_READ_4(sc, BFE_RXCONF); if (if_getflags(ifp) & IFF_PROMISC) val |= BFE_RXCONF_PROMISC; else val &= ~BFE_RXCONF_PROMISC; if (if_getflags(ifp) & IFF_BROADCAST) val &= ~BFE_RXCONF_DBCAST; else val |= BFE_RXCONF_DBCAST; CSR_WRITE_4(sc, BFE_CAM_CTRL, 0); bfe_cam_write(sc, if_getlladdr(sc->bfe_ifp), 0); if (if_getflags(ifp) & IFF_ALLMULTI) val |= BFE_RXCONF_ALLMULTI; else { val &= ~BFE_RXCONF_ALLMULTI; if_foreach_llmaddr(ifp, bfe_write_maddr, sc); } CSR_WRITE_4(sc, BFE_RXCONF, val); BFE_OR(sc, BFE_CAM_CTRL, BFE_CAM_ENABLE); } static void bfe_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bfe_dmamap_arg *ctx; if (error != 0) return; KASSERT(nseg == 1, ("%s : %d segments returned!", __func__, nseg)); ctx = (struct bfe_dmamap_arg *)arg; ctx->bfe_busaddr = segs[0].ds_addr; } static void bfe_release_resources(struct bfe_softc *sc) { if (sc->bfe_intrhand != NULL) bus_teardown_intr(sc->bfe_dev, sc->bfe_irq, sc->bfe_intrhand); if (sc->bfe_irq != NULL) bus_release_resource(sc->bfe_dev, SYS_RES_IRQ, 0, sc->bfe_irq); if (sc->bfe_res != NULL) bus_release_resource(sc->bfe_dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->bfe_res); if (sc->bfe_ifp != NULL) if_free(sc->bfe_ifp); } static void bfe_read_eeprom(struct bfe_softc *sc, u_int8_t *data) { long i; u_int16_t *ptr = (u_int16_t *)data; for(i = 0; i < 128; i += 2) ptr[i/2] = CSR_READ_4(sc, 4096 + i); } static int bfe_wait_bit(struct bfe_softc *sc, u_int32_t reg, u_int32_t bit, u_long timeout, const int clear) { u_long i; for (i = 0; i < timeout; i++) { u_int32_t val = CSR_READ_4(sc, reg); if (clear && !(val & bit)) break; if (!clear && (val & bit)) break; DELAY(10); } if (i == timeout) { device_printf(sc->bfe_dev, "BUG! Timeout waiting for bit %08x of register " "%x to %s.\n", bit, reg, (clear ? 
"clear" : "set")); return (-1); } return (0); } static int bfe_readphy(struct bfe_softc *sc, u_int32_t reg, u_int32_t *val) { int err; /* Clear MII ISR */ CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START | (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) | (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) | (reg << BFE_MDIO_RA_SHIFT) | (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT))); err = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0); *val = CSR_READ_4(sc, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA; return (err); } static int bfe_writephy(struct bfe_softc *sc, u_int32_t reg, u_int32_t val) { int status; CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START | (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) | (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) | (reg << BFE_MDIO_RA_SHIFT) | (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) | (val & BFE_MDIO_DATA_DATA))); status = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0); return (status); } /* * XXX - I think this is handled by the PHY driver, but it can't hurt to do it * twice */ static int bfe_setupphy(struct bfe_softc *sc) { u_int32_t val; /* Enable activity LED */ bfe_readphy(sc, 26, &val); bfe_writephy(sc, 26, val & 0x7fff); bfe_readphy(sc, 26, &val); /* Enable traffic meter LED mode */ bfe_readphy(sc, 27, &val); bfe_writephy(sc, 27, val | (1 << 6)); return (0); } static void bfe_stats_update(struct bfe_softc *sc) { struct bfe_hw_stats *stats; if_t ifp; uint32_t mib[BFE_MIB_CNT]; uint32_t reg, *val; BFE_LOCK_ASSERT(sc); val = mib; CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ); for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4) *val++ = CSR_READ_4(sc, reg); for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4) *val++ = CSR_READ_4(sc, reg); ifp = sc->bfe_ifp; stats = &sc->bfe_stats; /* Tx stat. */ stats->tx_good_octets += mib[MIB_TX_GOOD_O]; stats->tx_good_frames += mib[MIB_TX_GOOD_P]; stats->tx_octets += mib[MIB_TX_O]; stats->tx_frames += mib[MIB_TX_P]; stats->tx_bcast_frames += mib[MIB_TX_BCAST]; stats->tx_mcast_frames += mib[MIB_TX_MCAST]; stats->tx_pkts_64 += mib[MIB_TX_64]; stats->tx_pkts_65_127 += mib[MIB_TX_65_127]; stats->tx_pkts_128_255 += mib[MIB_TX_128_255]; stats->tx_pkts_256_511 += mib[MIB_TX_256_511]; stats->tx_pkts_512_1023 += mib[MIB_TX_512_1023]; stats->tx_pkts_1024_max += mib[MIB_TX_1024_MAX]; stats->tx_jabbers += mib[MIB_TX_JABBER]; stats->tx_oversize_frames += mib[MIB_TX_OSIZE]; stats->tx_frag_frames += mib[MIB_TX_FRAG]; stats->tx_underruns += mib[MIB_TX_URUNS]; stats->tx_colls += mib[MIB_TX_TCOLS]; stats->tx_single_colls += mib[MIB_TX_SCOLS]; stats->tx_multi_colls += mib[MIB_TX_MCOLS]; stats->tx_excess_colls += mib[MIB_TX_ECOLS]; stats->tx_late_colls += mib[MIB_TX_LCOLS]; stats->tx_deferrals += mib[MIB_TX_DEFERED]; stats->tx_carrier_losts += mib[MIB_TX_CLOST]; stats->tx_pause_frames += mib[MIB_TX_PAUSE]; /* Rx stat. 
*/ stats->rx_good_octets += mib[MIB_RX_GOOD_O]; stats->rx_good_frames += mib[MIB_RX_GOOD_P]; stats->rx_octets += mib[MIB_RX_O]; stats->rx_frames += mib[MIB_RX_P]; stats->rx_bcast_frames += mib[MIB_RX_BCAST]; stats->rx_mcast_frames += mib[MIB_RX_MCAST]; stats->rx_pkts_64 += mib[MIB_RX_64]; stats->rx_pkts_65_127 += mib[MIB_RX_65_127]; stats->rx_pkts_128_255 += mib[MIB_RX_128_255]; stats->rx_pkts_256_511 += mib[MIB_RX_256_511]; stats->rx_pkts_512_1023 += mib[MIB_RX_512_1023]; stats->rx_pkts_1024_max += mib[MIB_RX_1024_MAX]; stats->rx_jabbers += mib[MIB_RX_JABBER]; stats->rx_oversize_frames += mib[MIB_RX_OSIZE]; stats->rx_frag_frames += mib[MIB_RX_FRAG]; stats->rx_missed_frames += mib[MIB_RX_MISS]; stats->rx_crc_align_errs += mib[MIB_RX_CRCA]; stats->rx_runts += mib[MIB_RX_USIZE]; stats->rx_crc_errs += mib[MIB_RX_CRC]; stats->rx_align_errs += mib[MIB_RX_ALIGN]; stats->rx_symbol_errs += mib[MIB_RX_SYM]; stats->rx_pause_frames += mib[MIB_RX_PAUSE]; stats->rx_control_frames += mib[MIB_RX_NPAUSE]; /* Update counters in ifnet. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, (u_long)mib[MIB_TX_GOOD_P]); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (u_long)mib[MIB_TX_TCOLS]); if_inc_counter(ifp, IFCOUNTER_OERRORS, (u_long)mib[MIB_TX_URUNS] + (u_long)mib[MIB_TX_ECOLS] + (u_long)mib[MIB_TX_DEFERED] + (u_long)mib[MIB_TX_CLOST]); if_inc_counter(ifp, IFCOUNTER_IPACKETS, (u_long)mib[MIB_RX_GOOD_P]); if_inc_counter(ifp, IFCOUNTER_IERRORS, mib[MIB_RX_JABBER] + mib[MIB_RX_MISS] + mib[MIB_RX_CRCA] + mib[MIB_RX_USIZE] + mib[MIB_RX_CRC] + mib[MIB_RX_ALIGN] + mib[MIB_RX_SYM]); } static void bfe_txeof(struct bfe_softc *sc) { struct bfe_tx_data *r; if_t ifp; int i, chipidx; BFE_LOCK_ASSERT(sc); ifp = sc->bfe_ifp; chipidx = CSR_READ_4(sc, BFE_DMATX_STAT) & BFE_STAT_CDMASK; chipidx /= sizeof(struct bfe_desc); i = sc->bfe_tx_cons; if (i == chipidx) return; bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Go through the mbufs and free those that have been transmitted */ for (; i != chipidx; BFE_INC(i, BFE_TX_LIST_CNT)) { r = &sc->bfe_tx_ring[i]; sc->bfe_tx_cnt--; if (r->bfe_mbuf == NULL) continue; bus_dmamap_sync(sc->bfe_txmbuf_tag, r->bfe_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map); m_freem(r->bfe_mbuf); r->bfe_mbuf = NULL; } if (i != sc->bfe_tx_cons) { /* we freed up some mbufs */ sc->bfe_tx_cons = i; if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } if (sc->bfe_tx_cnt == 0) sc->bfe_watchdog_timer = 0; } /* Pass a received packet up the stack */ static void bfe_rxeof(struct bfe_softc *sc) { struct mbuf *m; if_t ifp; struct bfe_rxheader *rxheader; struct bfe_rx_data *r; int cons, prog; u_int32_t status, current, len, flags; BFE_LOCK_ASSERT(sc); cons = sc->bfe_rx_cons; status = CSR_READ_4(sc, BFE_DMARX_STAT); current = (status & BFE_STAT_CDMASK) / sizeof(struct bfe_desc); ifp = sc->bfe_ifp; bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (prog = 0; current != cons; prog++, BFE_INC(cons, BFE_RX_LIST_CNT)) { r = &sc->bfe_rx_ring[cons]; m = r->bfe_mbuf; /* * Rx status should be read from mbuf such that we can't * delay bus_dmamap_sync(9). This hardware limiation * results in inefficient mbuf usage as bfe(4) couldn't * reuse mapped buffer from errored frame. 
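 * A replacement mbuf is therefore allocated up front via bfe_list_newbuf();
 * if that allocation fails the incoming frame is dropped (IQDROPS) and the
 * old buffer is recycled with bfe_discard_buf().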
*/ if (bfe_list_newbuf(sc, cons) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); bfe_discard_buf(sc, cons); continue; } rxheader = mtod(m, struct bfe_rxheader*); len = le16toh(rxheader->len); flags = le16toh(rxheader->flags); /* Remove CRC bytes. */ len -= ETHER_CRC_LEN; /* flag an error and try again */ if ((len > ETHER_MAX_LEN+32) || (flags & BFE_RX_FLAG_ERRORS)) { m_freem(m); continue; } /* Make sure to skip header bytes written by hardware. */ m_adj(m, BFE_RX_OFFSET); m->m_len = m->m_pkthdr.len = len; m->m_pkthdr.rcvif = ifp; BFE_UNLOCK(sc); if_input(ifp, m); BFE_LOCK(sc); } if (prog > 0) { sc->bfe_rx_cons = cons; bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } } static void bfe_intr(void *xsc) { struct bfe_softc *sc = xsc; if_t ifp; u_int32_t istat; ifp = sc->bfe_ifp; BFE_LOCK(sc); istat = CSR_READ_4(sc, BFE_ISTAT); /* * Defer unsolicited interrupts - This is necessary because setting the * chips interrupt mask register to 0 doesn't actually stop the * interrupts */ istat &= BFE_IMASK_DEF; CSR_WRITE_4(sc, BFE_ISTAT, istat); CSR_READ_4(sc, BFE_ISTAT); /* not expecting this interrupt, disregard it */ if (istat == 0 || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { BFE_UNLOCK(sc); return; } /* A packet was received */ if (istat & BFE_ISTAT_RX) bfe_rxeof(sc); /* A packet was sent */ if (istat & BFE_ISTAT_TX) bfe_txeof(sc); if (istat & BFE_ISTAT_ERRORS) { if (istat & BFE_ISTAT_DSCE) { device_printf(sc->bfe_dev, "Descriptor Error\n"); bfe_stop(sc); BFE_UNLOCK(sc); return; } if (istat & BFE_ISTAT_DPE) { device_printf(sc->bfe_dev, "Descriptor Protocol Error\n"); bfe_stop(sc); BFE_UNLOCK(sc); return; } if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bfe_init_locked(sc); } /* We have packets pending, fire them out */ if (!if_sendq_empty(ifp)) bfe_start_locked(ifp); BFE_UNLOCK(sc); } static int bfe_encap(struct bfe_softc *sc, struct mbuf **m_head) { struct bfe_desc *d; struct bfe_tx_data *r, *r1; struct mbuf *m; bus_dmamap_t map; bus_dma_segment_t txsegs[BFE_MAXTXSEGS]; uint32_t cur, si; int error, i, nsegs; BFE_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); si = cur = sc->bfe_tx_prod; r = &sc->bfe_tx_ring[cur]; error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map, *m_head, txsegs, &nsegs, 0); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, BFE_MAXTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map, *m_head, txsegs, &nsegs, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } if (sc->bfe_tx_cnt + nsegs > BFE_TX_LIST_CNT - 1) { bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map); return (ENOBUFS); } for (i = 0; i < nsegs; i++) { d = &sc->bfe_tx_list[cur]; d->bfe_ctrl = htole32(txsegs[i].ds_len & BFE_DESC_LEN); d->bfe_ctrl |= htole32(BFE_DESC_IOC); if (cur == BFE_TX_LIST_CNT - 1) /* * Tell the chip to wrap to the start of * the descriptor list. */ d->bfe_ctrl |= htole32(BFE_DESC_EOT); /* The chip needs all addresses to be added to BFE_PCI_DMA. */ d->bfe_addr = htole32(BFE_ADDR_LO(txsegs[i].ds_addr) + BFE_PCI_DMA); BFE_INC(cur, BFE_TX_LIST_CNT); } /* Update producer index. */ sc->bfe_tx_prod = cur; /* Set EOF on the last descriptor. */ cur = (cur + BFE_TX_LIST_CNT - 1) % BFE_TX_LIST_CNT; d = &sc->bfe_tx_list[cur]; d->bfe_ctrl |= htole32(BFE_DESC_EOF); /* Lastly set SOF on the first descriptor to avoid races. 
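 * (Every other field of the chain is filled in first, presumably so the
 * hardware never starts on a partially built set of descriptors.)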
*/ d = &sc->bfe_tx_list[si]; d->bfe_ctrl |= htole32(BFE_DESC_SOF); r1 = &sc->bfe_tx_ring[cur]; map = r->bfe_map; r->bfe_map = r1->bfe_map; r1->bfe_map = map; r1->bfe_mbuf = *m_head; sc->bfe_tx_cnt += nsegs; bus_dmamap_sync(sc->bfe_txmbuf_tag, map, BUS_DMASYNC_PREWRITE); return (0); } /* * Set up to transmit a packet. */ static void bfe_start(if_t ifp) { BFE_LOCK((struct bfe_softc *)if_getsoftc(ifp)); bfe_start_locked(ifp); BFE_UNLOCK((struct bfe_softc *)if_getsoftc(ifp)); } /* * Set up to transmit a packet. The softc is already locked. */ static void bfe_start_locked(if_t ifp) { struct bfe_softc *sc; struct mbuf *m_head; int queued; sc = if_getsoftc(ifp); BFE_LOCK_ASSERT(sc); /* * Not much point trying to send if the link is down * or we have nothing to send. */ if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->bfe_flags & BFE_FLAG_LINK) == 0) return; for (queued = 0; !if_sendq_empty(ifp) && sc->bfe_tx_cnt < BFE_TX_LIST_CNT - 1;) { m_head = if_dequeue(ifp); if (m_head == NULL) break; /* * Pack the data into the tx ring. If we dont have * enough room, let the chip drain the ring. */ if (bfe_encap(sc, &m_head)) { if (m_head == NULL) break; if_sendq_prepend(ifp, m_head); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } queued++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } if (queued) { bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Transmit - twice due to apparent hardware bug */ CSR_WRITE_4(sc, BFE_DMATX_PTR, sc->bfe_tx_prod * sizeof(struct bfe_desc)); /* * XXX It seems the following write is not necessary * to kick Tx command. What might be required would be * a way flushing PCI posted write. Reading the register * back ensures the flush operation. In addition, * hardware will execute PCI posted write in the long * run and watchdog timer for the kick command was set * to 5 seconds. Therefore I think the second write * access is not necessary or could be replaced with * read operation. */ CSR_WRITE_4(sc, BFE_DMATX_PTR, sc->bfe_tx_prod * sizeof(struct bfe_desc)); /* * Set a timeout in case the chip goes out to lunch. */ sc->bfe_watchdog_timer = 5; } } static void bfe_init(void *xsc) { BFE_LOCK((struct bfe_softc *)xsc); bfe_init_locked(xsc); BFE_UNLOCK((struct bfe_softc *)xsc); } static void bfe_init_locked(void *xsc) { struct bfe_softc *sc = (struct bfe_softc*)xsc; if_t ifp = sc->bfe_ifp; struct mii_data *mii; BFE_LOCK_ASSERT(sc); mii = device_get_softc(sc->bfe_miibus); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; bfe_stop(sc); bfe_chip_reset(sc); if (bfe_list_rx_init(sc) == ENOBUFS) { device_printf(sc->bfe_dev, "%s: Not enough memory for list buffers\n", __func__); bfe_stop(sc); return; } bfe_list_tx_init(sc); bfe_set_rx_mode(sc); /* Enable the chip and core */ BFE_OR(sc, BFE_ENET_CTRL, BFE_ENET_ENABLE); /* Enable interrupts */ CSR_WRITE_4(sc, BFE_IMASK, BFE_IMASK_DEF); /* Clear link state and change media. */ sc->bfe_flags &= ~BFE_FLAG_LINK; mii_mediachg(mii); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc); } /* * Set media options. 
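 * Resets each PHY on the bus and then lets mii(4) reprogram the media
 * currently selected (e.g. via ifconfig(8)).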
*/ static int bfe_ifmedia_upd(if_t ifp) { struct bfe_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = if_getsoftc(ifp); BFE_LOCK(sc); mii = device_get_softc(sc->bfe_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); BFE_UNLOCK(sc); return (error); } /* * Report current media status. */ static void bfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct bfe_softc *sc = if_getsoftc(ifp); struct mii_data *mii; BFE_LOCK(sc); mii = device_get_softc(sc->bfe_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; BFE_UNLOCK(sc); } static int bfe_ioctl(if_t ifp, u_long command, caddr_t data) { struct bfe_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFFLAGS: BFE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) bfe_set_rx_mode(sc); else if ((sc->bfe_flags & BFE_FLAG_DETACH) == 0) bfe_init_locked(sc); } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) bfe_stop(sc); BFE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: BFE_LOCK(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) bfe_set_rx_mode(sc); BFE_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->bfe_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void bfe_watchdog(struct bfe_softc *sc) { if_t ifp; BFE_LOCK_ASSERT(sc); if (sc->bfe_watchdog_timer == 0 || --sc->bfe_watchdog_timer) return; ifp = sc->bfe_ifp; device_printf(sc->bfe_dev, "watchdog timeout -- resetting\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bfe_init_locked(sc); if (!if_sendq_empty(ifp)) bfe_start_locked(ifp); } static void bfe_tick(void *xsc) { struct bfe_softc *sc = xsc; struct mii_data *mii; BFE_LOCK_ASSERT(sc); mii = device_get_softc(sc->bfe_miibus); mii_tick(mii); bfe_stats_update(sc); bfe_watchdog(sc); callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
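 * Also clears the link flag, stops the stats callout and watchdog, and
 * halts the chip. Called with the driver lock held.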
 */
static void
bfe_stop(struct bfe_softc *sc)
{
	if_t ifp;

	BFE_LOCK_ASSERT(sc);

	ifp = sc->bfe_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->bfe_flags &= ~BFE_FLAG_LINK;
	callout_stop(&sc->bfe_stat_co);
	sc->bfe_watchdog_timer = 0;
	bfe_chip_halt(sc);
	bfe_tx_ring_free(sc);
	bfe_rx_ring_free(sc);
}

static int
sysctl_bfe_stats(SYSCTL_HANDLER_ARGS)
{
	struct bfe_softc *sc;
	struct bfe_hw_stats *stats;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (result != 1)
		return (error);
	sc = (struct bfe_softc *)arg1;
	stats = &sc->bfe_stats;

	printf("%s statistics:\n", device_get_nameunit(sc->bfe_dev));
	printf("Transmit good octets : %ju\n", (uintmax_t)stats->tx_good_octets);
	printf("Transmit good frames : %ju\n", (uintmax_t)stats->tx_good_frames);
	printf("Transmit octets : %ju\n", (uintmax_t)stats->tx_octets);
	printf("Transmit frames : %ju\n", (uintmax_t)stats->tx_frames);
	printf("Transmit broadcast frames : %ju\n", (uintmax_t)stats->tx_bcast_frames);
	printf("Transmit multicast frames : %ju\n", (uintmax_t)stats->tx_mcast_frames);
	printf("Transmit frames 64 bytes : %ju\n", (uint64_t)stats->tx_pkts_64);
	printf("Transmit frames 65 to 127 bytes : %ju\n", (uint64_t)stats->tx_pkts_65_127);
	printf("Transmit frames 128 to 255 bytes : %ju\n", (uint64_t)stats->tx_pkts_128_255);
	printf("Transmit frames 256 to 511 bytes : %ju\n", (uint64_t)stats->tx_pkts_256_511);
	printf("Transmit frames 512 to 1023 bytes : %ju\n", (uint64_t)stats->tx_pkts_512_1023);
	printf("Transmit frames 1024 to max bytes : %ju\n", (uint64_t)stats->tx_pkts_1024_max);
	printf("Transmit jabber errors : %u\n", stats->tx_jabbers);
	printf("Transmit oversized frames : %ju\n", (uint64_t)stats->tx_oversize_frames);
	printf("Transmit fragmented frames : %ju\n", (uint64_t)stats->tx_frag_frames);
	printf("Transmit underruns : %u\n", stats->tx_underruns);
	printf("Transmit total collisions : %u\n", stats->tx_colls);
	printf("Transmit single collisions : %u\n", stats->tx_single_colls);
	printf("Transmit multiple collisions : %u\n", stats->tx_multi_colls);
	printf("Transmit excess collisions : %u\n", stats->tx_excess_colls);
	printf("Transmit late collisions : %u\n", stats->tx_late_colls);
	printf("Transmit deferrals : %u\n", stats->tx_deferrals);
	printf("Transmit carrier losts : %u\n", stats->tx_carrier_losts);
	printf("Transmit pause frames : %u\n", stats->tx_pause_frames);
	printf("Receive good octets : %ju\n", (uintmax_t)stats->rx_good_octets);
	printf("Receive good frames : %ju\n", (uintmax_t)stats->rx_good_frames);
	printf("Receive octets : %ju\n", (uintmax_t)stats->rx_octets);
	printf("Receive frames : %ju\n", (uintmax_t)stats->rx_frames);
	printf("Receive broadcast frames : %ju\n", (uintmax_t)stats->rx_bcast_frames);
	printf("Receive multicast frames : %ju\n", (uintmax_t)stats->rx_mcast_frames);
	printf("Receive frames 64 bytes : %ju\n", (uint64_t)stats->rx_pkts_64);
	printf("Receive frames 65 to 127 bytes : %ju\n", (uint64_t)stats->rx_pkts_65_127);
	printf("Receive frames 128 to 255 bytes : %ju\n", (uint64_t)stats->rx_pkts_128_255);
	printf("Receive frames 256 to 511 bytes : %ju\n", (uint64_t)stats->rx_pkts_256_511);
	printf("Receive frames 512 to 1023 bytes : %ju\n", (uint64_t)stats->rx_pkts_512_1023);
	printf("Receive frames 1024 to max bytes : %ju\n", (uint64_t)stats->rx_pkts_1024_max);
	printf("Receive jabber errors : %u\n", stats->rx_jabbers);
	printf("Receive oversized frames : %ju\n", (uint64_t)stats->rx_oversize_frames);
	printf("Receive fragmented 
frames : %ju\n", (uint64_t)stats->rx_frag_frames); printf("Receive missed frames : %u\n", stats->rx_missed_frames); printf("Receive CRC align errors : %u\n", stats->rx_crc_align_errs); printf("Receive undersized frames : %u\n", stats->rx_runts); printf("Receive CRC errors : %u\n", stats->rx_crc_errs); printf("Receive align errors : %u\n", stats->rx_align_errs); printf("Receive symbol errors : %u\n", stats->rx_symbol_errs); printf("Receive pause frames : %u\n", stats->rx_pause_frames); printf("Receive control frames : %u\n", stats->rx_control_frames); return (error); } diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c index 6c3301b1473a..cf3084f9b768 100644 --- a/sys/dev/bge/if_bge.c +++ b/sys/dev/bge/if_bge.c @@ -1,6839 +1,6838 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include /* * Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver * * The Broadcom BCM5700 is based on technology originally developed by * Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has * two on-board MIPS R4000 CPUs and can have as much as 16MB of external * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo * frames, highly configurable RX filtering, and 16 RX and TX queues * (which, along with RX filter rules, can be used for QOS applications). * Other features, such as TCP segmentation, may be available as part * of value-added firmware updates. Unlike the Tigon I and Tigon II, * firmware images can be stored in hardware and need not be compiled * into the driver. * * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 
* * The BCM5701 is a single-chip solution incorporating both the BCM5700 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 * does not support external SSRAM. * * Broadcom also produces a variation of the BCM5700 under the "Altima" * brand name, which is functionally similar but lacks PCI-X support. * * Without external SSRAM, you can only have at most 4 TX rings, * and the use of the mini RX ring is disabled. This seems to imply * that these features are simply not available on the BCM5701. As a * result, this driver does not implement any support for the mini RX * ring. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #include #include #include #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP) #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ MODULE_DEPEND(bge, pci, 1, 1, 1); MODULE_DEPEND(bge, ether, 1, 1, 1); MODULE_DEPEND(bge, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. Note: the * spec seems to indicate that the hardware still has Alteon's vendor * ID burned into it, though it will always be overridden by the vendor * ID in the EEPROM. Just to be safe, we cover all possibilities. */ static const struct bge_type { uint16_t bge_vid; uint16_t bge_did; } bge_devs[] = { { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 }, { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 }, { APPLE_VENDORID, APPLE_DEVICE_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5717C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5725 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5727 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, { BCOM_VENDORID, 
BCOM_DEVICEID_BCM5753 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5762 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57762 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57764 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57766 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57767 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57782 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57786 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57787 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 }, { SK_VENDORID, SK_DEVICEID_ALTIMA }, { TC_VENDORID, TC_DEVICEID_3C996 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 }, { 0, 0 } }; static const struct bge_vendor { uint16_t v_id; const char *v_name; } bge_vendors[] = { { ALTEON_VENDORID, "Alteon" }, { ALTIMA_VENDORID, "Altima" }, { APPLE_VENDORID, "Apple" }, { BCOM_VENDORID, "Broadcom" }, { SK_VENDORID, "SysKonnect" }, { TC_VENDORID, "3Com" }, { FJTSU_VENDORID, "Fujitsu" }, { 0, NULL } }; static const struct bge_revision { uint32_t br_chipid; const char *br_name; } bge_revisions[] = { { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, { BGE_CHIPID_BCM5704_B0, 
"BCM5704 B0" }, { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, { BGE_CHIPID_BCM5717_C0, "BCM5717 C0" }, { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" }, { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" }, { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, { 0, NULL } }; /* * Some defaults for major revisions, so that newer steppings * that we don't know about have a shot at working. 
*/ static const struct bge_revision bge_majorrevs[] = { { BGE_ASICREV_BCM5700, "unknown BCM5700" }, { BGE_ASICREV_BCM5701, "unknown BCM5701" }, { BGE_ASICREV_BCM5703, "unknown BCM5703" }, { BGE_ASICREV_BCM5704, "unknown BCM5704" }, { BGE_ASICREV_BCM5705, "unknown BCM5705" }, { BGE_ASICREV_BCM5750, "unknown BCM5750" }, { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, { BGE_ASICREV_BCM5752, "unknown BCM5752" }, { BGE_ASICREV_BCM5780, "unknown BCM5780" }, { BGE_ASICREV_BCM5714, "unknown BCM5714" }, { BGE_ASICREV_BCM5755, "unknown BCM5755" }, { BGE_ASICREV_BCM5761, "unknown BCM5761" }, { BGE_ASICREV_BCM5784, "unknown BCM5784" }, { BGE_ASICREV_BCM5785, "unknown BCM5785" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, { BGE_ASICREV_BCM5906, "unknown BCM5906" }, { BGE_ASICREV_BCM57765, "unknown BCM57765" }, { BGE_ASICREV_BCM57766, "unknown BCM57766" }, { BGE_ASICREV_BCM57780, "unknown BCM57780" }, { BGE_ASICREV_BCM5717, "unknown BCM5717" }, { BGE_ASICREV_BCM5719, "unknown BCM5719" }, { BGE_ASICREV_BCM5720, "unknown BCM5720" }, { BGE_ASICREV_BCM5762, "unknown BCM5762" }, { 0, NULL } }; #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS) #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS) #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_57765_PLUS) static uint32_t bge_chipid(device_t); static const struct bge_vendor * bge_lookup_vendor(uint16_t); static const struct bge_revision * bge_lookup_rev(uint32_t); typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); static int bge_probe(device_t); static int bge_attach(device_t); static int bge_detach(device_t); static int bge_suspend(device_t); static int bge_resume(device_t); static void bge_release_resources(struct bge_softc *); static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int bge_dma_alloc(struct bge_softc *); static void bge_dma_free(struct bge_softc *); static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t, bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *); static void bge_devinfo(struct bge_softc *); static int bge_mbox_reorder(struct bge_softc *); static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]); static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); static int bge_get_eaddr(struct bge_softc *, uint8_t[]); static void bge_txeof(struct bge_softc *, uint16_t); static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); static int bge_rxeof(struct bge_softc *, uint16_t, int); static void bge_asf_driver_up (struct bge_softc *); static void bge_tick(void *); static void bge_stats_clear_regs(struct bge_softc *); static void bge_stats_update(struct bge_softc *); static void bge_stats_update_regs(struct bge_softc *); static struct mbuf *bge_check_short_dma(struct mbuf *); static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *, uint16_t *, uint16_t *); static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); static void bge_intr(void *); static int 
bge_msi_intr(void *); static void bge_intr_task(void *, int); static void bge_start(if_t); static void bge_start_locked(if_t); static void bge_start_tx(struct bge_softc *, uint32_t); static int bge_ioctl(if_t, u_long, caddr_t); static void bge_init_locked(struct bge_softc *); static void bge_init(void *); static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t); static void bge_stop(struct bge_softc *); static void bge_watchdog(struct bge_softc *); static int bge_shutdown(device_t); static int bge_ifmedia_upd_locked(if_t); static int bge_ifmedia_upd(if_t); static void bge_ifmedia_sts(if_t, struct ifmediareq *); static uint64_t bge_get_counter(if_t, ift_counter); static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); static void bge_setpromisc(struct bge_softc *); static void bge_setmulti(struct bge_softc *); static void bge_setvlan(struct bge_softc *); static __inline void bge_rxreuse_std(struct bge_softc *, int); static __inline void bge_rxreuse_jumbo(struct bge_softc *, int); static int bge_newbuf_std(struct bge_softc *, int); static int bge_newbuf_jumbo(struct bge_softc *, int); static int bge_init_rx_ring_std(struct bge_softc *); static void bge_free_rx_ring_std(struct bge_softc *); static int bge_init_rx_ring_jumbo(struct bge_softc *); static void bge_free_rx_ring_jumbo(struct bge_softc *); static void bge_free_tx_ring(struct bge_softc *); static int bge_init_tx_ring(struct bge_softc *); static int bge_chipinit(struct bge_softc *); static int bge_blockinit(struct bge_softc *); static uint32_t bge_dma_swap_options(struct bge_softc *); static int bge_has_eaddr(struct bge_softc *); static uint32_t bge_readmem_ind(struct bge_softc *, int); static void bge_writemem_ind(struct bge_softc *, int, int); static void bge_writembx(struct bge_softc *, int, int); #ifdef notdef static uint32_t bge_readreg_ind(struct bge_softc *, int); #endif static void bge_writemem_direct(struct bge_softc *, int, int); static void bge_writereg_ind(struct bge_softc *, int, int); static int bge_miibus_readreg(device_t, int, int); static int bge_miibus_writereg(device_t, int, int, int); static void bge_miibus_statchg(device_t); #ifdef DEVICE_POLLING static int bge_poll(if_t ifp, enum poll_cmd cmd, int count); #endif #define BGE_RESET_SHUTDOWN 0 #define BGE_RESET_START 1 #define BGE_RESET_SUSPEND 2 static void bge_sig_post_reset(struct bge_softc *, int); static void bge_sig_legacy(struct bge_softc *, int); static void bge_sig_pre_reset(struct bge_softc *, int); static void bge_stop_fw(struct bge_softc *); static int bge_reset(struct bge_softc *); static void bge_link_upd(struct bge_softc *); static void bge_ape_lock_init(struct bge_softc *); static void bge_ape_read_fw_ver(struct bge_softc *); static int bge_ape_lock(struct bge_softc *, int); static void bge_ape_unlock(struct bge_softc *, int); static void bge_ape_send_event(struct bge_softc *, uint32_t); static void bge_ape_driver_state_change(struct bge_softc *, int); /* * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may * leak information to untrusted users. It is also known to cause alignment * traps on certain architectures. 
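 * The sysctl handlers below allow arbitrary register and memory reads, so
 * leave the option disabled except when actively debugging.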
*/ #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS); static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); #endif static void bge_add_sysctls(struct bge_softc *); static void bge_add_sysctl_stats_regs(struct bge_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); DEBUGNET_DEFINE(bge); static device_method_t bge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bge_probe), DEVMETHOD(device_attach, bge_attach), DEVMETHOD(device_detach, bge_detach), DEVMETHOD(device_shutdown, bge_shutdown), DEVMETHOD(device_suspend, bge_suspend), DEVMETHOD(device_resume, bge_resume), /* MII interface */ DEVMETHOD(miibus_readreg, bge_miibus_readreg), DEVMETHOD(miibus_writereg, bge_miibus_writereg), DEVMETHOD(miibus_statchg, bge_miibus_statchg), DEVMETHOD_END }; static driver_t bge_driver = { "bge", bge_methods, sizeof(struct bge_softc) }; DRIVER_MODULE(bge, pci, bge_driver, 0, 0); MODULE_PNP_INFO("U16:vendor;U16:device", pci, bge, bge_devs, nitems(bge_devs) - 1); DRIVER_MODULE(miibus, bge, miibus_driver, 0, 0); static int bge_allow_asf = 1; static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "BGE driver parameters"); SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0, "Allow ASF mode if available"); static int bge_has_eaddr(struct bge_softc *sc) { return (1); } static uint32_t bge_readmem_ind(struct bge_softc *sc, int off) { device_t dev; uint32_t val; if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) return (0); dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); return (val); } static void bge_writemem_ind(struct bge_softc *sc, int off, int val) { device_t dev; if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) return; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); } #ifdef notdef static uint32_t bge_readreg_ind(struct bge_softc *sc, int off) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); } #endif static void bge_writereg_ind(struct bge_softc *sc, int off, int val) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); } static void bge_writemem_direct(struct bge_softc *sc, int off, int val) { CSR_WRITE_4(sc, off, val); } static void bge_writembx(struct bge_softc *sc, int off, int val) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; CSR_WRITE_4(sc, off, val); if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0) CSR_READ_4(sc, off); } /* * Clear all stale locks and select the lock for this driver instance. */ static void bge_ape_lock_init(struct bge_softc *sc) { uint32_t bit, regbase; int i; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) regbase = BGE_APE_LOCK_GRANT; else regbase = BGE_APE_PER_LOCK_GRANT; /* Clear any stale locks. 
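 * Writing a driver's bit to the per-lock grant register releases any lock
 * that bit may still hold from a previous driver instance (see
 * bge_ape_unlock()).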
*/ for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { switch (i) { case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: bit = BGE_APE_LOCK_GRANT_DRIVER0; break; default: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); } APE_WRITE_4(sc, regbase + 4 * i, bit); } /* Select the PHY lock based on the device's function number. */ switch (sc->bge_func_addr) { case 0: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; break; case 1: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; break; case 2: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; break; case 3: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; break; default: device_printf(sc->bge_dev, "PHY lock not supported on this function\n"); } } /* * Check for APE firmware, set flags, and print version info. */ static void bge_ape_read_fw_ver(struct bge_softc *sc) { const char *fwtype; uint32_t apedata, features; /* Check for a valid APE signature in shared memory. */ apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); if (apedata != BGE_APE_SEG_SIG_MAGIC) { sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE; return; } /* Check if APE firmware is running. */ apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { device_printf(sc->bge_dev, "APE signature found " "but FW status not ready! 0x%08x\n", apedata); return; } sc->bge_mfw_flags |= BGE_MFW_ON_APE; /* Fetch the APE firmware type and version. */ apedata = APE_READ_4(sc, BGE_APE_FW_VERSION); features = APE_READ_4(sc, BGE_APE_FW_FEATURES); if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) { sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI; fwtype = "NCSI"; } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) { sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH; fwtype = "DASH"; } else fwtype = "UNKN"; /* Print the APE firmware version. */ device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n", fwtype, (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, (apedata & BGE_APE_FW_VERSION_BLDMSK)); } static int bge_ape_lock(struct bge_softc *sc, int locknum) { uint32_t bit, gnt, req, status; int i, off; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return (0); /* Lock request/grant registers have different bases. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5761) { req = BGE_APE_LOCK_REQ; gnt = BGE_APE_LOCK_GRANT; } else { req = BGE_APE_PER_LOCK_REQ; gnt = BGE_APE_PER_LOCK_GRANT; } off = 4 * locknum; switch (locknum) { case BGE_APE_LOCK_GPIO: /* Lock required when using GPIO. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5761) return (0); if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_REQ_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_GRC: /* Lock required to reset the device. */ if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_REQ_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_MEM: /* Lock required when accessing certain APE memory. */ if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_REQ_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: /* Lock required when accessing PHYs. */ bit = BGE_APE_LOCK_REQ_DRIVER0; break; default: return (EINVAL); } /* Request a lock. */ APE_WRITE_4(sc, req + off, bit); /* Wait up to 1 second to acquire lock. 
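 * (20000 polls of 50us each in the loop below.)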
*/ for (i = 0; i < 20000; i++) { status = APE_READ_4(sc, gnt + off); if (status == bit) break; DELAY(50); } /* Handle any errors. */ if (status != bit) { device_printf(sc->bge_dev, "APE lock %d request failed! " "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", locknum, req + off, bit & 0xFFFF, gnt + off, status & 0xFFFF); /* Revoke the lock request. */ APE_WRITE_4(sc, gnt + off, bit); return (EBUSY); } return (0); } static void bge_ape_unlock(struct bge_softc *sc, int locknum) { uint32_t bit, gnt; int off; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) gnt = BGE_APE_LOCK_GRANT; else gnt = BGE_APE_PER_LOCK_GRANT; off = 4 * locknum; switch (locknum) { case BGE_APE_LOCK_GPIO: if (sc->bge_asicrev == BGE_ASICREV_BCM5761) return; if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_GRC: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_MEM: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: bit = BGE_APE_LOCK_GRANT_DRIVER0; break; default: return; } APE_WRITE_4(sc, gnt + off, bit); } /* * Send an event to the APE firmware. */ static void bge_ape_send_event(struct bge_softc *sc, uint32_t event) { uint32_t apedata; int i; /* NCSI does not support APE events. */ if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; /* Wait up to 1ms for APE to service previous event. */ for (i = 10; i > 0; i--) { if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) break; apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | BGE_APE_EVENT_STATUS_EVENT_PENDING); bge_ape_unlock(sc, BGE_APE_LOCK_MEM); APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); break; } bge_ape_unlock(sc, BGE_APE_LOCK_MEM); DELAY(100); } if (i == 0) device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n", event); } static void bge_ape_driver_state_change(struct bge_softc *sc, int kind) { uint32_t apedata, event; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; switch (kind) { case BGE_RESET_START: /* If this is the first load, clear the load counter. */ apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); else { apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); } APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, BGE_APE_HOST_SEG_SIG_MAGIC); APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, BGE_APE_HOST_SEG_LEN_MAGIC); /* Add some version info if bge(4) supports it. 
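 * For now the host driver ID is simply reported as version 1.0.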
*/ APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, BGE_APE_HOST_BEHAV_NO_PHYLOCK); APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, BGE_APE_HOST_HEARTBEAT_INT_DISABLE); APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, BGE_APE_HOST_DRVR_STATE_START); event = BGE_APE_EVENT_STATUS_STATE_START; break; case BGE_RESET_SHUTDOWN: APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, BGE_APE_HOST_DRVR_STATE_UNLOAD); event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; break; case BGE_RESET_SUSPEND: event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; break; default: return; } bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | BGE_APE_EVENT_STATUS_STATE_CHNGE); } /* * Map a single buffer address. */ static void bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bge_dmamap_arg *ctx; if (error) return; KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg)); ctx = arg; ctx->bge_busaddr = segs->ds_addr; } static uint8_t bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) { uint32_t access, byte = 0; int i; /* Lock. */ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); for (i = 0; i < 8000; i++) { if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) break; DELAY(20); } if (i == 8000) return (1); /* Enable access. */ access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); for (i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { DELAY(10); break; } } if (i == BGE_TIMEOUT * 10) { if_printf(sc->bge_ifp, "nvram read timed out\n"); return (1); } /* Get result. */ byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; /* Disable access. */ CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); /* Unlock. */ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); CSR_READ_4(sc, BGE_NVRAM_SWARB); return (0); } /* * Read a sequence of bytes from NVRAM. */ static int bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) { int err = 0, i; uint8_t byte = 0; if (sc->bge_asicrev != BGE_ASICREV_BCM5906) return (1); for (i = 0; i < cnt; i++) { err = bge_nvram_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return (err ? 1 : 0); } /* * Read a byte of data stored in the EEPROM at address 'addr.' The * BCM570x supports both the traditional bitbang interface and an * auto access interface for reading the EEPROM. We use the auto * access method. */ static uint8_t bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) { int i; uint32_t byte = 0; /* * Enable use of auto EEPROM access so we can avoid * having to use the bitbang method. */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); /* Reset the EEPROM, load the clock period. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); DELAY(20); /* Issue the read EEPROM command. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); /* Wait for completion */ for(i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) break; } if (i == BGE_TIMEOUT * 10) { device_printf(sc->bge_dev, "EEPROM read timed out\n"); return (1); } /* Get result. */ byte = CSR_READ_4(sc, BGE_EE_DATA); *dest = (byte >> ((addr % 4) * 8)) & 0xFF; return (0); } /* * Read a sequence of bytes from the EEPROM. 
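 * Returns 0 on success and 1 if reading any byte fails.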
*/ static int bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) { int i, error = 0; uint8_t byte = 0; for (i = 0; i < cnt; i++) { error = bge_eeprom_getbyte(sc, off + i, &byte); if (error) break; *(dest + i) = byte; } return (error ? 1 : 0); } static int bge_miibus_readreg(device_t dev, int phy, int reg) { struct bge_softc *sc; uint32_t val; int i; sc = device_get_softc(dev); if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) return (0); /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); DELAY(80); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | BGE_MIPHY(phy) | BGE_MIREG(reg)); /* Poll for the PHY register access to complete. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); val = CSR_READ_4(sc, BGE_MI_COMM); if ((val & BGE_MICOMM_BUSY) == 0) { DELAY(5); val = CSR_READ_4(sc, BGE_MI_COMM); break; } } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "PHY read timed out (phy %d, reg %d, val 0x%08x)\n", phy, reg, val); val = 0; } /* Restore the autopoll bit if necessary. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } bge_ape_unlock(sc, sc->bge_phy_ape_lock); if (val & BGE_MICOMM_READFAIL) return (0); return (val & 0xFFFF); } static int bge_miibus_writereg(device_t dev, int phy, int reg, int val) { struct bge_softc *sc; int i; sc = device_get_softc(dev); if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) return (0); if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) return (0); /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); DELAY(80); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | BGE_MIPHY(phy) | BGE_MIREG(reg) | val); for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { DELAY(5); CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ break; } } /* Restore the autopoll bit if necessary. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } bge_ape_unlock(sc, sc->bge_phy_ape_lock); if (i == BGE_TIMEOUT) device_printf(sc->bge_dev, "PHY write timed out (phy %d, reg %d, val 0x%04x)\n", phy, reg, val); return (0); } static void bge_miibus_statchg(device_t dev) { struct bge_softc *sc; struct mii_data *mii; uint32_t mac_mode, rx_mode, tx_mode; sc = device_get_softc(dev); if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0) return; mii = device_get_softc(sc->bge_miibus); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->bge_link = 1; break; case IFM_1000_T: case IFM_1000_SX: case IFM_2500_SX: if (sc->bge_asicrev != BGE_ASICREV_BCM5906) sc->bge_link = 1; else sc->bge_link = 0; break; default: sc->bge_link = 0; break; } } else sc->bge_link = 0; if (sc->bge_link == 0) return; /* * APE firmware touches these registers to keep the MAC * connected to the outside world. Try to keep the * accesses atomic. */ /* Set the port mode (MII/GMII) to match the link speed. 
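 * GMII is selected for 1000baseT/1000baseSX media and MII otherwise; the
 * duplex and flow-control bits are updated below to match as well.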
*/ mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); tx_mode = CSR_READ_4(sc, BGE_TX_MODE); rx_mode = CSR_READ_4(sc, BGE_RX_MODE); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) mac_mode |= BGE_PORTMODE_GMII; else mac_mode |= BGE_PORTMODE_MII; /* Set MAC flow control behavior to match link flow control settings. */ tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE; rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE; } else mac_mode |= BGE_MACMODE_HALF_DUPLEX; CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); DELAY(40); CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode); CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode); } /* * Intialize a standard receive ring descriptor. */ static int bge_newbuf_std(struct bge_softc *sc, int i) { struct mbuf *m; struct bge_rx_bd *r; bus_dma_segment_t segs[1]; bus_dmamap_t map; int error, nsegs; if (sc->bge_flags & BGE_FLAG_JUMBO_STD && (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) { m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MJUM9BYTES; } else { m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; } if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } map = sc->bge_cdata.bge_rx_std_dmamap[i]; sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap; sc->bge_cdata.bge_rx_std_sparemap = map; sc->bge_cdata.bge_rx_std_chain[i] = m; sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len; r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); r->bge_flags = BGE_RXBDFLAG_END; r->bge_len = segs[0].ds_len; r->bge_idx = i; bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD); return (0); } /* * Initialize a jumbo receive ring descriptor. This allocates * a jumbo buffer from the pool managed internally by the driver. 
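 * A 9k cluster (MJUM9BYTES) is attached to the mbuf; it may map to as many
 * as four DMA segments, which are described by the extended RX buffer
 * descriptor filled in below.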
*/ static int bge_newbuf_jumbo(struct bge_softc *sc, int i) { bus_dma_segment_t segs[BGE_NSEG_JUMBO]; bus_dmamap_t map; struct bge_extrx_bd *r; struct mbuf *m; int error, nsegs; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); if (m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) { m_freem(m); return (ENOBUFS); } m->m_len = m->m_pkthdr.len = MJUM9BYTES; if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } map = sc->bge_cdata.bge_rx_jumbo_dmamap[i]; sc->bge_cdata.bge_rx_jumbo_dmamap[i] = sc->bge_cdata.bge_rx_jumbo_sparemap; sc->bge_cdata.bge_rx_jumbo_sparemap = map; sc->bge_cdata.bge_rx_jumbo_chain[i] = m; sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0; /* * Fill in the extended RX buffer descriptor. */ r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; r->bge_idx = i; r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; switch (nsegs) { case 4: r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); r->bge_len3 = segs[3].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len; case 3: r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); r->bge_len2 = segs[2].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len; case 2: r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); r->bge_len1 = segs[1].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len; case 1: r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); r->bge_len0 = segs[0].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len; break; default: panic("%s: %d segments\n", __func__, nsegs); } bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD); return (0); } static int bge_init_rx_ring_std(struct bge_softc *sc) { int error, i; bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); sc->bge_std = 0; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if ((error = bge_newbuf_std(sc, i)) != 0) return (error); BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_std = 0; bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1); return (0); } static void bge_free_rx_ring_std(struct bge_softc *sc) { int i; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_std_chain[i]); sc->bge_cdata.bge_rx_std_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], sizeof(struct bge_rx_bd)); } } static int bge_init_rx_ring_jumbo(struct 
bge_softc *sc) { struct bge_rcb *rcb; int error, i; bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ); sc->bge_jumbo = 0; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if ((error = bge_newbuf_jumbo(sc, i)) != 0) return (error); BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_jumbo = 0; /* Enable the jumbo receive producer ring. */ rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1); return (0); } static void bge_free_rx_ring_jumbo(struct bge_softc *sc) { int i; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], sizeof(struct bge_extrx_bd)); } } static void bge_free_tx_ring(struct bge_softc *sc) { int i; if (sc->bge_ldata.bge_tx_ring == NULL) return; for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i]); m_freem(sc->bge_cdata.bge_tx_chain[i]); sc->bge_cdata.bge_tx_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_tx_ring[i], sizeof(struct bge_tx_bd)); } } static int bge_init_tx_ring(struct bge_softc *sc) { sc->bge_txcnt = 0; sc->bge_tx_saved_considx = 0; bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Initialize transmit producer index for host-memory send ring. */ sc->bge_tx_prodidx = 0; bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* NIC-memory send ring not used; initialize to zero. */ bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); return (0); } static void bge_setpromisc(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable promiscuous mode as needed. */ if (if_getflags(ifp) & IFF_PROMISC) BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); else BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); } static u_int bge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t *hashes = arg; int h; h = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7F; hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); return (1); } static void bge_setmulti(struct bge_softc *sc) { if_t ifp; uint32_t hashes[4] = { 0, 0, 0, 0 }; int i; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) { for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); return; } /* First, zot all the existing filters. 
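 * The hash computed by bge_hash_maddr() above is the low 7 bits of the
 * little-endian CRC32 of the address: bits 6-5 select one of the four
 * 32-bit BGE_MAR registers and bits 4-0 select the bit within it.  As an
 * illustrative example, a hash of 0x47 sets bit 7 of hashes[2], which is
 * written to BGE_MAR0 + 8 below.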
*/ for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); if_foreach_llmaddr(ifp, bge_hash_maddr, hashes); for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); } static void bge_setvlan(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable VLAN tag stripping as needed. */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); else BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); } static void bge_sig_pre_reset(struct bge_softc *sc, int type) { /* * Some chips don't like this so only do this if ASF is enabled */ if (sc->bge_asf_mode) bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START); break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD); break; case BGE_RESET_SUSPEND: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_SUSPEND); break; } } if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) bge_ape_driver_state_change(sc, type); } static void bge_sig_post_reset(struct bge_softc *sc, int type) { if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START_DONE); /* START DONE */ break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD_DONE); break; } } if (type == BGE_RESET_SHUTDOWN) bge_ape_driver_state_change(sc, type); } static void bge_sig_legacy(struct bge_softc *sc, int type) { if (sc->bge_asf_mode) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START); break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD); break; } } } static void bge_stop_fw(struct bge_softc *sc) { int i; if (sc->bge_asf_mode) { bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); for (i = 0; i < 100; i++ ) { if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & BGE_RX_CPU_DRV_EVENT)) break; DELAY(10); } } } static uint32_t bge_dma_swap_options(struct bge_softc *sc) { uint32_t dma_options; dma_options = BGE_MODECTL_WORDSWAP_NONFRAME | BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA; #if BYTE_ORDER == BIG_ENDIAN dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME; #endif return (dma_options); } /* * Do endian, PCI and DMA initialization. */ static int bge_chipinit(struct bge_softc *sc) { uint32_t dma_rw_ctl, misc_ctl, mode_ctl; uint16_t val; int i; /* Set endianness before we access any non-PCI registers. */ misc_ctl = BGE_INIT; if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS) misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4); /* * Clear the MAC statistics block in the NIC's * internal memory. */ for (i = BGE_STATS_BLOCK; i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); for (i = BGE_STATUS_BLOCK; i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) { /* * Fix data corruption caused by non-qword write with WB. * Fix master abort in PCI mode. * Fix PCI latency timer. 
*/ val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2); val |= (1 << 10) | (1 << 12) | (1 << 13); pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2); } if (sc->bge_asicrev == BGE_ASICREV_BCM57765 || sc->bge_asicrev == BGE_ASICREV_BCM57766) { /* * For the 57766 and non Ax versions of 57765, bootcode * needs to setup the PCIE Fast Training Sequence (FTS) * value to prevent transmit hangs. */ if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) { CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) | BGE_CPMU_PADRNG_CTL_RDIV2); } } /* * Set up the PCI DMA control register. */ dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_mps >= 256) dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); else dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else if (sc->bge_flags & BGE_FLAG_PCIX) { if (BGE_IS_5714_FAMILY(sc)) { /* 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL : BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { /* * In the BCM5703, the DMA read watermark should * be set to less than or equal to the maximum * memory read byte count of the PCI-X command * register. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { /* 1536 bytes for read, 384 bytes for write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else { /* 384 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t tmp; /* Set ONE_DMA_AT_ONCE for hardware workaround. */ tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; if (tmp == 6 || tmp == 7) dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; /* Set PCI-X DMA write workaround. */ dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; } } else { /* Conventional PCI bus: 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) dma_rw_ctl |= 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_asicrev == BGE_ASICREV_BCM5701) dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | BGE_PCIDMARWCTL_ASRT_ALL_BE; if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; if (BGE_IS_5717_PLUS(sc)) { dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; /* * Enable HW workaround for controllers that misinterpret * a status tag update and leave interrupts permanently * disabled. */ if (!BGE_IS_57765_PLUS(sc) && sc->bge_asicrev != BGE_ASICREV_BCM5717 && sc->bge_asicrev != BGE_ASICREV_BCM5762) dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; } pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); /* * Set up general mode register. */ mode_ctl = bge_dma_swap_options(sc); if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { /* Retain Host-2-BMC settings written by APE firmware. 
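 * Concretely, the read-modify-write below keeps only the byte/word swap
 * and B2HRX/HTX2B enable bits that the APE firmware may have set in
 * BGE_MODE_CTL before the driver ORs in its own mode bits.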
*/ mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & (BGE_MODECTL_BYTESWAP_B2HRX_DATA | BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); } mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM; /* * BCM5701 B5 have a bug causing data corruption when using * 64-bit DMA reads, which can be terminated early and then * completed later as 32-bit accesses, in combination with * certain bridges. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_chipid == BGE_CHIPID_BCM5701_B5) mode_ctl |= BGE_MODECTL_FORCE_PCI32; /* * Tell the firmware the driver is running */ if (sc->bge_asf_mode & ASF_STACKUP) mode_ctl |= BGE_MODECTL_STACKUP; CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); /* * Disable memory write invalidate. Apparently it is not supported * properly by these devices. */ PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); /* Set the timer prescaler (always 66 MHz). */ CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { DELAY(40); /* XXX */ /* Put PHY into ready state */ BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ DELAY(40); } return (0); } static int bge_blockinit(struct bge_softc *sc) { struct bge_rcb *rcb; bus_size_t vrcb; caddr_t lladdr; bge_hostaddr taddr; uint32_t dmactl, rdmareg, val; int i, limit; /* * Initialize the memory window pointer register so that * we can access the first 32K of internal NIC RAM. This will * allow us to set up the TX send ring RCBs and the RX return * ring RCBs, plus other things which live in NIC memory. */ CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); /* Note: the BCM5704 has a smaller mbuf space than other chips. */ if (!(BGE_IS_5705_PLUS(sc))) { /* Configure mbuf memory pool */ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); if (sc->bge_asicrev == BGE_ASICREV_BCM5704) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); else CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); /* Configure DMA resource pool */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); } /* Configure mbuf pool watermarks */ if (BGE_IS_5717_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); if (if_getmtu(sc->bge_ifp) > ETHERMTU) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); } } else if (!BGE_IS_5705_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } /* Configure DMA resource watermarks */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); /* Enable buffer manager */ val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN; /* * Change the arbitration algorithm of TXMBUF read request to * round-robin instead of priority based for BCM5719. 
When * TXFIFO is almost empty, RDMA will hold its request until * TXFIFO is not almost empty. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val |= BGE_BMANMODE_NO_TX_UNDERRUN; CSR_WRITE_4(sc, BGE_BMAN_MODE, val); /* Poll for buffer manager start indication */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "buffer manager failed to start\n"); return (ENXIO); } /* Enable flow-through queues */ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); /* Wait until queue initialization is complete */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "flow-through queue init failed\n"); return (ENXIO); } /* * Summary of rings supported by the controller: * * Standard Receive Producer Ring * - This ring is used to feed receive buffers for "standard" * sized frames (typically 1536 bytes) to the controller. * * Jumbo Receive Producer Ring * - This ring is used to feed receive buffers for jumbo sized * frames (i.e. anything bigger than the "standard" frames) * to the controller. * * Mini Receive Producer Ring * - This ring is used to feed receive buffers for "mini" * sized frames to the controller. * - This feature required external memory for the controller * but was never used in a production system. Should always * be disabled. * * Receive Return Ring * - After the controller has placed an incoming frame into a * receive buffer that buffer is moved into a receive return * ring. The driver is then responsible to passing the * buffer up to the stack. Many versions of the controller * support multiple RR rings. * * Send Ring * - This ring is used for outgoing frames. Many versions of * the controller support multiple send rings. */ /* Initialize the standard receive producer ring control block. */ rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); if (BGE_IS_5717_PLUS(sc)) { /* * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) * Bits 15-2 : Maximum RX frame size * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); } else if (BGE_IS_5705_PLUS(sc)) { /* * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) * Bits 15-2 : Reserved (should be 0) * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); } else { /* * Ring size is always XXX entries * Bits 31-16: Maximum RX frame size * Bits 15-2 : Reserved (should be 0) * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); } if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; else rcb->bge_nicaddr = BGE_STD_RX_RINGS; /* Write the standard receive producer ring control block. 
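 * The same four-register sequence (host address hi/lo, maxlen/flags, NIC
 * address) is repeated for the jumbo ring below.  A hypothetical helper
 * expressing the pattern (illustration only, not part of this driver):
 *
 *	static void
 *	bge_write_std_rcb(struct bge_softc *sc, const struct bge_rcb *rcb)
 *	{
 *		CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI,
 *		    rcb->bge_hostaddr.bge_addr_hi);
 *		CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO,
 *		    rcb->bge_hostaddr.bge_addr_lo);
 *		CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS,
 *		    rcb->bge_maxlen_flags);
 *		CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
 *	}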
*/ CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); /* Reset the standard receive producer ring producer index. */ bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); /* * Initialize the jumbo RX producer ring control * block. We set the 'ring disabled' bit in the * flags field until we're actually ready to start * using this ring (i.e. once we set the MTU * high enough to require it). */ if (BGE_IS_JUMBO_CAPABLE(sc)) { rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; /* Get the jumbo receive producer ring RCB parameters. */ rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD); rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; else rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); /* Program the jumbo receive producer ring RCB parameters. */ CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); /* Reset the jumbo receive producer ring producer index. */ bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); } /* Disable the mini receive producer ring RCB. */ if (BGE_IS_5700_FAMILY(sc)) { rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); /* Reset the mini receive producer ring producer index. */ bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); } /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || sc->bge_chipid == BGE_CHIPID_BCM5906_A2) CSR_WRITE_4(sc, BGE_ISO_PKT_TX, (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); } /* * The BD ring replenish thresholds control how often the * hardware fetches new BD's from the producer rings in host * memory. Setting the value too low on a busy system can * starve the hardware and reduce the throughput. * * Set the BD ring replentish thresholds. The recommended * values are 1/8th the number of descriptors allocated to * each ring. * XXX The 5754 requires a lower threshold, so it might be a * requirement of all 575x family chips. The Linux driver sets * the lower threshold for all 5705 family chips as well, but there * are reports that it might not need to be so strict. * * XXX Linux does some extra fiddling here for the 5906 parts as * well. 
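 * As a worked example of the 1/8th rule: on pre-5705 chips with the usual
 * BGE_STD_RX_RING_CNT of 512 the threshold programmed below is 512 / 8 =
 * 64 BDs, while 5705-and-newer parts use the fixed value 8.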
*/ if (BGE_IS_5705_PLUS(sc)) val = 8; else val = BGE_STD_RX_RING_CNT / 8; CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); if (BGE_IS_JUMBO_CAPABLE(sc)) CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); if (BGE_IS_5717_PLUS(sc)) { CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); } /* * Disable all send rings by setting the 'ring disabled' bit * in the flags field of all the TX send ring control blocks, * located in NIC memory. */ if (!BGE_IS_5705_PLUS(sc)) /* 5700 to 5704 had 16 send rings. */ limit = BGE_TX_RINGS_EXTSSRAM_MAX; else if (BGE_IS_57765_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5762) limit = 2; else if (BGE_IS_5717_PLUS(sc)) limit = 4; else limit = 1; vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; for (i = 0; i < limit; i++) { RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); vrcb += sizeof(struct bge_rcb); } /* Configure send ring RCB 0 (we use only the first ring) */ vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717); else RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); /* * Disable all receive return rings by setting the * 'ring diabled' bit in the flags field of all the receive * return ring control blocks, located in NIC memory. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* Should be 17, use 16 until we get an SRAM map. */ limit = 16; } else if (!BGE_IS_5705_PLUS(sc)) limit = BGE_RX_RINGS_MAX; else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5762 || BGE_IS_57765_PLUS(sc)) limit = 4; else limit = 1; /* Disable all receive return rings. */ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; for (i = 0; i < limit; i++) { RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_FLAG_RING_DISABLED); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); bge_writembx(sc, BGE_MBX_RX_CONS0_LO + (i * (sizeof(uint64_t))), 0); vrcb += sizeof(struct bge_rcb); } /* * Set up receive return ring 0. Note that the NIC address * for RX return rings is 0x0. The return rings live entirely * within the host, so the nicaddr field in the RCB isn't used. 
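 * The ring size programmed here, sc->bge_return_ring_cnt, is chosen at
 * attach time: the smaller BGE_RETURN_RING_CNT_5705 (512 entries) for
 * 5705-class chips that are not 5717-plus, BGE_RETURN_RING_CNT otherwise.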
*/ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); lladdr = if_getlladdr(sc->bge_ifp); /* Set random backoff seed for TX */ CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, (lladdr[0] + lladdr[1] + lladdr[2] + lladdr[3] + lladdr[4] + lladdr[5]) & BGE_TX_BACKOFF_SEED_MASK); /* Set inter-packet gap */ val = 0x2620; if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); /* * Specify which ring to use for packets that don't match * any RX rules. */ CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); /* * Configure number of RX lists. One interrupt distribution * list, sixteen active lists, one bad frames class. */ CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); /* Initialize RX list placement stats mask. */ CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); /* Disable host coalescing until we get it set up */ CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); /* Poll to make sure it's shut down. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "host coalescing engine failed to idle\n"); return (ENXIO); } /* Set up host coalescing defaults */ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); } CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); /* Set up address of statistics block */ if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); } /* Set up address of status block */ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); /* Set up status block size. 
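 * This mirrors the sbsz choice in bge_dma_alloc(): BCM5700 chips other
 * than revision C0 get a full BGE_STATUS_BLK_SZ block, everything else the
 * minimal 32-byte block, since only one RX return ring is used.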
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { val = BGE_STATBLKSZ_FULL; bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); } else { val = BGE_STATBLKSZ_32BYTE; bzero(sc->bge_ldata.bge_status_block, 32); } bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Turn on host coalescing state machine */ CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); /* Turn on RX BD completion state machine and enable attentions */ CSR_WRITE_4(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); /* Turn on RX list placement state machine */ CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); /* Turn on RX list selector state machine. */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); /* Turn on DMA, clear stats. */ val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB; if (sc->bge_flags & BGE_FLAG_TBI) val |= BGE_PORTMODE_TBI; else if (sc->bge_flags & BGE_FLAG_MII_SERDES) val |= BGE_PORTMODE_GMII; else val |= BGE_PORTMODE_MII; /* Allow APE to send/receive frames. */ if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; CSR_WRITE_4(sc, BGE_MAC_MODE, val); DELAY(40); /* Set misc. local control, enable interrupts on attentions */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); #ifdef notdef /* Assert GPIO pins for PHY reset */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); #endif /* Turn on DMA completion state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; /* Enable host coalescing bug fix. */ if (BGE_IS_5755_PLUS(sc)) val |= BGE_WDMAMODE_STATUS_TAG_FIX; /* Request larger DMA burst size to get better performance. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5785) val |= BGE_WDMAMODE_BURST_ALL_DATA; /* Turn on write DMA state machine */ CSR_WRITE_4(sc, BGE_WDMA_MODE, val); DELAY(40); /* Turn on read DMA state machine */ val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; if (sc->bge_asicrev == BGE_ASICREV_BCM5717) val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; if (sc->bge_flags & BGE_FLAG_PCIE) val |= BGE_RDMAMODE_FIFO_LONG_BURST; if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { val |= BGE_RDMAMODE_TSO4_ENABLE; if (sc->bge_flags & BGE_FLAG_TSO3 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) val |= BGE_RDMAMODE_TSO6_ENABLE; } if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { val |= CSR_READ_4(sc, BGE_RDMA_MODE) & BGE_RDMAMODE_H2BNC_VLAN_DET; /* * Allow multiple outstanding read requests from * non-LSO read DMA engine. 
*/ val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; } if (sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780 || BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) { if (sc->bge_asicrev == BGE_ASICREV_BCM5762) rdmareg = BGE_RDMA_RSRVCTRL_REG2; else rdmareg = BGE_RDMA_RSRVCTRL; dmactl = CSR_READ_4(sc, rdmareg); /* * Adjust tx margin to prevent TX data corruption and * fix internal FIFO overflow. */ if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | BGE_RDMA_RSRVCTRL_TXMRGN_MASK); dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | BGE_RDMA_RSRVCTRL_TXMRGN_320B; } /* * Enable fix for read DMA FIFO overruns. * The fix is to limit the number of RX BDs * the hardware would fetch at a fime. */ CSR_WRITE_4(sc, rdmareg, dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); } if (sc->bge_asicrev == BGE_ASICREV_BCM5719) { CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* * Allow 4KB burst length reads for non-LSO frames. * Enable 512B burst length reads for buffer descriptors. */ CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) { CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } CSR_WRITE_4(sc, BGE_RDMA_MODE, val); DELAY(40); if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); if ((val & 0xFFFF) > BGE_FRAMELEN) break; if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN) break; } if (i != BGE_NUM_RDMA_CHANNELS / 2) { val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val |= BGE_RDMA_TX_LENGTH_WA_5719; else val |= BGE_RDMA_TX_LENGTH_WA_5720; CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); } } /* Turn on RX data completion state machine */ CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); /* Turn on RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); /* Turn on RX data and RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); /* Turn on Mbuf cluster free state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); /* Turn on send BD completion state machine */ CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* Turn on send data completion state machine */ val = BGE_SDCMODE_ENABLE; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) val |= BGE_SDCMODE_CDELAY; CSR_WRITE_4(sc, BGE_SDC_MODE, val); /* Turn on send data initiator state machine */ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | BGE_SDIMODE_HW_LSO_PRE_DMA); else CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); /* Turn on send BD initiator state machine */ CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); /* Turn on send BD selector state machine */ CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 
BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); /* ack/clear link change events */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); CSR_WRITE_4(sc, BGE_MI_STS, 0); /* * Enable attention when the link has changed state for * devices that use auto polling. */ if (sc->bge_flags & BGE_FLAG_TBI) { CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); } else { if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); } /* * Clear any pending link state attention. * Otherwise some link state change events may be lost until attention * is cleared by bge_intr() -> bge_link_upd() sequence. * It's not necessary on newer BCM chips - perhaps enabling link * state change attentions implies clearing pending attention. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); /* Enable link state change attentions. */ BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); return (0); } static const struct bge_revision * bge_lookup_rev(uint32_t chipid) { const struct bge_revision *br; for (br = bge_revisions; br->br_name != NULL; br++) { if (br->br_chipid == chipid) return (br); } for (br = bge_majorrevs; br->br_name != NULL; br++) { if (br->br_chipid == BGE_ASICREV(chipid)) return (br); } return (NULL); } static const struct bge_vendor * bge_lookup_vendor(uint16_t vid) { const struct bge_vendor *v; for (v = bge_vendors; v->v_name != NULL; v++) if (v->v_id == vid) return (v); return (NULL); } static uint32_t bge_chipid(device_t dev) { uint32_t id; id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> BGE_PCIMISCCTL_ASICREV_SHIFT; if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { /* * Find the ASCI revision. Different chips use different * registers. */ switch (pci_get_device(dev)) { case BCOM_DEVICEID_BCM5717C: /* 5717 C0 seems to belong to 5720 line. */ id = BGE_CHIPID_BCM5720_A0; break; case BCOM_DEVICEID_BCM5717: case BCOM_DEVICEID_BCM5718: case BCOM_DEVICEID_BCM5719: case BCOM_DEVICEID_BCM5720: case BCOM_DEVICEID_BCM5725: case BCOM_DEVICEID_BCM5727: case BCOM_DEVICEID_BCM5762: case BCOM_DEVICEID_BCM57764: case BCOM_DEVICEID_BCM57767: case BCOM_DEVICEID_BCM57787: id = pci_read_config(dev, BGE_PCI_GEN2_PRODID_ASICREV, 4); break; case BCOM_DEVICEID_BCM57761: case BCOM_DEVICEID_BCM57762: case BCOM_DEVICEID_BCM57765: case BCOM_DEVICEID_BCM57766: case BCOM_DEVICEID_BCM57781: case BCOM_DEVICEID_BCM57782: case BCOM_DEVICEID_BCM57785: case BCOM_DEVICEID_BCM57786: case BCOM_DEVICEID_BCM57791: case BCOM_DEVICEID_BCM57795: id = pci_read_config(dev, BGE_PCI_GEN15_PRODID_ASICREV, 4); break; default: id = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4); } } return (id); } /* * Probe for a Broadcom chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. * * Note that since the Broadcom controller contains VPD support, we * try to get the device name string from the controller itself instead * of the compiled-in string. It guarantees we'll always announce the * right product name. We fall back to the compiled-in string when * VPD is unavailable or corrupt. 
*/ static int bge_probe(device_t dev) { char model[64]; const struct bge_revision *br; const char *pname; struct bge_softc *sc; const struct bge_type *t = bge_devs; const struct bge_vendor *v; uint32_t id; uint16_t did, vid; sc = device_get_softc(dev); sc->bge_dev = dev; vid = pci_get_vendor(dev); did = pci_get_device(dev); while(t->bge_vid != 0) { if ((vid == t->bge_vid) && (did == t->bge_did)) { id = bge_chipid(dev); br = bge_lookup_rev(id); if (bge_has_eaddr(sc) && pci_get_vpd_ident(dev, &pname) == 0) snprintf(model, sizeof(model), "%s", pname); else { v = bge_lookup_vendor(vid); snprintf(model, sizeof(model), "%s %s", v != NULL ? v->v_name : "Unknown", br != NULL ? br->br_name : "NetXtreme/NetLink Ethernet Controller"); } device_set_descf(dev, "%s, %sASIC rev. %#08x", model, br != NULL ? "" : "unknown ", id); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void bge_dma_free(struct bge_softc *sc) { int i; /* Destroy DMA maps for RX buffers. */ for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } if (sc->bge_cdata.bge_rx_std_sparemap) bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_sparemap); /* Destroy DMA maps for jumbo RX buffers. */ for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } if (sc->bge_cdata.bge_rx_jumbo_sparemap) bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_sparemap); /* Destroy DMA maps for TX buffers. */ for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i]); } if (sc->bge_cdata.bge_rx_mtag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); if (sc->bge_cdata.bge_mtag_jumbo) bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo); if (sc->bge_cdata.bge_tx_mtag) bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); /* Destroy standard RX ring. */ if (sc->bge_ldata.bge_rx_std_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_ldata.bge_rx_std_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_ldata.bge_rx_std_ring, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_cdata.bge_rx_std_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); /* Destroy jumbo RX ring. */ if (sc->bge_ldata.bge_rx_jumbo_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_ldata.bge_rx_jumbo_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_ldata.bge_rx_jumbo_ring, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_cdata.bge_rx_jumbo_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); /* Destroy RX return ring. */ if (sc->bge_ldata.bge_rx_return_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_ldata.bge_rx_return_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_ldata.bge_rx_return_ring, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_cdata.bge_rx_return_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); /* Destroy TX ring. 
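 * Every ring torn down below follows the same unload / free / destroy
 * sequence, guarded so that a partially completed bge_dma_alloc() can be
 * unwound safely; schematically:
 *
 *	if (paddr != 0)
 *		bus_dmamap_unload(tag, map);
 *	if (ring != NULL)
 *		bus_dmamem_free(tag, ring, map);
 *	if (tag != NULL)
 *		bus_dma_tag_destroy(tag);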
*/ if (sc->bge_ldata.bge_tx_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_ldata.bge_tx_ring) bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, sc->bge_ldata.bge_tx_ring, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_cdata.bge_tx_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); /* Destroy status block. */ if (sc->bge_ldata.bge_status_block_paddr) bus_dmamap_unload(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map); if (sc->bge_ldata.bge_status_block) bus_dmamem_free(sc->bge_cdata.bge_status_tag, sc->bge_ldata.bge_status_block, sc->bge_cdata.bge_status_map); if (sc->bge_cdata.bge_status_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); /* Destroy statistics block. */ if (sc->bge_ldata.bge_stats_paddr) bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, sc->bge_cdata.bge_stats_map); if (sc->bge_ldata.bge_stats) bus_dmamem_free(sc->bge_cdata.bge_stats_tag, sc->bge_ldata.bge_stats, sc->bge_cdata.bge_stats_map); if (sc->bge_cdata.bge_stats_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); if (sc->bge_cdata.bge_buffer_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag); /* Destroy the parent tag. */ if (sc->bge_cdata.bge_parent_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); } static int bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment, bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr, const char *msg) { struct bge_dmamap_arg ctx; bus_addr_t lowaddr; bus_size_t ring_end; int error; lowaddr = BUS_SPACE_MAXADDR; again: error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag); if (error != 0) { device_printf(sc->bge_dev, "could not create %s dma tag\n", msg); return (ENOMEM); } /* Allocate DMA'able memory for ring. */ error = bus_dmamem_alloc(*tag, (void **)ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map); if (error != 0) { device_printf(sc->bge_dev, "could not allocate DMA'able memory for %s\n", msg); return (ENOMEM); } /* Load the address of the ring. */ ctx.bge_busaddr = 0; error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->bge_dev, "could not load DMA'able memory for %s\n", msg); return (ENOMEM); } *paddr = ctx.bge_busaddr; ring_end = *paddr + maxsize; if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 && BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) { /* * 4GB boundary crossed. Limit maximum allowable DMA * address space to 32bit and try again. */ bus_dmamap_unload(*tag, *map); bus_dmamem_free(*tag, *ring, *map); bus_dma_tag_destroy(*tag); if (bootverbose) device_printf(sc->bge_dev, "4GB boundary crossed, " "limit DMA address space to 32bit for %s\n", msg); *ring = NULL; *tag = NULL; *map = NULL; lowaddr = BUS_SPACE_MAXADDR_32BIT; goto again; } return (0); } static int bge_dma_alloc(struct bge_softc *sc) { bus_addr_t lowaddr; bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz; int i, error; lowaddr = BUS_SPACE_MAXADDR; if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0) lowaddr = BGE_DMA_MAXADDR; /* * Allocate the parent bus DMA tag appropriate for PCI. 
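 * The resulting tag hierarchy is roughly:
 *
 *	bus_get_dma_tag(dev)
 *	  +-- bge_parent_tag -> per-ring tags (std/jumbo/return/tx rings,
 *	  |                     status block, statistics block)
 *	  +-- bge_buffer_tag -> bge_tx_mtag, bge_rx_mtag, bge_mtag_jumbo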
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); if (error != 0) { device_printf(sc->bge_dev, "could not allocate parent dma tag\n"); return (ENOMEM); } /* Create tag for standard RX ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ, &sc->bge_cdata.bge_rx_std_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_std_ring, &sc->bge_cdata.bge_rx_std_ring_map, &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring"); if (error) return (error); /* Create tag for RX return ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc), &sc->bge_cdata.bge_rx_return_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_return_ring, &sc->bge_cdata.bge_rx_return_ring_map, &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring"); if (error) return (error); /* Create tag for TX ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ, &sc->bge_cdata.bge_tx_ring_tag, (uint8_t **)&sc->bge_ldata.bge_tx_ring, &sc->bge_cdata.bge_tx_ring_map, &sc->bge_ldata.bge_tx_ring_paddr, "TX ring"); if (error) return (error); /* * Create tag for status block. * Because we only use single Tx/Rx/Rx return ring, use * minimum status block size except BCM5700 AX/BX which * seems to want to see full status block size regardless * of configured number of ring. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) sbsz = BGE_STATUS_BLK_SZ; else sbsz = 32; error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz, &sc->bge_cdata.bge_status_tag, (uint8_t **)&sc->bge_ldata.bge_status_block, &sc->bge_cdata.bge_status_map, &sc->bge_ldata.bge_status_block_paddr, "status block"); if (error) return (error); /* Create tag for statistics block. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ, &sc->bge_cdata.bge_stats_tag, (uint8_t **)&sc->bge_ldata.bge_stats, &sc->bge_cdata.bge_stats_map, &sc->bge_ldata.bge_stats_paddr, "statistics block"); if (error) return (error); /* Create tag for jumbo RX ring. */ if (BGE_IS_JUMBO_CAPABLE(sc)) { error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ, &sc->bge_cdata.bge_rx_jumbo_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring, &sc->bge_cdata.bge_rx_jumbo_ring_map, &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring"); if (error) return (error); } /* Create parent tag for buffers. */ boundary = 0; if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) { boundary = BGE_DMA_BNDRY; /* * XXX * watchdog timeout issue was observed on BCM5704 which * lives behind PCI-X bridge(e.g AMD 8131 PCI-X bridge). * Both limiting DMA address space to 32bits and flushing * mailbox write seem to address the issue. */ if (sc->bge_pcixcap != 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; } error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag); if (error != 0) { device_printf(sc->bge_dev, "could not allocate buffer dma tag\n"); return (ENOMEM); } /* Create tag for Tx mbufs. 
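 * With TSO the tag below accepts a request of up to 65535 bytes plus an
 * ether_vlan_header, split into at most BGE_NSEG_NEW segments of
 * BGE_TSOSEG_SZ each; without TSO the limit is BGE_NSEG_NEW ordinary
 * MCLBYTES clusters.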
*/ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { txsegsz = BGE_TSOSEG_SZ; txmaxsegsz = 65535 + sizeof(struct ether_vlan_header); } else { txsegsz = MCLBYTES; txmaxsegsz = MCLBYTES * BGE_NSEG_NEW; } error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_tx_mtag); if (error) { device_printf(sc->bge_dev, "could not allocate TX dma tag\n"); return (ENOMEM); } /* Create tag for Rx mbufs. */ if (sc->bge_flags & BGE_FLAG_JUMBO_STD) rxmaxsegsz = MJUM9BYTES; else rxmaxsegsz = MCLBYTES; error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1, rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag); if (error) { device_printf(sc->bge_dev, "could not allocate RX dma tag\n"); return (ENOMEM); } /* Create DMA maps for RX buffers. */ error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, &sc->bge_cdata.bge_rx_std_sparemap); if (error) { device_printf(sc->bge_dev, "can't create spare DMA map for RX\n"); return (ENOMEM); } for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, &sc->bge_cdata.bge_rx_std_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for RX\n"); return (ENOMEM); } } /* Create DMA maps for TX buffers. */ for (i = 0; i < BGE_TX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0, &sc->bge_cdata.bge_tx_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for TX\n"); return (ENOMEM); } } /* Create tags for jumbo RX buffers. */ if (BGE_IS_JUMBO_CAPABLE(sc)) { error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); if (error) { device_printf(sc->bge_dev, "could not allocate jumbo dma tag\n"); return (ENOMEM); } /* Create DMA maps for jumbo RX buffers. */ error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 0, &sc->bge_cdata.bge_rx_jumbo_sparemap); if (error) { device_printf(sc->bge_dev, "can't create spare DMA map for jumbo RX\n"); return (ENOMEM); } for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for jumbo RX\n"); return (ENOMEM); } } } return (0); } /* * Return true if this device has more than one port. */ static int bge_has_multiple_ports(struct bge_softc *sc) { device_t dev = sc->bge_dev; u_int b, d, f, fscan, s; d = pci_get_domain(dev); b = pci_get_bus(dev); s = pci_get_slot(dev); f = pci_get_function(dev); for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++) if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL) return (1); return (0); } /* * Return true if MSI can be used with this device. */ static int bge_can_use_msi(struct bge_softc *sc) { int can_use_msi = 0; if (sc->bge_msi == 0) return (0); /* Disable MSI for polling(4). */ #ifdef DEVICE_POLLING return (0); #endif switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5714_A0: case BGE_ASICREV_BCM5714: /* * Apparently, MSI doesn't work when these chips are * configured in single-port mode. 
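 * Hence the inverted-looking test below: MSI is only allowed when
 * bge_has_multiple_ports() finds another function at the same
 * domain/bus/slot.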
*/ if (bge_has_multiple_ports(sc)) can_use_msi = 1; break; case BGE_ASICREV_BCM5750: if (sc->bge_chiprev != BGE_CHIPREV_5750_AX && sc->bge_chiprev != BGE_CHIPREV_5750_BX) can_use_msi = 1; break; case BGE_ASICREV_BCM5784: /* * Prevent infinite "watchdog timeout" errors * in some MacBook Pro and make it work out-of-the-box. */ if (sc->bge_chiprev == BGE_CHIPREV_5784_AX) break; /* FALLTHROUGH */ default: if (BGE_IS_575X_PLUS(sc)) can_use_msi = 1; } return (can_use_msi); } static int bge_mbox_reorder(struct bge_softc *sc) { /* Lists of PCI bridges that are known to reorder mailbox writes. */ static const struct mbox_reorder { const uint16_t vendor; const uint16_t device; const char *desc; } mbox_reorder_lists[] = { { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" }, }; devclass_t pci, pcib; device_t bus, dev; int i; pci = devclass_find("pci"); pcib = devclass_find("pcib"); dev = sc->bge_dev; bus = device_get_parent(dev); for (;;) { dev = device_get_parent(bus); bus = device_get_parent(dev); if (device_get_devclass(dev) != pcib) break; if (device_get_devclass(bus) != pci) break; for (i = 0; i < nitems(mbox_reorder_lists); i++) { if (pci_get_vendor(dev) == mbox_reorder_lists[i].vendor && pci_get_device(dev) == mbox_reorder_lists[i].device) { device_printf(sc->bge_dev, "enabling MBOX workaround for %s\n", mbox_reorder_lists[i].desc); return (1); } } } return (0); } static void bge_devinfo(struct bge_softc *sc) { uint32_t cfg, clk; device_printf(sc->bge_dev, "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ", sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev); if (sc->bge_flags & BGE_FLAG_PCIE) printf("PCI-E\n"); else if (sc->bge_flags & BGE_FLAG_PCIX) { printf("PCI-X "); cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE) clk = 133; else { clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; switch (clk) { case 0: clk = 33; break; case 2: clk = 50; break; case 4: clk = 66; break; case 6: clk = 100; break; case 7: clk = 133; break; } } printf("%u MHz\n", clk); } else { if (sc->bge_pcixcap != 0) printf("PCI on PCI-X "); else printf("PCI "); cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4); if (cfg & BGE_PCISTATE_PCI_BUSSPEED) clk = 66; else clk = 33; if (cfg & BGE_PCISTATE_32BIT_BUS) printf("%u MHz; 32bit\n", clk); else printf("%u MHz; 64bit\n", clk); } } static int bge_attach(device_t dev) { if_t ifp; struct bge_softc *sc; uint32_t hwcfg = 0, misccfg, pcistate; u_char eaddr[ETHER_ADDR_LEN]; int capmask, error, reg, rid, trys; sc = device_get_softc(dev); sc->bge_dev = dev; BGE_LOCK_INIT(sc, device_get_nameunit(dev)); NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc); callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); pci_enable_busmaster(dev); /* * Allocate control/status registers. */ rid = PCIR_BAR(0); sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bge_res == NULL) { device_printf (sc->bge_dev, "couldn't map BAR0 memory\n"); error = ENXIO; goto fail; } /* Save various chip information. */ sc->bge_func_addr = pci_get_function(dev); sc->bge_chipid = bge_chipid(dev); sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); /* Set default PHY address. */ sc->bge_phy_addr = 1; /* * PHY address mapping for various devices. 
* * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | * ---------+-------+-------+-------+-------+ * BCM57XX | 1 | X | X | X | * BCM5704 | 1 | X | 1 | X | * BCM5717 | 1 | 8 | 2 | 9 | * BCM5719 | 1 | 8 | 2 | 9 | * BCM5720 | 1 | 8 | 2 | 9 | * * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | * ---------+-------+-------+-------+-------+ * BCM57XX | X | X | X | X | * BCM5704 | X | X | X | X | * BCM5717 | X | X | X | X | * BCM5719 | 3 | 10 | 4 | 11 | * BCM5720 | X | X | X | X | * * Other addresses may respond but they are not * IEEE compliant PHYs and should be ignored. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) { if (CSR_READ_4(sc, BGE_SGDIG_STS) & BGE_SGDIGSTS_IS_SERDES) sc->bge_phy_addr = sc->bge_func_addr + 8; else sc->bge_phy_addr = sc->bge_func_addr + 1; } else { if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & BGE_CPMU_PHY_STRAP_IS_SERDES) sc->bge_phy_addr = sc->bge_func_addr + 8; else sc->bge_phy_addr = sc->bge_func_addr + 1; } } if (bge_has_eaddr(sc)) sc->bge_flags |= BGE_FLAG_EADDR; /* Save chipset family. */ switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5762: case BGE_ASICREV_BCM57765: case BGE_ASICREV_BCM57766: sc->bge_flags |= BGE_FLAG_57765_PLUS; /* FALLTHROUGH */ case BGE_ASICREV_BCM5717: case BGE_ASICREV_BCM5719: case BGE_ASICREV_BCM5720: sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO | BGE_FLAG_JUMBO_FRAME; if (sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* * Enable work around for DMA engine miscalculation * of TXMBUF available space. */ sc->bge_flags |= BGE_FLAG_RDMA_BUG; if (sc->bge_asicrev == BGE_ASICREV_BCM5719 && sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { /* Jumbo frame on BCM5719 A0 does not work. */ sc->bge_flags &= ~BGE_FLAG_JUMBO; } } break; case BGE_ASICREV_BCM5755: case BGE_ASICREV_BCM5761: case BGE_ASICREV_BCM5784: case BGE_ASICREV_BCM5785: case BGE_ASICREV_BCM5787: case BGE_ASICREV_BCM57780: sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS; break; case BGE_ASICREV_BCM5700: case BGE_ASICREV_BCM5701: case BGE_ASICREV_BCM5703: case BGE_ASICREV_BCM5704: sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; break; case BGE_ASICREV_BCM5714_A0: case BGE_ASICREV_BCM5780: case BGE_ASICREV_BCM5714: sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD; /* FALLTHROUGH */ case BGE_ASICREV_BCM5750: case BGE_ASICREV_BCM5752: case BGE_ASICREV_BCM5906: sc->bge_flags |= BGE_FLAG_575X_PLUS; /* FALLTHROUGH */ case BGE_ASICREV_BCM5705: sc->bge_flags |= BGE_FLAG_5705_PLUS; break; } /* Identify chips with APE processor. */ switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5717: case BGE_ASICREV_BCM5719: case BGE_ASICREV_BCM5720: case BGE_ASICREV_BCM5761: case BGE_ASICREV_BCM5762: sc->bge_flags |= BGE_FLAG_APE; break; } /* Chips with APE need BAR2 access for APE registers/memory. */ if ((sc->bge_flags & BGE_FLAG_APE) != 0) { rid = PCIR_BAR(2); sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bge_res2 == NULL) { device_printf (sc->bge_dev, "couldn't map BAR2 memory\n"); error = ENXIO; goto fail; } /* Enable APE register/memory access by host driver. 
*/ pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | BGE_PCISTATE_ALLOW_APE_SHMEM_WR | BGE_PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4); bge_ape_lock_init(sc); bge_ape_read_fw_ver(sc); } /* Add SYSCTLs, requires the chipset family to be set. */ bge_add_sysctls(sc); /* Identify the chips that use an CPMU. */ if (BGE_IS_5717_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) sc->bge_flags |= BGE_FLAG_CPMU_PRESENT; if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0) sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST; else sc->bge_mi_mode = BGE_MIMODE_BASE; /* Enable auto polling for BCM570[0-5]. */ if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL; /* * All Broadcom controllers have 4GB boundary DMA bug. * Whenever an address crosses a multiple of the 4GB boundary * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA * state machine will lockup and cause the device to hang. */ sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG; /* BCM5755 or higher and BCM5906 have short DMA bug. */ if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG; /* * BCM5719 cannot handle DMA requests for DMA segments that * have larger than 4KB in size. However the maximum DMA * segment size created in DMA tag is 4KB for TSO, so we * wouldn't encounter the issue here. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5719) sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG; misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; if (sc->bge_asicrev == BGE_ASICREV_BCM5705) { if (misccfg == BGE_MISCCFG_BOARD_ID_5788 || misccfg == BGE_MISCCFG_BOARD_ID_5788M) sc->bge_flags |= BGE_FLAG_5788; } capmask = BMSR_DEFCAPMASK; if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 && (misccfg == 0x4000 || misccfg == 0x8000)) || (sc->bge_asicrev == BGE_ASICREV_BCM5705 && pci_get_vendor(dev) == BCOM_VENDORID && (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 || pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 || pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) || (pci_get_vendor(dev) == BCOM_VENDORID && (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F || pci_get_device(dev) == BCOM_DEVICEID_BCM5753F || pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) || pci_get_device(dev) == BCOM_DEVICEID_BCM57790 || pci_get_device(dev) == BCOM_DEVICEID_BCM57791 || pci_get_device(dev) == BCOM_DEVICEID_BCM57795 || sc->bge_asicrev == BGE_ASICREV_BCM5906) { /* These chips are 10/100 only. */ capmask &= ~BMSR_EXTSTAT; sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; } /* * Some controllers seem to require a special firmware to use * TSO. But the firmware is not available to FreeBSD and Linux * claims that the TSO performed by the firmware is slower than * hardware based TSO. Moreover the firmware based TSO has one * known bug which can't handle TSO if Ethernet header + IP/TCP * header is greater than 80 bytes. A workaround for the TSO * bug exist but it seems it's too expensive than not using * TSO at all. Some hardware also have the TSO bug so limit * the TSO to the controllers that are not affected TSO issues * (e.g. 5755 or higher). */ if (BGE_IS_5717_PLUS(sc)) { /* BCM5717 requires different TSO configuration. 
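 * 5717-plus parts use the TSO3 flavour (BGE_FLAG_TSO3) while older
 * 5755-plus parts use BGE_FLAG_TSO; both flags feed the LSO setup of the
 * send data initiator in bge_blockinit() and the TX tag sizing in
 * bge_dma_alloc().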
*/ sc->bge_flags |= BGE_FLAG_TSO3; if (sc->bge_asicrev == BGE_ASICREV_BCM5719 && sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { /* TSO on BCM5719 A0 does not work. */ sc->bge_flags &= ~BGE_FLAG_TSO3; } } else if (BGE_IS_5755_PLUS(sc)) { /* * BCM5754 and BCM5787 shares the same ASIC id so * explicit device id check is required. * Due to unknown reason TSO does not work on BCM5755M. */ if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 && pci_get_device(dev) != BCOM_DEVICEID_BCM5754M && pci_get_device(dev) != BCOM_DEVICEID_BCM5755M) sc->bge_flags |= BGE_FLAG_TSO; } /* * Check if this is a PCI-X or PCI Express device. */ if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { /* * Found a PCI Express capabilities register, this * must be a PCI Express device. */ sc->bge_flags |= BGE_FLAG_PCIE; sc->bge_expcap = reg; /* Extract supported maximum payload size. */ sc->bge_mps = pci_read_config(dev, sc->bge_expcap + PCIER_DEVICE_CAP, 2); sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD); if (sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) sc->bge_expmrq = 2048; else sc->bge_expmrq = 4096; pci_set_max_read_req(dev, sc->bge_expmrq); } else { /* * Check if the device is in PCI-X Mode. * (This bit is not valid on PCI Express controllers.) */ if (pci_find_cap(dev, PCIY_PCIX, ®) == 0) sc->bge_pcixcap = reg; if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & BGE_PCISTATE_PCI_BUSMODE) == 0) sc->bge_flags |= BGE_FLAG_PCIX; } /* * The 40bit DMA bug applies to the 5714/5715 controllers and is * not actually a MAC controller bug but an issue with the embedded * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. */ if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX)) sc->bge_flags |= BGE_FLAG_40BIT_BUG; /* * Some PCI-X bridges are known to trigger write reordering to * the mailbox registers. Typical phenomena is watchdog timeouts * caused by out-of-order TX completions. Enable workaround for * PCI-X devices that live behind these bridges. * Note, PCI-X controllers can run in PCI mode so we can't use * BGE_FLAG_PCIX flag to detect PCI-X controllers. */ if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0) sc->bge_flags |= BGE_FLAG_MBOX_REORDER; /* * Allocate the interrupt, using MSI if possible. These devices * support 8 MSI messages, but only the first one is used in * normal operation. */ rid = 0; if (pci_find_cap(sc->bge_dev, PCIY_MSI, ®) == 0) { sc->bge_msicap = reg; reg = 1; if (bge_can_use_msi(sc) && pci_alloc_msi(dev, ®) == 0) { rid = 1; sc->bge_flags |= BGE_FLAG_MSI; } } /* * All controllers except BCM5700 supports tagged status but * we use tagged status only for MSI case on BCM5717. Otherwise * MSI on BCM5717 does not work. */ #ifndef DEVICE_POLLING if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc)) sc->bge_flags |= BGE_FLAG_TAGGED_STATUS; #endif sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE)); if (sc->bge_irq == NULL) { device_printf(sc->bge_dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } bge_devinfo(sc); sc->bge_asf_mode = 0; /* No ASF if APE present. 
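Presumably the APE firmware takes over the management duties that ASF would otherwise provide, so skip the ASF handshake entirely.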
*/ if ((sc->bge_flags & BGE_FLAG_APE) == 0) { if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)) { if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & BGE_HWCFG_ASF) { sc->bge_asf_mode |= ASF_ENABLE; sc->bge_asf_mode |= ASF_STACKUP; if (BGE_IS_575X_PLUS(sc)) sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; } } } bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); if (bge_reset(sc)) { device_printf(sc->bge_dev, "chip reset failed\n"); error = ENXIO; goto fail; } bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); if (bge_chipinit(sc)) { device_printf(sc->bge_dev, "chip initialization failed\n"); error = ENXIO; goto fail; } error = bge_get_eaddr(sc, eaddr); if (error) { device_printf(sc->bge_dev, "failed to read station address\n"); error = ENXIO; goto fail; } /* 5705 limits RX return ring to 512 entries. */ if (BGE_IS_5717_PLUS(sc)) sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; else if (BGE_IS_5705_PLUS(sc)) sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; else sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; if (bge_dma_alloc(sc)) { device_printf(sc->bge_dev, "failed to allocate DMA resources\n"); error = ENXIO; goto fail; } /* Set default tuneable values. */ sc->bge_stat_ticks = BGE_TICKS_PER_SEC; sc->bge_rx_coal_ticks = 150; sc->bge_tx_coal_ticks = 150; sc->bge_rx_max_coal_bds = 10; sc->bge_tx_max_coal_bds = 10; /* Initialize checksum features to use. */ sc->bge_csum_features = BGE_CSUM_FEATURES; if (sc->bge_forced_udpcsum != 0) sc->bge_csum_features |= CSUM_UDP; /* Set up ifnet structure */ ifp = sc->bge_ifp = if_alloc(IFT_ETHER); if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setioctlfn(ifp, bge_ioctl); if_setstartfn(ifp, bge_start); if_setinitfn(ifp, bge_init); if_setgetcounterfn(ifp, bge_get_counter); if_setsendqlen(ifp, BGE_TX_RING_CNT - 1); if_setsendqready(ifp); if_sethwassist(ifp, sc->bge_csum_features); if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU); if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) { if_sethwassistbits(ifp, CSUM_TSO, 0); if_setcapabilitiesbit(ifp, IFCAP_TSO4 | IFCAP_VLAN_HWTSO, 0); } #ifdef IFCAP_VLAN_HWCSUM if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0); #endif if_setcapenable(ifp, if_getcapabilities(ifp)); #ifdef DEVICE_POLLING if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); #endif /* * 5700 B0 chips do not support checksumming correctly due * to hardware bugs. */ if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM); if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); if_sethwassist(ifp, 0); } /* * Figure out what sort of media we have by checking the * hardware config word in the first 32k of NIC internal memory, * or fall back to examining the EEPROM if necessary. * Note: on some BCM5700 cards, this value appears to be unset. * If that's the case, we have to rely on identifying the NIC * by its PCI subsystem ID, as we do below for the SysKonnect * SK-9D41. */ if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); else if ((sc->bge_flags & BGE_FLAG_EADDR) && (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, sizeof(hwcfg))) { device_printf(sc->bge_dev, "failed to read EEPROM\n"); error = ENXIO; goto fail; } hwcfg = ntohl(hwcfg); } /* The SysKonnect SK-9D41 is a 1000baseSX card. 
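Match it by PCI subsystem ID and treat it as a fiber (TBI) interface even when the hardware config word is unset.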
*/ if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { if (BGE_IS_5705_PLUS(sc)) { sc->bge_flags |= BGE_FLAG_MII_SERDES; sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; } else sc->bge_flags |= BGE_FLAG_TBI; } /* Set various PHY bug flags. */ if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || sc->bge_chipid == BGE_CHIPID_BCM5701_B0) sc->bge_phy_flags |= BGE_PHY_CRC_BUG; if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || sc->bge_chiprev == BGE_CHIPREV_5704_AX) sc->bge_phy_flags |= BGE_PHY_ADC_BUG; if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG; if (pci_get_subvendor(dev) == DELL_VENDORID) sc->bge_phy_flags |= BGE_PHY_NO_3LED; if ((BGE_IS_5705_PLUS(sc)) && sc->bge_asicrev != BGE_ASICREV_BCM5906 && sc->bge_asicrev != BGE_ASICREV_BCM5785 && sc->bge_asicrev != BGE_ASICREV_BCM57780 && !BGE_IS_5717_PLUS(sc)) { if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5787) { if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 && pci_get_device(dev) != BCOM_DEVICEID_BCM5756) sc->bge_phy_flags |= BGE_PHY_JITTER_BUG; if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M) sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM; } else sc->bge_phy_flags |= BGE_PHY_BER_BUG; } /* * Don't enable Ethernet@WireSpeed for the 5700 or the * 5705 A0 and A1 chips. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || (sc->bge_asicrev == BGE_ASICREV_BCM5705 && (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; if (sc->bge_flags & BGE_FLAG_TBI) { ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, bge_ifmedia_sts); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; } else { /* * Do transceiver setup and tell the firmware the * driver is down so we can try to get access the * probe if ASF is running. Retry a couple of times * if we get a conflict with the ASF firmware accessing * the PHY. */ trys = 0; BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); again: bge_asf_driver_up(sc); error = mii_attach(dev, &sc->bge_miibus, ifp, (ifm_change_cb_t)bge_ifmedia_upd, (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { if (trys++ < 4) { device_printf(sc->bge_dev, "Try again\n"); bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, MII_BMCR, BMCR_RESET); goto again; } device_printf(sc->bge_dev, "attaching PHYs failed\n"); goto fail; } /* * Now tell the firmware we are going up after probing the PHY */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); } /* * When using the BCM5701 in PCI-X mode, data corruption has * been observed in the first few bytes of some received packets. * Aligning the packet buffer in memory eliminates the corruption. * Unfortunately, this misaligns the packet payloads. On platforms * which do not support unaligned accesses, we will realign the * payloads by copying the received packets. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_flags & BGE_FLAG_PCIX) sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; /* * Call MI attach routine. 
*/ ether_ifattach(ifp, eaddr); /* Tell upper layer we support long frames. */ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); /* * Hookup IRQ last. */ if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) { /* Take advantage of single-shot MSI. */ CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) & ~BGE_MSIMODE_ONE_SHOT_DISABLE); sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->bge_tq); error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->bge_dev)); if (error != 0) { device_printf(dev, "could not start threads.\n"); ether_ifdetach(ifp); goto fail; } error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc, &sc->bge_intrhand); } else error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc, &sc->bge_intrhand); if (error) { ether_ifdetach(ifp); device_printf(sc->bge_dev, "couldn't set up irq\n"); goto fail; } /* Attach driver debugnet methods. */ DEBUGNET_SET(ifp, bge); fail: if (error) bge_detach(dev); return (error); } static int bge_detach(device_t dev) { struct bge_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->bge_ifp; #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (device_is_attached(dev)) { ether_ifdetach(ifp); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); callout_drain(&sc->bge_stat_ch); } if (sc->bge_tq) taskqueue_drain(sc->bge_tq, &sc->bge_intr_task); if (sc->bge_flags & BGE_FLAG_TBI) ifmedia_removeall(&sc->bge_ifmedia); else if (sc->bge_miibus != NULL) { bus_generic_detach(dev); - device_delete_child(dev, sc->bge_miibus); } bge_release_resources(sc); return (0); } static void bge_release_resources(struct bge_softc *sc) { device_t dev; dev = sc->bge_dev; if (sc->bge_tq != NULL) taskqueue_free(sc->bge_tq); if (sc->bge_intrhand != NULL) bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); if (sc->bge_irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->bge_irq), sc->bge_irq); pci_release_msi(dev); } if (sc->bge_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->bge_res), sc->bge_res); if (sc->bge_res2 != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->bge_res2), sc->bge_res2); if (sc->bge_ifp != NULL) if_free(sc->bge_ifp); bge_dma_free(sc); if (mtx_initialized(&sc->bge_mtx)) /* XXX */ BGE_LOCK_DESTROY(sc); } static int bge_reset(struct bge_softc *sc) { device_t dev; uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val; void (*write_op)(struct bge_softc *, int, int); uint16_t devctl; int i; dev = sc->bge_dev; mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { if (sc->bge_flags & BGE_FLAG_PCIE) write_op = bge_writemem_direct; else write_op = bge_writemem_ind; } else write_op = bge_writereg_ind; if (sc->bge_asicrev != BGE_ASICREV_BCM5700 && sc->bge_asicrev != BGE_ASICREV_BCM5701) { CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); for (i = 0; i < 8000; i++) { if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) break; DELAY(20); } if (i == 8000) { if (bootverbose) device_printf(dev, "NVRAM lock timedout!\n"); } } /* Take APE lock when performing reset. 
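Holding the GRC lock keeps the host driver and the APE firmware from driving a global reset at the same time.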
*/ bge_ape_lock(sc, BGE_APE_LOCK_GRC); /* Save some important PCI state. */ cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); command = pci_read_config(dev, BGE_PCI_CMD, 4); pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); /* Disable fastboot on controllers that support it. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || BGE_IS_5755_PLUS(sc)) { if (bootverbose) device_printf(dev, "Disabling fastboot\n"); CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); } /* * Write the magic number to SRAM at offset 0xB50. * When firmware finishes its initialization it will * write ~BGE_SRAM_FW_MB_MAGIC to the same location. */ bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_asicrev != BGE_ASICREV_BCM5785 && (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) { if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */ CSR_WRITE_4(sc, 0x7E2C, 0x20); } if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { /* Prevent PCIE link training during global reset */ CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); reset |= 1 << 29; } } if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { val = CSR_READ_4(sc, BGE_VCPU_STATUS); CSR_WRITE_4(sc, BGE_VCPU_STATUS, val | BGE_VCPU_STATUS_DRV_RESET); val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, val & ~BGE_VCPU_EXT_CTRL_HALT_CPU); } /* * Set GPHY Power Down Override to leave GPHY * powered up in D0 uninitialized. */ if (BGE_IS_5705_PLUS(sc) && (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0) reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; /* Issue global reset */ write_op(sc, BGE_MISC_CFG, reset); if (sc->bge_flags & BGE_FLAG_PCIE) DELAY(100 * 1000); else DELAY(1000); /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { DELAY(500000); /* wait for link training to complete */ val = pci_read_config(dev, 0xC4, 4); pci_write_config(dev, 0xC4, val | (1 << 15), 4); } devctl = pci_read_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL, 2); /* Clear enable no snoop and disable relaxed ordering. */ devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE); pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL, devctl, 2); pci_set_max_read_req(dev, sc->bge_expmrq); /* Clear error status. */ pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA, PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ, 2); } /* Reset some of the PCI state that got zapped by reset. */ pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && (sc->bge_flags & BGE_FLAG_PCIX) != 0) val |= BGE_PCISTATE_RETRY_SAME_DMA; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | BGE_PCISTATE_ALLOW_APE_SHMEM_WR | BGE_PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config(dev, BGE_PCI_PCISTATE, val, 4); pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); pci_write_config(dev, BGE_PCI_CMD, command, 4); /* * Disable PCI-X relaxed ordering to ensure status block update * comes first then packet buffer DMA. Otherwise driver may * read stale status block. 
*/ if (sc->bge_flags & BGE_FLAG_PCIX) { devctl = pci_read_config(dev, sc->bge_pcixcap + PCIXR_COMMAND, 2); devctl &= ~PCIXM_COMMAND_ERO; if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { devctl &= ~PCIXM_COMMAND_MAX_READ; devctl |= PCIXM_COMMAND_MAX_READ_2048; } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { devctl &= ~(PCIXM_COMMAND_MAX_SPLITS | PCIXM_COMMAND_MAX_READ); devctl |= PCIXM_COMMAND_MAX_READ_2048; } pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND, devctl, 2); } /* Re-enable MSI, if necessary, and enable the memory arbiter. */ if (BGE_IS_5714_FAMILY(sc)) { /* This chip disables MSI on reset. */ if (sc->bge_flags & BGE_FLAG_MSI) { val = pci_read_config(dev, sc->bge_msicap + PCIR_MSI_CTRL, 2); pci_write_config(dev, sc->bge_msicap + PCIR_MSI_CTRL, val | PCIM_MSICTRL_MSI_ENABLE, 2); val = CSR_READ_4(sc, BGE_MSI_MODE); CSR_WRITE_4(sc, BGE_MSI_MODE, val | BGE_MSIMODE_ENABLE); } val = CSR_READ_4(sc, BGE_MARB_MODE); CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); } else CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); /* Fix up byte swapping. */ CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc)); val = CSR_READ_4(sc, BGE_MAC_MODE); val = (val & ~mac_mode_mask) | mac_mode; CSR_WRITE_4(sc, BGE_MAC_MODE, val); DELAY(40); bge_ape_unlock(sc, BGE_APE_LOCK_GRC); if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { for (i = 0; i < BGE_TIMEOUT; i++) { val = CSR_READ_4(sc, BGE_VCPU_STATUS); if (val & BGE_VCPU_STATUS_INIT_DONE) break; DELAY(100); } if (i == BGE_TIMEOUT) { device_printf(dev, "reset timed out\n"); return (1); } } else { /* * Poll until we see the 1's complement of the magic number. * This indicates that the firmware initialization is complete. * We expect this to fail if no chip containing the Ethernet * address is fitted though. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); val = bge_readmem_ind(sc, BGE_SRAM_FW_MB); if (val == ~BGE_SRAM_FW_MB_MAGIC) break; } if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT) device_printf(dev, "firmware handshake timed out, found 0x%08x\n", val); /* BCM57765 A0 needs additional time before accessing. */ if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) DELAY(10 * 1000); /* XXX */ } /* * The 5704 in TBI mode apparently needs some special * adjustment to insure the SERDES drive level is set * to 1.2V. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_flags & BGE_FLAG_TBI) { val = CSR_READ_4(sc, BGE_SERDES_CFG); val = (val & ~0xFFF) | 0x880; CSR_WRITE_4(sc, BGE_SERDES_CFG, val); } /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE && !BGE_IS_5717_PLUS(sc) && sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && sc->bge_asicrev != BGE_ASICREV_BCM5785) { /* Enable Data FIFO protection. 
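Setting bit 25 of register 0x7C00 mirrors what the Broadcom Linux driver does for these PCIe parts.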
*/ val = CSR_READ_4(sc, 0x7C00); CSR_WRITE_4(sc, 0x7C00, val | (1 << 25)); } if (sc->bge_asicrev == BGE_ASICREV_BCM5720) BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); return (0); } static __inline void bge_rxreuse_std(struct bge_softc *sc, int i) { struct bge_rx_bd *r; r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; r->bge_flags = BGE_RXBDFLAG_END; r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i]; r->bge_idx = i; BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } static __inline void bge_rxreuse_jumbo(struct bge_softc *sc, int i) { struct bge_extrx_bd *r; r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0]; r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1]; r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2]; r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3]; r->bge_idx = i; BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } /* * Frame reception handling. This is called if there's a frame * on the receive return list. * * Note: we have to be able to handle two possibilities here: * 1) the frame is from the jumbo receive ring * 2) the frame is from the standard receive ring */ static int bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck) { if_t ifp; int rx_npkts = 0, stdcnt = 0, jumbocnt = 0; uint16_t rx_cons; rx_cons = sc->bge_rx_saved_considx; /* Nothing to do. */ if (rx_cons == rx_prod) return (rx_npkts); ifp = sc->bge_ifp; bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE); if (BGE_IS_JUMBO_CAPABLE(sc) && if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE); while (rx_cons != rx_prod) { struct bge_rx_bd *cur_rx; uint32_t rxidx; struct mbuf *m = NULL; uint16_t vlan_tag = 0; int have_tag = 0; #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons]; rxidx = cur_rx->bge_idx; BGE_INC(rx_cons, sc->bge_return_ring_cnt); if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING && cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { have_tag = 1; vlan_tag = cur_rx->bge_vlan_tag; } if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { jumbocnt++; m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { bge_rxreuse_jumbo(sc, rxidx); continue; } if (bge_newbuf_jumbo(sc, rxidx) != 0) { bge_rxreuse_jumbo(sc, rxidx); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } else { stdcnt++; m = sc->bge_cdata.bge_rx_std_chain[rxidx]; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { bge_rxreuse_std(sc, rxidx); continue; } if (bge_newbuf_std(sc, rxidx) != 0) { bge_rxreuse_std(sc, rxidx); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); #ifndef __NO_STRICT_ALIGNMENT /* * For architectures with strict alignment we must make sure * the payload is aligned. 
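ETHER_ALIGN is 2 bytes; shifting the received frame up by that amount puts the IP header on a 32-bit boundary, at the cost of a copy.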
*/ if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { bcopy(m->m_data, m->m_data + ETHER_ALIGN, cur_rx->bge_len); m->m_data += ETHER_ALIGN; } #endif m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; if (if_getcapenable(ifp) & IFCAP_RXCSUM) bge_rxcsum(sc, cur_rx, m); /* * If we received a packet with a vlan tag, * attach that information to the packet. */ if (have_tag) { m->m_pkthdr.ether_vtag = vlan_tag; m->m_flags |= M_VLANTAG; } if (holdlck != 0) { BGE_UNLOCK(sc); if_input(ifp, m); BGE_LOCK(sc); } else if_input(ifp, m); rx_npkts++; if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) return (rx_npkts); } bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD); if (stdcnt > 0) bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); if (jumbocnt > 0) bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_rx_saved_considx = rx_cons; bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); if (stdcnt) bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std + BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT); if (jumbocnt) bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo + BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT); #ifdef notyet /* * This register wraps very quickly under heavy packet drops. * If you need correct statistics, you can enable this check. */ if (BGE_IS_5705_PLUS(sc)) if_incierrors(ifp, CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS)); #endif return (rx_npkts); } static void bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) { if (BGE_IS_5717_PLUS(sc)) { if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } } } else { if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && m->m_pkthdr.len >= ETHER_MIN_NOPAD) { m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } } } static void bge_txeof(struct bge_softc *sc, uint16_t tx_cons) { struct bge_tx_bd *cur_tx; if_t ifp; BGE_LOCK_ASSERT(sc); /* Nothing to do. */ if (sc->bge_tx_saved_considx == tx_cons) return; ifp = sc->bge_ifp; bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE); /* * Go through our tx ring and free mbufs for those * frames that have been sent. 
*/ while (sc->bge_tx_saved_considx != tx_cons) { uint32_t idx; idx = sc->bge_tx_saved_considx; cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; if (cur_tx->bge_flags & BGE_TXBDFLAG_END) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[idx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[idx]); m_freem(sc->bge_cdata.bge_tx_chain[idx]); sc->bge_cdata.bge_tx_chain[idx] = NULL; } sc->bge_txcnt--; BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); } if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); if (sc->bge_txcnt == 0) sc->bge_timer = 0; } #ifdef DEVICE_POLLING static int bge_poll(if_t ifp, enum poll_cmd cmd, int count) { struct bge_softc *sc = if_getsoftc(ifp); uint16_t rx_prod, tx_cons; uint32_t statusword; int rx_npkts = 0; BGE_LOCK(sc); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { BGE_UNLOCK(sc); return (rx_npkts); } bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Fetch updates from the status block. */ rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; statusword = sc->bge_ldata.bge_status_block->bge_status; /* Clear the status so the next pass only sees the changes. */ sc->bge_ldata.bge_status_block->bge_status = 0; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */ if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) sc->bge_link_evt++; if (cmd == POLL_AND_CHECK_STATUS) if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI)) bge_link_upd(sc); sc->rxcycles = count; rx_npkts = bge_rxeof(sc, rx_prod, 1); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { BGE_UNLOCK(sc); return (rx_npkts); } bge_txeof(sc, tx_cons); if (!if_sendq_empty(ifp)) bge_start_locked(ifp); BGE_UNLOCK(sc); return (rx_npkts); } #endif /* DEVICE_POLLING */ static int bge_msi_intr(void *arg) { struct bge_softc *sc; sc = (struct bge_softc *)arg; /* * This interrupt is not shared and controller already * disabled further interrupt. */ taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task); return (FILTER_HANDLED); } static void bge_intr_task(void *arg, int pending) { struct bge_softc *sc; if_t ifp; uint32_t status, status_tag; uint16_t rx_prod, tx_cons; sc = (struct bge_softc *)arg; ifp = sc->bge_ifp; BGE_LOCK(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { BGE_UNLOCK(sc); return; } /* Get updated status block. */ bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Save producer/consumer indices. */ rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; status = sc->bge_ldata.bge_status_block->bge_status; status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24; /* Dirty the status flag. */ sc->bge_ldata.bge_status_block->bge_status = 0; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0) status_tag = 0; if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) bge_link_upd(sc); /* Let controller work. 
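Writing the saved status tag (or zero) back to the IRQ0 mailbox acknowledges this interrupt and allows the controller to post further status updates.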
*/ bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING && sc->bge_rx_saved_considx != rx_prod) { /* Check RX return ring producer/consumer. */ BGE_UNLOCK(sc); bge_rxeof(sc, rx_prod, 0); BGE_LOCK(sc); } if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* Check TX ring producer/consumer. */ bge_txeof(sc, tx_cons); if (!if_sendq_empty(ifp)) bge_start_locked(ifp); } BGE_UNLOCK(sc); } static void bge_intr(void *xsc) { struct bge_softc *sc; if_t ifp; uint32_t statusword; uint16_t rx_prod, tx_cons; sc = xsc; BGE_LOCK(sc); ifp = sc->bge_ifp; #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) { BGE_UNLOCK(sc); return; } #endif /* * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't * disable interrupts by writing nonzero like we used to, since with * our current organization this just gives complications and * pessimizations for re-enabling interrupts. We used to have races * instead of the necessary complications. Disabling interrupts * would just reduce the chance of a status update while we are * running (by switching to the interrupt-mode coalescence * parameters), but this chance is already very low so it is more * efficient to get another interrupt than prevent it. * * We do the ack first to ensure another interrupt if there is a * status update after the ack. We don't check for the status * changing later because it is more efficient to get another * interrupt than prevent it, not quite as above (not checking is * a smaller optimization than not toggling the interrupt enable, * since checking doesn't involve PCI accesses and toggling require * the status check). So toggling would probably be a pessimization * even with MSI. It would only be needed for using a task queue. */ bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); /* * Do the mandatory PCI flush as well as get the link status. */ statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; /* Make sure the descriptor ring indexes are coherent. */ bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; sc->bge_ldata.bge_status_block->bge_status = 0; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || statusword || sc->bge_link_evt) bge_link_upd(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* Check RX return ring producer/consumer. */ bge_rxeof(sc, rx_prod, 1); } if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* Check TX ring producer/consumer. */ bge_txeof(sc, tx_cons); } if (if_getdrvflags(ifp) & IFF_DRV_RUNNING && !if_sendq_empty(ifp)) bge_start_locked(ifp); BGE_UNLOCK(sc); } static void bge_asf_driver_up(struct bge_softc *sc) { if (sc->bge_asf_mode & ASF_STACKUP) { /* Send ASF heartbeat aprox. every 2s */ if (sc->bge_asf_count) sc->bge_asf_count --; else { sc->bge_asf_count = 2; bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_DRV_ALIVE); bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4); bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, BGE_FW_HB_TIMEOUT_SEC); CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); } } } static void bge_tick(void *xsc) { struct bge_softc *sc = xsc; struct mii_data *mii = NULL; BGE_LOCK_ASSERT(sc); /* Synchronize with possible callout reset/stop. 
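If the callout was rescheduled or stopped while we were waiting for the lock, skip this tick.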
*/ if (callout_pending(&sc->bge_stat_ch) || !callout_active(&sc->bge_stat_ch)) return; if (BGE_IS_5705_PLUS(sc)) bge_stats_update_regs(sc); else bge_stats_update(sc); /* XXX Add APE heartbeat check here? */ if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { mii = device_get_softc(sc->bge_miibus); /* * Do not touch PHY if we have link up. This could break * IPMI/ASF mode or produce extra input errors * (extra errors was reported for bcm5701 & bcm5704). */ if (!sc->bge_link) mii_tick(mii); } else { /* * Since in TBI mode auto-polling can't be used we should poll * link status manually. Here we register pending link event * and trigger interrupt. */ #ifdef DEVICE_POLLING /* In polling mode we poll link state in bge_poll(). */ if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING)) #endif { sc->bge_link_evt++; if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_flags & BGE_FLAG_5788) BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); else BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); } } bge_asf_driver_up(sc); bge_watchdog(sc); callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); } static void bge_stats_update_regs(struct bge_softc *sc) { struct bge_mac_stats *stats; uint32_t val; stats = &sc->bge_mac_stats; stats->ifHCOutOctets += CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS); stats->etherStatsCollisions += CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS); stats->outXonSent += CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT); stats->outXoffSent += CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT); stats->dot3StatsInternalMacTransmitErrors += CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS); stats->dot3StatsSingleCollisionFrames += CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL); stats->dot3StatsMultipleCollisionFrames += CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL); stats->dot3StatsDeferredTransmissions += CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED); stats->dot3StatsExcessiveCollisions += CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL); stats->dot3StatsLateCollisions += CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL); stats->ifHCOutUcastPkts += CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST); stats->ifHCOutMulticastPkts += CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST); stats->ifHCOutBroadcastPkts += CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST); stats->ifHCInOctets += CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS); stats->etherStatsFragments += CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS); stats->ifHCInUcastPkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST); stats->ifHCInMulticastPkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST); stats->ifHCInBroadcastPkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST); stats->dot3StatsFCSErrors += CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS); stats->dot3StatsAlignmentErrors += CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS); stats->xonPauseFramesReceived += CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD); stats->xoffPauseFramesReceived += CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD); stats->macControlFramesReceived += CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD); stats->xoffStateEntered += CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED); stats->dot3StatsFramesTooLong += CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG); stats->etherStatsJabbers += CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS); stats->etherStatsUndersizePkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE); stats->FramesDroppedDueToFilters += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP); stats->DmaWriteQueueFull += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL); stats->DmaWriteHighPriQueueFull += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL); stats->NoMoreRxBDs += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); /* * 
XXX * Unlike other controllers, BGE_RXLP_LOCSTAT_IFIN_DROPS * counter of BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0 * includes number of unwanted multicast frames. This comes * from silicon bug and known workaround to get rough(not * exact) counter is to enable interrupt on MBUF low water * attention. This can be accomplished by setting * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL. * However that change would generate more interrupts and * there are still possibilities of losing multiple frames * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. * Given that the workaround still would not get correct * counter I don't think it's worth to implement it. So * ignore reading the counter on controllers that have the * silicon bug. */ if (sc->bge_asicrev != BGE_ASICREV_BCM5717 && sc->bge_chipid != BGE_CHIPID_BCM5719_A0 && sc->bge_chipid != BGE_CHIPID_BCM5720_A0) stats->InputDiscards += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); stats->InputErrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); stats->RecvThresholdHit += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT); if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { /* * If controller transmitted more than BGE_NUM_RDMA_CHANNELS * frames, it's safe to disable workaround for DMA engine's * miscalculation of TXMBUF space. */ if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts + stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) { val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val &= ~BGE_RDMA_TX_LENGTH_WA_5719; else val &= ~BGE_RDMA_TX_LENGTH_WA_5720; CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); sc->bge_flags &= ~BGE_FLAG_RDMA_BUG; } } } static void bge_stats_clear_regs(struct bge_softc *sc) { CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS); CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS); CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT); CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT); CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS); CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED); CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST); CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST); CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS); CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS); CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS); CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS); CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD); CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD); CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD); CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED); CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG); CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS); CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT); } static void bge_stats_update(struct bge_softc *sc) { if_t ifp; bus_size_t stats; uint32_t cnt; /* current register value */ ifp = sc->bge_ifp; stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; #define READ_STAT(sc, stats, stat) \ CSR_READ_4(sc, stats + 
offsetof(struct bge_stats, stat)) cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions); sc->bge_tx_collisions = cnt; cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds); sc->bge_rx_nobds = cnt; cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs); sc->bge_rx_inerrs = cnt; cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards); sc->bge_rx_discards = cnt; cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards); sc->bge_tx_discards = cnt; #undef READ_STAT } /* * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, * but when such padded frames employ the bge IP/TCP checksum offload, * the hardware checksum assist gives incorrect results (possibly * from incorporating its own padding into the UDP/TCP checksum; who knows). * If we pad such runts with zeros, the onboard checksum comes out correct. */ static __inline int bge_cksum_pad(struct mbuf *m) { int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; struct mbuf *last; /* If there's only the packet-header and we can pad there, use it. */ if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && M_TRAILINGSPACE(m) >= padlen) { last = m; } else { /* * Walk packet chain to find last mbuf. We will either * pad there, or append a new mbuf and pad it. */ for (last = m; last->m_next != NULL; last = last->m_next); if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { /* Allocate new empty mbuf, pad it. Compact later. */ struct mbuf *n; MGET(n, M_NOWAIT, MT_DATA); if (n == NULL) return (ENOBUFS); n->m_len = 0; last->m_next = n; last = n; } } /* Now zero the pad area, to avoid the bge cksum-assist bug. */ memset(mtod(last, caddr_t) + last->m_len, 0, padlen); last->m_len += padlen; m->m_pkthdr.len += padlen; return (0); } static struct mbuf * bge_check_short_dma(struct mbuf *m) { struct mbuf *n; int found; /* * If device receive two back-to-back send BDs with less than * or equal to 8 total bytes then the device may hang. The two * back-to-back send BDs must in the same frame for this failure * to occur. Scan mbuf chains and see whether two back-to-back * send BDs are there. If this is the case, allocate new mbuf * and copy the frame to workaround the silicon bug. */ for (n = m, found = 0; n != NULL; n = n->m_next) { if (n->m_len < 8) { found++; if (found > 1) break; continue; } found = 0; } if (found > 1) { n = m_defrag(m, M_NOWAIT); if (n == NULL) m_freem(m); } else n = m; return (n); } static struct mbuf * bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss, uint16_t *flags) { struct ip *ip; struct tcphdr *tcp; struct mbuf *n; uint16_t hlen; uint32_t poff; if (M_WRITABLE(m) == 0) { /* Get a writable copy. 
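The IP and TCP headers are rewritten in place below, so the chain must be writable.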
*/ n = m_dup(m, M_NOWAIT); m_freem(m); if (n == NULL) return (NULL); m = n; } m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip)); if (m == NULL) return (NULL); ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header)); poff = sizeof(struct ether_header) + (ip->ip_hl << 2); m = m_pullup(m, poff + sizeof(struct tcphdr)); if (m == NULL) return (NULL); tcp = (struct tcphdr *)(mtod(m, char *) + poff); m = m_pullup(m, poff + (tcp->th_off << 2)); if (m == NULL) return (NULL); /* * It seems controller doesn't modify IP length and TCP pseudo * checksum. These checksum computed by upper stack should be 0. */ *mss = m->m_pkthdr.tso_segsz; ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header)); ip->ip_sum = 0; ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2)); /* Clear pseudo checksum computed by TCP stack. */ tcp = (struct tcphdr *)(mtod(m, char *) + poff); tcp->th_sum = 0; /* * Broadcom controllers uses different descriptor format for * TSO depending on ASIC revision. Due to TSO-capable firmware * license issue and lower performance of firmware based TSO * we only support hardware based TSO. */ /* Calculate header length, incl. TCP/IP options, in 32 bit units. */ hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2; if (sc->bge_flags & BGE_FLAG_TSO3) { /* * For BCM5717 and newer controllers, hardware based TSO * uses the 14 lower bits of the bge_mss field to store the * MSS and the upper 2 bits to store the lowest 2 bits of * the IP/TCP header length. The upper 6 bits of the header * length are stored in the bge_flags[14:10,4] field. Jumbo * frames are supported. */ *mss |= ((hlen & 0x3) << 14); *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2); } else { /* * For BCM5755 and newer controllers, hardware based TSO uses * the lower 11 bits to store the MSS and the upper 5 bits to * store the IP/TCP header length. Jumbo frames are not * supported. */ *mss |= (hlen << 11); } return (m); } /* * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data * pointers to descriptors. */ static int bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx) { bus_dma_segment_t segs[BGE_NSEG_NEW]; bus_dmamap_t map; struct bge_tx_bd *d; struct mbuf *m = *m_head; uint32_t idx = *txidx; uint16_t csum_flags, mss, vlan_tag; int nsegs, i, error; csum_flags = 0; mss = 0; vlan_tag = 0; if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 && m->m_next != NULL) { *m_head = bge_check_short_dma(m); if (*m_head == NULL) return (ENOBUFS); m = *m_head; } if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags); if (*m_head == NULL) return (ENOBUFS); csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA; } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) { if (m->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= BGE_TXBDFLAG_IP_CSUM; if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) { csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; if (m->m_pkthdr.len < ETHER_MIN_NOPAD && (error = bge_cksum_pad(m)) != 0) { m_freem(m); *m_head = NULL; return (error); } } } if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) { if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME && m->m_pkthdr.len > ETHER_MAX_LEN) csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME; if (sc->bge_forced_collapse > 0 && (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) { /* * Forcedly collapse mbuf chains to overcome hardware * limitation which only support a single outstanding * DMA read operation. 
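A bge_forced_collapse value of 1 collapses the chain into a single mbuf with m_defrag(9); larger values cap the chain at that many mbufs via m_collapse(9).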
*/ if (sc->bge_forced_collapse == 1) m = m_defrag(m, M_NOWAIT); else m = m_collapse(m, M_NOWAIT, sc->bge_forced_collapse); if (m == NULL) m = *m_head; *m_head = m; } } map = sc->bge_cdata.bge_tx_dmamap[idx]; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { m_freem(m); *m_head = NULL; return (error); } } else if (error != 0) return (error); /* Check if we have enough free send BDs. */ if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) { bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); return (ENOBUFS); } bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); if (m->m_flags & M_VLANTAG) { csum_flags |= BGE_TXBDFLAG_VLAN_TAG; vlan_tag = m->m_pkthdr.ether_vtag; } if (sc->bge_asicrev == BGE_ASICREV_BCM5762 && (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* * 5725 family of devices corrupts TSO packets when TSO DMA * buffers cross into regions which are within MSS bytes of * a 4GB boundary. If we encounter the condition, drop the * packet. */ for (i = 0; ; i++) { d = &sc->bge_ldata.bge_tx_ring[idx]; d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); d->bge_len = segs[i].ds_len; if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss < d->bge_addr.bge_addr_lo) break; d->bge_flags = csum_flags; d->bge_vlan_tag = vlan_tag; d->bge_mss = mss; if (i == nsegs - 1) break; BGE_INC(idx, BGE_TX_RING_CNT); } if (i != nsegs - 1) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); m_freem(*m_head); *m_head = NULL; return (EIO); } } else { for (i = 0; ; i++) { d = &sc->bge_ldata.bge_tx_ring[idx]; d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); d->bge_len = segs[i].ds_len; d->bge_flags = csum_flags; d->bge_vlan_tag = vlan_tag; d->bge_mss = mss; if (i == nsegs - 1) break; BGE_INC(idx, BGE_TX_RING_CNT); } } /* Mark the last segment as end of packet... */ d->bge_flags |= BGE_TXBDFLAG_END; /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. */ sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; sc->bge_cdata.bge_tx_dmamap[idx] = map; sc->bge_cdata.bge_tx_chain[idx] = m; sc->bge_txcnt += nsegs; BGE_INC(idx, BGE_TX_RING_CNT); *txidx = idx; return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static void bge_start_locked(if_t ifp) { struct bge_softc *sc; struct mbuf *m_head; uint32_t prodidx; int count; sc = if_getsoftc(ifp); BGE_LOCK_ASSERT(sc); if (!sc->bge_link || (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; prodidx = sc->bge_tx_prodidx; for (count = 0; !if_sendq_empty(ifp);) { if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) { if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } m_head = if_dequeue(ifp); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. 
*/ if (bge_encap(sc, &m_head, &prodidx)) { if (m_head == NULL) break; if_sendq_prepend(ifp, m_head); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } ++count; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ bpf_mtap_if(ifp, m_head); } if (count > 0) bge_start_tx(sc, prodidx); } static void bge_start_tx(struct bge_softc *sc, uint32_t prodidx) { bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Transmit. */ bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); sc->bge_tx_prodidx = prodidx; /* Set a timeout in case the chip goes out to lunch. */ sc->bge_timer = BGE_TX_TIMEOUT; } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static void bge_start(if_t ifp) { struct bge_softc *sc; sc = if_getsoftc(ifp); BGE_LOCK(sc); bge_start_locked(ifp); BGE_UNLOCK(sc); } static void bge_init_locked(struct bge_softc *sc) { if_t ifp; uint16_t *m; uint32_t mode; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; /* Cancel pending I/O and flush buffers. */ bge_stop(sc); bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_START); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_START); bge_sig_post_reset(sc, BGE_RESET_START); bge_chipinit(sc); /* * Init the various state machines, ring * control blocks and firmware. */ if (bge_blockinit(sc)) { device_printf(sc->bge_dev, "initialization failure\n"); return; } ifp = sc->bge_ifp; /* Specify MTU. */ CSR_WRITE_4(sc, BGE_RX_MTU, if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + (if_getcapenable(ifp) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); /* Load our MAC address. */ m = (uint16_t *)if_getlladdr(sc->bge_ifp); CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); /* Program promiscuous mode. */ bge_setpromisc(sc); /* Program multicast filter. */ bge_setmulti(sc); /* Program VLAN tag stripping. */ bge_setvlan(sc); /* Override UDP checksum offloading. */ if (sc->bge_forced_udpcsum == 0) sc->bge_csum_features &= ~CSUM_UDP; else sc->bge_csum_features |= CSUM_UDP; if (if_getcapabilities(ifp) & IFCAP_TXCSUM && if_getcapenable(ifp) & IFCAP_TXCSUM) { if_sethwassistbits(ifp, 0, (BGE_CSUM_FEATURES | CSUM_UDP)); if_sethwassistbits(ifp, sc->bge_csum_features, 0); } /* Init RX ring. */ if (bge_init_rx_ring_std(sc) != 0) { device_printf(sc->bge_dev, "no memory for std Rx buffers.\n"); bge_stop(sc); return; } /* * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's * memory to insure that the chip has in fact read the first * entry of the ring. */ if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { uint32_t v, i; for (i = 0; i < 10; i++) { DELAY(20); v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); if (v == (MCLBYTES - ETHER_ALIGN)) break; } if (i == 10) device_printf (sc->bge_dev, "5705 A0 chip failed to load RX ring\n"); } /* Init jumbo RX ring. */ if (BGE_IS_JUMBO_CAPABLE(sc) && if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) { if (bge_init_rx_ring_jumbo(sc) != 0) { device_printf(sc->bge_dev, "no memory for jumbo Rx buffers.\n"); bge_stop(sc); return; } } /* Init our RX return ring index. */ sc->bge_rx_saved_considx = 0; /* Init our RX/TX stat counters. 
*/ sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0; /* Init TX ring. */ bge_init_tx_ring(sc); /* Enable TX MAC state machine lockup fix. */ mode = CSR_READ_4(sc, BGE_TX_MODE); if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); mode |= CSR_READ_4(sc, BGE_TX_MODE) & (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); } /* Turn on transmitter. */ CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); DELAY(100); /* Turn on receiver. */ mode = CSR_READ_4(sc, BGE_RX_MODE); if (BGE_IS_5755_PLUS(sc)) mode |= BGE_RXMODE_IPV6_ENABLE; if (sc->bge_asicrev == BGE_ASICREV_BCM5762) mode |= BGE_RXMODE_IPV4_FRAG_FIX; CSR_WRITE_4(sc,BGE_RX_MODE, mode | BGE_RXMODE_ENABLE); DELAY(10); /* * Set the number of good frames to receive after RX MBUF * Low Watermark has been reached. After the RX MAC receives * this number of frames, it will drop subsequent incoming * frames until the MBUF High Watermark is reached. */ if (BGE_IS_57765_PLUS(sc)) CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1); else CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); /* Clear MAC statistics. */ if (BGE_IS_5705_PLUS(sc)) bge_stats_clear_regs(sc); /* Tell firmware we're alive. */ BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (if_getcapenable(ifp) & IFCAP_POLLING) { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); } else #endif /* Enable host interrupts. */ { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); } if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); bge_ifmedia_upd_locked(ifp); callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); } static void bge_init(void *xsc) { struct bge_softc *sc = xsc; BGE_LOCK(sc); bge_init_locked(sc); BGE_UNLOCK(sc); } /* * Set media options. */ static int bge_ifmedia_upd(if_t ifp) { struct bge_softc *sc = if_getsoftc(ifp); int res; BGE_LOCK(sc); res = bge_ifmedia_upd_locked(ifp); BGE_UNLOCK(sc); return (res); } static int bge_ifmedia_upd_locked(if_t ifp) { struct bge_softc *sc = if_getsoftc(ifp); struct mii_data *mii; struct mii_softc *miisc; struct ifmedia *ifm; BGE_LOCK_ASSERT(sc); ifm = &sc->bge_ifmedia; /* If this is a 1000baseX NIC, enable the TBI port. */ if (sc->bge_flags & BGE_FLAG_TBI) { if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: /* * The BCM5704 ASIC appears to have a special * mechanism for programming the autoneg * advertisement registers in TBI mode. 
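Once autonegotiation has completed, the advertisement is written to the SGDIG config register with the SEND bit set, held briefly, and then rewritten with SEND cleared.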
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t sgdig; sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); if (sgdig & BGE_SGDIGSTS_DONE) { CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); sgdig |= BGE_SGDIGCFG_AUTO | BGE_SGDIGCFG_PAUSE_CAP | BGE_SGDIGCFG_ASYM_PAUSE; CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig | BGE_SGDIGCFG_SEND); DELAY(5); CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); } } break; case IFM_1000_SX: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } else { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } DELAY(40); break; default: return (EINVAL); } return (0); } sc->bge_link_evt++; mii = device_get_softc(sc->bge_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); mii_mediachg(mii); /* * Force an interrupt so that we will call bge_link_upd * if needed and clear any pending link state attention. * Without this we are not getting any further interrupts * for link state changes and thus will not UP the link and * not be able to send in bge_start_locked. The only * way to get things working was to receive a packet and * get an RX intr. * bge_tick should help for fiber cards and we might not * need to do this here if BGE_FLAG_TBI is set but as * we poll for fiber anyway it should not harm. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_flags & BGE_FLAG_5788) BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); else BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); return (0); } /* * Report current media status. */ static void bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct bge_softc *sc = if_getsoftc(ifp); struct mii_data *mii; BGE_LOCK(sc); if ((if_getflags(ifp) & IFF_UP) == 0) { BGE_UNLOCK(sc); return; } if (sc->bge_flags & BGE_FLAG_TBI) { ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_TBI_PCS_SYNCHED) ifmr->ifm_status |= IFM_ACTIVE; else { ifmr->ifm_active |= IFM_NONE; BGE_UNLOCK(sc); return; } ifmr->ifm_active |= IFM_1000_SX; if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) ifmr->ifm_active |= IFM_HDX; else ifmr->ifm_active |= IFM_FDX; BGE_UNLOCK(sc); return; } mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; BGE_UNLOCK(sc); } static int bge_ioctl(if_t ifp, u_long command, caddr_t data) { struct bge_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int flags, mask, error = 0; switch (command) { case SIOCSIFMTU: if (BGE_IS_JUMBO_CAPABLE(sc) || (sc->bge_flags & BGE_FLAG_JUMBO_STD)) { if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > BGE_JUMBO_MTU) { error = EINVAL; break; } } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) { error = EINVAL; break; } BGE_LOCK(sc); if (if_getmtu(ifp) != ifr->ifr_mtu) { if_setmtu(ifp, ifr->ifr_mtu); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bge_init_locked(sc); } } BGE_UNLOCK(sc); break; case SIOCSIFFLAGS: BGE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. Similarly for ALLMULTI. 
*/ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = if_getflags(ifp) ^ sc->bge_if_flags; if (flags & IFF_PROMISC) bge_setpromisc(sc); if (flags & IFF_ALLMULTI) bge_setmulti(sc); } else bge_init_locked(sc); } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { bge_stop(sc); } } sc->bge_if_flags = if_getflags(ifp); BGE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { BGE_LOCK(sc); bge_setmulti(sc); BGE_UNLOCK(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->bge_flags & BGE_FLAG_TBI) { error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, command); } else { mii = device_get_softc(sc->bge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(bge_poll, ifp); if (error) return (error); BGE_LOCK(sc); BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); if_setcapenablebit(ifp, IFCAP_POLLING, 0); BGE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ BGE_LOCK(sc); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); if_setcapenablebit(ifp, 0, IFCAP_POLLING); BGE_UNLOCK(sc); } } #endif if ((mask & IFCAP_TXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { if_togglecapenable(ifp, IFCAP_TXCSUM); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassistbits(ifp, sc->bge_csum_features, 0); else if_sethwassistbits(ifp, 0, sc->bge_csum_features); } if ((mask & IFCAP_RXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) if_togglecapenable(ifp, IFCAP_RXCSUM); if ((mask & IFCAP_TSO4) != 0 && (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) { if_togglecapenable(ifp, IFCAP_TSO4); if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0) if_sethwassistbits(ifp, CSUM_TSO, 0); else if_sethwassistbits(ifp, 0, CSUM_TSO); } if (mask & IFCAP_VLAN_MTU) { if_togglecapenable(ifp, IFCAP_VLAN_MTU); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bge_init(sc); } if ((mask & IFCAP_VLAN_HWTSO) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO); BGE_LOCK(sc); bge_setvlan(sc); BGE_UNLOCK(sc); } #ifdef VLAN_CAPABILITIES if_vlancap(ifp); #endif break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void bge_watchdog(struct bge_softc *sc) { if_t ifp; uint32_t status; BGE_LOCK_ASSERT(sc); if (sc->bge_timer == 0 || --sc->bge_timer) return; /* If pause frames are active then don't reset the hardware. */ if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) { status = CSR_READ_4(sc, BGE_RX_STS); if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. */ CSR_WRITE_4(sc, BGE_RX_STS, status); sc->bge_timer = BGE_TX_TIMEOUT; return; } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 && (status & BGE_RXSTAT_RCVD_XON) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. 
*/ CSR_WRITE_4(sc, BGE_RX_STS, status); sc->bge_timer = BGE_TX_TIMEOUT; return; } /* * Any other condition is unexpected and the controller * should be reset. */ } ifp = sc->bge_ifp; if_printf(ifp, "watchdog timeout -- resetting\n"); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bge_init_locked(sc); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } static void bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit) { int i; BGE_CLRBIT(sc, reg, bit); for (i = 0; i < BGE_TIMEOUT; i++) { if ((CSR_READ_4(sc, reg) & bit) == 0) return; DELAY(100); } } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void bge_stop(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; callout_stop(&sc->bge_stat_ch); /* Disable host interrupts. */ BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); /* * Tell firmware we're shutting down. */ bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); /* * Disable all of the receiver blocks. */ bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); /* * Disable all of the transmit blocks. */ bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* * Shut down all of the memory managers and related * state machines. */ bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); if (!(BGE_IS_5705_PLUS(sc))) { BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); } /* Update MAC statistics. */ if (BGE_IS_5705_PLUS(sc)) bge_stats_update_regs(sc); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); /* * Keep the ASF firmware running if up. */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); else BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); /* Free the RX lists. */ bge_free_rx_ring_std(sc); /* Free jumbo RX list. */ if (BGE_IS_JUMBO_CAPABLE(sc)) bge_free_rx_ring_jumbo(sc); /* Free TX buffers. */ bge_free_tx_ring(sc); sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; /* Clear MAC's link state (PHY may still have link UP). */ if (bootverbose && sc->bge_link) if_printf(sc->bge_ifp, "link DOWN\n"); sc->bge_link = 0; if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static int bge_shutdown(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); return (0); } static int bge_suspend(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); return (0); } static int bge_resume(device_t dev) { struct bge_softc *sc; if_t ifp; sc = device_get_softc(dev); BGE_LOCK(sc); ifp = sc->bge_ifp; if (if_getflags(ifp) & IFF_UP) { bge_init_locked(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) bge_start_locked(ifp); } BGE_UNLOCK(sc); return (0); } static void bge_link_upd(struct bge_softc *sc) { struct mii_data *mii; uint32_t link, status; BGE_LOCK_ASSERT(sc); /* Clear 'pending link event' flag. */ sc->bge_link_evt = 0; /* * Process link state changes. * Grrr. The link status word in the status block does * not work correctly on the BCM5700 rev AX and BX chips, * according to all available information. Hence, we have * to enable MII interrupts in order to properly obtain * async link changes. Unfortunately, this also means that * we have to read the MAC status register to detect link * changes, thereby adding an additional register access to * the interrupt handler. * * XXX: perhaps link state detection procedure used for * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_MI_INTERRUPT) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } /* Clear the interrupt. */ CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, BRGPHY_MII_ISR); bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, BRGPHY_MII_IMR, BRGPHY_INTRS); } return; } if (sc->bge_flags & BGE_FLAG_TBI) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { if (!sc->bge_link) { sc->bge_link++; if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_TBI_SEND_CFGS); DELAY(40); } CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_UP); } } else if (sc->bge_link) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN); } } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { /* * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit * in status word always set. Workaround this bug by reading * PHY link status directly. */ link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 
1 : 0; if (link != sc->bge_link || sc->bge_asicrev == BGE_ASICREV_BCM5700) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } } } else { /* * For controllers that call mii_tick, we have to poll * link status. */ mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); bge_miibus_statchg(sc->bge_dev); } /* Disable MAC attention when link is up. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); } static void bge_add_sysctls(struct bge_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; ctx = device_get_sysctl_ctx(sc->bge_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev)); #ifdef BGE_REGISTER_DEBUG SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, bge_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, bge_sysctl_reg_read, "I", "MAC Register Read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, bge_sysctl_ape_read, "I", "APE Register Read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, bge_sysctl_mem_read, "I", "Memory Read"); #endif /* * A common design characteristic for many Broadcom client controllers * is that they only support a single outstanding DMA read operation * on the PCIe bus. This means that it will take twice as long to fetch * a TX frame that is split into header and payload buffers as it does * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For * these controllers, coalescing buffers to reduce the number of memory * reads is an effective way to get maximum performance (about 940 Mbps). * Without collapsing TX buffers the maximum TCP bulk transfer * performance is about 850 Mbps. However, forcing mbuf coalescing * consumes a lot of CPU cycles, so leave it off by default. */ sc->bge_forced_collapse = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse", CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0, "Number of fragmented TX buffers of a frame allowed before " "forced collapsing"); sc->bge_msi = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi", CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI"); /* * It seems all Broadcom controllers have a bug that can generate UDP * datagrams with checksum value 0 when TX UDP checksum offloading is * enabled. Generating a UDP checksum value of 0 is an RFC 768 violation. * Even though the probability of generating such UDP datagrams is * low, I don't want FreeBSD boxes to inject such datagrams * into the network, so UDP checksum offloading is disabled by default. Users * can still override this behavior by setting the sysctl variable * dev.bge.0.forced_udpcsum.
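* For example: sysctl dev.bge.0.forced_udpcsum=1; since the OID is created * with CTLFLAG_RWTUN below, it may also be set as a tunable at boot.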
*/ sc->bge_forced_udpcsum = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum", CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0, "Enable UDP checksum offloading even if controller can " "generate UDP checksum value 0"); if (BGE_IS_5705_PLUS(sc)) bge_add_sysctl_stats_regs(sc, ctx, children); else bge_add_sysctl_stats(sc, ctx, children); } #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \ SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, \ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, \ offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", desc) static void bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent) { struct sysctl_oid *tree; struct sysctl_oid_list *children, *schildren; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE Statistics"); schildren = children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters", children, COSFramesDroppedDueToFilters, "FramesDroppedDueToFilters"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full", children, nicDmaWriteQueueFull, "DmaWriteQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full", children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors", children, nicNoMoreRxBDs, "NoMoreRxBDs"); BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames", children, ifInDiscards, "InputDiscards"); BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors, "InputErrors"); BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit", children, nicRecvThresholdHit, "RecvThresholdHit"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full", children, nicDmaReadQueueFull, "DmaReadQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full", children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full", children, nicSendDataCompQueueFull, "SendDataCompQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index", children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", children, nicRingStatusUpdate, "RingStatusUpdate"); BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", children, nicInterrupts, "Interrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", children, nicAvoidedInterrupts, "AvoidedInterrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", children, nicSendThresholdHit, "SendThresholdHit"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", children, rxstats.ifHCInOctets, "ifHCInOctets"); BGE_SYSCTL_STAT(sc, ctx, "Fragments", children, rxstats.etherStatsFragments, "Fragments"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", children, rxstats.ifHCInUcastPkts, "UnicastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", children, rxstats.dot3StatsFCSErrors, "FCSErrors"); BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received", children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", children, rxstats.xoffPauseFramesReceived, "xoffPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, 
"MAC Control Frames Received", children, rxstats.macControlFramesReceived, "ControlFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", children, rxstats.xoffStateEntered, "xoffStateEntered"); BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); BGE_SYSCTL_STAT(sc, ctx, "Jabbers", children, rxstats.etherStatsJabbers, "Jabbers"); BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", children, rxstats.inRangeLengthError, "inRangeLengthError"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", children, rxstats.outRangeLengthError, "outRangeLengthError"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE TX Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets", children, txstats.ifHCOutOctets, "ifHCOutOctets"); BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", children, txstats.etherStatsCollisions, "Collisions"); BGE_SYSCTL_STAT(sc, ctx, "XON Sent", children, txstats.outXonSent, "XonSent"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", children, txstats.outXoffSent, "XoffSent"); BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", children, txstats.flowControlDone, "flowControlDone"); BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", children, txstats.dot3StatsInternalMacTransmitErrors, "InternalMacTransmitErrors"); BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", children, txstats.dot3StatsSingleCollisionFrames, "SingleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", children, txstats.dot3StatsMultipleCollisionFrames, "MultipleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", children, txstats.dot3StatsDeferredTransmissions, "DeferredTransmissions"); BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", children, txstats.dot3StatsExcessiveCollisions, "ExcessiveCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", children, txstats.dot3StatsLateCollisions, "LateCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", children, txstats.ifHCOutUcastPkts, "UnicastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", children, txstats.dot3StatsCarrierSenseErrors, "CarrierSenseErrors"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", children, txstats.ifOutDiscards, "Discards"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors", children, txstats.ifOutErrors, "Errors"); } #undef BGE_SYSCTL_STAT #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) static void bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent) { struct sysctl_oid *tree; struct sysctl_oid_list *child, *schild; struct bge_mac_stats *stats; stats = &sc->bge_mac_stats; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE Statistics"); schild = child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters", &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull", &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull", 
&stats->DmaWriteHighPriQueueFull, "NIC DMA Write High Priority Queue Full"); BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs", &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards", &stats->InputDiscards, "Discarded Input Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors", &stats->InputErrors, "Input Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit", &stats->RecvThresholdHit, "NIC Recv Threshold Hit"); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics"); child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets", &stats->ifHCInOctets, "Inbound Octets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments", &stats->etherStatsFragments, "Fragments"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts", &stats->ifHCInUcastPkts, "Inbound Unicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts", &stats->ifHCInMulticastPkts, "Inbound Multicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts", &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors", &stats->dot3StatsFCSErrors, "FCS Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors", &stats->dot3StatsAlignmentErrors, "Alignment Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived", &stats->xonPauseFramesReceived, "XON Pause Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived", &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived", &stats->macControlFramesReceived, "MAC Control Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered", &stats->xoffStateEntered, "XOFF State Entered"); BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong", &stats->dot3StatsFramesTooLong, "Frames Too Long"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers", &stats->etherStatsJabbers, "Jabbers"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts", &stats->etherStatsUndersizePkts, "Undersized Packets"); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE TX Statistics"); child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets", &stats->ifHCOutOctets, "Outbound Octets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions", &stats->etherStatsCollisions, "TX Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent", &stats->outXonSent, "XON Sent"); BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent", &stats->outXoffSent, "XOFF Sent"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors", &stats->dot3StatsInternalMacTransmitErrors, "Internal MAC TX Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames", &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames", &stats->dot3StatsMultipleCollisionFrames, "Multiple Collision Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions", &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions", &stats->dot3StatsExcessiveCollisions, "Excessive Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions", &stats->dot3StatsLateCollisions, "Late Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts", &stats->ifHCOutUcastPkts, "Outbound Unicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts", &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets"); 
BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts", &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets"); } #undef BGE_SYSCTL_STAT_ADD64 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint32_t result; int offset; sc = (struct bge_softc *)arg1; offset = arg2; result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset + offsetof(bge_hostaddr, bge_addr_lo)); return (sysctl_handle_int(oidp, &result, 0, req)); } #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint16_t *sbdata; int error, result, sbsz; int i, j; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result == 1) { sc = (struct bge_softc *)arg1; if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) sbsz = BGE_STATUS_BLK_SZ; else sbsz = 32; sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; printf("Status Block:\n"); BGE_LOCK(sc); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = 0x0; i < sbsz / sizeof(uint16_t); ) { printf("%06x:", i); for (j = 0; j < 8; j++) printf(" %04x", sbdata[i++]); printf("\n"); } printf("Registers:\n"); for (i = 0x800; i < 0xA00; ) { printf("%06x:", i); for (j = 0; j < 8; j++) { printf(" %08x", CSR_READ_4(sc, i)); i += 4; } printf("\n"); } BGE_UNLOCK(sc); printf("Hardware Flags:\n"); if (BGE_IS_5717_PLUS(sc)) printf(" - 5717 Plus\n"); if (BGE_IS_5755_PLUS(sc)) printf(" - 5755 Plus\n"); if (BGE_IS_575X_PLUS(sc)) printf(" - 575X Plus\n"); if (BGE_IS_5705_PLUS(sc)) printf(" - 5705 Plus\n"); if (BGE_IS_5714_FAMILY(sc)) printf(" - 5714 Family\n"); if (BGE_IS_5700_FAMILY(sc)) printf(" - 5700 Family\n"); if (sc->bge_flags & BGE_FLAG_JUMBO) printf(" - Supports Jumbo Frames\n"); if (sc->bge_flags & BGE_FLAG_PCIX) printf(" - PCI-X Bus\n"); if (sc->bge_flags & BGE_FLAG_PCIE) printf(" - PCI Express Bus\n"); if (sc->bge_phy_flags & BGE_PHY_NO_3LED) printf(" - No 3 LEDs\n"); if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) printf(" - RX Alignment Bug\n"); } return (error); } static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = CSR_READ_4(sc, result); printf("reg 0x%06X = 0x%08X\n", result, val); } return (error); } static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = APE_READ_4(sc, result); printf("reg 0x%06X = 0x%08X\n", result, val); } return (error); } static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = bge_readmem_ind(sc, result); printf("mem 0x%06X = 0x%08X\n", result, val); } return (error); } #endif static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) { return (1); } static int bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) { uint32_t mac_addr; mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB); if 
((mac_addr >> 16) == 0x484b) { ether_addr[0] = (uint8_t)(mac_addr >> 8); ether_addr[1] = (uint8_t)mac_addr; mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); ether_addr[2] = (uint8_t)(mac_addr >> 24); ether_addr[3] = (uint8_t)(mac_addr >> 16); ether_addr[4] = (uint8_t)(mac_addr >> 8); ether_addr[5] = (uint8_t)mac_addr; return (0); } return (1); } static int bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) { int mac_offset = BGE_EE_MAC_OFFSET; if (sc->bge_asicrev == BGE_ASICREV_BCM5906) mac_offset = BGE_EE_MAC_OFFSET_5906; return (bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) return (1); return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) { static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { /* NOTE: Order is critical */ bge_get_eaddr_fw, bge_get_eaddr_mem, bge_get_eaddr_nvram, bge_get_eaddr_eeprom, NULL }; const bge_eaddr_fcn_t *func; for (func = bge_eaddr_funcs; *func != NULL; ++func) { if ((*func)(sc, eaddr) == 0) break; } return (*func == NULL ? ENXIO : 0); } static uint64_t bge_get_counter(if_t ifp, ift_counter cnt) { struct bge_softc *sc; struct bge_mac_stats *stats; sc = if_getsoftc(ifp); if (!BGE_IS_5705_PLUS(sc)) return (if_get_counter_default(ifp, cnt)); stats = &sc->bge_mac_stats; switch (cnt) { case IFCOUNTER_IERRORS: return (stats->NoMoreRxBDs + stats->InputDiscards + stats->InputErrors); case IFCOUNTER_COLLISIONS: return (stats->etherStatsCollisions); default: return (if_get_counter_default(ifp, cnt)); } } #ifdef DEBUGNET static void bge_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize) { struct bge_softc *sc; sc = if_getsoftc(ifp); BGE_LOCK(sc); /* * There is only one logical receive ring, but it is backed * by two actual rings, for cluster- and jumbo-sized mbufs. * Debugnet expects only one size, so if jumbo is in use, * this says we have two rings of jumbo mbufs, but that's * only a little wasteful. 
*/ *nrxr = 2; *ncl = DEBUGNET_MAX_IN_FLIGHT; if ((sc->bge_flags & BGE_FLAG_JUMBO_STD) != 0 && (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) *clsize = MJUM9BYTES; else *clsize = MCLBYTES; BGE_UNLOCK(sc); } static void bge_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused) { } static int bge_debugnet_transmit(if_t ifp, struct mbuf *m) { struct bge_softc *sc; uint32_t prodidx; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (1); prodidx = sc->bge_tx_prodidx; error = bge_encap(sc, &m, &prodidx); if (error == 0) bge_start_tx(sc, prodidx); return (error); } static int bge_debugnet_poll(if_t ifp, int count) { struct bge_softc *sc; uint32_t rx_prod, tx_cons; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (1); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); (void)bge_rxeof(sc, rx_prod, 0); bge_txeof(sc, tx_cons); return (0); } #endif /* DEBUGNET */ diff --git a/sys/dev/bhnd/bhndb/bhndb.c b/sys/dev/bhnd/bhndb/bhndb.c index 511beae0cc25..f9d56a9b9226 100644 --- a/sys/dev/bhnd/bhndb/bhndb.c +++ b/sys/dev/bhnd/bhndb/bhndb.c @@ -1,2301 +1,2297 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Abstract BHND Bridge Device Driver * * Provides generic support for bridging from a parent bus (such as PCI) to * a BHND-compatible bus (e.g. bcma or siba). 
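* * The bridged bus's register space is made accessible through the host's * static and dynamic register windows; see bhndb_init_region_cfg() and * bhndb_try_activate_resource() below for how windows are assigned and * activated.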
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_chipc_if.h" #include "bhnd_nvram_if.h" #include "bhndbvar.h" #include "bhndb_bus_if.h" #include "bhndb_hwdata.h" #include "bhndb_private.h" /* Debugging flags */ static u_long bhndb_debug = 0; TUNABLE_ULONG("hw.bhndb.debug", &bhndb_debug); enum { BHNDB_DEBUG_PRIO = 1 << 0, }; #define BHNDB_DEBUG(_type) (BHNDB_DEBUG_ ## _type & bhndb_debug) static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw); static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *r, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table); static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw); bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child); static struct rman *bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type); static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size); static int bhndb_activate_static_region( struct bhndb_softc *sc, struct bhndb_region *region, device_t child, struct resource *r); static int bhndb_try_activate_resource( struct bhndb_softc *sc, device_t child, struct resource *r, bool *indirect); static inline struct bhndb_dw_alloc *bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore); /** * Default bhndb(4) implementation of DEVICE_PROBE(). * * This function provides the default bhndb implementation of DEVICE_PROBE(), * and is compatible with bhndb(4) bridges attached via bhndb_attach_bridge(). */ int bhndb_generic_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static void bhndb_probe_nomatch(device_t dev, device_t child) { const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> (no driver attached)\n", name); } static int bhndb_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int bhndb_child_location(device_t dev, device_t child, struct sbuf *sb) { struct bhndb_softc *sc; sc = device_get_softc(dev); sbuf_printf(sb, "base=0x%llx", (unsigned long long) sc->chipid.enum_addr); return (0); } /** * Return true if @p cores matches the @p hw specification. * * @param sc BHNDB device state. * @param cores A device table to match against. * @param ncores The number of cores in @p cores. * @param hw The hardware description to be matched against. 
*/ static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw) { for (u_int i = 0; i < hw->num_hw_reqs; i++) { const struct bhnd_core_match *match; bool found; match = &hw->hw_reqs[i]; found = false; for (u_int d = 0; d < ncores; d++) { struct bhnd_core_info *core = &cores[d]; if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; if (!bhnd_core_matches(core, match)) continue; found = true; break; } if (!found) return (false); } return (true); } /** * Initialize the region maps and priority configuration in @p br using * the priority @p table and the set of cores enumerated by @p erom. * * @param sc The bhndb device state. * @param br The resource state to be configured. * @param erom EROM parser used to enumerate @p cores. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param table Hardware priority table to be used to determine the relative * priorities of per-core port resources. */ static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *br, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table) { const struct bhndb_hw_priority *hp; bhnd_addr_t addr; bhnd_size_t size; size_t prio_low, prio_default, prio_high; int error; /* The number of port regions per priority band that must be accessible * via dynamic register windows */ prio_low = 0; prio_default = 0; prio_high = 0; /* * Register bridge regions covering all statically mapped ports. */ for (u_int i = 0; i < ncores; i++) { const struct bhndb_regwin *regw; struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); for (regw = br->cfg->register_windows; regw->win_type != BHNDB_REGWIN_T_INVALID; regw++) { const struct bhndb_port_priority *pp; uint32_t alloc_flags; /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) continue; /* Skip non-matching cores. */ if (!bhndb_regwin_match_core(regw, core)) continue; /* Fetch the base address of the mapped port */ error = bhnd_erom_lookup_core_addr(erom, &md, regw->d.core.port_type, regw->d.core.port, regw->d.core.region, NULL, &addr, &size); if (error) { /* Skip non-applicable register windows */ if (error == ENOENT) continue; return (error); } /* * Apply the register window's region offset, if any. */ if (regw->d.core.offset > size) { device_printf(sc->dev, "invalid register " "window offset %#jx for region %#jx+%#jx\n", regw->d.core.offset, addr, size); return (EINVAL); } addr += regw->d.core.offset; /* * Always defer to the register window's size. * * If the port size is smaller than the window size, * this ensures that we fully utilize register windows * larger than the referenced port. * * If the port size is larger than the window size, this * ensures that we do not directly map the allocations * within the region to a too-small window. */ size = regw->win_size; /* Fetch allocation flags from the corresponding port * priority entry, if any */ pp = bhndb_hw_priorty_find_port(table, core, regw->d.core.port_type, regw->d.core.port, regw->d.core.region); if (pp != NULL) { alloc_flags = pp->alloc_flags; } else { alloc_flags = 0; } /* * Add to the bus region list. * * The window priority for a statically mapped region is * always HIGH. 
*/ error = bhndb_add_resource_region(br, addr, size, BHNDB_PRIORITY_HIGH, alloc_flags, regw); if (error) return (error); } } /* * Perform priority accounting and register bridge regions for all * ports defined in the priority table */ for (u_int i = 0; i < ncores; i++) { struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); /* * Skip priority accounting for cores that ... */ /* ... do not require bridge resources */ if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; /* ... do not have a priority table entry */ hp = bhndb_hw_priority_find_core(table, core); if (hp == NULL) continue; /* ... are explicitly disabled in the priority table. */ if (hp->priority == BHNDB_PRIORITY_NONE) continue; /* Determine the number of dynamic windows required and * register their bus_region entries. */ for (u_int i = 0; i < hp->num_ports; i++) { const struct bhndb_port_priority *pp; pp = &hp->ports[i]; /* Fetch the address+size of the mapped port. */ error = bhnd_erom_lookup_core_addr(erom, &md, pp->type, pp->port, pp->region, NULL, &addr, &size); if (error) { /* Skip ports not defined on this device */ if (error == ENOENT) continue; return (error); } /* Skip ports with an existing static mapping */ if (bhndb_has_static_region_mapping(br, addr, size)) continue; /* Define a dynamic region for this port */ error = bhndb_add_resource_region(br, addr, size, pp->priority, pp->alloc_flags, NULL); if (error) return (error); /* Update port mapping counts */ switch (pp->priority) { case BHNDB_PRIORITY_NONE: break; case BHNDB_PRIORITY_LOW: prio_low++; break; case BHNDB_PRIORITY_DEFAULT: prio_default++; break; case BHNDB_PRIORITY_HIGH: prio_high++; break; } } } /* Determine the minimum priority at which we'll allocate direct * register windows from our dynamic pool */ size_t prio_total = prio_low + prio_default + prio_high; if (prio_total <= br->dwa_count) { /* low+default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_LOW; } else if (prio_default + prio_high <= br->dwa_count) { /* default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_DEFAULT; } else { /* high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_HIGH; } if (BHNDB_DEBUG(PRIO)) { struct bhndb_region *region; const char *direct_msg, *type_msg; bhndb_priority_t prio, prio_min; uint32_t flags; prio_min = br->min_prio; device_printf(sc->dev, "min_prio: %d\n", prio_min); STAILQ_FOREACH(region, &br->bus_regions, link) { prio = region->priority; flags = region->alloc_flags; direct_msg = prio >= prio_min ? "direct" : "indirect"; type_msg = region->static_regwin ? "static" : "dynamic"; device_printf(sc->dev, "region 0x%llx+0x%llx priority " "%u %s/%s", (unsigned long long) region->addr, (unsigned long long) region->size, region->priority, direct_msg, type_msg); if (flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) printf(" [overcommit]\n"); else printf("\n"); } } return (0); } /** * Find a hardware specification for @p dev. * * @param sc The bhndb device state. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param[out] hw On success, the hardware specification matched * with @p dev. * * @retval 0 success * @retval non-zero if an error occurs fetching device info for comparison. */ static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw) { const struct bhndb_hw *next, *hw_table; /* Search for the first matching hardware config.
*/ hw_table = BHNDB_BUS_GET_HARDWARE_TABLE(sc->parent_dev, sc->dev); for (next = hw_table; next->hw_reqs != NULL; next++) { if (!bhndb_hw_matches(sc, cores, ncores, next)) continue; /* Found */ *hw = next; return (0); } return (ENOENT); } /** * Helper function that must be called by subclass bhndb(4) drivers * when implementing DEVICE_ATTACH() before calling any bhnd(4) or bhndb(4) * APIs on the bridge device. * * This function will add a bridged bhnd(4) child device with a device order of * BHND_PROBE_BUS. Any subclass bhndb(4) driver may use the BHND_PROBE_* * priority bands to add additional devices that will be attached in * their preferred order relative to the bridged bhnd(4) bus. * * @param dev The bridge device to attach. * @param cid The bridged device's chip identification. * @param cores The bridged device's core table. * @param ncores The number of cores in @p cores. * @param bridge_core Core info for the bhnd(4) core serving as the host * bridge. * @param erom_class An erom parser class that may be used to parse * the bridged device's device enumeration table. */ int bhndb_attach(device_t dev, struct bhnd_chipid *cid, struct bhnd_core_info *cores, u_int ncores, struct bhnd_core_info *bridge_core, bhnd_erom_class_t *erom_class) { struct bhndb_devinfo *dinfo; struct bhndb_softc *sc; const struct bhndb_hw *hw; const struct bhndb_hwcfg *hwcfg; const struct bhndb_hw_priority *hwprio; struct bhnd_erom_io *eio; bhnd_erom_t *erom; int error; sc = device_get_softc(dev); sc->dev = dev; sc->parent_dev = device_get_parent(dev); sc->bridge_core = *bridge_core; sc->chipid = *cid; if ((error = bhnd_service_registry_init(&sc->services))) return (error); BHNDB_LOCK_INIT(sc); erom = NULL; /* Find a matching bridge hardware configuration */ if ((error = bhndb_find_hwspec(sc, cores, ncores, &hw))) { device_printf(sc->dev, "unable to identify device, " " using generic bridge resource definitions\n"); hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, dev); hw = NULL; } else { hwcfg = hw->cfg; } if (hw != NULL && (bootverbose || BHNDB_DEBUG(PRIO))) { device_printf(sc->dev, "%s resource configuration\n", hw->name); } /* Allocate bridge resource state using the discovered hardware * configuration */ sc->bus_res = bhndb_alloc_resources(sc->dev, sc->parent_dev, hwcfg); if (sc->bus_res == NULL) { device_printf(sc->dev, "failed to allocate bridge resource " "state\n"); error = ENOMEM; goto failed; } /* Add our bridged bus device */ sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", DEVICE_UNIT_ANY); if (sc->bus_dev == NULL) { error = ENXIO; goto failed; } dinfo = device_get_ivars(sc->bus_dev); dinfo->addrspace = BHNDB_ADDRSPACE_BRIDGED; /* We can now use bhndb to perform bridging of SYS_RES_MEMORY resources; * we use this to instantiate an erom parser instance */ eio = bhnd_erom_iores_new(sc->bus_dev, 0); if ((erom = bhnd_erom_alloc(erom_class, cid, eio)) == NULL) { bhnd_erom_io_fini(eio); error = ENXIO; goto failed; } /* Populate our resource priority configuration */ hwprio = BHNDB_BUS_GET_HARDWARE_PRIO(sc->parent_dev, sc->dev); error = bhndb_init_region_cfg(sc, erom, sc->bus_res, cores, ncores, hwprio); if (error) { device_printf(sc->dev, "failed to initialize resource " "priority configuration: %d\n", error); goto failed; } /* Free our erom instance */ bhnd_erom_free(erom); erom = NULL; return (0); failed: BHNDB_LOCK_DESTROY(sc); if (sc->bus_res != NULL) bhndb_free_resources(sc->bus_res); if (erom != NULL) bhnd_erom_free(erom); bhnd_service_registry_fini(&sc->services); return (error); } /** * 
Default bhndb(4) implementation of DEVICE_DETACH(). * * This function detaches any child devices, and if successful, releases all * resources held by the bridge device. */ int bhndb_generic_detach(device_t dev) { struct bhndb_softc *sc; int error; sc = device_get_softc(dev); /* Detach children */ if ((error = bus_generic_detach(dev))) return (error); - /* Delete children */ - if ((error = device_delete_children(dev))) - return (error); - /* Clean up our service registry */ if ((error = bhnd_service_registry_fini(&sc->services))) return (error); /* Clean up our driver state. */ bhndb_free_resources(sc->bus_res); BHNDB_LOCK_DESTROY(sc); return (0); } /** * Default bhndb(4) implementation of DEVICE_SUSPEND(). * * This function calls bus_generic_suspend() (or implements equivalent * behavior). */ int bhndb_generic_suspend(device_t dev) { return (bus_generic_suspend(dev)); } /** * Default bhndb(4) implementation of DEVICE_RESUME(). * * This function calls bus_generic_resume() (or implements equivalent * behavior). */ int bhndb_generic_resume(device_t dev) { struct bhndb_softc *sc; struct bhndb_resources *bus_res; struct bhndb_dw_alloc *dwa; int error; sc = device_get_softc(dev); bus_res = sc->bus_res; /* Guarantee that all in-use dynamic register windows are mapped to * their previously configured target address. */ BHNDB_LOCK(sc); error = 0; for (size_t i = 0; i < bus_res->dwa_count; i++) { dwa = &bus_res->dw_alloc[i]; /* Skip regions that were not previously used */ if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0) continue; /* Otherwise, ensure the register window is correct before * any children attempt MMIO */ error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); if (error) break; } BHNDB_UNLOCK(sc); /* Error restoring hardware state; children cannot be safely resumed */ if (error) { device_printf(dev, "Unable to restore hardware configuration; " "cannot resume: %d\n", error); return (error); } return (bus_generic_resume(dev)); } /** * Default implementation of BHNDB_SUSPEND_RESOURCE. */ static void bhndb_suspend_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; struct bhndb_dw_alloc *dwa; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return; BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa == NULL) { BHNDB_UNLOCK(sc); return; } if (BHNDB_DEBUG(PRIO)) device_printf(child, "suspend resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); /* Release the resource's window reference */ bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } /** * Default implementation of BHNDB_RESUME_RESOURCE. */ static int bhndb_resume_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return (0); /* Inactive resources don't require reallocation of bridge resources */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (0); if (BHNDB_DEBUG(PRIO)) device_printf(child, "resume resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); return (bhndb_try_activate_resource(sc, rman_get_device(r), r, NULL)); } /** * Default bhndb(4) implementation of BUS_READ_IVAR(). */ static int bhndb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } /** * Default bhndb(4) implementation of BUS_WRITE_IVAR(). 
*/ static int bhndb_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * Return the address space for the given @p child device. */ bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child) { struct bhndb_devinfo *dinfo; device_t imd_dev; /* Find the directly attached parent of the requesting device */ imd_dev = child; while (imd_dev != NULL && device_get_parent(imd_dev) != sc->dev) imd_dev = device_get_parent(imd_dev); if (imd_dev == NULL) panic("bhndb address space request for non-child device %s\n", device_get_nameunit(child)); dinfo = device_get_ivars(imd_dev); return (dinfo->addrspace); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The bhndb device state. * @param child The requesting child. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) */ static struct rman * bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type) { switch (bhndb_get_addrspace(sc, child)) { case BHNDB_ADDRSPACE_NATIVE: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->ht_mem_rman); case SYS_RES_IRQ: return (NULL); default: return (NULL); } case BHNDB_ADDRSPACE_BRIDGED: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->br_mem_rman); case SYS_RES_IRQ: return (&sc->bus_res->br_irq_rman); default: return (NULL); } } /* Quieten gcc */ return (NULL); } /** * Default implementation of BUS_ADD_CHILD() */ static device_t bhndb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhndb_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct bhndb_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } /** * Default implementation of BUS_CHILD_DELETED(). */ static void bhndb_child_deleted(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { resource_list_free(&dinfo->resources); free(dinfo, M_BHND); } device_set_ivars(child, NULL); } /** * Default implementation of BHNDB_GET_CHIPID(). */ static const struct bhnd_chipid * bhndb_get_chipid(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->chipid); } /** * Default implementation of BHNDB_IS_CORE_DISABLED(). */ static bool bhndb_is_core_disabled(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Try to defer to the bhndb bus parent */ if (BHNDB_BUS_IS_CORE_DISABLED(sc->parent_dev, dev, core)) return (true); /* Otherwise, we treat bridge-capable cores as unpopulated if they're * not the configured host bridge */ if (BHND_DEVCLASS_SUPPORTS_HOSTB(bhnd_core_class(core))) return (!bhnd_cores_equal(core, &sc->bridge_core)); /* Assume the core is populated */ return (false); } /** * Default bhndb(4) implementation of BHNDB_GET_HOSTB_CORE(). * * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged * bhnd(4) devices. */ static int bhndb_get_hostb_core(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc = device_get_softc(dev); *core = sc->bridge_core; return (0); } /** * Default bhndb(4) implementation of BHND_BUS_GET_SERVICE_REGISTRY(). 
*/ static struct bhnd_service_registry * bhndb_get_service_registry(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->services); } /** * Default bhndb(4) implementation of BUS_ALLOC_RESOURCE(). */ static struct resource * bhndb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { /* Delegate to our parent device's bus; the requested * resource type isn't handled locally. */ return (BUS_ALLOC_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, start, end, count, flags)); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Validate resource addresses */ if (start > end || count > ((end - start) + 1)) return (NULL); /* Make our reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); rman_set_type(rv, type); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } /** * Default bhndb(4) implementation of BUS_RELEASE_RESOURCE(). */ static int bhndb_release_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_softc *sc; struct resource_list_entry *rle = NULL; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ if (bhndb_get_rman(sc, child, rman_get_type(r)) == NULL) { return (BUS_RELEASE_RESOURCE(device_get_parent(sc->parent_dev), child, r)); } /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, r); if (error) return (error); } /* Check for resource list entry */ if (!passthrough) rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), rman_get_type(r), rman_get_rid(r)); if ((error = rman_release_resource(r))) return (error); /* Clean resource list entry */ if (rle != NULL) rle->res = NULL; return (0); } /** * Default bhndb(4) implementation of BUS_ADJUST_RESOURCE(). 
*/ static int bhndb_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct bhndb_softc *sc; struct rman *rm; rman_res_t mstart, mend; int error; sc = device_get_softc(dev); error = 0; /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ rm = bhndb_get_rman(sc, child, rman_get_type(r)); if (rm == NULL) { return (BUS_ADJUST_RESOURCE(device_get_parent(sc->parent_dev), child, r, start, end)); } /* Verify basic constraints */ if (end <= start) return (EINVAL); if (!rman_is_region_manager(r, rm)) return (ENXIO); BHNDB_LOCK(sc); /* If not active, allow any range permitted by the resource manager */ if (!(rman_get_flags(r) & RF_ACTIVE)) goto done; /* Otherwise, the range is limited by the bridged resource mapping */ error = bhndb_find_resource_limits(sc->bus_res, r, &mstart, &mend); if (error) goto done; if (start < mstart || end > mend) { error = EINVAL; goto done; } /* Fall through */ done: if (!error) error = rman_adjust_resource(r, start, end); BHNDB_UNLOCK(sc); return (error); } /** * Initialize child resource @p r with a virtual address, tag, and handle * copied from @p parent, adjusted to contain only the range defined by * @p offset and @p size. * * @param r The resource to be initialized. * @param parent The parent bus resource that fully contains the subregion. * @param offset The subregion offset within @p parent. * @param size The subregion size. */ static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size) { bus_space_handle_t bh, child_bh; bus_space_tag_t bt; uintptr_t vaddr; int error; /* Fetch the parent resource's real bus values */ vaddr = (uintptr_t) rman_get_virtual(parent); bt = rman_get_bustag(parent); bh = rman_get_bushandle(parent); /* Configure child resource with window-adjusted real bus values */ vaddr += offset; error = bus_space_subregion(bt, bh, offset, size, &child_bh); if (error) return (error); rman_set_virtual(r, (void *) vaddr); rman_set_bustag(r, bt); rman_set_bushandle(r, child_bh); return (0); } /** * Attempt activation of a fixed register window mapping for @p child. * * @param sc BHNDB device state. * @param region The static region definition capable of mapping @p r. * @param child A child requesting resource activation. * @param r Resource to be activated. * * @retval 0 if @p r was activated successfully * @retval ENOENT if no fixed register window was found. * @retval non-zero if @p r could not be activated. */ static int bhndb_activate_static_region(struct bhndb_softc *sc, struct bhndb_region *region, device_t child, struct resource *r) { struct resource *bridge_res; const struct bhndb_regwin *win; bhnd_size_t parent_offset; rman_res_t r_start, r_size; int error; win = region->static_regwin; KASSERT(win != NULL && BHNDB_REGWIN_T_IS_STATIC(win->win_type), ("can't activate non-static region")); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Find the corresponding bridge resource */ bridge_res = bhndb_host_resource_for_regwin(sc->bus_res->res, win); if (bridge_res == NULL) return (ENXIO); /* Calculate subregion offset within the parent resource */ parent_offset = r_start - region->addr; parent_offset += win->win_offset; /* Configure resource with its real bus values.
*/ error = bhndb_init_child_resource(r, bridge_res, parent_offset, r_size); if (error) return (error); /* Mark active */ if ((error = rman_activate_resource(r))) return (error); return (0); } /** * Attempt to allocate/retain a dynamic register window for @p r, returning * the retained window. * * @param sc The bhndb driver state. * @param r The resource for which a window will be retained. */ static struct bhndb_dw_alloc * bhndb_retain_dynamic_window(struct bhndb_softc *sc, struct resource *r) { struct bhndb_dw_alloc *dwa; rman_res_t r_start, r_size; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Look for an existing dynamic window we can reference */ dwa = bhndb_dw_find_mapping(sc->bus_res, r_start, r_size); if (dwa != NULL) { if (bhndb_dw_retain(sc->bus_res, dwa, r) == 0) return (dwa); return (NULL); } /* Otherwise, try to reserve a free window */ dwa = bhndb_dw_next_free(sc->bus_res); if (dwa == NULL) { /* No free windows */ return (NULL); } /* Window must be large enough to map the entire resource */ if (dwa->win->win_size < rman_get_size(r)) return (NULL); /* Set the window target */ error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, rman_get_start(r), rman_get_size(r)); if (error) { device_printf(sc->dev, "dynamic window initialization " "for 0x%llx-0x%llx failed: %d\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, error); return (NULL); } /* Add our reservation */ if (bhndb_dw_retain(sc->bus_res, dwa, r)) return (NULL); return (dwa); } /** * Activate a resource using any viable static or dynamic register window. * * @param sc The bhndb driver state. * @param child The child holding ownership of @p r. * @param r The resource to be activated * @param[out] indirect On error and if not NULL, will be set to 'true' if * the caller should instead use an indirect resource mapping. * * @retval 0 success * @retval non-zero activation failed. 
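 *
 * Callers that can fall back to indirect register access typically follow the
 * pattern used by bhndb_activate_bhnd_resource() below: on failure with
 * *indirect set, the resource is left inactive and all register access is
 * routed through the bridge's indirect I/O methods, e.g.:
 *
 *	bool indirect;
 *
 *	error = bhndb_try_activate_resource(sc, child, r->res, &indirect);
 *	if (error != 0 && indirect) {
 *		r->direct = false;
 *		error = 0;
 *	}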
*/ static int bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child, struct resource *r, bool *indirect) { struct bhndb_region *region; struct bhndb_dw_alloc *dwa; bhndb_priority_t dw_priority; rman_res_t r_start, r_size; rman_res_t parent_offset; int error, type; BHNDB_LOCK_ASSERT(sc, MA_NOTOWNED); if (indirect != NULL) *indirect = false; type = rman_get_type(r); switch (type) { case SYS_RES_IRQ: /* IRQ resources are always directly mapped */ return (rman_activate_resource(r)); case SYS_RES_MEMORY: /* Handled below */ break; default: device_printf(sc->dev, "unsupported resource type %d\n", type); return (ENXIO); } /* Only MMIO resources can be mapped via register windows */ KASSERT(type == SYS_RES_MEMORY, ("invalid type: %d", type)); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Activate native addrspace resources using the host address space */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_NATIVE) { struct resource *parent; /* Find the bridge resource referenced by the child */ parent = bhndb_host_resource_for_range(sc->bus_res->res, type, r_start, r_size); if (parent == NULL) { device_printf(sc->dev, "host resource not found " "for 0x%llx-0x%llx\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (ENOENT); } /* Initialize child resource with the real bus values */ error = bhndb_init_child_resource(r, parent, r_start - rman_get_start(parent), r_size); if (error) return (error); /* Try to activate child resource */ return (rman_activate_resource(r)); } /* Default to low priority */ dw_priority = BHNDB_PRIORITY_LOW; /* Look for a bus region matching the resource's address range */ region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) dw_priority = region->priority; /* Prefer static mappings over consuming a dynamic windows. */ if (region && region->static_regwin) { error = bhndb_activate_static_region(sc, region, child, r); if (error) device_printf(sc->dev, "static window allocation " "for 0x%llx-0x%llx failed\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1); return (error); } /* A dynamic window will be required; is this resource high enough * priority to be reserved a dynamic window? */ if (dw_priority < sc->bus_res->min_prio) { if (indirect) *indirect = true; return (ENOMEM); } /* Find and retain a usable window */ BHNDB_LOCK(sc); { dwa = bhndb_retain_dynamic_window(sc, r); } BHNDB_UNLOCK(sc); if (dwa == NULL) { if (indirect) *indirect = true; return (ENOMEM); } /* Configure resource with its real bus values. */ parent_offset = dwa->win->win_offset; parent_offset += r_start - dwa->target; error = bhndb_init_child_resource(r, dwa->parent_res, parent_offset, dwa->win->win_size); if (error) goto failed; /* Mark active */ if ((error = rman_activate_resource(r))) goto failed; return (0); failed: /* Release our region allocation. */ BHNDB_LOCK(sc); bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); return (error); } /** * Default bhndb(4) implementation of BUS_ACTIVATE_RESOURCE(). */ static int bhndb_activate_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_softc *sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. 
*/ if (bhndb_get_rman(sc, child, rman_get_type(r)) == NULL) { return (BUS_ACTIVATE_RESOURCE(device_get_parent(sc->parent_dev), child, r)); } return (bhndb_try_activate_resource(sc, child, r, NULL)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_dw_alloc *dwa; struct bhndb_softc *sc; struct rman *rm; int error, type; sc = device_get_softc(dev); type = rman_get_type(r); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_DEACTIVATE_RESOURCE( device_get_parent(sc->parent_dev), child, r)); } /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); switch (type) { case SYS_RES_IRQ: /* No bridge-level state to be freed */ return (0); case SYS_RES_MEMORY: /* Free any dynamic window allocation. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa != NULL) bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } return (0); default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /** * Default bhndb(4) implementation of BUS_GET_RESOURCE_LIST(). */ static struct resource_list * bhndb_get_resource_list(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /** * Default bhndb(4) implementation of BHND_BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_NATIVE children, all resources are activated as direct * resources via BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_BRIDGED children, the resource priority is determined, * and if possible, the resource is activated as a direct resource. For example, * depending on resource priority and bridge resource availability, this * function will attempt to activate SYS_RES_MEMORY resources using either a * static register window, a dynamic register window, or it will configure @p r * as an indirect resource -- in that order. */ static int bhndb_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct bhndb_softc *sc; struct bhndb_region *region; bhndb_priority_t r_prio; rman_res_t r_start, r_size; int error; bool indirect; KASSERT(!r->direct, ("direct flag set on inactive resource")); KASSERT(!(rman_get_flags(r->res) & RF_ACTIVE), ("RF_ACTIVE set on inactive resource")); sc = device_get_softc(dev); /* Delegate directly to BUS_ACTIVATE_RESOURCE() if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { error = BUS_ACTIVATE_RESOURCE(dev, child, r->res); if (error == 0) r->direct = true; return (error); } r_start = rman_get_start(r->res); r_size = rman_get_size(r->res); /* Determine the resource priority of bridged resources, and skip direct * allocation if the priority is too low. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { switch (type) { case SYS_RES_IRQ: /* IRQ resources are always direct */ break; case SYS_RES_MEMORY: region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) r_prio = region->priority; else r_prio = BHNDB_PRIORITY_NONE; /* If less than the minimum dynamic window priority, * this resource should always be indirect. 
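 * Indirect resources remain inactive; their register reads and writes are
 * serviced by the bhndb_bus_read_*()/bhndb_bus_write_*() methods below,
 * which borrow (or, on heavily overcommitted early chipsets, steal) a
 * dynamic register window for the duration of each access.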
*/ if (r_prio < sc->bus_res->min_prio) return (0); break; default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /* Attempt direct activation */ error = bhndb_try_activate_resource(sc, child, r->res, &indirect); if (!error) { r->direct = true; } else if (indirect) { /* The request was valid, but no viable register window is * available; indirection must be employed. */ error = 0; r->direct = false; } if (BHNDB_DEBUG(PRIO) && bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { device_printf(child, "activated 0x%llx-0x%llx as %s " "resource\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, r->direct ? "direct" : "indirect"); } return (error); } /** * Default bhndb(4) implementation of BHND_BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; /* Indirect resources don't require activation */ if (!r->direct) return (0); KASSERT(rman_get_flags(r->res) & RF_ACTIVE, ("RF_ACTIVE not set on direct resource")); /* Perform deactivation */ error = BUS_DEACTIVATE_RESOURCE(dev, child, r->res); if (!error) r->direct = false; return (error); } /** * Find the best available bridge resource allocation record capable of handling * bus I/O requests of @p size at @p addr. * * In order of preference, this function will either: * * - Configure and return a free allocation record * - Return an existing allocation record mapping the requested space, or * - Steal, configure, and return an in-use allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] borrowed Set to true if the allocation record was borrowed to * fulfill this request; the borrowed record maps the target address range, * and must not be modified. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static struct bhndb_dw_alloc * bhndb_io_resource_get_window(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bool *borrowed, bool *stolen, bus_addr_t *restore) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; struct bhndb_region *region; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; *borrowed = false; *stolen = false; /* Try to fetch a free window */ if ((dwa = bhndb_dw_next_free(br)) != NULL) return (dwa); /* Search for an existing dynamic mapping of this address range. * Static regions are not searched, as a statically mapped * region would never be allocated as an indirect resource. 
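 * A window may only be borrowed when its current target range fully contains
 * the requested range; for example, a hypothetical 4KB window retargeted to
 * 0x18004000 can satisfy an 8-byte access at 0x18004ff8, but not one at
 * 0x18004ffc, which would run past the end of the window.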
*/ for (size_t i = 0; i < br->dwa_count; i++) { const struct bhndb_regwin *win; dwa = &br->dw_alloc[i]; win = dwa->win; KASSERT(win->win_type == BHNDB_REGWIN_T_DYN, ("invalid register window type")); /* Verify the range */ if (addr < dwa->target) continue; if (addr + size > dwa->target + win->win_size) continue; /* Found */ *borrowed = true; return (dwa); } /* Try to steal a window; this should only be required on very early * PCI_V0 (BCM4318, etc) Wi-Fi chipsets */ region = bhndb_find_resource_region(br, addr, size); if (region == NULL) return (NULL); if ((region->alloc_flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) == 0) return (NULL); /* Steal a window. This acquires our backing spinlock, disabling * interrupts; the spinlock will be released by * bhndb_dw_return_stolen() */ if ((dwa = bhndb_dw_steal(br, restore)) != NULL) { *stolen = true; return (dwa); } panic("register windows exhausted attempting to map 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } /** * Return a borrowed reference to a bridge resource allocation record capable * of handling bus I/O requests of @p size at @p addr. * * This will either return a reference to an existing allocation record mapping * the requested space, or will configure and return a free allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] offset The offset within the returned resource at which * to perform the I/O request. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static inline struct bhndb_dw_alloc * bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore) { struct bhndb_dw_alloc *dwa; bool borrowed; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); dwa = bhndb_io_resource_get_window(sc, addr, size, &borrowed, stolen, restore); /* Adjust the window if the I/O request won't fit in the current * target range. 
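 * Once a suitable window is selected, the I/O offset within the host resource
 * is (addr - dwa->target) + dwa->win->win_offset; for example, with a
 * hypothetical window at host-resource offset 0x1000 whose target has been
 * set to 0x18100000, an access to backplane address 0x18100208 is issued at
 * host offset 0x1000 + 0x208 = 0x1208.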
*/ if (addr < dwa->target || addr > dwa->target + dwa->win->win_size || (dwa->target + dwa->win->win_size) - addr < size) { /* Cannot modify target of borrowed windows */ if (borrowed) { panic("borrowed register window does not map expected " "range 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, addr, size); if (error) { panic("failed to set register window target mapping " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } } /* Calculate the offset and return */ *offset = (addr - dwa->target) + dwa->win->win_offset; return (dwa); } /* * BHND_BUS_(READ|WRITE_* implementations */ /* bhndb_bus_(read|write) common implementation */ #define BHNDB_IO_COMMON_SETUP(_io_size) \ struct bhndb_softc *sc; \ struct bhndb_dw_alloc *dwa; \ struct resource *io_res; \ bus_size_t io_offset; \ bus_addr_t restore; \ bool stolen; \ \ sc = device_get_softc(dev); \ \ BHNDB_LOCK(sc); \ dwa = bhndb_io_resource(sc, rman_get_start(r->res) + \ offset, _io_size, &io_offset, &stolen, &restore); \ io_res = dwa->parent_res; \ \ KASSERT(!r->direct, \ ("bhnd_bus slow path used for direct resource")); \ \ KASSERT(rman_get_flags(io_res) & RF_ACTIVE, \ ("i/o resource is not active")); #define BHNDB_IO_COMMON_TEARDOWN() \ if (stolen) { \ bhndb_dw_return_stolen(sc->dev, sc->bus_res, \ dwa, restore); \ } \ BHNDB_UNLOCK(sc); /* Defines a bhndb_bus_read_* method implementation */ #define BHNDB_IO_READ(_type, _name) \ static _type \ bhndb_bus_read_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ _type v; \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ v = bus_read_ ## _name (io_res, io_offset); \ BHNDB_IO_COMMON_TEARDOWN(); \ \ return (v); \ } /* Defines a bhndb_bus_write_* method implementation */ #define BHNDB_IO_WRITE(_type, _name) \ static void \ bhndb_bus_write_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ bus_write_ ## _name (io_res, io_offset, value); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a bhndb_bus_(read|write|set)_(multi|region)_* method */ #define BHNDB_IO_MISC(_type, _ptr, _op, _size) \ static void \ bhndb_bus_ ## _op ## _ ## _size (device_t dev, \ device_t child, struct bhnd_resource *r, bus_size_t offset, \ _type _ptr datap, bus_size_t count) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type) * count); \ bus_ ## _op ## _ ## _size (io_res, io_offset, \ datap, count); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a complete set of read/write methods */ #define BHNDB_IO_METHODS(_type, _size) \ BHNDB_IO_READ(_type, _size) \ BHNDB_IO_WRITE(_type, _size) \ \ BHNDB_IO_READ(_type, stream_ ## _size) \ BHNDB_IO_WRITE(_type, stream_ ## _size) \ \ BHNDB_IO_MISC(_type, *, read_multi, _size) \ BHNDB_IO_MISC(_type, *, write_multi, _size) \ \ BHNDB_IO_MISC(_type, *, read_multi_stream, _size) \ BHNDB_IO_MISC(_type, *, write_multi_stream, _size) \ \ BHNDB_IO_MISC(_type, , set_multi, _size) \ BHNDB_IO_MISC(_type, , set_region, _size) \ BHNDB_IO_MISC(_type, *, read_region, _size) \ BHNDB_IO_MISC(_type, *, write_region, _size) \ \ BHNDB_IO_MISC(_type, *, read_region_stream, _size) \ BHNDB_IO_MISC(_type, *, write_region_stream, _size) BHNDB_IO_METHODS(uint8_t, 1); BHNDB_IO_METHODS(uint16_t, 2); BHNDB_IO_METHODS(uint32_t, 4); /** * Default bhndb(4) implementation of BHND_BUS_BARRIER(). 
*/ static void bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHNDB_IO_COMMON_SETUP(length); bus_barrier(io_res, io_offset + offset, length, flags); BHNDB_IO_COMMON_TEARDOWN(); } /** * Default bhndb(4) implementation of BHND_MAP_INTR(). */ static int bhndb_bhnd_map_intr(device_t dev, device_t child, u_int intr, rman_res_t *irq) { u_int ivec; int error; /* Is the intr valid? */ if (intr >= bhnd_get_intr_count(child)) return (EINVAL); /* Fetch the interrupt vector */ if ((error = bhnd_get_intr_ivec(child, intr, &ivec))) return (error); /* Map directly to the actual backplane interrupt vector */ *irq = ivec; return (0); } /** * Default bhndb(4) implementation of BHND_UNMAP_INTR(). */ static void bhndb_bhnd_unmap_intr(device_t dev, device_t child, rman_res_t irq) { /* No state to clean up */ } /** * Default bhndb(4) implementation of BUS_SETUP_INTR(). */ static int bhndb_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { struct bhndb_softc *sc; struct bhndb_intr_isrc *isrc; struct bhndb_intr_handler *ih; int error; sc = device_get_softc(dev); /* Fetch the isrc */ if ((error = BHNDB_MAP_INTR_ISRC(dev, r, &isrc))) { device_printf(dev, "failed to fetch isrc: %d\n", error); return (error); } /* Allocate new ihandler entry */ ih = bhndb_alloc_intr_handler(child, r, isrc); if (ih == NULL) return (ENOMEM); /* Perform actual interrupt setup via the host isrc */ error = bus_setup_intr(isrc->is_owner, isrc->is_res, flags, filter, handler, arg, &ih->ih_cookiep); if (error) { bhndb_free_intr_handler(ih); return (error); } /* Add to our interrupt handler list */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Provide the interrupt handler entry as our cookiep value */ *cookiep = ih; return (0); } /** * Default bhndb(4) implementation of BUS_TEARDOWN_INTR(). */ static int bhndb_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookiep) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; int error; sc = device_get_softc(dev); /* Locate and claim ownership of the interrupt handler entry */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookiep); if (ih == NULL) { panic("%s requested teardown of invalid cookiep %p", device_get_nameunit(child), cookiep); } bhndb_deregister_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Perform actual interrupt teardown via the host isrc */ isrc = ih->ih_isrc; error = bus_teardown_intr(isrc->is_owner, isrc->is_res, ih->ih_cookiep); if (error) { /* If teardown fails, we need to reinsert the handler entry * to allow later teardown */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); return (error); } /* Free the entry */ bhndb_free_intr_handler(ih); return (0); } /** * Default bhndb(4) implementation of BUS_BIND_INTR(). 
*/ static int bhndb_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; sc = device_get_softc(dev); isrc = NULL; /* Fetch the isrc corresponding to the child IRQ resource */ BHNDB_LOCK(sc); STAILQ_FOREACH(ih, &sc->bus_res->bus_intrs, ih_link) { if (ih->ih_res == irq) { isrc = ih->ih_isrc; break; } } BHNDB_UNLOCK(sc); if (isrc == NULL) { panic("%s requested bind of invalid irq %#jx-%#jx", device_get_nameunit(child), rman_get_start(irq), rman_get_end(irq)); } /* Perform actual bind via the host isrc */ return (bus_bind_intr(isrc->is_owner, isrc->is_res, cpu)); } /** * Default bhndb(4) implementation of BUS_DESCRIBE_INTR(). */ static int bhndb_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; sc = device_get_softc(dev); /* Locate the interrupt handler entry; the caller owns the handler * reference, and thus our entry is guaranteed to remain valid after * we drop out lock below. */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookie); if (ih == NULL) { panic("%s requested invalid cookiep %p", device_get_nameunit(child), cookie); } isrc = ih->ih_isrc; BHNDB_UNLOCK(sc); /* Perform the actual request via the host isrc */ return (BUS_DESCRIBE_INTR(device_get_parent(isrc->is_owner), isrc->is_owner, isrc->is_res, ih->ih_cookiep, descr)); } /** * Default bhndb(4) implementation of BUS_CONFIG_INTR(). */ static int bhndb_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BUS_REMAP_INTR(). */ static int bhndb_remap_intr(device_t dev, device_t child, u_int irq) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BHND_BUS_GET_DMA_TRANSLATION(). */ static inline int bhndb_get_dma_translation(device_t dev, device_t child, u_int width, uint32_t flags, bus_dma_tag_t *dmat, struct bhnd_dma_translation *translation) { struct bhndb_softc *sc; const struct bhndb_hwcfg *hwcfg; const struct bhnd_dma_translation *match; bus_dma_tag_t match_dmat; bhnd_addr_t addr_mask, match_addr_mask; sc = device_get_softc(dev); hwcfg = sc->bus_res->cfg; /* Is DMA supported? */ if (sc->bus_res->res->dma_tags == NULL) return (ENODEV); /* Is the requested width supported? */ if (width > BHND_DMA_ADDR_32BIT) { /* Backplane must support 64-bit addressing */ if (!(sc->chipid.chip_caps & BHND_CAP_BP64)) width = BHND_DMA_ADDR_32BIT; } /* Find the best matching descriptor for the requested width */ addr_mask = BHND_DMA_ADDR_BITMASK(width); match = NULL; match_addr_mask = 0x0; match_dmat = NULL; for (size_t i = 0; i < sc->bus_res->res->num_dma_tags; i++) { const struct bhnd_dma_translation *dwin; bhnd_addr_t masked; dwin = &hwcfg->dma_translations[i]; /* The base address must be device addressable */ if ((dwin->base_addr & addr_mask) != dwin->base_addr) continue; /* The flags must match */ if ((dwin->flags & flags) != flags) continue; /* The window must cover at least part of our addressable * range */ masked = (dwin->addr_mask | dwin->addrext_mask) & addr_mask; if (masked == 0) continue; /* Is this a better match? 
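 * (For example, for a hypothetical 32-bit request addr_mask is 0xffffffff;
 * a translation with addr_mask 0x3fffffff and addrext_mask 0xc0000000 yields
 * masked = 0xffffffff and is preferred over one whose combined mask covers
 * only the low 30 bits of the device-addressable range.)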
*/ if (match == NULL || masked > match_addr_mask) { match = dwin; match_addr_mask = masked; match_dmat = sc->bus_res->res->dma_tags[i]; } } if (match == NULL || match_addr_mask == 0) return (ENOENT); if (dmat != NULL) *dmat = match_dmat; if (translation != NULL) *translation = *match; return (0); } /** * Default bhndb(4) implementation of BUS_GET_DMA_TAG(). */ static bus_dma_tag_t bhndb_get_dma_tag(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); /* * A bridge may have multiple DMA translation descriptors, each with * their own incompatible restrictions; drivers should in general call * BHND_BUS_GET_DMA_TRANSLATION() to fetch both the best available DMA * translation, and its corresponding DMA tag. * * Child drivers that do not use BHND_BUS_GET_DMA_TRANSLATION() are * responsible for creating their own restricted DMA tag; since we * cannot do this for them in BUS_GET_DMA_TAG(), we simply return the * bridge parent's DMA tag directly; */ return (bus_get_dma_tag(sc->parent_dev)); } static device_method_t bhndb_methods[] = { /* Device interface */ \ DEVMETHOD(device_probe, bhndb_generic_probe), DEVMETHOD(device_detach, bhndb_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bhndb_generic_suspend), DEVMETHOD(device_resume, bhndb_generic_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, bhndb_probe_nomatch), DEVMETHOD(bus_print_child, bhndb_print_child), DEVMETHOD(bus_child_location, bhndb_child_location), DEVMETHOD(bus_add_child, bhndb_add_child), DEVMETHOD(bus_child_deleted, bhndb_child_deleted), DEVMETHOD(bus_alloc_resource, bhndb_alloc_resource), DEVMETHOD(bus_release_resource, bhndb_release_resource), DEVMETHOD(bus_activate_resource, bhndb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhndb_deactivate_resource), DEVMETHOD(bus_setup_intr, bhndb_setup_intr), DEVMETHOD(bus_teardown_intr, bhndb_teardown_intr), DEVMETHOD(bus_config_intr, bhndb_config_intr), DEVMETHOD(bus_bind_intr, bhndb_bind_intr), DEVMETHOD(bus_describe_intr, bhndb_describe_intr), DEVMETHOD(bus_remap_intr, bhndb_remap_intr), DEVMETHOD(bus_get_dma_tag, bhndb_get_dma_tag), DEVMETHOD(bus_adjust_resource, bhndb_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource_list, bhndb_get_resource_list), DEVMETHOD(bus_read_ivar, bhndb_read_ivar), DEVMETHOD(bus_write_ivar, bhndb_write_ivar), /* BHNDB interface */ DEVMETHOD(bhndb_get_chipid, bhndb_get_chipid), DEVMETHOD(bhndb_is_core_disabled, bhndb_is_core_disabled), DEVMETHOD(bhndb_get_hostb_core, bhndb_get_hostb_core), DEVMETHOD(bhndb_suspend_resource, bhndb_suspend_resource), DEVMETHOD(bhndb_resume_resource, bhndb_resume_resource), /* BHND interface */ DEVMETHOD(bhnd_bus_get_chipid, bhndb_get_chipid), DEVMETHOD(bhnd_bus_activate_resource, bhndb_activate_bhnd_resource), DEVMETHOD(bhnd_bus_deactivate_resource, bhndb_deactivate_bhnd_resource), DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var), DEVMETHOD(bhnd_bus_map_intr, bhndb_bhnd_map_intr), DEVMETHOD(bhnd_bus_unmap_intr, bhndb_bhnd_unmap_intr), DEVMETHOD(bhnd_bus_get_dma_translation, bhndb_get_dma_translation), DEVMETHOD(bhnd_bus_get_service_registry,bhndb_get_service_registry), DEVMETHOD(bhnd_bus_register_provider, bhnd_bus_generic_sr_register_provider), DEVMETHOD(bhnd_bus_deregister_provider, bhnd_bus_generic_sr_deregister_provider), 
DEVMETHOD(bhnd_bus_retain_provider, bhnd_bus_generic_sr_retain_provider), DEVMETHOD(bhnd_bus_release_provider, bhnd_bus_generic_sr_release_provider), DEVMETHOD(bhnd_bus_read_1, bhndb_bus_read_1), DEVMETHOD(bhnd_bus_read_2, bhndb_bus_read_2), DEVMETHOD(bhnd_bus_read_4, bhndb_bus_read_4), DEVMETHOD(bhnd_bus_write_1, bhndb_bus_write_1), DEVMETHOD(bhnd_bus_write_2, bhndb_bus_write_2), DEVMETHOD(bhnd_bus_write_4, bhndb_bus_write_4), DEVMETHOD(bhnd_bus_read_stream_1, bhndb_bus_read_stream_1), DEVMETHOD(bhnd_bus_read_stream_2, bhndb_bus_read_stream_2), DEVMETHOD(bhnd_bus_read_stream_4, bhndb_bus_read_stream_4), DEVMETHOD(bhnd_bus_write_stream_1, bhndb_bus_write_stream_1), DEVMETHOD(bhnd_bus_write_stream_2, bhndb_bus_write_stream_2), DEVMETHOD(bhnd_bus_write_stream_4, bhndb_bus_write_stream_4), DEVMETHOD(bhnd_bus_read_multi_1, bhndb_bus_read_multi_1), DEVMETHOD(bhnd_bus_read_multi_2, bhndb_bus_read_multi_2), DEVMETHOD(bhnd_bus_read_multi_4, bhndb_bus_read_multi_4), DEVMETHOD(bhnd_bus_write_multi_1, bhndb_bus_write_multi_1), DEVMETHOD(bhnd_bus_write_multi_2, bhndb_bus_write_multi_2), DEVMETHOD(bhnd_bus_write_multi_4, bhndb_bus_write_multi_4), DEVMETHOD(bhnd_bus_read_multi_stream_1, bhndb_bus_read_multi_stream_1), DEVMETHOD(bhnd_bus_read_multi_stream_2, bhndb_bus_read_multi_stream_2), DEVMETHOD(bhnd_bus_read_multi_stream_4, bhndb_bus_read_multi_stream_4), DEVMETHOD(bhnd_bus_write_multi_stream_1,bhndb_bus_write_multi_stream_1), DEVMETHOD(bhnd_bus_write_multi_stream_2,bhndb_bus_write_multi_stream_2), DEVMETHOD(bhnd_bus_write_multi_stream_4,bhndb_bus_write_multi_stream_4), DEVMETHOD(bhnd_bus_set_multi_1, bhndb_bus_set_multi_1), DEVMETHOD(bhnd_bus_set_multi_2, bhndb_bus_set_multi_2), DEVMETHOD(bhnd_bus_set_multi_4, bhndb_bus_set_multi_4), DEVMETHOD(bhnd_bus_set_region_1, bhndb_bus_set_region_1), DEVMETHOD(bhnd_bus_set_region_2, bhndb_bus_set_region_2), DEVMETHOD(bhnd_bus_set_region_4, bhndb_bus_set_region_4), DEVMETHOD(bhnd_bus_read_region_1, bhndb_bus_read_region_1), DEVMETHOD(bhnd_bus_read_region_2, bhndb_bus_read_region_2), DEVMETHOD(bhnd_bus_read_region_4, bhndb_bus_read_region_4), DEVMETHOD(bhnd_bus_write_region_1, bhndb_bus_write_region_1), DEVMETHOD(bhnd_bus_write_region_2, bhndb_bus_write_region_2), DEVMETHOD(bhnd_bus_write_region_4, bhndb_bus_write_region_4), DEVMETHOD(bhnd_bus_read_region_stream_1,bhndb_bus_read_region_stream_1), DEVMETHOD(bhnd_bus_read_region_stream_2,bhndb_bus_read_region_stream_2), DEVMETHOD(bhnd_bus_read_region_stream_4,bhndb_bus_read_region_stream_4), DEVMETHOD(bhnd_bus_write_region_stream_1,bhndb_bus_write_region_stream_1), DEVMETHOD(bhnd_bus_write_region_stream_2,bhndb_bus_write_region_stream_2), DEVMETHOD(bhnd_bus_write_region_stream_4,bhndb_bus_write_region_stream_4), DEVMETHOD(bhnd_bus_barrier, bhndb_bus_barrier), DEVMETHOD_END }; DEFINE_CLASS_0(bhndb, bhndb_driver, bhndb_methods, sizeof(struct bhndb_softc)); MODULE_VERSION(bhndb, 1); MODULE_DEPEND(bhndb, bhnd, 1, 1, 1); diff --git a/sys/dev/bhnd/cores/chipc/chipc.c b/sys/dev/bhnd/cores/chipc/chipc.c index 09ca4d8884e6..24697a8f0b17 100644 --- a/sys/dev/bhnd/cores/chipc/chipc.c +++ b/sys/dev/bhnd/cores/chipc/chipc.c @@ -1,1392 +1,1389 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2016 Michael Zhilin * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Broadcom ChipCommon driver. * * With the exception of some very early chipsets, the ChipCommon core * has been included in all HND SoCs and chipsets based on the siba(4) * and bcma(4) interconnects, providing a common interface to chipset * identification, bus enumeration, UARTs, clocks, watchdog interrupts, * GPIO, flash, etc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "chipcreg.h" #include "chipcvar.h" #include "chipc_private.h" static struct bhnd_device_quirk chipc_quirks[]; /* Supported device identifiers */ static const struct bhnd_device chipc_devices[] = { BHND_DEVICE(BCM, CC, NULL, chipc_quirks), BHND_DEVICE(BCM, 4706_CC, NULL, chipc_quirks), BHND_DEVICE_END }; /* Device quirks table */ static struct bhnd_device_quirk chipc_quirks[] = { /* HND OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (12), CHIPC_QUIRK_OTP_HND), /* (?) */ BHND_CORE_QUIRK (HWREV_EQ (17), CHIPC_QUIRK_OTP_HND), /* BCM4311 */ BHND_CORE_QUIRK (HWREV_EQ (22), CHIPC_QUIRK_OTP_HND), /* BCM4312 */ /* IPX OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (21), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(23), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(32), CHIPC_QUIRK_SUPPORTS_SPROM), BHND_CORE_QUIRK (HWREV_GTE(35), CHIPC_QUIRK_SUPPORTS_CAP_EXT), BHND_CORE_QUIRK (HWREV_GTE(49), CHIPC_QUIRK_IPX_OTPL_SIZE), /* 4706 variant quirks */ BHND_CORE_QUIRK (HWREV_EQ (38), CHIPC_QUIRK_4706_NFLASH), /* BCM5357? 
*/ BHND_CHIP_QUIRK (4706, HWREV_ANY, CHIPC_QUIRK_4706_NFLASH), /* 4331 quirks*/ BHND_CHIP_QUIRK (4331, HWREV_ANY, CHIPC_QUIRK_4331_EXTPA_MUX_SPROM), BHND_PKG_QUIRK (4331, TN, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TNA0, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TT, CHIPC_QUIRK_4331_EXTPA2_MUX_SPROM), /* 4360 quirks */ BHND_CHIP_QUIRK (4352, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43460, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43462, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43602, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_DEVICE_QUIRK_END }; static int chipc_add_children(struct chipc_softc *sc); static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps); static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps); static bool chipc_should_enable_muxed_sprom( struct chipc_softc *sc); static int chipc_enable_otp_power(struct chipc_softc *sc); static void chipc_disable_otp_power(struct chipc_softc *sc); static int chipc_enable_sprom_pins(struct chipc_softc *sc); static void chipc_disable_sprom_pins(struct chipc_softc *sc); static int chipc_try_activate_resource(device_t dev, device_t child, struct resource *r, bool req_direct); static int chipc_init_rman(struct chipc_softc *sc); static void chipc_free_rman(struct chipc_softc *sc); static struct rman *chipc_get_rman(device_t dev, int type, u_int flags); /* quirk and capability flag convenience macros */ #define CHIPC_QUIRK(_sc, _name) \ ((_sc)->quirks & CHIPC_QUIRK_ ## _name) #define CHIPC_CAP(_sc, _name) \ ((_sc)->caps._name) #define CHIPC_ASSERT_QUIRK(_sc, name) \ KASSERT(CHIPC_QUIRK((_sc), name), ("quirk " __STRING(_name) " not set")) #define CHIPC_ASSERT_CAP(_sc, name) \ KASSERT(CHIPC_CAP((_sc), name), ("capability " __STRING(_name) " not set")) static int chipc_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, chipc_devices, sizeof(chipc_devices[0])); if (id == NULL) return (ENXIO); bhnd_set_default_core_desc(dev); return (BUS_PROBE_DEFAULT); } static int chipc_attach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->quirks = bhnd_device_quirks(dev, chipc_devices, sizeof(chipc_devices[0])); sc->sprom_refcnt = 0; CHIPC_LOCK_INIT(sc); STAILQ_INIT(&sc->mem_regions); /* Set up resource management */ if ((error = chipc_init_rman(sc))) { device_printf(sc->dev, "failed to initialize chipc resource state: %d\n", error); goto failed; } /* Allocate the region containing the chipc register block */ if ((sc->core_region = chipc_find_region_by_rid(sc, 0)) == NULL) { error = ENXIO; goto failed; } error = chipc_retain_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); if (error) { sc->core_region = NULL; goto failed; } /* Save a direct reference to our chipc registers */ sc->core = sc->core_region->cr_res; /* Fetch and parse capability register(s) */ if ((error = chipc_read_caps(sc, &sc->caps))) goto failed; if (bootverbose) chipc_print_caps(sc->dev, &sc->caps); /* Attach all supported child devices */ if ((error = chipc_add_children(sc))) goto failed; /* * Register ourselves with the bus; we're fully initialized and can * response to ChipCommin API requests. * * Since our children may need access to ChipCommon, this must be done * before attaching our children below (via bus_attach_children). 
*/ if ((error = bhnd_register_provider(dev, BHND_SERVICE_CHIPC))) goto failed; bus_attach_children(dev); return (0); failed: device_delete_children(sc->dev); if (sc->core_region != NULL) { chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); } chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (error); } static int chipc_detach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev))) return (error); - if ((error = device_delete_children(dev))) - return (error); - if ((error = bhnd_deregister_provider(dev, BHND_SERVICE_ANY))) return (error); chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (0); } static int chipc_add_children(struct chipc_softc *sc) { device_t child; const char *flash_bus; int error; /* SPROM/OTP */ if (sc->caps.nvram_src == BHND_NVRAM_SRC_SPROM || sc->caps.nvram_src == BHND_NVRAM_SRC_OTP) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add nvram device\n"); return (ENXIO); } /* Both OTP and external SPROM are mapped at CHIPC_SPROM_OTP */ error = chipc_set_mem_resource(sc, child, 0, CHIPC_SPROM_OTP, CHIPC_SPROM_OTP_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set OTP memory " "resource: %d\n", error); return (error); } } /* * PMU/PWR_CTRL * * On AOB ("Always on Bus") devices, the PMU core (if it exists) is * attached directly to the bhnd(4) bus -- not chipc. */ if (sc->caps.pmu && !sc->caps.aob) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add pmu\n"); return (ENXIO); } } else if (sc->caps.pwr_ctrl) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add pwrctl\n"); return (ENXIO); } } /* GPIO */ child = BUS_ADD_CHILD(sc->dev, 0, "gpio", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add gpio\n"); return (ENXIO); } error = chipc_set_mem_resource(sc, child, 0, 0, RM_MAX_END, 0, 0); if (error) { device_printf(sc->dev, "failed to set gpio memory resource: " "%d\n", error); return (error); } /* All remaining devices are SoC-only */ if (bhnd_get_attach_type(sc->dev) != BHND_ATTACH_NATIVE) return (0); /* UARTs */ for (u_int i = 0; i < min(sc->caps.num_uarts, CHIPC_UART_MAX); i++) { int irq_rid, mem_rid; irq_rid = 0; mem_rid = 0; child = BUS_ADD_CHILD(sc->dev, 0, "uart", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add uart%u\n", i); return (ENXIO); } /* Shared IRQ */ error = chipc_set_irq_resource(sc, child, irq_rid, 0); if (error) { device_printf(sc->dev, "failed to set uart%u irq %u\n", i, 0); return (error); } /* UART registers are mapped sequentially */ error = chipc_set_mem_resource(sc, child, mem_rid, CHIPC_UART(i), CHIPC_UART_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set uart%u memory " "resource: %d\n", i, error); return (error); } } /* Flash */ flash_bus = chipc_flash_bus_name(sc->caps.flash_type); if (flash_bus != NULL) { int rid; child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "failed to add %s device\n", flash_bus); return (ENXIO); } /* flash memory mapping */ rid = 0; error = chipc_set_mem_resource(sc, child, rid, 0, RM_MAX_END, 1, 1); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } /* 
flashctrl registers */ rid++; error = chipc_set_mem_resource(sc, child, rid, CHIPC_SFLASH_BASE, CHIPC_SFLASH_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } } return (0); } /** * Determine the NVRAM data source for this device. * * The SPROM, OTP, and flash capability flags must be fully populated in * @p caps. * * @param sc chipc driver state. * @param caps capability flags to be used to derive NVRAM configuration. */ static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t otp_st, srom_ctrl; /* * We check for hardware presence in order of precedence. For example, * SPROM is always used in preference to internal OTP if found. */ if (CHIPC_QUIRK(sc, SUPPORTS_SPROM) && caps->sprom) { srom_ctrl = bhnd_bus_read_4(sc->core, CHIPC_SPROM_CTRL); if (srom_ctrl & CHIPC_SRC_PRESENT) return (BHND_NVRAM_SRC_SPROM); } /* Check for programmed OTP H/W subregion (contains SROM data) */ if (CHIPC_QUIRK(sc, SUPPORTS_OTP) && caps->otp_size > 0) { /* TODO: need access to HND-OTP device */ if (!CHIPC_QUIRK(sc, OTP_HND)) { device_printf(sc->dev, "NVRAM unavailable: unsupported OTP controller.\n"); return (BHND_NVRAM_SRC_UNKNOWN); } otp_st = bhnd_bus_read_4(sc->core, CHIPC_OTPST); if (otp_st & CHIPC_OTPS_GUP_HW) return (BHND_NVRAM_SRC_OTP); } /* Check for flash */ if (caps->flash_type != CHIPC_FLASH_NONE) return (BHND_NVRAM_SRC_FLASH); /* No NVRAM hardware capability declared */ return (BHND_NVRAM_SRC_UNKNOWN); } /* Read and parse chipc capabilities */ static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t cap_reg; uint32_t cap_ext_reg; uint32_t regval; /* Fetch cap registers */ cap_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES); cap_ext_reg = 0; if (CHIPC_QUIRK(sc, SUPPORTS_CAP_EXT)) cap_ext_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES_EXT); /* Extract values */ caps->num_uarts = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_NUM_UART); caps->mipseb = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_MIPSEB); caps->uart_gpio = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_UARTGPIO); caps->uart_clock = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_UCLKSEL); caps->extbus_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_EXTBUS); caps->pwr_ctrl = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PWR_CTL); caps->jtag_master = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_JTAGP); caps->pll_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_PLL); caps->backplane_64 = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_BKPLN64); caps->boot_rom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ROM); caps->pmu = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PMU); caps->eci = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ECI); caps->sprom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_SPROM); caps->otp_size = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_OTP_SIZE); caps->seci = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_SECI); caps->gsio = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_GSIO); caps->aob = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_AOB); /* Fetch OTP size for later IPX controller revisions */ if (CHIPC_QUIRK(sc, IPX_OTPL_SIZE)) { regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->otp_size = CHIPC_GET_BITS(regval, CHIPC_OTPL_SIZE); } /* Determine flash type and parameters */ caps->cfi_width = 0; switch (CHIPC_GET_BITS(cap_reg, CHIPC_CAP_FLASH)) { case CHIPC_CAP_SFLASH_ST: caps->flash_type = CHIPC_SFLASH_ST; break; case CHIPC_CAP_SFLASH_AT: caps->flash_type = CHIPC_SFLASH_AT; break; case CHIPC_CAP_NFLASH: /* unimplemented */ caps->flash_type = CHIPC_NFLASH; break; case CHIPC_CAP_PFLASH: caps->flash_type = CHIPC_PFLASH_CFI; 
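/*
 * Parallel (CFI) flash: the FLASH_CFG DS flag selects the data-bus width;
 * cfi_width is recorded in bytes (2 for a 16-bit part, 1 for an 8-bit part).
 */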
/* determine cfi width */ regval = bhnd_bus_read_4(sc->core, CHIPC_FLASH_CFG); if (CHIPC_GET_FLAG(regval, CHIPC_FLASH_CFG_DS)) caps->cfi_width = 2; else caps->cfi_width = 1; break; case CHIPC_CAP_FLASH_NONE: caps->flash_type = CHIPC_FLASH_NONE; break; } /* Handle 4706_NFLASH fallback */ if (CHIPC_QUIRK(sc, 4706_NFLASH) && CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_4706_NFLASH)) { caps->flash_type = CHIPC_NFLASH_4706; } /* Determine NVRAM source. Must occur after the SPROM/OTP/flash * capability flags have been populated. */ caps->nvram_src = chipc_find_nvram_src(sc, caps); /* Determine the SPROM offset within OTP (if any). SPROM-formatted * data is placed within the OTP general use region. */ caps->sprom_offset = 0; if (caps->nvram_src == BHND_NVRAM_SRC_OTP) { CHIPC_ASSERT_QUIRK(sc, OTP_IPX); /* Bit offset to GUP HW subregion containing SPROM data */ regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->sprom_offset = CHIPC_GET_BITS(regval, CHIPC_OTPL_GUP); /* Convert to bytes */ caps->sprom_offset /= 8; } return (0); } static int chipc_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int chipc_resume(device_t dev) { return (bus_generic_resume(dev)); } static void chipc_probe_nomatch(device_t dev, device_t child) { struct resource_list *rl; const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> at", name); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } printf(" (no driver attached)\n"); } static int chipc_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static device_t chipc_add_child(device_t dev, u_int order, const char *name, int unit) { struct chipc_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct chipc_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } resource_list_init(&dinfo->resources); dinfo->irq_mapped = false; device_set_ivars(child, dinfo); return (child); } static void chipc_child_deleted(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { /* Free the child's resource list */ resource_list_free(&dinfo->resources); /* Unmap the child's IRQ */ if (dinfo->irq_mapped) { bhnd_unmap_intr(dev, dinfo->irq); dinfo->irq_mapped = false; } free(dinfo, M_BHND); } device_set_ivars(child, NULL); } static struct resource_list * chipc_get_resource_list(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /* Allocate region records for the given port, and add the port's memory * range to the mem_rman */ static int chipc_rman_init_regions (struct chipc_softc *sc, bhnd_port_type type, u_int port) { struct chipc_region *cr; rman_res_t start, end; u_int num_regions; int error; num_regions = bhnd_get_region_count(sc->dev, type, port); for (u_int region = 0; region < num_regions; region++) { /* Allocate new region record */ cr = 
chipc_alloc_region(sc, type, port, region); if (cr == NULL) return (ENODEV); /* Can't manage regions that cannot be allocated */ if (cr->cr_rid < 0) { BHND_DEBUG_DEV(sc->dev, "no rid for chipc region " "%s%u.%u", bhnd_port_type_name(type), port, region); chipc_free_region(sc, cr); continue; } /* Add to rman's managed range */ start = cr->cr_addr; end = cr->cr_end; if ((error = rman_manage_region(&sc->mem_rman, start, end))) { chipc_free_region(sc, cr); return (error); } /* Add to region list */ STAILQ_INSERT_TAIL(&sc->mem_regions, cr, cr_link); } return (0); } /* Initialize memory state for all chipc port regions */ static int chipc_init_rman(struct chipc_softc *sc) { u_int num_ports; int error; /* Port types for which we'll register chipc_region mappings */ bhnd_port_type types[] = { BHND_PORT_DEVICE }; /* Initialize resource manager */ sc->mem_rman.rm_start = 0; sc->mem_rman.rm_end = BUS_SPACE_MAXADDR; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "ChipCommon Device Memory"; if ((error = rman_init(&sc->mem_rman))) { device_printf(sc->dev, "could not initialize mem_rman: %d\n", error); return (error); } /* Populate per-port-region state */ for (u_int i = 0; i < nitems(types); i++) { num_ports = bhnd_get_port_count(sc->dev, types[i]); for (u_int port = 0; port < num_ports; port++) { error = chipc_rman_init_regions(sc, types[i], port); if (error) { device_printf(sc->dev, "region init failed for %s%u: %d\n", bhnd_port_type_name(types[i]), port, error); goto failed; } } } return (0); failed: chipc_free_rman(sc); return (error); } /* Free memory management state */ static void chipc_free_rman(struct chipc_softc *sc) { struct chipc_region *cr, *cr_next; STAILQ_FOREACH_SAFE(cr, &sc->mem_regions, cr_link, cr_next) chipc_free_region(sc, cr); rman_fini(&sc->mem_rman); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The chipc device state. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) * @param flags Resource flags (e.g. RF_PREFETCHABLE) */ static struct rman * chipc_get_rman(device_t dev, int type, u_int flags) { struct chipc_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_MEMORY: return (&sc->mem_rman); case SYS_RES_IRQ: /* We delegate IRQ resource management to the parent bus */ return (NULL); default: return (NULL); }; } static struct resource * chipc_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct chipc_softc *sc; struct chipc_region *cr; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager, delegate request if necessary */ rm = chipc_get_rman(dev, type, flags); if (rm == NULL) { /* Requested resource type is delegated to our parent */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. 
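 * (These default ranges are the ones installed by chipc_add_children() via
 * chipc_set_mem_resource() and chipc_set_irq_resource(); for example, a uart
 * child's rid-0 memory entry covers its CHIPC_UART(i) register block.)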
*/ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy " "[%d]\n", *rid, type, device_get_nameunit(child), rman_get_flags(rle->res)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Locate a mapping region */ if ((cr = chipc_find_region(sc, start, end)) == NULL) { /* Resource requests outside our shared port regions can be * delegated to our parent. */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* * As a special case, children that map the complete ChipCommon register * block are delegated to our parent. * * The rman API does not support sharing resources that are not * identical in size; since we allocate subregions to various children, * any children that need to map the entire register block (e.g. because * they require access to discontiguous register ranges) must make the * allocation through our parent, where we hold a compatible * RF_SHAREABLE allocation. */ if (cr == sc->core_region && cr->cr_addr == start && cr->cr_end == end && cr->cr_count == count) { rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Try to retain a region reference */ if ((error = chipc_retain_region(sc, cr, RF_ALLOCATED))) return (NULL); /* Make our rman reservation */ rv = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (rv == NULL) { chipc_release_region(sc, cr, RF_ALLOCATED); return (NULL); } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } static int chipc_release_resource(device_t dev, device_t child, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; struct resource_list_entry *rle; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_rl_release_resource(dev, child, r)); } /* Locate the mapping region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Cache rle */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), rman_get_type(r), rman_get_rid(r)); /* Deactivate resources */ error = bus_generic_rman_release_resource(dev, child, r); if (error != 0) return (error); /* Drop allocation reference */ chipc_release_region(sc, cr, RF_ALLOCATED); /* Clear reference from the resource list entry if exists */ if (rle != NULL) rle->res = NULL; return (0); } static int chipc_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; sc = device_get_softc(dev); /* Handled by parent bus? 
*/ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_adjust_resource(dev, child, r, start, end)); } /* The range is limited to the existing region mapping */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); if (end <= start) return (EINVAL); if (start < cr->cr_addr || end > cr->cr_end) return (EINVAL); /* Range falls within the existing region */ return (rman_adjust_resource(r, start, end)); } /** * Retain an RF_ACTIVE reference to the region mapping @p r, and * configure @p r with its subregion values. * * @param sc Driver instance state. * @param child Requesting child device. * @param r resource to be activated. * @param req_direct If true, failure to allocate a direct bhnd resource * will be treated as an error. If false, the resource will not be marked * as RF_ACTIVE if bhnd direct resource allocation fails. */ static int chipc_try_activate_resource(device_t dev, device_t child, struct resource *r, bool req_direct) { struct chipc_softc *sc = device_get_softc(dev); struct rman *rm; struct chipc_region *cr; bhnd_size_t cr_offset; rman_res_t r_start, r_end, r_size; int error; rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) return (EINVAL); r_start = rman_get_start(r); r_end = rman_get_end(r); r_size = rman_get_size(r); /* Find the corresponding chipc region */ cr = chipc_find_region(sc, r_start, r_end); if (cr == NULL) return (EINVAL); /* Calculate subregion offset within the chipc region */ cr_offset = r_start - cr->cr_addr; /* Retain (and activate, if necessary) the chipc region */ if ((error = chipc_retain_region(sc, cr, RF_ACTIVE))) return (error); /* Configure child resource with its subregion values. */ if (cr->cr_res->direct) { error = chipc_init_child_resource(r, cr->cr_res->res, cr_offset, r_size); if (error) goto cleanup; /* Mark active */ if ((error = rman_activate_resource(r))) goto cleanup; } else if (req_direct) { error = ENOMEM; goto cleanup; } return (0); cleanup: chipc_release_region(sc, cr, RF_ACTIVE); return (error); } static int chipc_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct rman *rm; int error; /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(dev, type, rman_get_flags(r->res)); if (rm == NULL || !rman_is_region_manager(r->res, rm)) { return (bhnd_bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region resource */ error = chipc_try_activate_resource(dev, child, r->res, false); if (error) return (error); /* Mark the child resource as direct according to the returned resource * state */ if (rman_get_flags(r->res) & RF_ACTIVE) r->direct = true; return (0); } static int chipc_activate_resource(device_t dev, device_t child, struct resource *r) { struct rman *rm; /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_activate_resource(dev, child, r)); } /* Try activating the chipc region-based resource */ return (chipc_try_activate_resource(dev, child, r, true)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). 
*/ static int chipc_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_deactivate_resource(dev, child, r)); } /* Find the corresponding chipc region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); /* Drop associated RF_ACTIVE reference */ chipc_release_region(sc, cr, RF_ACTIVE); return (0); } /** * Examine bus state and make a best effort determination of whether it's * likely safe to enable the muxed SPROM pins. * * On devices that do not use SPROM pin muxing, always returns true. * * @param sc chipc driver state. */ static bool chipc_should_enable_muxed_sprom(struct chipc_softc *sc) { device_t *devs; device_t hostb; device_t parent; int devcount; int error; bool result; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (true); bus_topo_lock(); parent = device_get_parent(sc->dev); hostb = bhnd_bus_find_hostb_device(parent); if ((error = device_get_children(parent, &devs, &devcount))) { bus_topo_unlock(); return (false); } /* Reject any active devices other than ChipCommon, or the * host bridge (if any). */ result = true; for (int i = 0; i < devcount; i++) { if (devs[i] == hostb || devs[i] == sc->dev) continue; if (!device_is_attached(devs[i])) continue; if (device_is_suspended(devs[i])) continue; /* Active device; assume SPROM is busy */ result = false; break; } free(devs, M_TEMP); bus_topo_unlock(); return (result); } static int chipc_enable_sprom(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Already enabled? */ if (sc->sprom_refcnt >= 1) { sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (0); } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: error = chipc_enable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: error = chipc_enable_otp_power(sc); break; default: error = 0; break; } /* Bump the reference count */ if (error == 0) sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (error); } static void chipc_disable_sprom(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Check reference count, skip disable if in-use. */ KASSERT(sc->sprom_refcnt > 0, ("sprom refcnt overrelease")); sc->sprom_refcnt--; if (sc->sprom_refcnt > 0) { CHIPC_UNLOCK(sc); return; } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: chipc_disable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: chipc_disable_otp_power(sc); break; default: break; } CHIPC_UNLOCK(sc); } static int chipc_enable_otp_power(struct chipc_softc *sc) { // TODO: Enable OTP resource via PMU, and wait up to 100 usec for // OTPS_READY to be set in `optstatus`. return (0); } static void chipc_disable_otp_power(struct chipc_softc *sc) { // TODO: Disable OTP resource via PMU } /** * If required by this device, enable access to the SPROM. * * @param sc chipc driver state. */ static int chipc_enable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins already enabled")); /* Nothing to do? 
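 *
 * The chipc_enable_sprom()/chipc_disable_sprom() pair above is a standard
 * first-reference / last-reference construction: only the 0 -> 1 transition
 * touches the hardware, and only the 1 -> 0 transition undoes it.
 * Schematically (locking elided; hw_enable() and hw_disable() are
 * hypothetical placeholders for the SPROM pin / OTP power handling):
 *
 *	int
 *	res_enable(struct res *rs)
 *	{
 *		int error = 0;
 *
 *		if (rs->refcnt == 0)		// first user powers it up
 *			error = hw_enable(rs);
 *		if (error == 0)
 *			rs->refcnt++;
 *		return (error);
 *	}
 *
 *	void
 *	res_disable(struct res *rs)
 *	{
 *		if (--rs->refcnt == 0)		// last user powers it down
 *			hw_disable(rs);
 *	}
 *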
*/ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (0); /* Check whether bus is busy */ if (!chipc_should_enable_muxed_sprom(sc)) return (EBUSY); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return (0); } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } /* Refuse to proceed on unsupported devices with muxed SPROM pins */ device_printf(sc->dev, "muxed sprom lines on unrecognized device\n"); return (ENXIO); } /** * If required by this device, revert any GPIO/pin configuration applied * to allow SPROM access. * * @param sc chipc driver state. */ static void chipc_disable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins in use")); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl |= CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return; } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } } static uint32_t chipc_read_chipst(device_t dev) { struct chipc_softc *sc = device_get_softc(dev); return (bhnd_bus_read_4(sc->core, CHIPC_CHIPST)); } static void chipc_write_chipctrl(device_t dev, uint32_t value, uint32_t mask) { struct chipc_softc *sc; uint32_t cctrl; sc = device_get_softc(dev); CHIPC_LOCK(sc); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); cctrl = (cctrl & ~mask) | (value | mask); bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); CHIPC_UNLOCK(sc); } static struct chipc_caps * chipc_get_caps(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); return (&sc->caps); } static device_method_t chipc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, chipc_probe), DEVMETHOD(device_attach, chipc_attach), DEVMETHOD(device_detach, chipc_detach), DEVMETHOD(device_suspend, chipc_suspend), DEVMETHOD(device_resume, chipc_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, chipc_probe_nomatch), DEVMETHOD(bus_print_child, chipc_print_child), DEVMETHOD(bus_add_child, chipc_add_child), DEVMETHOD(bus_child_deleted, chipc_child_deleted), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_alloc_resource, chipc_alloc_resource), DEVMETHOD(bus_release_resource, chipc_release_resource), DEVMETHOD(bus_adjust_resource, chipc_adjust_resource), DEVMETHOD(bus_activate_resource, chipc_activate_resource), DEVMETHOD(bus_deactivate_resource, chipc_deactivate_resource), DEVMETHOD(bus_get_resource_list, chipc_get_resource_list), DEVMETHOD(bus_get_rman, chipc_get_rman), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_bind_intr, bus_generic_bind_intr), DEVMETHOD(bus_describe_intr, bus_generic_describe_intr), /* BHND bus inteface */ 
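/*
 * chipc_write_chipctrl() above applies a (value, mask) update to the
 * CHIPC_CHIPCTRL register.  For comparison, the conventional masked
 * read-modify-write idiom for that kind of interface looks like this
 * (illustration only, not the driver code):
 *
 *	reg = read_reg(CHIPC_CHIPCTRL);
 *	reg = (reg & ~mask) | (value & mask);	// bits outside mask preserved
 *	write_reg(CHIPC_CHIPCTRL, reg);
 */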
DEVMETHOD(bhnd_bus_activate_resource, chipc_activate_bhnd_resource), /* ChipCommon interface */ DEVMETHOD(bhnd_chipc_read_chipst, chipc_read_chipst), DEVMETHOD(bhnd_chipc_write_chipctrl, chipc_write_chipctrl), DEVMETHOD(bhnd_chipc_enable_sprom, chipc_enable_sprom), DEVMETHOD(bhnd_chipc_disable_sprom, chipc_disable_sprom), DEVMETHOD(bhnd_chipc_get_caps, chipc_get_caps), DEVMETHOD_END }; DEFINE_CLASS_0(bhnd_chipc, bhnd_chipc_driver, chipc_methods, sizeof(struct chipc_softc)); EARLY_DRIVER_MODULE(bhnd_chipc, bhnd, bhnd_chipc_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_DEPEND(bhnd_chipc, bhnd, 1, 1, 1); MODULE_VERSION(bhnd_chipc, 1); diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c index 755fd19a3c9c..024c97dcb78c 100644 --- a/sys/dev/cxgbe/t4_main.c +++ b/sys/dev/cxgbe/t4_main.c @@ -1,13326 +1,13324 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_ddb.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_kern_tls.h" #include "opt_ratelimit.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include #include #ifdef KERN_TLS #include #endif #if defined(__i386__) || defined(__amd64__) #include #include #include #include #endif #ifdef DDB #include #include #endif #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "cudbg/cudbg.h" #include "t4_clip.h" #include "t4_ioctl.h" #include "t4_l2t.h" #include "t4_mp_ring.h" #include "t4_if.h" #include "t4_smt.h" /* T4 bus driver interface */ static int t4_probe(device_t); static int t4_attach(device_t); static int t4_detach(device_t); static int t4_child_location(device_t, device_t, struct sbuf *); static int t4_ready(device_t); static int t4_read_port_device(device_t, int, device_t *); static int t4_suspend(device_t); static int t4_resume(device_t); static int t4_reset_prepare(device_t, device_t); static int t4_reset_post(device_t, device_t); static device_method_t t4_methods[] = { DEVMETHOD(device_probe, t4_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(device_suspend, t4_suspend), DEVMETHOD(device_resume, t4_resume), DEVMETHOD(bus_child_location, t4_child_location), DEVMETHOD(bus_reset_prepare, t4_reset_prepare), DEVMETHOD(bus_reset_post, t4_reset_post), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t4_driver = { "t4nex", t4_methods, sizeof(struct adapter) }; /* T4 port (cxgbe) interface */ static int cxgbe_probe(device_t); static int cxgbe_attach(device_t); static int cxgbe_detach(device_t); device_method_t cxgbe_methods[] = { DEVMETHOD(device_probe, cxgbe_probe), DEVMETHOD(device_attach, cxgbe_attach), DEVMETHOD(device_detach, cxgbe_detach), { 0, 0 } }; static driver_t cxgbe_driver = { "cxgbe", cxgbe_methods, sizeof(struct port_info) }; /* T4 VI (vcxgbe) interface */ static int vcxgbe_probe(device_t); static int vcxgbe_attach(device_t); static int vcxgbe_detach(device_t); static device_method_t vcxgbe_methods[] = { DEVMETHOD(device_probe, vcxgbe_probe), DEVMETHOD(device_attach, vcxgbe_attach), DEVMETHOD(device_detach, vcxgbe_detach), { 0, 0 } }; static driver_t vcxgbe_driver = { "vcxgbe", vcxgbe_methods, sizeof(struct vi_info) }; static d_ioctl_t t4_ioctl; static struct cdevsw t4_cdevsw = { .d_version = D_VERSION, .d_ioctl = t4_ioctl, .d_name = "t4nex", }; /* T5 bus driver interface */ static int t5_probe(device_t); static device_method_t t5_methods[] = { DEVMETHOD(device_probe, t5_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(device_suspend, t4_suspend), DEVMETHOD(device_resume, t4_resume), DEVMETHOD(bus_child_location, t4_child_location), DEVMETHOD(bus_reset_prepare, t4_reset_prepare), DEVMETHOD(bus_reset_post, t4_reset_post), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t5_driver = { "t5nex", t5_methods, sizeof(struct adapter) }; /* T5 port (cxl) interface */ static driver_t cxl_driver = { "cxl", cxgbe_methods, sizeof(struct port_info) }; /* T5 VI (vcxl) interface */ static driver_t vcxl_driver = { "vcxl", 
vcxgbe_methods, sizeof(struct vi_info) }; /* T6 bus driver interface */ static int t6_probe(device_t); static device_method_t t6_methods[] = { DEVMETHOD(device_probe, t6_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(device_suspend, t4_suspend), DEVMETHOD(device_resume, t4_resume), DEVMETHOD(bus_child_location, t4_child_location), DEVMETHOD(bus_reset_prepare, t4_reset_prepare), DEVMETHOD(bus_reset_post, t4_reset_post), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t6_driver = { "t6nex", t6_methods, sizeof(struct adapter) }; /* T6 port (cc) interface */ static driver_t cc_driver = { "cc", cxgbe_methods, sizeof(struct port_info) }; /* T6 VI (vcc) interface */ static driver_t vcc_driver = { "vcc", vcxgbe_methods, sizeof(struct vi_info) }; /* ifnet interface */ static void cxgbe_init(void *); static int cxgbe_ioctl(if_t, unsigned long, caddr_t); static int cxgbe_transmit(if_t, struct mbuf *); static void cxgbe_qflush(if_t); #if defined(KERN_TLS) || defined(RATELIMIT) static int cxgbe_snd_tag_alloc(if_t, union if_snd_tag_alloc_params *, struct m_snd_tag **); #endif MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services"); /* * Correct lock order when you need to acquire multiple locks is t4_list_lock, * then ADAPTER_LOCK, then t4_uld_list_lock. */ static struct sx t4_list_lock; SLIST_HEAD(, adapter) t4_list; #ifdef TCP_OFFLOAD static struct sx t4_uld_list_lock; struct uld_info *t4_uld_list[ULD_MAX + 1]; #endif /* * Tunables. See tweak_tunables() too. * * Each tunable is set to a default value here if it's known at compile-time. * Otherwise it is set to -n as an indication to tweak_tunables() that it should * provide a reasonable default (upto n) when the driver is loaded. * * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to * T5 are under hw.cxl. */ SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) parameters"); SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) T5+ parameters"); SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) TOE parameters"); /* * Number of queues for tx and rx, NIC and offload. 
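 *
 * The "-n" convention described above works like this: a compile-time
 * default of -NTXQ asks tweak_tunables() to pick something reasonable at
 * load time, bounded by NTXQ.  One plausible resolution (the real policy
 * lives in tweak_tunables(), which is not part of this hunk) would be:
 *
 *	if (t4_ntxq < 0)
 *		t4_ntxq = min(-t4_ntxq, mp_ncpus);	// "up to n", capped by CPU count
 *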
*/ #define NTXQ 16 int t4_ntxq = -NTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0, "Number of TX queues per port"); TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq); /* Old name, undocumented */ #define NRXQ 8 int t4_nrxq = -NRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0, "Number of RX queues per port"); TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq); /* Old name, undocumented */ #define NTXQ_VI 1 static int t4_ntxq_vi = -NTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0, "Number of TX queues per VI"); #define NRXQ_VI 1 static int t4_nrxq_vi = -NRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0, "Number of RX queues per VI"); static int t4_rsrv_noflowq = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq, 0, "Reserve TX queue 0 of each VI for non-flowid packets"); #if defined(TCP_OFFLOAD) || defined(RATELIMIT) #define NOFLDTXQ 8 static int t4_nofldtxq = -NOFLDTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0, "Number of offload TX queues per port"); #define NOFLDTXQ_VI 1 static int t4_nofldtxq_vi = -NOFLDTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0, "Number of offload TX queues per VI"); #endif #if defined(TCP_OFFLOAD) #define NOFLDRXQ 2 static int t4_nofldrxq = -NOFLDRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0, "Number of offload RX queues per port"); #define NOFLDRXQ_VI 1 static int t4_nofldrxq_vi = -NOFLDRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0, "Number of offload RX queues per VI"); #define TMR_IDX_OFLD 1 static int t4_tmr_idx_ofld = TMR_IDX_OFLD; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN, &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues"); #define PKTC_IDX_OFLD (-1) static int t4_pktc_idx_ofld = PKTC_IDX_OFLD; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN, &t4_pktc_idx_ofld, 0, "holdoff packet counter index for offload queues"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_keepalive_idle = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN, &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_keepalive_interval = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN, &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)"); /* 0 means chip/fw default, non-zero number is # of keepalives before abort */ static int t4_toe_keepalive_count = 0; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN, &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_rexmt_min = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN, &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_rexmt_max = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN, &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)"); /* 0 means chip/fw default, non-zero number is # of rexmt before abort */ static int t4_toe_rexmt_count = 0; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN, &t4_toe_rexmt_count, 0, "Number of TOE 
retransmissions before abort"); /* -1 means chip/fw default, other values are raw backoff values to use */ static int t4_toe_rexmt_backoff[16] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) TOE retransmit backoff values"); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[0], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[1], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[2], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[3], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[4], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[5], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[6], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[7], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[8], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[9], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[10], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[11], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[12], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[13], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[14], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[15], 0, ""); int t4_ddp_rcvbuf_len = 256 * 1024; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, ddp_rcvbuf_len, CTLFLAG_RWTUN, &t4_ddp_rcvbuf_len, 0, "length of each DDP RX buffer"); unsigned int t4_ddp_rcvbuf_cache = 4; SYSCTL_UINT(_hw_cxgbe_toe, OID_AUTO, ddp_rcvbuf_cache, CTLFLAG_RWTUN, &t4_ddp_rcvbuf_cache, 0, "maximum number of free DDP RX buffers to cache per connection"); #endif #ifdef DEV_NETMAP #define NN_MAIN_VI (1 << 0) /* Native netmap on the main VI */ #define NN_EXTRA_VI (1 << 1) /* Native netmap on the extra VI(s) */ static int t4_native_netmap = NN_EXTRA_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap, 0, "Native netmap support. bit 0 = main VI, bit 1 = extra VIs"); #define NNMTXQ 8 static int t4_nnmtxq = -NNMTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0, "Number of netmap TX queues"); #define NNMRXQ 8 static int t4_nnmrxq = -NNMRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0, "Number of netmap RX queues"); #define NNMTXQ_VI 2 static int t4_nnmtxq_vi = -NNMTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0, "Number of netmap TX queues per VI"); #define NNMRXQ_VI 2 static int t4_nnmrxq_vi = -NNMRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0, "Number of netmap RX queues per VI"); #endif /* * Holdoff parameters for ports. 
*/ #define TMR_IDX 1 int t4_tmr_idx = TMR_IDX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx, 0, "Holdoff timer index"); TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx); /* Old name */ #define PKTC_IDX (-1) int t4_pktc_idx = PKTC_IDX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx, 0, "Holdoff packet counter index"); TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx); /* Old name */ /* * Size (# of entries) of each tx and rx queue. */ unsigned int t4_qsize_txq = TX_EQ_QSIZE; SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0, "Number of descriptors in each TX queue"); unsigned int t4_qsize_rxq = RX_IQ_QSIZE; SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0, "Number of descriptors in each RX queue"); /* * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively). */ int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types, 0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)"); /* * Configuration file. All the _CF names here are special. */ #define DEFAULT_CF "default" #define BUILTIN_CF "built-in" #define FLASH_CF "flash" #define UWIRE_CF "uwire" #define FPGA_CF "fpga" static char t4_cfg_file[32] = DEFAULT_CF; SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file, sizeof(t4_cfg_file), "Firmware configuration file"); /* * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively). * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them. * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water * mark or when signalled to do so, 0 to never emit PAUSE. * pause_autoneg = 1 means PAUSE will be negotiated if possible and the * negotiated settings will override rx_pause/tx_pause. * Otherwise rx_pause/tx_pause are applied forcibly. */ static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG; SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN, &t4_pause_settings, 0, "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)"); /* * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively). * -1 to run with the firmware default. Same as FEC_AUTO (bit 5) * 0 to disable FEC. */ static int t4_fec = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0, "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); /* * Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it * issues to the firmware. If the firmware doesn't support FORCE_FEC then the * driver runs as if this is set to 0. * -1 to set FORCE_FEC iff requested_fec != AUTO. Multiple FEC bits are okay. * 0 to never set FORCE_FEC. requested_fec = AUTO means use the hint from the * transceiver. Multiple FEC bits may not be okay but will be passed on to * the firmware anyway (may result in l1cfg errors with old firmwares). * 1 to always set FORCE_FEC. Multiple FEC bits are okay. requested_fec = AUTO * means set all FEC bits that are valid for the speed. */ static int t4_force_fec = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, force_fec, CTLFLAG_RDTUN, &t4_force_fec, 0, "Controls the use of FORCE_FEC bit in L1 configuration."); /* * Link autonegotiation. * -1 to run with the firmware default. * 0 to disable. * 1 to enable. 
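 *
 * Returning to the hw.cxgbe.pause_settings encoding above (bit 0 = rx_pause,
 * bit 1 = tx_pause, bit 2 = pause_autoneg): the default of
 * PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG (7) heeds incoming PAUSE frames, emits
 * PAUSE when the rx FIFO fills, and lets autonegotiation override both; 3
 * forces rx/tx pause with no negotiation, and 0 disables PAUSE entirely.
 * Decoded explicitly:
 *
 *	rx_pause      = (t4_pause_settings & 1) != 0;
 *	tx_pause      = (t4_pause_settings & 2) != 0;
 *	pause_autoneg = (t4_pause_settings & 4) != 0;
 *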
*/ static int t4_autoneg = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0, "Link autonegotiation"); /* * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed, * encouraged respectively). '-n' is the same as 'n' except the firmware * version used in the checks is read from the firmware bundled with the driver. */ static int t4_fw_install = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0, "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)"); /* * ASIC features that will be used. Disable the ones you don't want so that the * chip resources aren't wasted on features that will not be used. */ static int t4_nbmcaps_allowed = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN, &t4_nbmcaps_allowed, 0, "Default NBM capabilities"); static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */ SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN, &t4_linkcaps_allowed, 0, "Default link capabilities"); static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS | FW_CAPS_CONFIG_SWITCH_EGRESS; SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN, &t4_switchcaps_allowed, 0, "Default switch capabilities"); #ifdef RATELIMIT static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC | FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD; #else static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC | FW_CAPS_CONFIG_NIC_HASHFILTER; #endif SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN, &t4_niccaps_allowed, 0, "Default NIC capabilities"); static int t4_toecaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN, &t4_toecaps_allowed, 0, "Default TCP offload capabilities"); static int t4_rdmacaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN, &t4_rdmacaps_allowed, 0, "Default RDMA capabilities"); static int t4_cryptocaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN, &t4_cryptocaps_allowed, 0, "Default crypto capabilities"); static int t4_iscsicaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN, &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities"); static int t4_fcoecaps_allowed = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN, &t4_fcoecaps_allowed, 0, "Default FCoE capabilities"); static int t5_write_combine = 0; SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine, 0, "Use WC instead of UC for BAR2"); /* From t4_sysctls: doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"} */ static int t4_doorbells_allowed = 0xf; SYSCTL_INT(_hw_cxgbe, OID_AUTO, doorbells_allowed, CTLFLAG_RDTUN, &t4_doorbells_allowed, 0, "Limit tx queues to these doorbells"); static int t4_num_vis = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0, "Number of VIs per port"); /* * PCIe Relaxed Ordering. * -1: driver should figure out a good value. * 0: disable RO. * 1: enable RO. * 2: leave RO alone. 
*/ static int pcie_relaxed_ordering = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN, &pcie_relaxed_ordering, 0, "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone"); static int t4_panic_on_fatal_err = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RWTUN, &t4_panic_on_fatal_err, 0, "panic on fatal errors"); static int t4_reset_on_fatal_err = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_on_fatal_err, CTLFLAG_RWTUN, &t4_reset_on_fatal_err, 0, "reset adapter on fatal errors"); static int t4_clock_gate_on_suspend = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, clock_gate_on_suspend, CTLFLAG_RWTUN, &t4_clock_gate_on_suspend, 0, "gate the clock on suspend"); static int t4_tx_vm_wr = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0, "Use VM work requests to transmit packets."); /* * Set to non-zero to enable the attack filter. A packet that matches any of * these conditions will get dropped on ingress: * 1) IP && source address == destination address. * 2) TCP/IP && source address is not a unicast address. * 3) TCP/IP && destination address is not a unicast address. * 4) IP && source address is loopback (127.x.y.z). * 5) IP && destination address is loopback (127.x.y.z). * 6) IPv6 && source address == destination address. * 7) IPv6 && source address is not a unicast address. * 8) IPv6 && source address is loopback (::1/128). * 9) IPv6 && destination address is loopback (::1/128). * 10) IPv6 && source address is unspecified (::/128). * 11) IPv6 && destination address is unspecified (::/128). * 12) TCP/IPv6 && source address is multicast (ff00::/8). * 13) TCP/IPv6 && destination address is multicast (ff00::/8). */ static int t4_attack_filter = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, attack_filter, CTLFLAG_RDTUN, &t4_attack_filter, 0, "Drop suspicious traffic"); static int t4_drop_ip_fragments = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_ip_fragments, CTLFLAG_RDTUN, &t4_drop_ip_fragments, 0, "Drop IP fragments"); static int t4_drop_pkts_with_l2_errors = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l2_errors, CTLFLAG_RDTUN, &t4_drop_pkts_with_l2_errors, 0, "Drop all frames with Layer 2 length or checksum errors"); static int t4_drop_pkts_with_l3_errors = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l3_errors, CTLFLAG_RDTUN, &t4_drop_pkts_with_l3_errors, 0, "Drop all frames with IP version, length, or checksum errors"); static int t4_drop_pkts_with_l4_errors = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN, &t4_drop_pkts_with_l4_errors, 0, "Drop all frames with Layer 4 length, checksum, or other errors"); #ifdef TCP_OFFLOAD /* * TOE tunables. */ static int t4_cop_managed_offloading = 0; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN, &t4_cop_managed_offloading, 0, "COP (Connection Offload Policy) controls all TOE offload"); TUNABLE_INT("hw.cxgbe.cop_managed_offloading", &t4_cop_managed_offloading); #endif #ifdef KERN_TLS /* * This enables KERN_TLS for all adapters if set. 
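 *
 * For the attack-filter list further above, a userland-style predicate for
 * the three IPv4 conditions 1), 4) and 5) would look roughly like this (the
 * chip evaluates these in hardware; this sketch only illustrates the
 * conditions, addresses in host byte order):
 *
 *	static bool
 *	looks_spoofed_v4(uint32_t src, uint32_t dst)
 *	{
 *		if (src == dst)				// 1) source == destination
 *			return (true);
 *		if ((src >> 24) == 127 || (dst >> 24) == 127)
 *			return (true);			// 4), 5) loopback 127.x.y.z
 *		return (false);
 *	}
 *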
*/ static int t4_kern_tls = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, kern_tls, CTLFLAG_RDTUN, &t4_kern_tls, 0, "Enable KERN_TLS mode for T6 adapters"); SYSCTL_NODE(_hw_cxgbe, OID_AUTO, tls, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) KERN_TLS parameters"); static int t4_tls_inline_keys = 0; SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN, &t4_tls_inline_keys, 0, "Always pass TLS keys in work requests (1) or attempt to store TLS keys " "in card memory."); static int t4_tls_combo_wrs = 0; SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs, 0, "Attempt to combine TCB field updates with TLS record work requests."); #endif /* Functions used by VIs to obtain unique MAC addresses for each VI. */ static int vi_mac_funcs[] = { FW_VI_FUNC_ETH, FW_VI_FUNC_OFLD, FW_VI_FUNC_IWARP, FW_VI_FUNC_OPENISCSI, FW_VI_FUNC_OPENFCOE, FW_VI_FUNC_FOISCSI, FW_VI_FUNC_FOFCOE, }; struct intrs_and_queues { uint16_t intr_type; /* INTx, MSI, or MSI-X */ uint16_t num_vis; /* number of VIs for each port */ uint16_t nirq; /* Total # of vectors */ uint16_t ntxq; /* # of NIC txq's for each port */ uint16_t nrxq; /* # of NIC rxq's for each port */ uint16_t nofldtxq; /* # of TOE/ETHOFLD txq's for each port */ uint16_t nofldrxq; /* # of TOE rxq's for each port */ uint16_t nnmtxq; /* # of netmap txq's */ uint16_t nnmrxq; /* # of netmap rxq's */ /* The vcxgbe/vcxl interfaces use these and not the ones above. */ uint16_t ntxq_vi; /* # of NIC txq's */ uint16_t nrxq_vi; /* # of NIC rxq's */ uint16_t nofldtxq_vi; /* # of TOE txq's */ uint16_t nofldrxq_vi; /* # of TOE rxq's */ uint16_t nnmtxq_vi; /* # of netmap txq's */ uint16_t nnmrxq_vi; /* # of netmap rxq's */ }; static void setup_memwin(struct adapter *); static void position_memwin(struct adapter *, int, uint32_t); static int validate_mem_range(struct adapter *, uint32_t, uint32_t); static int fwmtype_to_hwmtype(int); static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t, uint32_t *); static int fixup_devlog_params(struct adapter *); static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *); static int contact_firmware(struct adapter *); static int partition_resources(struct adapter *); static int get_params__pre_init(struct adapter *); static int set_params__pre_init(struct adapter *); static int get_params__post_init(struct adapter *); static int set_params__post_init(struct adapter *); static void t4_set_desc(struct adapter *); static bool fixed_ifmedia(struct port_info *); static void build_medialist(struct port_info *); static void init_link_config(struct port_info *); static int fixup_link_config(struct port_info *); static int apply_link_config(struct port_info *); static int cxgbe_init_synchronized(struct vi_info *); static int cxgbe_uninit_synchronized(struct vi_info *); static int adapter_full_init(struct adapter *); static void adapter_full_uninit(struct adapter *); static int vi_full_init(struct vi_info *); static void vi_full_uninit(struct vi_info *); static int alloc_extra_vi(struct adapter *, struct port_info *, struct vi_info *); static void quiesce_txq(struct sge_txq *); static void quiesce_wrq(struct sge_wrq *); static void quiesce_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *); static void quiesce_vi(struct vi_info *); static int t4_alloc_irq(struct adapter *, struct irq *, int rid, driver_intr_t *, void *, char *); static int t4_free_irq(struct adapter *, struct irq *); static void t4_init_atid_table(struct adapter *); static void t4_free_atid_table(struct adapter *); static 
void stop_atid_allocator(struct adapter *); static void restart_atid_allocator(struct adapter *); static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *); static void vi_refresh_stats(struct vi_info *); static void cxgbe_refresh_stats(struct vi_info *); static void cxgbe_tick(void *); static void vi_tick(void *); static void cxgbe_sysctls(struct port_info *); static int sysctl_int_array(SYSCTL_HANDLER_ARGS); static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS); static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS); static int sysctl_btphy(SYSCTL_HANDLER_ARGS); static int sysctl_noflowq(SYSCTL_HANDLER_ARGS); static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS); static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS); static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS); static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS); static int sysctl_link_fec(SYSCTL_HANDLER_ARGS); static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS); static int sysctl_module_fec(SYSCTL_HANDLER_ARGS); static int sysctl_autoneg(SYSCTL_HANDLER_ARGS); static int sysctl_force_fec(SYSCTL_HANDLER_ARGS); static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS); static int sysctl_temperature(SYSCTL_HANDLER_ARGS); static int sysctl_vdd(SYSCTL_HANDLER_ARGS); static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS); static int sysctl_loadavg(SYSCTL_HANDLER_ARGS); static int sysctl_cctrl(SYSCTL_HANDLER_ARGS); static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS); static int sysctl_cim_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS); static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS); static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS); static int sysctl_devlog(SYSCTL_HANDLER_ARGS); static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS); static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS); static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS); static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS); static int sysctl_meminfo(SYSCTL_HANDLER_ARGS); static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS); static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS); static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS); static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS); static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tids(SYSCTL_HANDLER_ARGS); static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS); static int sysctl_tp_la(SYSCTL_HANDLER_ARGS); static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS); static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS); static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS); static int sysctl_cpus(SYSCTL_HANDLER_ARGS); static int sysctl_reset(SYSCTL_HANDLER_ARGS); #ifdef TCP_OFFLOAD static int sysctl_tls(SYSCTL_HANDLER_ARGS); static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS); static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS); static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS); static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS); static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS); #endif static int get_sge_context(struct adapter *, struct t4_sge_context *); static int load_fw(struct adapter *, struct t4_data *); static 
int load_cfg(struct adapter *, struct t4_data *); static int load_boot(struct adapter *, struct t4_bootrom *); static int load_bootcfg(struct adapter *, struct t4_data *); static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *); static void free_offload_policy(struct t4_offload_policy *); static int set_offload_policy(struct adapter *, struct t4_offload_policy *); static int read_card_mem(struct adapter *, int, struct t4_mem_range *); static int read_i2c(struct adapter *, struct t4_i2c_data *); static int clear_stats(struct adapter *, u_int); static int hold_clip_addr(struct adapter *, struct t4_clip_addr *); static int release_clip_addr(struct adapter *, struct t4_clip_addr *); static inline int stop_adapter(struct adapter *); static inline void set_adapter_hwstatus(struct adapter *, const bool); static int stop_lld(struct adapter *); static inline int restart_adapter(struct adapter *); static int restart_lld(struct adapter *); #ifdef TCP_OFFLOAD static int deactivate_all_uld(struct adapter *); static void stop_all_uld(struct adapter *); static void restart_all_uld(struct adapter *); #endif #ifdef KERN_TLS static int ktls_capability(struct adapter *, bool); #endif static int mod_event(module_t, int, void *); static int notify_siblings(device_t, int); static uint64_t vi_get_counter(if_t, ift_counter); static uint64_t cxgbe_get_counter(if_t, ift_counter); static void enable_vxlan_rx(struct adapter *); static void reset_adapter_task(void *, int); static void fatal_error_task(void *, int); static void dump_devlog(struct adapter *); static void dump_cim_regs(struct adapter *); static void dump_cimla(struct adapter *); struct { uint16_t device; char *desc; } t4_pciids[] = { {0xa000, "Chelsio Terminator 4 FPGA"}, {0x4400, "Chelsio T440-dbg"}, {0x4401, "Chelsio T420-CR"}, {0x4402, "Chelsio T422-CR"}, {0x4403, "Chelsio T440-CR"}, {0x4404, "Chelsio T420-BCH"}, {0x4405, "Chelsio T440-BCH"}, {0x4406, "Chelsio T440-CH"}, {0x4407, "Chelsio T420-SO"}, {0x4408, "Chelsio T420-CX"}, {0x4409, "Chelsio T420-BT"}, {0x440a, "Chelsio T404-BT"}, {0x440e, "Chelsio T440-LP-CR"}, }, t5_pciids[] = { {0xb000, "Chelsio Terminator 5 FPGA"}, {0x5400, "Chelsio T580-dbg"}, {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */ {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */ {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */ {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */ {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */ {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */ {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */ {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */ {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */ {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */ {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */ {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */ {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */ {0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */ {0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */ {0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */ {0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */ /* Custom */ {0x5483, "Custom T540-CR"}, {0x5484, "Custom T540-BT"}, }, t6_pciids[] = { {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */ {0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */ {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */ {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */ {0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */ {0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */ {0x6405, "Chelsio T6225-SO-OCP3"}, /* 2 x 10/25G, nomem */ {0x6406, "Chelsio T6225-OCP3"}, /* 2 x 10/25G */ {0x6407, "Chelsio 
T62100-LP-CR"}, /* 2 x 40/50/100G */ {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */ {0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */ {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */ {0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */ {0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */ {0x6414, "Chelsio T62100-SO-OCP3"}, /* 2 x 40/50/100G, nomem */ {0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */ /* Custom */ {0x6480, "Custom T6225-CR"}, {0x6481, "Custom T62100-CR"}, {0x6482, "Custom T6225-CR"}, {0x6483, "Custom T62100-CR"}, {0x6484, "Custom T64100-CR"}, {0x6485, "Custom T6240-SO"}, {0x6486, "Custom T6225-SO-CR"}, {0x6487, "Custom T6225-CR"}, }; #ifdef TCP_OFFLOAD /* * service_iq_fl() has an iq and needs the fl. Offset of fl from the iq should * be exactly the same for both rxq and ofld_rxq. */ CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq)); CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl)); #endif CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE); static int t4_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); uint8_t f = pci_get_function(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); /* Attach only to PF0 of the FPGA */ if (d == 0xa000 && f != 0) return (ENXIO); for (i = 0; i < nitems(t4_pciids); i++) { if (d == t4_pciids[i].device) { device_set_desc(dev, t4_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int t5_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); uint8_t f = pci_get_function(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); /* Attach only to PF0 of the FPGA */ if (d == 0xb000 && f != 0) return (ENXIO); for (i = 0; i < nitems(t5_pciids); i++) { if (d == t5_pciids[i].device) { device_set_desc(dev, t5_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int t6_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); for (i = 0; i < nitems(t6_pciids); i++) { if (d == t6_pciids[i].device) { device_set_desc(dev, t6_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void t5_attribute_workaround(device_t dev) { device_t root_port; uint32_t v; /* * The T5 chips do not properly echo the No Snoop and Relaxed * Ordering attributes when replying to a TLP from a Root * Port. As a workaround, find the parent Root Port and * disable No Snoop and Relaxed Ordering. Note that this * affects all devices under this root port. 
*/ root_port = pci_find_pcie_root_port(dev); if (root_port == NULL) { device_printf(dev, "Unable to find parent root port\n"); return; } v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL, PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2); if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) != 0) device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n", device_get_nameunit(root_port)); } static const struct devnames devnames[] = { { .nexus_name = "t4nex", .ifnet_name = "cxgbe", .vi_ifnet_name = "vcxgbe", .pf03_drv_name = "t4iov", .vf_nexus_name = "t4vf", .vf_ifnet_name = "cxgbev" }, { .nexus_name = "t5nex", .ifnet_name = "cxl", .vi_ifnet_name = "vcxl", .pf03_drv_name = "t5iov", .vf_nexus_name = "t5vf", .vf_ifnet_name = "cxlv" }, { .nexus_name = "t6nex", .ifnet_name = "cc", .vi_ifnet_name = "vcc", .pf03_drv_name = "t6iov", .vf_nexus_name = "t6vf", .vf_ifnet_name = "ccv" } }; void t4_init_devnames(struct adapter *sc) { int id; id = chip_id(sc); if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames)) sc->names = &devnames[id - CHELSIO_T4]; else { device_printf(sc->dev, "chip id %d is not supported.\n", id); sc->names = NULL; } } static int t4_ifnet_unit(struct adapter *sc, struct port_info *pi) { const char *parent, *name; long value; int line, unit; line = 0; parent = device_get_nameunit(sc->dev); name = sc->names->ifnet_name; while (resource_find_dev(&line, name, &unit, "at", parent) == 0) { if (resource_long_value(name, unit, "port", &value) == 0 && value == pi->port_id) return (unit); } return (-1); } static void t4_calibration(void *arg) { struct adapter *sc; struct clock_sync *cur, *nex; uint64_t hw; sbintime_t sbt; int next_up; sc = (struct adapter *)arg; KASSERT((hw_off_limits(sc) == 0), ("hw_off_limits at t4_calibration")); hw = t4_read_reg64(sc, A_SGE_TIMESTAMP_LO); sbt = sbinuptime(); cur = &sc->cal_info[sc->cal_current]; next_up = (sc->cal_current + 1) % CNT_CAL_INFO; nex = &sc->cal_info[next_up]; if (__predict_false(sc->cal_count == 0)) { /* First time in, just get the values in */ cur->hw_cur = hw; cur->sbt_cur = sbt; sc->cal_count++; goto done; } if (cur->hw_cur == hw) { /* The clock is not advancing? */ sc->cal_count = 0; atomic_store_rel_int(&cur->gen, 0); goto done; } seqc_write_begin(&nex->gen); nex->hw_prev = cur->hw_cur; nex->sbt_prev = cur->sbt_cur; nex->hw_cur = hw; nex->sbt_cur = sbt; seqc_write_end(&nex->gen); sc->cal_current = next_up; done: callout_reset_sbt_curcpu(&sc->cal_callout, SBT_1S, 0, t4_calibration, sc, C_DIRECT_EXEC); } static void t4_calibration_start(struct adapter *sc) { /* * Here if we have not done a calibration * then do so otherwise start the appropriate * timer. 
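 *
 * t4_calibration() above publishes each (hardware timestamp, sbinuptime)
 * sample pair under a seqc generation counter so that readers never need a
 * lock.  A consumer of cal_info would follow the usual seqc read-side
 * protocol, roughly (sketch of the generic pattern, not the driver's actual
 * consumer):
 *
 *	seqc_t gen;
 *
 *	do {
 *		gen = seqc_read(&cal->gen);
 *		hw_cur   = cal->hw_cur;
 *		sbt_cur  = cal->sbt_cur;
 *		hw_prev  = cal->hw_prev;
 *		sbt_prev = cal->sbt_prev;
 *	} while (!seqc_consistent(&cal->gen, gen));
 *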
*/ int i; for (i = 0; i < CNT_CAL_INFO; i++) { sc->cal_info[i].gen = 0; } sc->cal_current = 0; sc->cal_count = 0; sc->cal_gen = 0; t4_calibration(sc); } static int t4_attach(device_t dev) { struct adapter *sc; int rc = 0, i, j, rqidx, tqidx, nports; struct make_dev_args mda; struct intrs_and_queues iaq; struct sge *s; uint32_t *buf; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) int ofld_tqidx; #endif #ifdef TCP_OFFLOAD int ofld_rqidx; #endif #ifdef DEV_NETMAP int nm_rqidx, nm_tqidx; #endif int num_vis; sc = device_get_softc(dev); sc->dev = dev; sysctl_ctx_init(&sc->ctx); TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags); if ((pci_get_device(dev) & 0xff00) == 0x5400) t5_attribute_workaround(dev); pci_enable_busmaster(dev); if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { uint32_t v; pci_set_max_read_req(dev, 4096); v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2); sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5); if (pcie_relaxed_ordering == 0 && (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) { v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE; pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); } else if (pcie_relaxed_ordering == 1 && (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) { v |= PCIEM_CTL_RELAXED_ORD_ENABLE; pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); } } sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS); sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL); sc->traceq = -1; mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF); snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer", device_get_nameunit(dev)); snprintf(sc->lockname, sizeof(sc->lockname), "%s", device_get_nameunit(dev)); mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); t4_add_adapter(sc); mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF); TAILQ_INIT(&sc->sfl); callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0); mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF); sc->policy = NULL; rw_init(&sc->policy_lock, "connection offload policy"); callout_init(&sc->ktls_tick, 1); callout_init(&sc->cal_callout, 1); refcount_init(&sc->vxlan_refcount, 0); TASK_INIT(&sc->reset_task, 0, reset_adapter_task, sc); TASK_INIT(&sc->fatal_error_task, 0, fatal_error_task, sc); sc->ctrlq_oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues"); sc->fwq_oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "fwq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue"); rc = t4_map_bars_0_and_4(sc); if (rc != 0) goto done; /* error message displayed already */ memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); /* Prepare the adapter for operation. */ buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK); rc = -t4_prep_adapter(sc, buf); free(buf, M_CXGBE); if (rc != 0) { device_printf(dev, "failed to prepare adapter: %d.\n", rc); goto done; } /* * This is the real PF# to which we're attaching. Works from within PCI * passthrough environments too, where pci_get_function() could return a * different PF# depending on the passthrough configuration. We need to * use the real PF# in all our communication with the firmware. */ j = t4_read_reg(sc, A_PL_WHOAMI); sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j); sc->mbox = sc->pf; t4_init_devnames(sc); if (sc->names == NULL) { rc = ENOTSUP; goto done; /* error message displayed already */ } /* * Do this really early, with the memory windows set up even before the * character device. 
The userland tool's register i/o and mem read * will work even in "recovery mode". */ setup_memwin(sc); if (t4_init_devlog_params(sc, 0) == 0) fixup_devlog_params(sc); make_dev_args_init(&mda); mda.mda_devsw = &t4_cdevsw; mda.mda_uid = UID_ROOT; mda.mda_gid = GID_WHEEL; mda.mda_mode = 0600; mda.mda_si_drv1 = sc; rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev)); if (rc != 0) device_printf(dev, "failed to create nexus char device: %d.\n", rc); /* Go no further if recovery mode has been requested. */ if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) { device_printf(dev, "recovery mode.\n"); goto done; } #if defined(__i386__) if ((cpu_feature & CPUID_CX8) == 0) { device_printf(dev, "64 bit atomics not available.\n"); rc = ENOTSUP; goto done; } #endif /* Contact the firmware and try to become the master driver. */ rc = contact_firmware(sc); if (rc != 0) goto done; /* error message displayed already */ MPASS(sc->flags & FW_OK); rc = get_params__pre_init(sc); if (rc != 0) goto done; /* error message displayed already */ if (sc->flags & MASTER_PF) { rc = partition_resources(sc); if (rc != 0) goto done; /* error message displayed already */ } rc = get_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = set_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_map_bar_2(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_adj_doorbells(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_create_dma_tag(sc); if (rc != 0) goto done; /* error message displayed already */ /* * First pass over all the ports - allocate VIs and initialize some * basic parameters like mac address, port type, etc. */ for_each_port(sc, i) { struct port_info *pi; pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK); sc->port[i] = pi; /* These must be set before t4_port_init */ pi->adapter = sc; pi->port_id = i; /* * XXX: vi[0] is special so we can't delay this allocation until * pi->nvi's final value is known. */ pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE, M_ZERO | M_WAITOK); /* * Allocate the "main" VI and initialize parameters * like mac addr. */ rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); if (rc != 0) { device_printf(dev, "unable to initialize port %d: %d\n", i, rc); free(pi->vi, M_CXGBE); free(pi, M_CXGBE); sc->port[i] = NULL; goto done; } if (is_bt(pi->port_type)) setbit(&sc->bt_map, pi->tx_chan); else MPASS(!isset(&sc->bt_map, pi->tx_chan)); snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", device_get_nameunit(dev), i); mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); sc->chan_map[pi->tx_chan] = i; /* * The MPS counter for FCS errors doesn't work correctly on the * T6 so we use the MAC counter here. Which MAC is in use * depends on the link settings which will be known when the * link comes up. */ if (is_t6(sc)) pi->fcs_reg = -1; else { pi->fcs_reg = t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L); } pi->fcs_base = 0; /* All VIs on this port share this media. 
*/ ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change, cxgbe_media_status); PORT_LOCK(pi); init_link_config(pi); fixup_link_config(pi); build_medialist(pi); if (fixed_ifmedia(pi)) pi->flags |= FIXED_IFMEDIA; PORT_UNLOCK(pi); pi->dev = device_add_child(dev, sc->names->ifnet_name, t4_ifnet_unit(sc, pi)); if (pi->dev == NULL) { device_printf(dev, "failed to add device for port %d.\n", i); rc = ENXIO; goto done; } pi->vi[0].dev = pi->dev; device_set_softc(pi->dev, pi); } /* * Interrupt type, # of interrupts, # of rx/tx queues, etc. */ nports = sc->params.nports; rc = cfg_itype_and_nqueues(sc, &iaq); if (rc != 0) goto done; /* error message displayed already */ num_vis = iaq.num_vis; sc->intr_type = iaq.intr_type; sc->intr_count = iaq.nirq; s = &sc->sge; s->nrxq = nports * iaq.nrxq; s->ntxq = nports * iaq.ntxq; if (num_vis > 1) { s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi; s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi; } s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ s->neq += nports; /* ctrl queues: 1 per port */ s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ #if defined(TCP_OFFLOAD) || defined(RATELIMIT) if (is_offload(sc) || is_ethoffload(sc)) { s->nofldtxq = nports * iaq.nofldtxq; if (num_vis > 1) s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi; s->neq += s->nofldtxq; s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq), M_CXGBE, M_ZERO | M_WAITOK); } #endif #ifdef TCP_OFFLOAD if (is_offload(sc)) { s->nofldrxq = nports * iaq.nofldrxq; if (num_vis > 1) s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi; s->neq += s->nofldrxq; /* free list */ s->niq += s->nofldrxq; s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq), M_CXGBE, M_ZERO | M_WAITOK); } #endif #ifdef DEV_NETMAP s->nnmrxq = 0; s->nnmtxq = 0; if (t4_native_netmap & NN_MAIN_VI) { s->nnmrxq += nports * iaq.nnmrxq; s->nnmtxq += nports * iaq.nnmtxq; } if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) { s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi; s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi; } s->neq += s->nnmtxq + s->nnmrxq; s->niq += s->nnmrxq; s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq), M_CXGBE, M_ZERO | M_WAITOK); s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq), M_CXGBE, M_ZERO | M_WAITOK); #endif MPASS(s->niq <= s->iqmap_sz); MPASS(s->neq <= s->eqmap_sz); s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE, M_ZERO | M_WAITOK); s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, M_ZERO | M_WAITOK); s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, M_ZERO | M_WAITOK); s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE, M_ZERO | M_WAITOK); s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE, M_ZERO | M_WAITOK); sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, M_ZERO | M_WAITOK); t4_init_l2t(sc, M_WAITOK); t4_init_smt(sc, M_WAITOK); t4_init_tx_sched(sc); t4_init_atid_table(sc); #ifdef RATELIMIT t4_init_etid_table(sc); #endif #ifdef INET6 t4_init_clip_table(sc); #endif if (sc->vres.key.size != 0) sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start, sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK); /* * Second pass over the ports. This time we know the number of rx and * tx queues that each port should get. 
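 *
 * As a worked example of the queue accounting in the first pass above
 * (hypothetical numbers): with nports = 2, num_vis = 2, iaq.nrxq = 8,
 * iaq.ntxq = 16, iaq.nrxq_vi = iaq.ntxq_vi = 1, and no offload or netmap
 * queues:
 *
 *	s->nrxq = 2 * 8  + 2 * 1 * 1 = 18
 *	s->ntxq = 2 * 16 + 2 * 1 * 1 = 34
 *	s->neq  = 34 + 18 + 2        = 54	// each rx free list is an eq,
 *						// plus one ctrl queue per port
 *	s->niq  = 18 + 1             = 19	// plus the firmware event queue
 *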
*/ rqidx = tqidx = 0; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) ofld_tqidx = 0; #endif #ifdef TCP_OFFLOAD ofld_rqidx = 0; #endif #ifdef DEV_NETMAP nm_rqidx = nm_tqidx = 0; #endif for_each_port(sc, i) { struct port_info *pi = sc->port[i]; struct vi_info *vi; if (pi == NULL) continue; pi->nvi = num_vis; for_each_vi(pi, j, vi) { vi->pi = pi; vi->adapter = sc; vi->first_intr = -1; vi->qsize_rxq = t4_qsize_rxq; vi->qsize_txq = t4_qsize_txq; vi->first_rxq = rqidx; vi->first_txq = tqidx; vi->tmr_idx = t4_tmr_idx; vi->pktc_idx = t4_pktc_idx; vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi; vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi; rqidx += vi->nrxq; tqidx += vi->ntxq; if (j == 0 && vi->ntxq > 1) vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0; else vi->rsrv_noflowq = 0; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) vi->first_ofld_txq = ofld_tqidx; vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi; ofld_tqidx += vi->nofldtxq; #endif #ifdef TCP_OFFLOAD vi->ofld_tmr_idx = t4_tmr_idx_ofld; vi->ofld_pktc_idx = t4_pktc_idx_ofld; vi->first_ofld_rxq = ofld_rqidx; vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi; ofld_rqidx += vi->nofldrxq; #endif #ifdef DEV_NETMAP vi->first_nm_rxq = nm_rqidx; vi->first_nm_txq = nm_tqidx; if (j == 0) { vi->nnmrxq = iaq.nnmrxq; vi->nnmtxq = iaq.nnmtxq; } else { vi->nnmrxq = iaq.nnmrxq_vi; vi->nnmtxq = iaq.nnmtxq_vi; } nm_rqidx += vi->nnmrxq; nm_tqidx += vi->nnmtxq; #endif } } rc = t4_setup_intr_handlers(sc); if (rc != 0) { device_printf(dev, "failed to setup interrupt handlers: %d\n", rc); goto done; } bus_identify_children(dev); /* * Ensure thread-safe mailbox access (in debug builds). * * So far this was the only thread accessing the mailbox but various * ifnets and sysctls are about to be created and their handlers/ioctls * will access the mailbox from different threads. */ sc->flags |= CHK_MBOX_ACCESS; bus_attach_children(dev); t4_calibration_start(sc); device_printf(dev, "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n", sc->params.pci.speed, sc->params.pci.width, sc->params.nports, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq); t4_set_desc(sc); notify_siblings(dev, 0); done: if (rc != 0 && sc->cdev) { /* cdev was created and so cxgbetool works; recover that way. 
*/ device_printf(dev, "error during attach, adapter is now in recovery mode.\n"); rc = 0; } if (rc != 0) t4_detach_common(dev); else t4_sysctls(sc); return (rc); } static int t4_child_location(device_t bus, device_t dev, struct sbuf *sb) { struct adapter *sc; struct port_info *pi; int i; sc = device_get_softc(bus); for_each_port(sc, i) { pi = sc->port[i]; if (pi != NULL && pi->dev == dev) { sbuf_printf(sb, "port=%d", pi->port_id); break; } } return (0); } static int t4_ready(device_t dev) { struct adapter *sc; sc = device_get_softc(dev); if (sc->flags & FW_OK) return (0); return (ENXIO); } static int t4_read_port_device(device_t dev, int port, device_t *child) { struct adapter *sc; struct port_info *pi; sc = device_get_softc(dev); if (port < 0 || port >= MAX_NPORTS) return (EINVAL); pi = sc->port[port]; if (pi == NULL || pi->dev == NULL) return (ENXIO); *child = pi->dev; return (0); } static int notify_siblings(device_t dev, int detaching) { device_t sibling; int error, i; error = 0; for (i = 0; i < PCI_FUNCMAX; i++) { if (i == pci_get_function(dev)) continue; sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), i); if (sibling == NULL || !device_is_attached(sibling)) continue; if (detaching) error = T4_DETACH_CHILD(sibling); else (void)T4_ATTACH_CHILD(sibling); if (error) break; } return (error); } /* * Idempotent */ static int t4_detach(device_t dev) { int rc; rc = notify_siblings(dev, 1); if (rc) { device_printf(dev, "failed to detach sibling devices: %d\n", rc); return (rc); } return (t4_detach_common(dev)); } int t4_detach_common(device_t dev) { struct adapter *sc; struct port_info *pi; int i, rc; sc = device_get_softc(dev); #ifdef TCP_OFFLOAD rc = deactivate_all_uld(sc); if (rc) { device_printf(dev, "failed to detach upper layer drivers: %d\n", rc); return (rc); } #endif if (sc->cdev) { destroy_dev(sc->cdev); sc->cdev = NULL; } sx_xlock(&t4_list_lock); SLIST_REMOVE(&t4_list, sc, adapter, link); sx_xunlock(&t4_list_lock); sc->flags &= ~CHK_MBOX_ACCESS; if (sc->flags & FULL_INIT_DONE) { if (!(sc->flags & IS_VF)) t4_intr_disable(sc); } if (device_is_attached(dev)) { rc = bus_detach_children(dev); if (rc) { device_printf(dev, "failed to detach child devices: %d\n", rc); return (rc); } } for (i = 0; i < sc->intr_count; i++) t4_free_irq(sc, &sc->irq[i]); if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) t4_free_tx_sched(sc); for (i = 0; i < MAX_NPORTS; i++) { pi = sc->port[i]; if (pi) { t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid); - if (pi->dev) - device_delete_child(dev, pi->dev); mtx_destroy(&pi->pi_lock); free(pi->vi, M_CXGBE); free(pi, M_CXGBE); } } callout_stop(&sc->cal_callout); callout_drain(&sc->cal_callout); device_delete_children(dev); sysctl_ctx_free(&sc->ctx); adapter_full_uninit(sc); if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) t4_fw_bye(sc, sc->mbox); if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) pci_release_msi(dev); if (sc->regs_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, sc->regs_res); if (sc->udbs_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid, sc->udbs_res); if (sc->msix_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, sc->msix_res); if (sc->l2t) t4_free_l2t(sc); if (sc->smt) t4_free_smt(sc->smt); t4_free_atid_table(sc); #ifdef RATELIMIT t4_free_etid_table(sc); #endif if (sc->key_map) vmem_destroy(sc->key_map); #ifdef INET6 t4_destroy_clip_table(sc); #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) free(sc->sge.ofld_txq, M_CXGBE); #endif #ifdef TCP_OFFLOAD free(sc->sge.ofld_rxq, 
M_CXGBE); #endif #ifdef DEV_NETMAP free(sc->sge.nm_rxq, M_CXGBE); free(sc->sge.nm_txq, M_CXGBE); #endif free(sc->irq, M_CXGBE); free(sc->sge.rxq, M_CXGBE); free(sc->sge.txq, M_CXGBE); free(sc->sge.ctrlq, M_CXGBE); free(sc->sge.iqmap, M_CXGBE); free(sc->sge.eqmap, M_CXGBE); free(sc->tids.ftid_tab, M_CXGBE); free(sc->tids.hpftid_tab, M_CXGBE); free_hftid_hash(&sc->tids); free(sc->tids.tid_tab, M_CXGBE); t4_destroy_dma_tag(sc); callout_drain(&sc->ktls_tick); callout_drain(&sc->sfl_callout); if (mtx_initialized(&sc->tids.ftid_lock)) { mtx_destroy(&sc->tids.ftid_lock); cv_destroy(&sc->tids.ftid_cv); } if (mtx_initialized(&sc->tids.atid_lock)) mtx_destroy(&sc->tids.atid_lock); if (mtx_initialized(&sc->ifp_lock)) mtx_destroy(&sc->ifp_lock); if (rw_initialized(&sc->policy_lock)) { rw_destroy(&sc->policy_lock); #ifdef TCP_OFFLOAD if (sc->policy != NULL) free_offload_policy(sc->policy); #endif } for (i = 0; i < NUM_MEMWIN; i++) { struct memwin *mw = &sc->memwin[i]; if (rw_initialized(&mw->mw_lock)) rw_destroy(&mw->mw_lock); } mtx_destroy(&sc->sfl_lock); mtx_destroy(&sc->reg_lock); mtx_destroy(&sc->sc_lock); bzero(sc, sizeof(*sc)); return (0); } static inline int stop_adapter(struct adapter *sc) { struct port_info *pi; int i; if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED))) { CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x, EALREADY\n", __func__, curthread, sc->flags, sc->error_flags); return (EALREADY); } CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x\n", __func__, curthread, sc->flags, sc->error_flags); t4_shutdown_adapter(sc); for_each_port(sc, i) { pi = sc->port[i]; PORT_LOCK(pi); if (pi->up_vis > 0 && pi->link_cfg.link_ok) { /* * t4_shutdown_adapter has already shut down all the * PHYs but it also disables interrupts and DMA so there * won't be a link interrupt. Update the state manually * if the link was up previously and inform the kernel. */ pi->link_cfg.link_ok = false; t4_os_link_changed(pi); } PORT_UNLOCK(pi); } return (0); } static inline int restart_adapter(struct adapter *sc) { uint32_t val; if (!atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_STOPPED))) { CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x, EALREADY\n", __func__, curthread, sc->flags, sc->error_flags); return (EALREADY); } CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x\n", __func__, curthread, sc->flags, sc->error_flags); MPASS(hw_off_limits(sc)); MPASS((sc->flags & FW_OK) == 0); MPASS((sc->flags & MASTER_PF) == 0); MPASS(sc->reset_thread == NULL); /* * The adapter is supposed to be back on PCIE with its config space and * BARs restored to their state before reset. Register access via * t4_read_reg BAR0 should just work. */ sc->reset_thread = curthread; val = t4_read_reg(sc, A_PL_WHOAMI); if (val == 0xffffffff || val == 0xeeeeeeee) { CH_ERR(sc, "%s: device registers not readable.\n", __func__); sc->reset_thread = NULL; atomic_set_int(&sc->error_flags, ADAP_STOPPED); return (ENXIO); } atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR); atomic_add_int(&sc->incarnation, 1); atomic_add_int(&sc->num_resets, 1); return (0); } static inline void set_adapter_hwstatus(struct adapter *sc, const bool usable) { if (usable) { /* Must be marked reusable by the designated thread. */ ASSERT_SYNCHRONIZED_OP(sc); MPASS(sc->reset_thread == curthread); mtx_lock(&sc->reg_lock); atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS); mtx_unlock(&sc->reg_lock); } else { /* Mark the adapter totally off limits. 
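While HW_OFF_LIMITS is set, paths that check hw_off_limits() back off (typically with ENXIO) instead of touching registers; restart_lld() calls set_adapter_hwstatus(sc, true) once the hardware is usable again.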
*/ begin_synchronized_op(sc, NULL, SLEEP_OK, "t4hwsts"); mtx_lock(&sc->reg_lock); atomic_set_int(&sc->error_flags, HW_OFF_LIMITS); mtx_unlock(&sc->reg_lock); sc->flags &= ~(FW_OK | MASTER_PF); sc->reset_thread = NULL; end_synchronized_op(sc, 0); } } static int stop_lld(struct adapter *sc) { struct port_info *pi; struct vi_info *vi; if_t ifp; struct sge_rxq *rxq; struct sge_txq *txq; struct sge_wrq *wrq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct sge_ofld_txq *ofld_txq; #endif int rc, i, j, k; /* * XXX: Can there be a synch_op in progress that will hang because * hardware has been stopped? We'll hang too and the solution will be * to use a version of begin_synch_op that wakes up existing synch_op * with errors. Maybe stop_adapter should do this wakeup? * * I don't think any synch_op could get stranded waiting for DMA or * interrupt so I think we're okay here. Remove this comment block * after testing. */ rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4slld"); if (rc != 0) return (ENXIO); /* Quiesce all activity. */ for_each_port(sc, i) { pi = sc->port[i]; pi->vxlan_tcam_entry = false; for_each_vi(pi, j, vi) { vi->xact_addr_filt = -1; mtx_lock(&vi->tick_mtx); vi->flags |= VI_SKIP_STATS; mtx_unlock(&vi->tick_mtx); if (!(vi->flags & VI_INIT_DONE)) continue; ifp = vi->ifp; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { mtx_lock(&vi->tick_mtx); callout_stop(&vi->tick); mtx_unlock(&vi->tick_mtx); callout_drain(&vi->tick); } /* * Note that the HW is not available. */ for_each_txq(vi, k, txq) { TXQ_LOCK(txq); txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED); TXQ_UNLOCK(txq); } #if defined(TCP_OFFLOAD) || defined(RATELIMIT) for_each_ofld_txq(vi, k, ofld_txq) { TXQ_LOCK(&ofld_txq->wrq); ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED; TXQ_UNLOCK(&ofld_txq->wrq); } #endif for_each_rxq(vi, k, rxq) { rxq->iq.flags &= ~IQ_HW_ALLOCATED; } #if defined(TCP_OFFLOAD) for_each_ofld_rxq(vi, k, ofld_rxq) { ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED; } #endif quiesce_vi(vi); } if (sc->flags & FULL_INIT_DONE) { /* Control queue */ wrq = &sc->sge.ctrlq[i]; TXQ_LOCK(wrq); wrq->eq.flags &= ~EQ_HW_ALLOCATED; TXQ_UNLOCK(wrq); quiesce_wrq(wrq); } if (pi->flags & HAS_TRACEQ) { pi->flags &= ~HAS_TRACEQ; sc->traceq = -1; sc->tracer_valid = 0; sc->tracer_enabled = 0; } } if (sc->flags & FULL_INIT_DONE) { /* Firmware event queue */ sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED; quiesce_iq_fl(sc, &sc->sge.fwq, NULL); } /* Stop calibration */ callout_stop(&sc->cal_callout); callout_drain(&sc->cal_callout); if (t4_clock_gate_on_suspend) { t4_set_reg_field(sc, A_PMU_PART_CG_PWRMODE, F_MA_PART_CGEN | F_LE_PART_CGEN | F_EDC1_PART_CGEN | F_EDC0_PART_CGEN | F_TP_PART_CGEN | F_PDP_PART_CGEN | F_SGE_PART_CGEN, 0); } end_synchronized_op(sc, 0); stop_atid_allocator(sc); t4_stop_l2t(sc); return (rc); } int suspend_adapter(struct adapter *sc) { stop_adapter(sc); stop_lld(sc); #ifdef TCP_OFFLOAD stop_all_uld(sc); #endif set_adapter_hwstatus(sc, false); return (0); } static int t4_suspend(device_t dev) { struct adapter *sc = device_get_softc(dev); int rc; CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread); rc = suspend_adapter(sc); CH_ALERT(sc, "%s end (thread %p).\n", __func__, curthread); return (rc); } struct adapter_pre_reset_state { u_int flags; uint16_t nbmcaps; uint16_t linkcaps; uint16_t switchcaps; uint16_t niccaps; uint16_t toecaps; uint16_t rdmacaps; uint16_t cryptocaps; uint16_t iscsicaps; uint16_t fcoecaps; u_int cfcsum; char cfg_file[32]; struct adapter_params params; 
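/*
 * The virtual resource ranges, TID layout, and SGE geometry below must come
 * back unchanged after a reset; compare_caps_and_params() checks them against
 * the post-reset values.
 */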
struct t4_virt_res vres; struct tid_info tids; struct sge sge; int rawf_base; int nrawf; }; static void save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o) { ASSERT_SYNCHRONIZED_OP(sc); o->flags = sc->flags; o->nbmcaps = sc->nbmcaps; o->linkcaps = sc->linkcaps; o->switchcaps = sc->switchcaps; o->niccaps = sc->niccaps; o->toecaps = sc->toecaps; o->rdmacaps = sc->rdmacaps; o->cryptocaps = sc->cryptocaps; o->iscsicaps = sc->iscsicaps; o->fcoecaps = sc->fcoecaps; o->cfcsum = sc->cfcsum; MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file)); memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file)); o->params = sc->params; o->vres = sc->vres; o->tids = sc->tids; o->sge = sc->sge; o->rawf_base = sc->rawf_base; o->nrawf = sc->nrawf; } static int compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o) { int rc = 0; ASSERT_SYNCHRONIZED_OP(sc); /* Capabilities */ #define COMPARE_CAPS(c) do { \ if (o->c##caps != sc->c##caps) { \ CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \ sc->c##caps); \ rc = EINVAL; \ } \ } while (0) COMPARE_CAPS(nbm); COMPARE_CAPS(link); COMPARE_CAPS(switch); COMPARE_CAPS(nic); COMPARE_CAPS(toe); COMPARE_CAPS(rdma); COMPARE_CAPS(crypto); COMPARE_CAPS(iscsi); COMPARE_CAPS(fcoe); #undef COMPARE_CAPS /* Firmware config file */ if (o->cfcsum != sc->cfcsum) { CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file, o->cfcsum, sc->cfg_file, sc->cfcsum); rc = EINVAL; } #define COMPARE_PARAM(p, name) do { \ if (o->p != sc->p) { \ CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \ rc = EINVAL; \ } \ } while (0) COMPARE_PARAM(sge.iq_start, iq_start); COMPARE_PARAM(sge.eq_start, eq_start); COMPARE_PARAM(tids.ftid_base, ftid_base); COMPARE_PARAM(tids.ftid_end, ftid_end); COMPARE_PARAM(tids.nftids, nftids); COMPARE_PARAM(vres.l2t.start, l2t_start); COMPARE_PARAM(vres.l2t.size, l2t_size); COMPARE_PARAM(sge.iqmap_sz, iqmap_sz); COMPARE_PARAM(sge.eqmap_sz, eqmap_sz); COMPARE_PARAM(tids.tid_base, tid_base); COMPARE_PARAM(tids.hpftid_base, hpftid_base); COMPARE_PARAM(tids.hpftid_end, hpftid_end); COMPARE_PARAM(tids.nhpftids, nhpftids); COMPARE_PARAM(rawf_base, rawf_base); COMPARE_PARAM(nrawf, nrawf); COMPARE_PARAM(params.mps_bg_map, mps_bg_map); COMPARE_PARAM(params.filter2_wr_support, filter2_wr_support); COMPARE_PARAM(params.ulptx_memwrite_dsgl, ulptx_memwrite_dsgl); COMPARE_PARAM(params.fr_nsmr_tpte_wr_support, fr_nsmr_tpte_wr_support); COMPARE_PARAM(params.max_pkts_per_eth_tx_pkts_wr, max_pkts_per_eth_tx_pkts_wr); COMPARE_PARAM(tids.ntids, ntids); COMPARE_PARAM(tids.etid_base, etid_base); COMPARE_PARAM(tids.etid_end, etid_end); COMPARE_PARAM(tids.netids, netids); COMPARE_PARAM(params.eo_wr_cred, eo_wr_cred); COMPARE_PARAM(params.ethoffload, ethoffload); COMPARE_PARAM(tids.natids, natids); COMPARE_PARAM(tids.stid_base, stid_base); COMPARE_PARAM(vres.ddp.start, ddp_start); COMPARE_PARAM(vres.ddp.size, ddp_size); COMPARE_PARAM(params.ofldq_wr_cred, ofldq_wr_cred); COMPARE_PARAM(vres.stag.start, stag_start); COMPARE_PARAM(vres.stag.size, stag_size); COMPARE_PARAM(vres.rq.start, rq_start); COMPARE_PARAM(vres.rq.size, rq_size); COMPARE_PARAM(vres.pbl.start, pbl_start); COMPARE_PARAM(vres.pbl.size, pbl_size); COMPARE_PARAM(vres.qp.start, qp_start); COMPARE_PARAM(vres.qp.size, qp_size); COMPARE_PARAM(vres.cq.start, cq_start); COMPARE_PARAM(vres.cq.size, cq_size); COMPARE_PARAM(vres.ocq.start, ocq_start); COMPARE_PARAM(vres.ocq.size, ocq_size); COMPARE_PARAM(vres.srq.start, srq_start); COMPARE_PARAM(vres.srq.size, srq_size); 
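/*
 * Each COMPARE_PARAM() roughly expands to
 *   if (o->field != sc->field) { CH_ERR(...); rc = EINVAL; }
 * so every mismatch is logged, and any single mismatch is enough to make this
 * function fail and abort restart_lld().
 */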
COMPARE_PARAM(params.max_ordird_qp, max_ordird_qp); COMPARE_PARAM(params.max_ird_adapter, max_ird_adapter); COMPARE_PARAM(vres.iscsi.start, iscsi_start); COMPARE_PARAM(vres.iscsi.size, iscsi_size); COMPARE_PARAM(vres.key.start, key_start); COMPARE_PARAM(vres.key.size, key_size); #undef COMPARE_PARAM return (rc); } static int restart_lld(struct adapter *sc) { struct adapter_pre_reset_state *old_state = NULL; struct port_info *pi; struct vi_info *vi; if_t ifp; struct sge_txq *txq; int rc, i, j, k; rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rlld"); if (rc != 0) return (ENXIO); /* Restore memory window. */ setup_memwin(sc); /* Go no further if recovery mode has been requested. */ if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) { CH_ALERT(sc, "%s: recovery mode during restart.\n", __func__); rc = 0; set_adapter_hwstatus(sc, true); goto done; } old_state = malloc(sizeof(*old_state), M_CXGBE, M_ZERO | M_WAITOK); save_caps_and_params(sc, old_state); /* Reestablish contact with firmware and become the primary PF. */ rc = contact_firmware(sc); if (rc != 0) goto done; /* error message displayed already */ MPASS(sc->flags & FW_OK); if (sc->flags & MASTER_PF) { rc = partition_resources(sc); if (rc != 0) goto done; /* error message displayed already */ } rc = get_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = set_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = compare_caps_and_params(sc, old_state); if (rc != 0) goto done; /* error message displayed already */ for_each_port(sc, i) { pi = sc->port[i]; MPASS(pi != NULL); MPASS(pi->vi != NULL); MPASS(pi->vi[0].dev == pi->dev); rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); if (rc != 0) { CH_ERR(sc, "failed to re-initialize port %d: %d\n", i, rc); goto done; } MPASS(sc->chan_map[pi->tx_chan] == i); PORT_LOCK(pi); fixup_link_config(pi); build_medialist(pi); PORT_UNLOCK(pi); for_each_vi(pi, j, vi) { if (IS_MAIN_VI(vi)) continue; rc = alloc_extra_vi(sc, pi, vi); if (rc != 0) { CH_ERR(vi, "failed to re-allocate extra VI: %d\n", rc); goto done; } } } /* * Interrupts and queues are about to be enabled and other threads will * want to access the hardware too. It is safe to do so. Note that * this thread is still in the middle of a synchronized_op. */ set_adapter_hwstatus(sc, true); if (sc->flags & FULL_INIT_DONE) { rc = adapter_full_init(sc); if (rc != 0) { CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc); goto done; } if (sc->vxlan_refcount > 0) enable_vxlan_rx(sc); for_each_port(sc, i) { pi = sc->port[i]; for_each_vi(pi, j, vi) { mtx_lock(&vi->tick_mtx); vi->flags &= ~VI_SKIP_STATS; mtx_unlock(&vi->tick_mtx); if (!(vi->flags & VI_INIT_DONE)) continue; rc = vi_full_init(vi); if (rc != 0) { CH_ERR(vi, "failed to re-initialize " "interface: %d\n", rc); goto done; } if (sc->traceq < 0 && IS_MAIN_VI(vi)) { sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | V_QUEUENUMBER(sc->traceq)); pi->flags |= HAS_TRACEQ; } ifp = vi->ifp; if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) continue; /* * Note that we do not setup multicast addresses * in the first pass. This ensures that the * unicast DMACs for all VIs on all ports get an * MPS TCAM entry. 
*/ rc = update_mac_settings(ifp, XGMAC_ALL & ~XGMAC_MCADDRS); if (rc != 0) { CH_ERR(vi, "failed to re-configure MAC: %d\n", rc); goto done; } rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); if (rc != 0) { CH_ERR(vi, "failed to re-enable VI: %d\n", rc); goto done; } for_each_txq(vi, k, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_ENABLED; TXQ_UNLOCK(txq); } mtx_lock(&vi->tick_mtx); callout_schedule(&vi->tick, hz); mtx_unlock(&vi->tick_mtx); } PORT_LOCK(pi); if (pi->up_vis > 0) { t4_update_port_info(pi); fixup_link_config(pi); build_medialist(pi); apply_link_config(pi); if (pi->link_cfg.link_ok) t4_os_link_changed(pi); } PORT_UNLOCK(pi); } /* Now reprogram the L2 multicast addresses. */ for_each_port(sc, i) { pi = sc->port[i]; for_each_vi(pi, j, vi) { if (!(vi->flags & VI_INIT_DONE)) continue; ifp = vi->ifp; if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) continue; rc = update_mac_settings(ifp, XGMAC_MCADDRS); if (rc != 0) { CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc); rc = 0; /* carry on */ } } } } /* Reset all calibration */ t4_calibration_start(sc); done: end_synchronized_op(sc, 0); free(old_state, M_CXGBE); restart_atid_allocator(sc); t4_restart_l2t(sc); return (rc); } int resume_adapter(struct adapter *sc) { restart_adapter(sc); restart_lld(sc); #ifdef TCP_OFFLOAD restart_all_uld(sc); #endif return (0); } static int t4_resume(device_t dev) { struct adapter *sc = device_get_softc(dev); int rc; CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread); rc = resume_adapter(sc); CH_ALERT(sc, "%s end (thread %p).\n", __func__, curthread); return (rc); } static int t4_reset_prepare(device_t dev, device_t child) { struct adapter *sc = device_get_softc(dev); CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread); return (0); } static int t4_reset_post(device_t dev, device_t child) { struct adapter *sc = device_get_softc(dev); CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread); return (0); } static int reset_adapter_with_pci_bus_reset(struct adapter *sc) { int rc; mtx_lock(&Giant); rc = BUS_RESET_CHILD(device_get_parent(sc->dev), sc->dev, 0); mtx_unlock(&Giant); return (rc); } static int reset_adapter_with_pl_rst(struct adapter *sc) { suspend_adapter(sc); /* This is a t4_write_reg without the hw_off_limits check. 
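suspend_adapter() has just marked the hardware off limits, so a normal t4_write_reg() would trip that check; the PL_RST register is written directly with bus_space_write_4() to trigger the chip reset, and resume_adapter() brings everything back up afterwards.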
*/ MPASS(sc->error_flags & HW_OFF_LIMITS); bus_space_write_4(sc->bt, sc->bh, A_PL_RST, F_PIORSTMODE | F_PIORST | F_AUTOPCIEPAUSE); pause("pl_rst", 1 * hz); /* Wait 1s for reset */ resume_adapter(sc); return (0); } static inline int reset_adapter(struct adapter *sc) { if (vm_guest == 0) return (reset_adapter_with_pci_bus_reset(sc)); else return (reset_adapter_with_pl_rst(sc)); } static void reset_adapter_task(void *arg, int pending) { struct adapter *sc = arg; const int flags = sc->flags; const int eflags = sc->error_flags; int rc; if (pending > 1) CH_ALERT(sc, "%s: pending %d\n", __func__, pending); rc = reset_adapter(sc); if (rc != 0) { CH_ERR(sc, "adapter did not reset properly, rc = %d, " "flags 0x%08x -> 0x%08x, err_flags 0x%08x -> 0x%08x.\n", rc, flags, sc->flags, eflags, sc->error_flags); } } static int cxgbe_probe(device_t dev) { struct port_info *pi = device_get_softc(dev); device_set_descf(dev, "port %d", pi->port_id); return (BUS_PROBE_DEFAULT); } #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \ IFCAP_HWRXTSTMP | IFCAP_MEXTPG) #define T4_CAP_ENABLE (T4_CAP) static void cxgbe_vi_attach(device_t dev, struct vi_info *vi) { if_t ifp; struct sbuf *sb; struct sysctl_ctx_list *ctx = &vi->ctx; struct sysctl_oid_list *children; struct pfil_head_args pa; struct adapter *sc = vi->adapter; sysctl_ctx_init(ctx); children = SYSCTL_CHILDREN(device_get_sysctl_tree(vi->dev)); vi->rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC rx queues"); vi->txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC tx queues"); #ifdef DEV_NETMAP vi->nm_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap rx queues"); vi->nm_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queues"); #endif #ifdef TCP_OFFLOAD vi->ofld_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE rx queues"); #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) vi->ofld_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE/ETHOFLD tx queues"); #endif vi->xact_addr_filt = -1; mtx_init(&vi->tick_mtx, "vi tick", NULL, MTX_DEF); callout_init_mtx(&vi->tick, &vi->tick_mtx, 0); if (sc->flags & IS_VF || t4_tx_vm_wr != 0) vi->flags |= TX_USES_VM_WR; /* Allocate an ifnet and set it up */ ifp = if_alloc_dev(IFT_ETHER, dev); vi->ifp = ifp; if_setsoftc(ifp, vi); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setinitfn(ifp, cxgbe_init); if_setioctlfn(ifp, cxgbe_ioctl); if_settransmitfn(ifp, cxgbe_transmit); if_setqflushfn(ifp, cxgbe_qflush); if (vi->pi->nvi > 1 || sc->flags & IS_VF) if_setgetcounterfn(ifp, vi_get_counter); else if_setgetcounterfn(ifp, cxgbe_get_counter); #if defined(KERN_TLS) || defined(RATELIMIT) if_setsndtagallocfn(ifp, cxgbe_snd_tag_alloc); #endif #ifdef RATELIMIT if_setratelimitqueryfn(ifp, cxgbe_ratelimit_query); #endif if_setcapabilities(ifp, T4_CAP); if_setcapenable(ifp, T4_CAP_ENABLE); if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (chip_id(sc) >= CHELSIO_T6) { if_setcapabilitiesbit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0); 
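/*
 * On T6+ the inner frames of VXLAN-encapsulated traffic can also be offloaded:
 * the IFCAP_VXLAN_* capabilities set above are paired with the CSUM_INNER_*
 * and CSUM_ENCAP_VXLAN hwassist bits that follow.
 */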
if_setcapenablebit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0); if_sethwassistbits(ifp, CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN, 0); } #ifdef TCP_OFFLOAD if (vi->nofldrxq != 0) if_setcapabilitiesbit(ifp, IFCAP_TOE, 0); #endif #ifdef RATELIMIT if (is_ethoffload(sc) && vi->nofldtxq != 0) { if_setcapabilitiesbit(ifp, IFCAP_TXRTLMT, 0); if_setcapenablebit(ifp, IFCAP_TXRTLMT, 0); } #endif if_sethwtsomax(ifp, IP_MAXPACKET); if (vi->flags & TX_USES_VM_WR) if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_VM_TSO); else if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_TSO); #ifdef RATELIMIT if (is_ethoffload(sc) && vi->nofldtxq != 0) if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_EO_TSO); #endif if_sethwtsomaxsegsize(ifp, 65536); #ifdef KERN_TLS if (is_ktls(sc)) { if_setcapabilitiesbit(ifp, IFCAP_TXTLS, 0); if (sc->flags & KERN_TLS_ON || !is_t6(sc)) if_setcapenablebit(ifp, IFCAP_TXTLS, 0); } #endif ether_ifattach(ifp, vi->hw_addr); #ifdef DEV_NETMAP if (vi->nnmrxq != 0) cxgbe_nm_attach(vi); #endif sb = sbuf_new_auto(); sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); #if defined(TCP_OFFLOAD) || defined(RATELIMIT) switch (if_getcapabilities(ifp) & (IFCAP_TOE | IFCAP_TXRTLMT)) { case IFCAP_TOE: sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq); break; case IFCAP_TOE | IFCAP_TXRTLMT: sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq); break; case IFCAP_TXRTLMT: sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq); break; } #endif #ifdef TCP_OFFLOAD if (if_getcapabilities(ifp) & IFCAP_TOE) sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq); #endif #ifdef DEV_NETMAP if (if_getcapabilities(ifp) & IFCAP_NETMAP) sbuf_printf(sb, "; %d txq, %d rxq (netmap)", vi->nnmtxq, vi->nnmrxq); #endif sbuf_finish(sb); device_printf(dev, "%s\n", sbuf_data(sb)); sbuf_delete(sb); vi_sysctls(vi); pa.pa_version = PFIL_VERSION; pa.pa_flags = PFIL_IN; pa.pa_type = PFIL_TYPE_ETHERNET; pa.pa_headname = if_name(ifp); vi->pfil = pfil_head_register(&pa); } static int cxgbe_attach(device_t dev) { struct port_info *pi = device_get_softc(dev); struct adapter *sc = pi->adapter; struct vi_info *vi; int i; sysctl_ctx_init(&pi->ctx); cxgbe_vi_attach(dev, &pi->vi[0]); for_each_vi(pi, i, vi) { if (i == 0) continue; vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, DEVICE_UNIT_ANY); if (vi->dev == NULL) { device_printf(dev, "failed to add VI %d\n", i); continue; } device_set_softc(vi->dev, vi); } cxgbe_sysctls(pi); bus_attach_children(dev); return (0); } static void cxgbe_vi_detach(struct vi_info *vi) { if_t ifp = vi->ifp; if (vi->pfil != NULL) { pfil_head_unregister(vi->pfil); vi->pfil = NULL; } ether_ifdetach(ifp); /* Let detach proceed even if these fail. */ #ifdef DEV_NETMAP if (if_getcapabilities(ifp) & IFCAP_NETMAP) cxgbe_nm_detach(vi); #endif cxgbe_uninit_synchronized(vi); callout_drain(&vi->tick); mtx_destroy(&vi->tick_mtx); sysctl_ctx_free(&vi->ctx); vi_full_uninit(vi); if_free(vi->ifp); vi->ifp = NULL; } static int cxgbe_detach(device_t dev) { struct port_info *pi = device_get_softc(dev); struct adapter *sc = pi->adapter; int rc; /* Detach the extra VIs first. 
*/ rc = bus_generic_detach(dev); if (rc) return (rc); device_delete_children(dev); sysctl_ctx_free(&pi->ctx); begin_vi_detach(sc, &pi->vi[0]); if (pi->flags & HAS_TRACEQ) { sc->traceq = -1; /* cloner should not create ifnet */ t4_tracer_port_detach(sc); } cxgbe_vi_detach(&pi->vi[0]); ifmedia_removeall(&pi->media); end_vi_detach(sc, &pi->vi[0]); return (0); } static void cxgbe_init(void *arg) { struct vi_info *vi = arg; struct adapter *sc = vi->adapter; if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0) return; cxgbe_init_synchronized(vi); end_synchronized_op(sc, 0); } static int cxgbe_ioctl(if_t ifp, unsigned long cmd, caddr_t data) { int rc = 0, mtu, flags; struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifreq *ifr = (struct ifreq *)data; uint32_t mask; switch (cmd) { case SIOCSIFMTU: mtu = ifr->ifr_mtu; if (mtu < ETHERMIN || mtu > MAX_MTU) return (EINVAL); rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu"); if (rc) return (rc); if_setmtu(ifp, mtu); if (vi->flags & VI_INIT_DONE) { t4_update_fl_bufsize(ifp); if (!hw_off_limits(sc) && if_getdrvflags(ifp) & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_MTU); } end_synchronized_op(sc, 0); break; case SIOCSIFFLAGS: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto fail; } if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = vi->if_flags; if ((if_getflags(ifp) ^ flags) & (IFF_PROMISC | IFF_ALLMULTI)) { rc = update_mac_settings(ifp, XGMAC_PROMISC | XGMAC_ALLMULTI); } } else { rc = cxgbe_init_synchronized(vi); } vi->if_flags = if_getflags(ifp); } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { rc = cxgbe_uninit_synchronized(vi); } end_synchronized_op(sc, 0); break; case SIOCADDMULTI: case SIOCDELMULTI: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi"); if (rc) return (rc); if (!hw_off_limits(sc) && if_getdrvflags(ifp) & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_MCADDRS); end_synchronized_op(sc, 0); break; case SIOCSIFCAP: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap"); if (rc) return (rc); mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); if (mask & IFCAP_TXCSUM) { if_togglecapenable(ifp, IFCAP_TXCSUM); if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO4 & if_getcapenable(ifp) && !(IFCAP_TXCSUM & if_getcapenable(ifp))) { mask &= ~IFCAP_TSO4; if_setcapenablebit(ifp, 0, IFCAP_TSO4); if_printf(ifp, "tso4 disabled due to -txcsum.\n"); } } if (mask & IFCAP_TXCSUM_IPV6) { if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6); if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (IFCAP_TSO6 & if_getcapenable(ifp) && !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) { mask &= ~IFCAP_TSO6; if_setcapenablebit(ifp, 0, IFCAP_TSO6); if_printf(ifp, "tso6 disabled due to -txcsum6.\n"); } } if (mask & IFCAP_RXCSUM) if_togglecapenable(ifp, IFCAP_RXCSUM); if (mask & IFCAP_RXCSUM_IPV6) if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6); /* * Note that we leave CSUM_TSO alone (it is always set). The * kernel takes both IFCAP_TSOx and CSUM_TSO into account before * sending a TSO request our way, so it's sufficient to toggle * IFCAP_TSOx only. 
*/ if (mask & IFCAP_TSO4) { if (!(IFCAP_TSO4 & if_getcapenable(ifp)) && !(IFCAP_TXCSUM & if_getcapenable(ifp))) { if_printf(ifp, "enable txcsum first.\n"); rc = EAGAIN; goto fail; } if_togglecapenable(ifp, IFCAP_TSO4); } if (mask & IFCAP_TSO6) { if (!(IFCAP_TSO6 & if_getcapenable(ifp)) && !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) { if_printf(ifp, "enable txcsum6 first.\n"); rc = EAGAIN; goto fail; } if_togglecapenable(ifp, IFCAP_TSO6); } if (mask & IFCAP_LRO) { #if defined(INET) || defined(INET6) int i; struct sge_rxq *rxq; if_togglecapenable(ifp, IFCAP_LRO); for_each_rxq(vi, i, rxq) { if (if_getcapenable(ifp) & IFCAP_LRO) rxq->iq.flags |= IQ_LRO_ENABLED; else rxq->iq.flags &= ~IQ_LRO_ENABLED; } #endif } #ifdef TCP_OFFLOAD if (mask & IFCAP_TOE) { int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE; rc = toe_capability(vi, enable); if (rc != 0) goto fail; if_togglecapenable(ifp, mask); } #endif if (mask & IFCAP_VLAN_HWTAGGING) { if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_VLANEX); } if (mask & IFCAP_VLAN_MTU) { if_togglecapenable(ifp, IFCAP_VLAN_MTU); /* Need to find out how to disable auto-mtu-inflation */ } if (mask & IFCAP_VLAN_HWTSO) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); if (mask & IFCAP_VLAN_HWCSUM) if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); #ifdef RATELIMIT if (mask & IFCAP_TXRTLMT) if_togglecapenable(ifp, IFCAP_TXRTLMT); #endif if (mask & IFCAP_HWRXTSTMP) { int i; struct sge_rxq *rxq; if_togglecapenable(ifp, IFCAP_HWRXTSTMP); for_each_rxq(vi, i, rxq) { if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP) rxq->iq.flags |= IQ_RX_TIMESTAMP; else rxq->iq.flags &= ~IQ_RX_TIMESTAMP; } } if (mask & IFCAP_MEXTPG) if_togglecapenable(ifp, IFCAP_MEXTPG); #ifdef KERN_TLS if (mask & IFCAP_TXTLS) { int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TXTLS; rc = ktls_capability(sc, enable); if (rc != 0) goto fail; if_togglecapenable(ifp, mask & IFCAP_TXTLS); } #endif if (mask & IFCAP_VXLAN_HWCSUM) { if_togglecapenable(ifp, IFCAP_VXLAN_HWCSUM); if_togglehwassist(ifp, CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP | CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP); } if (mask & IFCAP_VXLAN_HWTSO) { if_togglecapenable(ifp, IFCAP_VXLAN_HWTSO); if_togglehwassist(ifp, CSUM_INNER_IP6_TSO | CSUM_INNER_IP_TSO); } #ifdef VLAN_CAPABILITIES VLAN_CAPABILITIES(ifp); #endif fail: end_synchronized_op(sc, 0); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: rc = ifmedia_ioctl(ifp, ifr, &pi->media, cmd); break; case SIOCGI2C: { struct ifi2creq i2c; rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (rc != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { rc = EPERM; break; } if (i2c.len > sizeof(i2c.data)) { rc = EINVAL; break; } rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr, i2c.offset, i2c.len, &i2c.data[0]); end_synchronized_op(sc, 0); if (rc == 0) rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } default: rc = ether_ioctl(ifp, cmd, data); } return (rc); } static int cxgbe_transmit(if_t ifp, struct mbuf *m) { struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct adapter *sc; struct sge_txq *txq; void *items[1]; int rc; M_ASSERTPKTHDR(m); MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ #if defined(KERN_TLS) || defined(RATELIMIT) if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) MPASS(m->m_pkthdr.snd_tag->ifp == ifp); 
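/*
 * A packet carrying a send tag (rate limiting or TLS offload) must have had
 * that tag allocated against this ifp; see cxgbe_snd_tag_alloc() below.
 */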
#endif if (__predict_false(pi->link_cfg.link_ok == false)) { m_freem(m); return (ENETDOWN); } rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR); if (__predict_false(rc != 0)) { if (__predict_true(rc == EINPROGRESS)) { /* queued by parse_pkt */ MPASS(m != NULL); return (0); } MPASS(m == NULL); /* was freed already */ atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ return (rc); } /* Select a txq. */ sc = vi->adapter; txq = &sc->sge.txq[vi->first_txq]; if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + vi->rsrv_noflowq); items[0] = m; rc = mp_ring_enqueue(txq->r, items, 1, 256); if (__predict_false(rc != 0)) m_freem(m); return (rc); } static void cxgbe_qflush(if_t ifp) { struct vi_info *vi = if_getsoftc(ifp); struct sge_txq *txq; int i; /* queues do not exist if !VI_INIT_DONE. */ if (vi->flags & VI_INIT_DONE) { for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_QFLUSH; TXQ_UNLOCK(txq); while (!mp_ring_is_idle(txq->r)) { mp_ring_check_drainage(txq->r, 4096); pause("qflush", 1); } TXQ_LOCK(txq); txq->eq.flags &= ~EQ_QFLUSH; TXQ_UNLOCK(txq); } } if_qflush(ifp); } static uint64_t vi_get_counter(if_t ifp, ift_counter c) { struct vi_info *vi = if_getsoftc(ifp); struct fw_vi_stats_vf *s = &vi->stats; mtx_lock(&vi->tick_mtx); vi_refresh_stats(vi); mtx_unlock(&vi->tick_mtx); switch (c) { case IFCOUNTER_IPACKETS: return (s->rx_bcast_frames + s->rx_mcast_frames + s->rx_ucast_frames); case IFCOUNTER_IERRORS: return (s->rx_err_frames); case IFCOUNTER_OPACKETS: return (s->tx_bcast_frames + s->tx_mcast_frames + s->tx_ucast_frames + s->tx_offload_frames); case IFCOUNTER_OERRORS: return (s->tx_drop_frames); case IFCOUNTER_IBYTES: return (s->rx_bcast_bytes + s->rx_mcast_bytes + s->rx_ucast_bytes); case IFCOUNTER_OBYTES: return (s->tx_bcast_bytes + s->tx_mcast_bytes + s->tx_ucast_bytes + s->tx_offload_bytes); case IFCOUNTER_IMCASTS: return (s->rx_mcast_frames); case IFCOUNTER_OMCASTS: return (s->tx_mcast_frames); case IFCOUNTER_OQDROPS: { uint64_t drops; drops = 0; if (vi->flags & VI_INIT_DONE) { int i; struct sge_txq *txq; for_each_txq(vi, i, txq) drops += counter_u64_fetch(txq->r->dropped); } return (drops); } default: return (if_get_counter_default(ifp, c)); } } static uint64_t cxgbe_get_counter(if_t ifp, ift_counter c) { struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct port_stats *s = &pi->stats; mtx_lock(&vi->tick_mtx); cxgbe_refresh_stats(vi); mtx_unlock(&vi->tick_mtx); switch (c) { case IFCOUNTER_IPACKETS: return (s->rx_frames); case IFCOUNTER_IERRORS: return (s->rx_jabber + s->rx_runt + s->rx_too_long + s->rx_fcs_err + s->rx_len_err); case IFCOUNTER_OPACKETS: return (s->tx_frames); case IFCOUNTER_OERRORS: return (s->tx_error_frames); case IFCOUNTER_IBYTES: return (s->rx_octets); case IFCOUNTER_OBYTES: return (s->tx_octets); case IFCOUNTER_IMCASTS: return (s->rx_mcast_frames); case IFCOUNTER_OMCASTS: return (s->tx_mcast_frames); case IFCOUNTER_IQDROPS: return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + s->rx_trunc3 + pi->tnl_cong_drops); case IFCOUNTER_OQDROPS: { uint64_t drops; drops = s->tx_drop; if (vi->flags & VI_INIT_DONE) { int i; struct sge_txq *txq; for_each_txq(vi, i, txq) drops += counter_u64_fetch(txq->r->dropped); } return (drops); } default: return (if_get_counter_default(ifp, c)); } } #if defined(KERN_TLS) || defined(RATELIMIT) static int cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params, struct 
m_snd_tag **pt) { int error; switch (params->hdr.type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: error = cxgbe_rate_tag_alloc(ifp, params, pt); break; #endif #ifdef KERN_TLS case IF_SND_TAG_TYPE_TLS: { struct vi_info *vi = if_getsoftc(ifp); if (is_t6(vi->pi->adapter)) error = t6_tls_tag_alloc(ifp, params, pt); else error = EOPNOTSUPP; break; } #endif default: error = EOPNOTSUPP; } return (error); } #endif /* * The kernel picks a media from the list we had provided but we still validate * the requeste. */ int cxgbe_media_change(if_t ifp) { struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct ifmedia *ifm = &pi->media; struct link_config *lc = &pi->link_cfg; struct adapter *sc = pi->adapter; int rc; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec"); if (rc != 0) return (rc); PORT_LOCK(pi); if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { /* ifconfig .. media autoselect */ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { rc = ENOTSUP; /* AN not supported by transceiver */ goto done; } lc->requested_aneg = AUTONEG_ENABLE; lc->requested_speed = 0; lc->requested_fc |= PAUSE_AUTONEG; } else { lc->requested_aneg = AUTONEG_DISABLE; lc->requested_speed = ifmedia_baudrate(ifm->ifm_media) / 1000000; lc->requested_fc = 0; if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) lc->requested_fc |= PAUSE_RX; if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) lc->requested_fc |= PAUSE_TX; } if (pi->up_vis > 0 && !hw_off_limits(sc)) { fixup_link_config(pi); rc = apply_link_config(pi); } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); return (rc); } /* * Base media word (without ETHER, pause, link active, etc.) for the port at the * given speed. */ static int port_mword(struct port_info *pi, uint32_t speed) { MPASS(speed & M_FW_PORT_CAP32_SPEED); MPASS(powerof2(speed)); switch(pi->port_type) { case FW_PORT_TYPE_BT_SGMII: case FW_PORT_TYPE_BT_XFI: case FW_PORT_TYPE_BT_XAUI: /* BaseT */ switch (speed) { case FW_PORT_CAP32_SPEED_100M: return (IFM_100_T); case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_T); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_T); } break; case FW_PORT_TYPE_KX4: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_KX4); break; case FW_PORT_TYPE_CX4: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_CX4); break; case FW_PORT_TYPE_KX: if (speed == FW_PORT_CAP32_SPEED_1G) return (IFM_1000_KX); break; case FW_PORT_TYPE_KR: case FW_PORT_TYPE_BP_AP: case FW_PORT_TYPE_BP4_AP: case FW_PORT_TYPE_BP40_BA: case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_KR_SFP28: case FW_PORT_TYPE_KR_XLAUI: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_KX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_KR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_KR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_KR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_KR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_KR4); } break; case FW_PORT_TYPE_FIBER_XFI: case FW_PORT_TYPE_FIBER_XAUI: case FW_PORT_TYPE_SFP: case FW_PORT_TYPE_QSFP_10G: case FW_PORT_TYPE_QSA: case FW_PORT_TYPE_QSFP: case FW_PORT_TYPE_CR4_QSFP: case FW_PORT_TYPE_CR_QSFP: case FW_PORT_TYPE_CR2_QSFP: case FW_PORT_TYPE_SFP28: /* Pluggable transceiver */ switch (pi->mod_type) { case FW_PORT_MOD_TYPE_LR: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_LX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_LR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_LR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_LR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_LR2); case FW_PORT_CAP32_SPEED_100G: 
return (IFM_100G_LR4); } break; case FW_PORT_MOD_TYPE_SR: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_SX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_SR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_SR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_SR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_SR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_SR4); } break; case FW_PORT_MOD_TYPE_ER: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_ER); break; case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_CX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_TWINAX); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_CR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_CR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_CR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_CR4); } break; case FW_PORT_MOD_TYPE_LRM: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_LRM); break; case FW_PORT_MOD_TYPE_NA: MPASS(0); /* Not pluggable? */ /* fall throough */ case FW_PORT_MOD_TYPE_ERROR: case FW_PORT_MOD_TYPE_UNKNOWN: case FW_PORT_MOD_TYPE_NOTSUPPORTED: break; case FW_PORT_MOD_TYPE_NONE: return (IFM_NONE); } break; case FW_PORT_TYPE_NONE: return (IFM_NONE); } return (IFM_UNKNOWN); } void cxgbe_media_status(if_t ifp, struct ifmediareq *ifmr) { struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; if (begin_synchronized_op(sc, vi , SLEEP_OK | INTR_OK, "t4med") != 0) return; PORT_LOCK(pi); if (pi->up_vis == 0 && !hw_off_limits(sc)) { /* * If all the interfaces are administratively down the firmware * does not report transceiver changes. Refresh port info here * so that ifconfig displays accurate ifmedia at all times. * This is the only reason we have a synchronized op in this * function. Just PORT_LOCK would have been enough otherwise. */ t4_update_port_info(pi); build_medialist(pi); } /* ifm_status */ ifmr->ifm_status = IFM_AVALID; if (lc->link_ok == false) goto done; ifmr->ifm_status |= IFM_ACTIVE; /* ifm_active */ ifmr->ifm_active = IFM_ETHER | IFM_FDX; ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); if (lc->fc & PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; if (lc->fc & PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed)); done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); } static int vcxgbe_probe(device_t dev) { struct vi_info *vi = device_get_softc(dev); device_set_descf(dev, "port %d vi %td", vi->pi->port_id, vi - vi->pi->vi); return (BUS_PROBE_DEFAULT); } static int alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi) { int func, index, rc; uint32_t param, val; ASSERT_SYNCHRONIZED_OP(sc); index = vi - pi->vi; MPASS(index > 0); /* This function deals with _extra_ VIs only */ KASSERT(index < nitems(vi_mac_funcs), ("%s: VI %s doesn't have a MAC func", __func__, device_get_nameunit(vi->dev))); func = vi_mac_funcs[index]; rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0); if (rc < 0) { CH_ERR(vi, "failed to allocate virtual interface %d" "for port %d: %d\n", index, pi->port_id, -rc); return (-rc); } vi->viid = rc; if (vi->rss_size == 1) { /* * This VI didn't get a slice of the RSS table. Reduce the * number of VIs being created (hw.cxgbe.num_vis) or modify the * configuration file (nvi, rssnvi for this PF) if this is a * problem. 
*/ device_printf(vi->dev, "RSS table not available.\n"); vi->rss_base = 0xffff; return (0); } param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | V_FW_PARAMS_PARAM_YZ(vi->viid); rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc) vi->rss_base = 0xffff; else { MPASS((val >> 16) == vi->rss_size); vi->rss_base = val & 0xffff; } return (0); } static int vcxgbe_attach(device_t dev) { struct vi_info *vi; struct port_info *pi; struct adapter *sc; int rc; vi = device_get_softc(dev); pi = vi->pi; sc = pi->adapter; rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via"); if (rc) return (rc); rc = alloc_extra_vi(sc, pi, vi); end_synchronized_op(sc, 0); if (rc) return (rc); cxgbe_vi_attach(dev, vi); return (0); } static int vcxgbe_detach(device_t dev) { struct vi_info *vi; struct adapter *sc; vi = device_get_softc(dev); sc = vi->adapter; begin_vi_detach(sc, vi); cxgbe_vi_detach(vi); t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); end_vi_detach(sc, vi); return (0); } static struct callout fatal_callout; static struct taskqueue *reset_tq; static void delayed_panic(void *arg) { struct adapter *sc = arg; panic("%s: panic on fatal error", device_get_nameunit(sc->dev)); } static void fatal_error_task(void *arg, int pending) { struct adapter *sc = arg; int rc; if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) { dump_cim_regs(sc); dump_cimla(sc); dump_devlog(sc); } if (t4_reset_on_fatal_err) { CH_ALERT(sc, "resetting adapter after fatal error.\n"); rc = reset_adapter(sc); if (rc == 0 && t4_panic_on_fatal_err) { CH_ALERT(sc, "reset was successful, " "system will NOT panic.\n"); return; } } if (t4_panic_on_fatal_err) { CH_ALERT(sc, "panicking on fatal error (after 30s).\n"); callout_reset(&fatal_callout, hz * 30, delayed_panic, sc); } } void t4_fatal_err(struct adapter *sc, bool fw_error) { const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; stop_adapter(sc); if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR))) return; if (fw_error) { /* * We are here because of a firmware error/timeout and not * because of a hardware interrupt. It is possible (although * not very likely) that an error interrupt was also raised but * this thread ran first and inhibited t4_intr_err. We walk the * main INT_CAUSE registers here to make sure we haven't missed * anything interesting. 
*/ t4_slow_intr_handler(sc, verbose); atomic_set_int(&sc->error_flags, ADAP_CIM_ERR); } t4_report_fw_error(sc); log(LOG_ALERT, "%s: encountered fatal error, adapter stopped (%d).\n", device_get_nameunit(sc->dev), fw_error); taskqueue_enqueue(reset_tq, &sc->fatal_error_task); } void t4_add_adapter(struct adapter *sc) { sx_xlock(&t4_list_lock); SLIST_INSERT_HEAD(&t4_list, sc, link); sx_xunlock(&t4_list_lock); } int t4_map_bars_0_and_4(struct adapter *sc) { sc->regs_rid = PCIR_BAR(0); sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE); if (sc->regs_res == NULL) { device_printf(sc->dev, "cannot map registers.\n"); return (ENXIO); } sc->bt = rman_get_bustag(sc->regs_res); sc->bh = rman_get_bushandle(sc->regs_res); sc->mmio_len = rman_get_size(sc->regs_res); setbit(&sc->doorbells, DOORBELL_KDB); sc->msix_rid = PCIR_BAR(4); sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->msix_rid, RF_ACTIVE); if (sc->msix_res == NULL) { device_printf(sc->dev, "cannot map MSI-X BAR.\n"); return (ENXIO); } return (0); } int t4_map_bar_2(struct adapter *sc) { /* * T4: only iWARP driver uses the userspace doorbells. There is no need * to map it if RDMA is disabled. */ if (is_t4(sc) && sc->rdmacaps == 0) return (0); sc->udbs_rid = PCIR_BAR(2); sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->udbs_rid, RF_ACTIVE); if (sc->udbs_res == NULL) { device_printf(sc->dev, "cannot map doorbell BAR.\n"); return (ENXIO); } sc->udbs_base = rman_get_virtual(sc->udbs_res); if (chip_id(sc) >= CHELSIO_T5) { setbit(&sc->doorbells, DOORBELL_UDB); #if defined(__i386__) || defined(__amd64__) if (t5_write_combine) { int rc, mode; /* * Enable write combining on BAR2. This is the * userspace doorbell BAR and is split into 128B * (UDBS_SEG_SIZE) doorbell regions, each associated * with an egress queue. The first 64B has the doorbell * and the second 64B can be used to submit a tx work * request with an implicit doorbell. */ rc = pmap_change_attr((vm_offset_t)sc->udbs_base, rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); if (rc == 0) { clrbit(&sc->doorbells, DOORBELL_UDB); setbit(&sc->doorbells, DOORBELL_WCWR); setbit(&sc->doorbells, DOORBELL_UDBWC); } else { device_printf(sc->dev, "couldn't enable write combining: %d\n", rc); } mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); t4_write_reg(sc, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) | mode); } #endif } sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0; return (0); } int t4_adj_doorbells(struct adapter *sc) { if ((sc->doorbells & t4_doorbells_allowed) != 0) { sc->doorbells &= t4_doorbells_allowed; return (0); } CH_ERR(sc, "No usable doorbell (available = 0x%x, allowed = 0x%x).\n", sc->doorbells, t4_doorbells_allowed); return (EINVAL); } struct memwin_init { uint32_t base; uint32_t aperture; }; static const struct memwin_init t4_memwin[NUM_MEMWIN] = { { MEMWIN0_BASE, MEMWIN0_APERTURE }, { MEMWIN1_BASE, MEMWIN1_APERTURE }, { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } }; static const struct memwin_init t5_memwin[NUM_MEMWIN] = { { MEMWIN0_BASE, MEMWIN0_APERTURE }, { MEMWIN1_BASE, MEMWIN1_APERTURE }, { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, }; static void setup_memwin(struct adapter *sc) { const struct memwin_init *mw_init; struct memwin *mw; int i; uint32_t bar0; if (is_t4(sc)) { /* * Read low 32b of bar0 indirectly via the hardware backdoor * mechanism. Works from within PCI passthrough environments * too, where rman_get_start() can return a different value. 
We * need to program the T4 memory window decoders with the actual * addresses that will be coming across the PCIe link. */ bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; mw_init = &t4_memwin[0]; } else { /* T5+ use the relative offset inside the PCIe BAR */ bar0 = 0; mw_init = &t5_memwin[0]; } for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { if (!rw_initialized(&mw->mw_lock)) { rw_init(&mw->mw_lock, "memory window access"); mw->mw_base = mw_init->base; mw->mw_aperture = mw_init->aperture; mw->mw_curpos = 0; } t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), (mw->mw_base + bar0) | V_BIR(0) | V_WINDOW(ilog2(mw->mw_aperture) - 10)); rw_wlock(&mw->mw_lock); position_memwin(sc, i, mw->mw_curpos); rw_wunlock(&mw->mw_lock); } /* flush */ t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); } /* * Positions the memory window at the given address in the card's address space. * There are some alignment requirements and the actual position may be at an * address prior to the requested address. mw->mw_curpos always has the actual * position of the window. */ static void position_memwin(struct adapter *sc, int idx, uint32_t addr) { struct memwin *mw; uint32_t pf; uint32_t reg; MPASS(idx >= 0 && idx < NUM_MEMWIN); mw = &sc->memwin[idx]; rw_assert(&mw->mw_lock, RA_WLOCKED); if (is_t4(sc)) { pf = 0; mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ } else { pf = V_PFNUM(sc->pf); mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ } reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); t4_write_reg(sc, reg, mw->mw_curpos | pf); t4_read_reg(sc, reg); /* flush */ } int rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, int len, int rw) { struct memwin *mw; uint32_t mw_end, v; MPASS(idx >= 0 && idx < NUM_MEMWIN); /* Memory can only be accessed in naturally aligned 4 byte units */ if (addr & 3 || len & 3 || len <= 0) return (EINVAL); mw = &sc->memwin[idx]; while (len > 0) { rw_rlock(&mw->mw_lock); mw_end = mw->mw_curpos + mw->mw_aperture; if (addr >= mw_end || addr < mw->mw_curpos) { /* Will need to reposition the window */ if (!rw_try_upgrade(&mw->mw_lock)) { rw_runlock(&mw->mw_lock); rw_wlock(&mw->mw_lock); } rw_assert(&mw->mw_lock, RA_WLOCKED); position_memwin(sc, idx, addr); rw_downgrade(&mw->mw_lock); mw_end = mw->mw_curpos + mw->mw_aperture; } rw_assert(&mw->mw_lock, RA_RLOCKED); while (addr < mw_end && len > 0) { if (rw == 0) { v = t4_read_reg(sc, mw->mw_base + addr - mw->mw_curpos); *val++ = le32toh(v); } else { v = *val++; t4_write_reg(sc, mw->mw_base + addr - mw->mw_curpos, htole32(v)); } addr += 4; len -= 4; } rw_runlock(&mw->mw_lock); } return (0); } CTASSERT(M_TID_COOKIE == M_COOKIE); CTASSERT(MAX_ATIDS <= (M_TID_TID + 1)); static void t4_init_atid_table(struct adapter *sc) { struct tid_info *t; int i; t = &sc->tids; if (t->natids == 0) return; MPASS(t->atid_tab == NULL); t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE, M_ZERO | M_WAITOK); mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF); t->afree = t->atid_tab; t->atids_in_use = 0; t->atid_alloc_stopped = false; for (i = 1; i < t->natids; i++) t->atid_tab[i - 1].next = &t->atid_tab[i]; t->atid_tab[t->natids - 1].next = NULL; } static void t4_free_atid_table(struct adapter *sc) { struct tid_info *t; t = &sc->tids; KASSERT(t->atids_in_use == 0, ("%s: %d atids still in use.", __func__, t->atids_in_use)); if (mtx_initialized(&t->atid_lock)) mtx_destroy(&t->atid_lock); free(t->atid_tab, 
M_CXGBE); t->atid_tab = NULL; } static void stop_atid_allocator(struct adapter *sc) { struct tid_info *t = &sc->tids; mtx_lock(&t->atid_lock); t->atid_alloc_stopped = true; mtx_unlock(&t->atid_lock); } static void restart_atid_allocator(struct adapter *sc) { struct tid_info *t = &sc->tids; mtx_lock(&t->atid_lock); KASSERT(t->atids_in_use == 0, ("%s: %d atids still in use.", __func__, t->atids_in_use)); t->atid_alloc_stopped = false; mtx_unlock(&t->atid_lock); } int alloc_atid(struct adapter *sc, void *ctx) { struct tid_info *t = &sc->tids; int atid = -1; mtx_lock(&t->atid_lock); if (t->afree && !t->atid_alloc_stopped) { union aopen_entry *p = t->afree; atid = p - t->atid_tab; MPASS(atid <= M_TID_TID); t->afree = p->next; p->data = ctx; t->atids_in_use++; } mtx_unlock(&t->atid_lock); return (atid); } void * lookup_atid(struct adapter *sc, int atid) { struct tid_info *t = &sc->tids; return (t->atid_tab[atid].data); } void free_atid(struct adapter *sc, int atid) { struct tid_info *t = &sc->tids; union aopen_entry *p = &t->atid_tab[atid]; mtx_lock(&t->atid_lock); p->next = t->afree; t->afree = p; t->atids_in_use--; mtx_unlock(&t->atid_lock); } static void queue_tid_release(struct adapter *sc, int tid) { CXGBE_UNIMPLEMENTED("deferred tid release"); } void release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq) { struct wrqe *wr; struct cpl_tid_release *req; wr = alloc_wrqe(sizeof(*req), ctrlq); if (wr == NULL) { queue_tid_release(sc, tid); /* defer */ return; } req = wrtod(wr); INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid); t4_wrq_tx(sc, wr); } static int t4_range_cmp(const void *a, const void *b) { return ((const struct t4_range *)a)->start - ((const struct t4_range *)b)->start; } /* * Verify that the memory range specified by the addr/len pair is valid within * the card's address space. */ static int validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len) { struct t4_range mem_ranges[4], *r, *next; uint32_t em, addr_len; int i, n, remaining; /* Memory can only be accessed in naturally aligned 4 byte units */ if (addr & 3 || len & 3 || len == 0) return (EINVAL); /* Enabled memories */ em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); r = &mem_ranges[0]; n = 0; bzero(r, sizeof(mem_ranges)); if (em & F_EDRAM0_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); r->size = G_EDRAM0_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EDRAM0_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (em & F_EDRAM1_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); r->size = G_EDRAM1_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EDRAM1_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (em & F_EXT_MEM_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); r->size = G_EXT_MEM_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EXT_MEM_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); r->size = G_EXT_MEM1_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EXT_MEM1_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } MPASS(n <= nitems(mem_ranges)); if (n > 1) { /* Sort and merge the ranges. */ qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); /* Start from index 0 and examine the next n - 1 entries. 
*/ r = &mem_ranges[0]; for (remaining = n - 1; remaining > 0; remaining--, r++) { MPASS(r->size > 0); /* r is a valid entry. */ next = r + 1; MPASS(next->size > 0); /* and so is the next one. */ while (r->start + r->size >= next->start) { /* Merge the next one into the current entry. */ r->size = max(r->start + r->size, next->start + next->size) - r->start; n--; /* One fewer entry in total. */ if (--remaining == 0) goto done; /* short circuit */ next++; } if (next != r + 1) { /* * Some entries were merged into r and next * points to the first valid entry that couldn't * be merged. */ MPASS(next->size > 0); /* must be valid */ memcpy(r + 1, next, remaining * sizeof(*r)); #ifdef INVARIANTS /* * This so that the foo->size assertion in the * next iteration of the loop do the right * thing for entries that were pulled up and are * no longer valid. */ MPASS(n < nitems(mem_ranges)); bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * sizeof(struct t4_range)); #endif } } done: /* Done merging the ranges. */ MPASS(n > 0); r = &mem_ranges[0]; for (i = 0; i < n; i++, r++) { if (addr >= r->start && addr + len <= r->start + r->size) return (0); } } return (EFAULT); } static int fwmtype_to_hwmtype(int mtype) { switch (mtype) { case FW_MEMTYPE_EDC0: return (MEM_EDC0); case FW_MEMTYPE_EDC1: return (MEM_EDC1); case FW_MEMTYPE_EXTMEM: return (MEM_MC0); case FW_MEMTYPE_EXTMEM1: return (MEM_MC1); default: panic("%s: cannot translate fw mtype %d.", __func__, mtype); } } /* * Verify that the memory range specified by the memtype/offset/len pair is * valid and lies entirely within the memtype specified. The global address of * the start of the range is returned in addr. */ static int validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len, uint32_t *addr) { uint32_t em, addr_len, maddr; /* Memory can only be accessed in naturally aligned 4 byte units */ if (off & 3 || len & 3 || len == 0) return (EINVAL); em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); switch (fwmtype_to_hwmtype(mtype)) { case MEM_EDC0: if (!(em & F_EDRAM0_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); maddr = G_EDRAM0_BASE(addr_len) << 20; break; case MEM_EDC1: if (!(em & F_EDRAM1_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); maddr = G_EDRAM1_BASE(addr_len) << 20; break; case MEM_MC: if (!(em & F_EXT_MEM_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); maddr = G_EXT_MEM_BASE(addr_len) << 20; break; case MEM_MC1: if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); maddr = G_EXT_MEM1_BASE(addr_len) << 20; break; default: return (EINVAL); } *addr = maddr + off; /* global address */ return (validate_mem_range(sc, *addr, len)); } static int fixup_devlog_params(struct adapter *sc) { struct devlog_params *dparams = &sc->params.devlog; int rc; rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, dparams->size, &dparams->addr); return (rc); } static void update_nirq(struct intrs_and_queues *iaq, int nports) { iaq->nirq = T4_EXTRA_INTR; iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq); iaq->nirq += nports * iaq->nofldrxq; iaq->nirq += nports * (iaq->num_vis - 1) * max(iaq->nrxq_vi, iaq->nnmrxq_vi); iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi; } /* * Adjust requirements to fit the number of interrupts available. 
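 * When the full request does not fit, the strategy is: drop the extra
 * virtual interfaces first, then repeatedly shrink the NIC rx queue count
 * to the next lower power of two (for even RSS distribution) and halve
 * the offload rx queue count, and finally fall back to a single
 * interrupt.  The inner reduction loop, as a standalone sketch with
 * invented helper names (the real code recomputes the vector count with
 * update_nirq()):
 *
 *        static void
 *        shrink_to_budget(int *nrxq, int *nofldrxq, int nports, int navail)
 *        {
 *                int nirq, prev;
 *
 *                do {
 *                        prev = nports * (*nrxq + *nofldrxq);
 *                        if (*nrxq > 1)
 *                                *nrxq = 1 << (fls(*nrxq - 1) - 1);
 *                        if (*nofldrxq > 1)
 *                                *nofldrxq >>= 1;
 *                        nirq = nports * (*nrxq + *nofldrxq);
 *                } while (nirq > navail && nirq != prev);
 *        }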
*/ static void calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype, int navail) { int old_nirq; const int nports = sc->params.nports; MPASS(nports > 0); MPASS(navail > 0); bzero(iaq, sizeof(*iaq)); iaq->intr_type = itype; iaq->num_vis = t4_num_vis; iaq->ntxq = t4_ntxq; iaq->ntxq_vi = t4_ntxq_vi; iaq->nrxq = t4_nrxq; iaq->nrxq_vi = t4_nrxq_vi; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) if (is_offload(sc) || is_ethoffload(sc)) { iaq->nofldtxq = t4_nofldtxq; iaq->nofldtxq_vi = t4_nofldtxq_vi; } #endif #ifdef TCP_OFFLOAD if (is_offload(sc)) { iaq->nofldrxq = t4_nofldrxq; iaq->nofldrxq_vi = t4_nofldrxq_vi; } #endif #ifdef DEV_NETMAP if (t4_native_netmap & NN_MAIN_VI) { iaq->nnmtxq = t4_nnmtxq; iaq->nnmrxq = t4_nnmrxq; } if (t4_native_netmap & NN_EXTRA_VI) { iaq->nnmtxq_vi = t4_nnmtxq_vi; iaq->nnmrxq_vi = t4_nnmrxq_vi; } #endif update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { /* * This is the normal case -- there are enough interrupts for * everything. */ goto done; } /* * If extra VIs have been configured try reducing their count and see if * that works. */ while (iaq->num_vis > 1) { iaq->num_vis--; update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { device_printf(sc->dev, "virtual interfaces per port " "reduced to %d from %d. nrxq=%u, nofldrxq=%u, " "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. " "itype %d, navail %u, nirq %d.\n", iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, itype, navail, iaq->nirq); goto done; } } /* * Extra VIs will not be created. Log a message if they were requested. */ MPASS(iaq->num_vis == 1); iaq->ntxq_vi = iaq->nrxq_vi = 0; iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; if (iaq->num_vis != t4_num_vis) { device_printf(sc->dev, "extra virtual interfaces disabled. " "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, " "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n", iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, itype, navail, iaq->nirq); } /* * Keep reducing the number of NIC rx queues to the next lower power of * 2 (for even RSS distribution) and halving the TOE rx queues and see * if that works. */ do { if (iaq->nrxq > 1) { iaq->nrxq = rounddown_pow_of_two(iaq->nrxq - 1); if (iaq->nnmrxq > iaq->nrxq) iaq->nnmrxq = iaq->nrxq; } if (iaq->nofldrxq > 1) iaq->nofldrxq >>= 1; old_nirq = iaq->nirq; update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { device_printf(sc->dev, "running with reduced number of " "rx queues because of shortage of interrupts. " "nrxq=%u, nofldrxq=%u. " "itype %d, navail %u, nirq %d.\n", iaq->nrxq, iaq->nofldrxq, itype, navail, iaq->nirq); goto done; } } while (old_nirq != iaq->nirq); /* One interrupt for everything. Ugh. */ device_printf(sc->dev, "running with minimal number of queues. 
" "itype %d, navail %u.\n", itype, navail); iaq->nirq = 1; iaq->nrxq = 1; iaq->ntxq = 1; if (iaq->nofldrxq > 0) { iaq->nofldrxq = 1; iaq->nofldtxq = 1; } iaq->nnmtxq = 0; iaq->nnmrxq = 0; done: MPASS(iaq->num_vis > 0); if (iaq->num_vis > 1) { MPASS(iaq->nrxq_vi > 0); MPASS(iaq->ntxq_vi > 0); } MPASS(iaq->nirq > 0); MPASS(iaq->nrxq > 0); MPASS(iaq->ntxq > 0); if (itype == INTR_MSI) { MPASS(powerof2(iaq->nirq)); } } static int cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq) { int rc, itype, navail, nalloc; for (itype = INTR_MSIX; itype; itype >>= 1) { if ((itype & t4_intr_types) == 0) continue; /* not allowed */ if (itype == INTR_MSIX) navail = pci_msix_count(sc->dev); else if (itype == INTR_MSI) navail = pci_msi_count(sc->dev); else navail = 1; restart: if (navail == 0) continue; calculate_iaq(sc, iaq, itype, navail); nalloc = iaq->nirq; rc = 0; if (itype == INTR_MSIX) rc = pci_alloc_msix(sc->dev, &nalloc); else if (itype == INTR_MSI) rc = pci_alloc_msi(sc->dev, &nalloc); if (rc == 0 && nalloc > 0) { if (nalloc == iaq->nirq) return (0); /* * Didn't get the number requested. Use whatever number * the kernel is willing to allocate. */ device_printf(sc->dev, "fewer vectors than requested, " "type=%d, req=%d, rcvd=%d; will downshift req.\n", itype, iaq->nirq, nalloc); pci_release_msi(sc->dev); navail = nalloc; goto restart; } device_printf(sc->dev, "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", itype, rc, iaq->nirq, nalloc); } device_printf(sc->dev, "failed to find a usable interrupt type. " "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, pci_msix_count(sc->dev), pci_msi_count(sc->dev)); return (ENXIO); } #define FW_VERSION(chip) ( \ V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) /* Just enough of fw_hdr to cover all version info. */ struct fw_h { __u8 ver; __u8 chip; __be16 len512; __be32 fw_ver; __be32 tp_microcode_ver; __u8 intfver_nic; __u8 intfver_vnic; __u8 intfver_ofld; __u8 intfver_ri; __u8 intfver_iscsipdu; __u8 intfver_iscsi; __u8 intfver_fcoepdu; __u8 intfver_fcoe; }; /* Spot check a couple of fields. 
*/ CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver)); CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic)); CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe)); struct fw_info { uint8_t chip; char *kld_name; char *fw_mod_name; struct fw_h fw_h; } fw_info[] = { { .chip = CHELSIO_T4, .kld_name = "t4fw_cfg", .fw_mod_name = "t4fw", .fw_h = { .chip = FW_HDR_CHIP_T4, .fw_ver = htobe32(FW_VERSION(T4)), .intfver_nic = FW_INTFVER(T4, NIC), .intfver_vnic = FW_INTFVER(T4, VNIC), .intfver_ofld = FW_INTFVER(T4, OFLD), .intfver_ri = FW_INTFVER(T4, RI), .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T4, ISCSI), .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), .intfver_fcoe = FW_INTFVER(T4, FCOE), }, }, { .chip = CHELSIO_T5, .kld_name = "t5fw_cfg", .fw_mod_name = "t5fw", .fw_h = { .chip = FW_HDR_CHIP_T5, .fw_ver = htobe32(FW_VERSION(T5)), .intfver_nic = FW_INTFVER(T5, NIC), .intfver_vnic = FW_INTFVER(T5, VNIC), .intfver_ofld = FW_INTFVER(T5, OFLD), .intfver_ri = FW_INTFVER(T5, RI), .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T5, ISCSI), .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), .intfver_fcoe = FW_INTFVER(T5, FCOE), }, }, { .chip = CHELSIO_T6, .kld_name = "t6fw_cfg", .fw_mod_name = "t6fw", .fw_h = { .chip = FW_HDR_CHIP_T6, .fw_ver = htobe32(FW_VERSION(T6)), .intfver_nic = FW_INTFVER(T6, NIC), .intfver_vnic = FW_INTFVER(T6, VNIC), .intfver_ofld = FW_INTFVER(T6, OFLD), .intfver_ri = FW_INTFVER(T6, RI), .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T6, ISCSI), .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), .intfver_fcoe = FW_INTFVER(T6, FCOE), }, } }; static struct fw_info * find_fw_info(int chip) { int i; for (i = 0; i < nitems(fw_info); i++) { if (fw_info[i].chip == chip) return (&fw_info[i]); } return (NULL); } /* * Is the given firmware API compatible with the one the driver was compiled * with? */ static int fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2) { /* short circuit if it's the exact same firmware version */ if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) return (1); /* * XXX: Is this too conservative? Perhaps I should limit this to the * features that are supported in the driver. */ #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) return (1); #undef SAME_INTF return (0); } static int load_fw_module(struct adapter *sc, const struct firmware **dcfg, const struct firmware **fw) { struct fw_info *fw_info; *dcfg = NULL; if (fw != NULL) *fw = NULL; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); return (EINVAL); } *dcfg = firmware_get(fw_info->kld_name); if (*dcfg != NULL) { if (fw != NULL) *fw = firmware_get(fw_info->fw_mod_name); return (0); } return (ENOENT); } static void unload_fw_module(struct adapter *sc, const struct firmware *dcfg, const struct firmware *fw) { if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); if (dcfg != NULL) firmware_put(dcfg, FIRMWARE_UNLOAD); } /* * Return values: * 0 means no firmware install attempted. * ERESTART means a firmware install was attempted and was successful. * +ve errno means a firmware install was attempted but failed. 
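 * Stripped of the logging and module handling, the decision itself is a
 * comparison of the packed 32-bit version words.  A hedged sketch (the
 * function and argument names are made up; the real checks also depend on
 * whether the current firmware is usable at all and on t4_fw_install):
 *
 *        static int
 *        should_install(uint32_t card_ver, uint32_t bundled_ver,
 *            int compatible, int install_mode)
 *        {
 *                if (card_ver == 0xffffffff)
 *                        return (1);
 *                if (!compatible)
 *                        return (1);
 *                if (bundled_ver > card_ver)
 *                        return (1);
 *                if (install_mode == 2 && bundled_ver != card_ver)
 *                        return (1);
 *                return (0);
 *        }
 *
 * Comparing the packed words directly works because the major number
 * occupies the most significant bits.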
*/ static int install_kld_firmware(struct adapter *sc, struct fw_h *card_fw, const struct fw_h *drv_fw, const char *reason, int *already) { const struct firmware *cfg, *fw; const uint32_t c = be32toh(card_fw->fw_ver); uint32_t d, k; int rc, fw_install; struct fw_h bundled_fw; bool load_attempted; cfg = fw = NULL; load_attempted = false; fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install; memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw)); if (t4_fw_install < 0) { rc = load_fw_module(sc, &cfg, &fw); if (rc != 0 || fw == NULL) { device_printf(sc->dev, "failed to load firmware module: %d. cfg %p, fw %p;" " will use compiled-in firmware version for" "hw.cxgbe.fw_install checks.\n", rc, cfg, fw); } else { memcpy(&bundled_fw, fw->data, sizeof(bundled_fw)); } load_attempted = true; } d = be32toh(bundled_fw.fw_ver); if (reason != NULL) goto install; if ((sc->flags & FW_OK) == 0) { if (c == 0xffffffff) { reason = "missing"; goto install; } rc = 0; goto done; } if (!fw_compatible(card_fw, &bundled_fw)) { reason = "incompatible or unusable"; goto install; } if (d > c) { reason = "older than the version bundled with this driver"; goto install; } if (fw_install == 2 && d != c) { reason = "different than the version bundled with this driver"; goto install; } /* No reason to do anything to the firmware already on the card. */ rc = 0; goto done; install: rc = 0; if ((*already)++) goto done; if (fw_install == 0) { device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "but the driver is prohibited from installing a firmware " "on the card.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); goto done; } /* * We'll attempt to install a firmware. Load the module first (if it * hasn't been loaded already). */ if (!load_attempted) { rc = load_fw_module(sc, &cfg, &fw); if (rc != 0 || fw == NULL) { device_printf(sc->dev, "failed to load firmware module: %d. cfg %p, fw %p\n", rc, cfg, fw); /* carry on */ } } if (fw == NULL) { device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "but the driver cannot take corrective action because it " "is unable to load the firmware module.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); rc = sc->flags & FW_OK ? 0 : ENOENT; goto done; } k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver); if (k != d) { MPASS(t4_fw_install > 0); device_printf(sc->dev, "firmware in KLD (%u.%u.%u.%u) is not what the driver was " "expecting (%u.%u.%u.%u) and will not be used.\n", G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k), G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); rc = sc->flags & FW_OK ? 0 : EINVAL; goto done; } device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "installing firmware %u.%u.%u.%u on card.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); if (rc != 0) { device_printf(sc->dev, "failed to install firmware: %d\n", rc); } else { /* Installed successfully, update the cached header too. 
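 * (For reference, the version words printed throughout this function
 * decompose as major.minor.micro.build; assuming the 8-bit-per-field
 * packing implied by the FW_VERSION() macro above, the breakdown is:
 *
 *        major = (ver >> 24) & 0xff;
 *        minor = (ver >> 16) & 0xff;
 *        micro = (ver >>  8) & 0xff;
 *        build =  ver        & 0xff;
 *
 * which is what the G_FW_HDR_FW_VER_* getters are expected to compute.)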
*/ rc = ERESTART; memcpy(card_fw, fw->data, sizeof(*card_fw)); } done: unload_fw_module(sc, cfg, fw); return (rc); } /* * Establish contact with the firmware and attempt to become the master driver. * * A firmware will be installed to the card if needed (if the driver is allowed * to do so). */ static int contact_firmware(struct adapter *sc) { int rc, already = 0; enum dev_state state; struct fw_info *fw_info; struct fw_hdr *card_fw; /* fw on the card */ const struct fw_h *drv_fw; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); return (EINVAL); } drv_fw = &fw_info->fw_h; /* Read the header of the firmware on the card */ card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); restart: rc = -t4_get_fw_hdr(sc, card_fw); if (rc != 0) { device_printf(sc->dev, "unable to read firmware header from card's flash: %d\n", rc); goto done; } rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, &already); if (rc == ERESTART) goto restart; if (rc != 0) goto done; rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); if (rc < 0 || state == DEV_STATE_ERR) { rc = -rc; device_printf(sc->dev, "failed to connect to the firmware: %d, %d. " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); #if 0 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, "not responding properly to HELLO", &already) == ERESTART) goto restart; #endif goto done; } MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT); sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */ if (rc == sc->pf) { sc->flags |= MASTER_PF; rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, &already); if (rc == ERESTART) rc = 0; else if (rc != 0) goto done; } else if (state == DEV_STATE_UNINIT) { /* * We didn't get to be the master so we definitely won't be * configuring the chip. It's a bug if someone else hasn't * configured it already. */ device_printf(sc->dev, "couldn't be master(%d), " "device not already initialized either(%d). " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); rc = EPROTO; goto done; } else { /* * Some other PF is the master and has configured the chip. * This is allowed but untested. */ device_printf(sc->dev, "PF%d is master, device state %d. " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc); sc->cfcsum = 0; rc = 0; } done: if (rc != 0 && sc->flags & FW_OK) { t4_fw_bye(sc, sc->mbox); sc->flags &= ~FW_OK; } free(card_fw, M_CXGBE); return (rc); } static int copy_cfg_file_to_card(struct adapter *sc, char *cfg_file, uint32_t mtype, uint32_t moff) { struct fw_info *fw_info; const struct firmware *dcfg, *rcfg = NULL; const uint32_t *cfdata; uint32_t cflen, addr; int rc; load_fw_module(sc, &dcfg, NULL); /* Card specific interpretation of "default". 
*/ if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { if (pci_get_device(sc->dev) == 0x440a) snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF); if (is_fpga(sc)) snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF); } if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { if (dcfg == NULL) { device_printf(sc->dev, "KLD with default config is not available.\n"); rc = ENOENT; goto done; } cfdata = dcfg->data; cflen = dcfg->datasize & ~3; } else { char s[32]; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); rc = EINVAL; goto done; } snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file); rcfg = firmware_get(s); if (rcfg == NULL) { device_printf(sc->dev, "unable to load module \"%s\" for configuration " "profile \"%s\".\n", s, cfg_file); rc = ENOENT; goto done; } cfdata = rcfg->data; cflen = rcfg->datasize & ~3; } if (cflen > FLASH_CFG_MAX_SIZE) { device_printf(sc->dev, "config file too long (%d, max allowed is %d).\n", cflen, FLASH_CFG_MAX_SIZE); rc = EINVAL; goto done; } rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); if (rc != 0) { device_printf(sc->dev, "%s: addr (%d/0x%x) or len %d is not valid: %d.\n", __func__, mtype, moff, cflen, rc); rc = EINVAL; goto done; } write_via_memwin(sc, 2, addr, cfdata, cflen); done: if (rcfg != NULL) firmware_put(rcfg, FIRMWARE_UNLOAD); unload_fw_module(sc, dcfg, NULL); return (rc); } struct caps_allowed { uint16_t nbmcaps; uint16_t linkcaps; uint16_t switchcaps; uint16_t niccaps; uint16_t toecaps; uint16_t rdmacaps; uint16_t cryptocaps; uint16_t iscsicaps; uint16_t fcoecaps; }; #define FW_PARAM_DEV(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) #define FW_PARAM_PFVF(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) /* * Provide a configuration profile to the firmware and have it initialize the * chip accordingly. This may involve uploading a configuration file to the * card. */ static int apply_cfg_and_initialize(struct adapter *sc, char *cfg_file, const struct caps_allowed *caps_allowed) { int rc; struct fw_caps_config_cmd caps; uint32_t mtype, moff, finicsum, cfcsum, param, val; rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); if (rc != 0) { device_printf(sc->dev, "firmware reset failed: %d.\n", rc); return (rc); } bzero(&caps, sizeof(caps)); caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ); if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) { mtype = 0; moff = 0; caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); } else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) { mtype = FW_MEMTYPE_FLASH; moff = t4_flash_cfg_addr(sc); caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); } else { /* * Ask the firmware where it wants us to upload the config file. */ param = FW_PARAM_DEV(CF); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc != 0) { /* No support for config file? Shouldn't happen. 
*/ device_printf(sc->dev, "failed to query config file location: %d.\n", rc); goto done; } mtype = G_FW_PARAMS_PARAM_Y(val); moff = G_FW_PARAMS_PARAM_Z(val) << 16; caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff); if (rc != 0) { device_printf(sc->dev, "failed to upload config file to card: %d.\n", rc); goto done; } } rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); if (rc != 0) { device_printf(sc->dev, "failed to pre-process config file: %d " "(mtype %d, moff 0x%x).\n", rc, mtype, moff); goto done; } finicsum = be32toh(caps.finicsum); cfcsum = be32toh(caps.cfcsum); /* actual */ if (finicsum != cfcsum) { device_printf(sc->dev, "WARNING: config file checksum mismatch: %08x %08x\n", finicsum, cfcsum); } sc->cfcsum = cfcsum; snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file); /* * Let the firmware know what features will (not) be used so it can tune * things accordingly. */ #define LIMIT_CAPS(x) do { \ caps.x##caps &= htobe16(caps_allowed->x##caps); \ } while (0) LIMIT_CAPS(nbm); LIMIT_CAPS(link); LIMIT_CAPS(switch); LIMIT_CAPS(nic); LIMIT_CAPS(toe); LIMIT_CAPS(rdma); LIMIT_CAPS(crypto); LIMIT_CAPS(iscsi); LIMIT_CAPS(fcoe); #undef LIMIT_CAPS if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) { /* * TOE and hashfilters are mutually exclusive. It is a config * file or firmware bug if both are reported as available. Try * to cope with the situation in non-debug builds by disabling * TOE. */ MPASS(caps.toecaps == 0); caps.toecaps = 0; caps.rdmacaps = 0; caps.iscsicaps = 0; } caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE); caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); if (rc != 0) { device_printf(sc->dev, "failed to process config file: %d.\n", rc); goto done; } t4_tweak_chip_settings(sc); set_params__pre_init(sc); /* get basic stuff going */ rc = -t4_fw_initialize(sc, sc->mbox); if (rc != 0) { device_printf(sc->dev, "fw_initialize failed: %d.\n", rc); goto done; } done: return (rc); } /* * Partition chip resources for use between various PFs, VFs, etc. */ static int partition_resources(struct adapter *sc) { char cfg_file[sizeof(t4_cfg_file)]; struct caps_allowed caps_allowed; int rc; bool fallback; /* Only the master driver gets to configure the chip resources. */ MPASS(sc->flags & MASTER_PF); #define COPY_CAPS(x) do { \ caps_allowed.x##caps = t4_##x##caps_allowed; \ } while (0) bzero(&caps_allowed, sizeof(caps_allowed)); COPY_CAPS(nbm); COPY_CAPS(link); COPY_CAPS(switch); COPY_CAPS(nic); COPY_CAPS(toe); COPY_CAPS(rdma); COPY_CAPS(crypto); COPY_CAPS(iscsi); COPY_CAPS(fcoe); fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true; snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file); retry: rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed); if (rc != 0 && fallback) { dump_devlog(sc); device_printf(sc->dev, "failed (%d) to configure card with \"%s\" profile, " "will fall back to a basic configuration and retry.\n", rc, cfg_file); snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF); bzero(&caps_allowed, sizeof(caps_allowed)); COPY_CAPS(switch); caps_allowed.niccaps = FW_CAPS_CONFIG_NIC; fallback = false; goto retry; } #undef COPY_CAPS return (rc); } /* * Retrieve parameters that are needed (or nice to have) very early. 
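 * One of them is PORTVEC, a bitmap of the physical ports that belong to
 * this function; the port count is simply its population count.  A tiny
 * standalone illustration of that step (hypothetical helper name):
 *
 *        static u_int
 *        portvec_to_nports(uint32_t portvec)
 *        {
 *                u_int n = 0;
 *
 *                while (portvec != 0) {
 *                        portvec &= portvec - 1;
 *                        n++;
 *                }
 *                return (n);
 *        }
 *
 * A portvec of 0x5 (ports 0 and 2) yields 2, which is what the
 * bitcount32() call below computes.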
*/ static int get_params__pre_init(struct adapter *sc) { int rc; uint32_t param[2], val[2]; t4_get_version_info(sc); snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); param[0] = FW_PARAM_DEV(PORTVEC); param[1] = FW_PARAM_DEV(CCLK); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query parameters (pre_init): %d.\n", rc); return (rc); } sc->params.portvec = val[0]; sc->params.nports = bitcount32(val[0]); sc->params.vpd.cclk = val[1]; /* Read device log parameters. */ rc = -t4_init_devlog_params(sc, 1); if (rc == 0) fixup_devlog_params(sc); else { device_printf(sc->dev, "failed to get devlog parameters: %d.\n", rc); rc = 0; /* devlog isn't critical for device operation */ } return (rc); } /* * Any params that need to be set before FW_INITIALIZE. */ static int set_params__pre_init(struct adapter *sc) { int rc = 0; uint32_t param, val; if (chip_id(sc) >= CHELSIO_T6) { param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT); val = 1; rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); /* firmwares < 1.20.1.0 do not have this param. */ if (rc == FW_EINVAL && sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) { rc = 0; } if (rc != 0) { device_printf(sc->dev, "failed to enable high priority filters :%d.\n", rc); } param = FW_PARAM_DEV(PPOD_EDRAM); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc == 0 && val == 1) { rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc != 0) { device_printf(sc->dev, "failed to set PPOD_EDRAM: %d.\n", rc); } } } /* Enable opaque VIIDs with firmwares that support it. */ param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN); val = 1; rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc == 0 && val == 1) sc->params.viid_smt_extn_support = true; else sc->params.viid_smt_extn_support = false; return (rc); } /* * Retrieve various parameters that are of interest to the driver. The device * has been initialized by the firmware at this point. 
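 * Note that the optional parameters further down are queried one at a
 * time: old firmwares fail an entire compound query if any single
 * parameter in it is unknown, so each lookup supplies its own default.
 * The pattern, sketched as a hypothetical helper around the real
 * t4_query_params() call:
 *
 *        static uint32_t
 *        query_or_default(struct adapter *sc, uint32_t param, uint32_t def)
 *        {
 *                uint32_t val = 0;
 *
 *                if (t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param,
 *                    &val) == 0)
 *                        return (val);
 *                return (def);
 *        }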
*/ static int get_params__post_init(struct adapter *sc) { int rc; uint32_t param[7], val[7]; struct fw_caps_config_cmd caps; param[0] = FW_PARAM_PFVF(IQFLINT_START); param[1] = FW_PARAM_PFVF(EQ_START); param[2] = FW_PARAM_PFVF(FILTER_START); param[3] = FW_PARAM_PFVF(FILTER_END); param[4] = FW_PARAM_PFVF(L2T_START); param[5] = FW_PARAM_PFVF(L2T_END); param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query parameters (post_init): %d.\n", rc); return (rc); } sc->sge.iq_start = val[0]; sc->sge.eq_start = val[1]; if ((int)val[3] > (int)val[2]) { sc->tids.ftid_base = val[2]; sc->tids.ftid_end = val[3]; sc->tids.nftids = val[3] - val[2] + 1; } sc->vres.l2t.start = val[4]; sc->vres.l2t.size = val[5] - val[4] + 1; /* val[5] is the last hwidx and it must not collide with F_SYNC_WR */ if (sc->vres.l2t.size > 0) MPASS(fls(val[5]) <= S_SYNC_WR); sc->params.core_vdd = val[6]; param[0] = FW_PARAM_PFVF(IQFLINT_END); param[1] = FW_PARAM_PFVF(EQ_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query parameters (post_init2): %d.\n", rc); return (rc); } MPASS((int)val[0] >= sc->sge.iq_start); sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1; MPASS((int)val[1] >= sc->sge.eq_start); sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1; if (chip_id(sc) >= CHELSIO_T6) { sc->tids.tid_base = t4_read_reg(sc, A_LE_DB_ACTIVE_TABLE_START_INDEX); param[0] = FW_PARAM_PFVF(HPFILTER_START); param[1] = FW_PARAM_PFVF(HPFILTER_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query hpfilter parameters: %d.\n", rc); return (rc); } if ((int)val[1] > (int)val[0]) { sc->tids.hpftid_base = val[0]; sc->tids.hpftid_end = val[1]; sc->tids.nhpftids = val[1] - val[0] + 1; /* * These should go off if the layout changes and the * driver needs to catch up. */ MPASS(sc->tids.hpftid_base == 0); MPASS(sc->tids.tid_base == sc->tids.nhpftids); } param[0] = FW_PARAM_PFVF(RAWF_START); param[1] = FW_PARAM_PFVF(RAWF_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query rawf parameters: %d.\n", rc); return (rc); } if ((int)val[1] > (int)val[0]) { sc->rawf_base = val[0]; sc->nrawf = val[1] - val[0] + 1; } } /* * The parameters that follow may not be available on all firmwares. We * query them individually rather than in a compound query because old * firmwares fail the entire query if an unknown parameter is queried. */ /* * MPS buffer group configuration. */ param[0] = FW_PARAM_DEV(MPSBGMAP); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.mps_bg_map = val[0]; else sc->params.mps_bg_map = UINT32_MAX; /* Not a legal value. */ param[0] = FW_PARAM_DEV(TPCHMAP); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.tp_ch_map = val[0]; else sc->params.tp_ch_map = UINT32_MAX; /* Not a legal value. */ /* * Determine whether the firmware supports the filter2 work request. */ param[0] = FW_PARAM_DEV(FILTER2_WR); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.filter2_wr_support = val[0] != 0; else sc->params.filter2_wr_support = 0; /* * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL. 
*/ param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.ulptx_memwrite_dsgl = val[0] != 0; else sc->params.ulptx_memwrite_dsgl = false; /* FW_RI_FR_NSMR_TPTE_WR support */ param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.fr_nsmr_tpte_wr_support = val[0] != 0; else sc->params.fr_nsmr_tpte_wr_support = false; /* Support for 512 SGL entries per FR MR. */ param[0] = FW_PARAM_DEV(DEV_512SGL_MR); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.dev_512sgl_mr = val[0] != 0; else sc->params.dev_512sgl_mr = false; param[0] = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.max_pkts_per_eth_tx_pkts_wr = val[0]; else sc->params.max_pkts_per_eth_tx_pkts_wr = 15; param[0] = FW_PARAM_DEV(NUM_TM_CLASS); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) { MPASS(val[0] > 0 && val[0] < 256); /* nsched_cls is 8b */ sc->params.nsched_cls = val[0]; } else sc->params.nsched_cls = sc->chip_params->nsched_cls; /* get capabilites */ bzero(&caps, sizeof(caps)); caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ); caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); if (rc != 0) { device_printf(sc->dev, "failed to get card capabilities: %d.\n", rc); return (rc); } #define READ_CAPS(x) do { \ sc->x = htobe16(caps.x); \ } while (0) READ_CAPS(nbmcaps); READ_CAPS(linkcaps); READ_CAPS(switchcaps); READ_CAPS(niccaps); READ_CAPS(toecaps); READ_CAPS(rdmacaps); READ_CAPS(cryptocaps); READ_CAPS(iscsicaps); READ_CAPS(fcoecaps); if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) { MPASS(chip_id(sc) > CHELSIO_T4); MPASS(sc->toecaps == 0); sc->toecaps = 0; param[0] = FW_PARAM_DEV(NTID); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query HASHFILTER parameters: %d.\n", rc); return (rc); } sc->tids.ntids = val[0]; if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { MPASS(sc->tids.ntids >= sc->tids.nhpftids); sc->tids.ntids -= sc->tids.nhpftids; } sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); sc->params.hash_filter = 1; } if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { param[0] = FW_PARAM_PFVF(ETHOFLD_START); param[1] = FW_PARAM_PFVF(ETHOFLD_END); param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query NIC parameters: %d.\n", rc); return (rc); } if ((int)val[1] > (int)val[0]) { sc->tids.etid_base = val[0]; sc->tids.etid_end = val[1]; sc->tids.netids = val[1] - val[0] + 1; sc->params.eo_wr_cred = val[2]; sc->params.ethoffload = 1; } } if (sc->toecaps) { /* query offload-related parameters */ param[0] = FW_PARAM_DEV(NTID); param[1] = FW_PARAM_PFVF(SERVER_START); param[2] = FW_PARAM_PFVF(SERVER_END); param[3] = FW_PARAM_PFVF(TDDP_START); param[4] = FW_PARAM_PFVF(TDDP_END); param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query TOE parameters: %d.\n", rc); return (rc); } sc->tids.ntids = val[0]; if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { MPASS(sc->tids.ntids >= sc->tids.nhpftids); sc->tids.ntids -= sc->tids.nhpftids; } sc->tids.natids 
= min(sc->tids.ntids / 2, MAX_ATIDS); if ((int)val[2] > (int)val[1]) { sc->tids.stid_base = val[1]; sc->tids.nstids = val[2] - val[1] + 1; } sc->vres.ddp.start = val[3]; sc->vres.ddp.size = val[4] - val[3] + 1; sc->params.ofldq_wr_cred = val[5]; sc->params.offload = 1; } else { /* * The firmware attempts memfree TOE configuration for -SO cards * and will report toecaps=0 if it runs out of resources (this * depends on the config file). It may not report 0 for other * capabilities dependent on the TOE in this case. Set them to * 0 here so that the driver doesn't bother tracking resources * that will never be used. */ sc->iscsicaps = 0; sc->rdmacaps = 0; } if (sc->rdmacaps) { param[0] = FW_PARAM_PFVF(STAG_START); param[1] = FW_PARAM_PFVF(STAG_END); param[2] = FW_PARAM_PFVF(RQ_START); param[3] = FW_PARAM_PFVF(RQ_END); param[4] = FW_PARAM_PFVF(PBL_START); param[5] = FW_PARAM_PFVF(PBL_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(1): %d.\n", rc); return (rc); } sc->vres.stag.start = val[0]; sc->vres.stag.size = val[1] - val[0] + 1; sc->vres.rq.start = val[2]; sc->vres.rq.size = val[3] - val[2] + 1; sc->vres.pbl.start = val[4]; sc->vres.pbl.size = val[5] - val[4] + 1; param[0] = FW_PARAM_PFVF(SQRQ_START); param[1] = FW_PARAM_PFVF(SQRQ_END); param[2] = FW_PARAM_PFVF(CQ_START); param[3] = FW_PARAM_PFVF(CQ_END); param[4] = FW_PARAM_PFVF(OCQ_START); param[5] = FW_PARAM_PFVF(OCQ_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(2): %d.\n", rc); return (rc); } sc->vres.qp.start = val[0]; sc->vres.qp.size = val[1] - val[0] + 1; sc->vres.cq.start = val[2]; sc->vres.cq.size = val[3] - val[2] + 1; sc->vres.ocq.start = val[4]; sc->vres.ocq.size = val[5] - val[4] + 1; param[0] = FW_PARAM_PFVF(SRQ_START); param[1] = FW_PARAM_PFVF(SRQ_END); param[2] = FW_PARAM_DEV(MAXORDIRD_QP); param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(3): %d.\n", rc); return (rc); } sc->vres.srq.start = val[0]; sc->vres.srq.size = val[1] - val[0] + 1; sc->params.max_ordird_qp = val[2]; sc->params.max_ird_adapter = val[3]; } if (sc->iscsicaps) { param[0] = FW_PARAM_PFVF(ISCSI_START); param[1] = FW_PARAM_PFVF(ISCSI_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query iSCSI parameters: %d.\n", rc); return (rc); } sc->vres.iscsi.start = val[0]; sc->vres.iscsi.size = val[1] - val[0] + 1; } if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { param[0] = FW_PARAM_PFVF(TLS_START); param[1] = FW_PARAM_PFVF(TLS_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query TLS parameters: %d.\n", rc); return (rc); } sc->vres.key.start = val[0]; sc->vres.key.size = val[1] - val[0] + 1; } /* * We've got the params we wanted to query directly from the firmware. * Grab some others via other means. 
*/ t4_init_sge_params(sc); t4_init_tp_params(sc); t4_read_mtu_tbl(sc, sc->params.mtus, NULL); t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); rc = t4_verify_chip_settings(sc); if (rc != 0) return (rc); t4_init_rx_buf_info(sc); return (rc); } #ifdef KERN_TLS static void ktls_tick(void *arg) { struct adapter *sc; uint32_t tstamp; sc = arg; tstamp = tcp_ts_getticks(); t4_write_reg(sc, A_TP_SYNC_TIME_HI, tstamp >> 1); t4_write_reg(sc, A_TP_SYNC_TIME_LO, tstamp << 31); callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK); } static int t6_config_kern_tls(struct adapter *sc, bool enable) { int rc; uint32_t param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_KTLS_HW) | V_FW_PARAMS_PARAM_Y(enable ? 1 : 0) | V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE); rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, ¶m); if (rc != 0) { CH_ERR(sc, "failed to %s NIC TLS: %d\n", enable ? "enable" : "disable", rc); return (rc); } if (enable) { sc->flags |= KERN_TLS_ON; callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc, C_HARDCLOCK); } else { sc->flags &= ~KERN_TLS_ON; callout_stop(&sc->ktls_tick); } return (rc); } #endif static int set_params__post_init(struct adapter *sc) { uint32_t mask, param, val; #ifdef TCP_OFFLOAD int i, v, shift; #endif /* ask for encapsulated CPLs */ param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); val = 1; (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); /* Enable 32b port caps if the firmware supports it. */ param = FW_PARAM_PFVF(PORT_CAPS32); val = 1; if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val) == 0) sc->params.port_caps32 = 1; /* Let filter + maskhash steer to a part of the VI's RSS region. */ val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1); t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER), V_MASKFILTER(val - 1)); mask = F_DROPERRORANY | F_DROPERRORMAC | F_DROPERRORIPVER | F_DROPERRORFRAG | F_DROPERRORATTACK | F_DROPERRORETHHDRLEN | F_DROPERRORIPHDRLEN | F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN | F_DROPERRORTCPOPT | F_DROPERRORCSUMIP | F_DROPERRORCSUM; val = 0; if (chip_id(sc) < CHELSIO_T6 && t4_attack_filter != 0) { t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_ATTACKFILTERENABLE, F_ATTACKFILTERENABLE); val |= F_DROPERRORATTACK; } if (t4_drop_ip_fragments != 0) { t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_FRAGMENTDROP, F_FRAGMENTDROP); val |= F_DROPERRORFRAG; } if (t4_drop_pkts_with_l2_errors != 0) val |= F_DROPERRORMAC | F_DROPERRORETHHDRLEN; if (t4_drop_pkts_with_l3_errors != 0) { val |= F_DROPERRORIPVER | F_DROPERRORIPHDRLEN | F_DROPERRORCSUMIP; } if (t4_drop_pkts_with_l4_errors != 0) { val |= F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN | F_DROPERRORTCPOPT | F_DROPERRORCSUM; } t4_set_reg_field(sc, A_TP_ERR_CONFIG, mask, val); #ifdef TCP_OFFLOAD /* * Override the TOE timers with user provided tunables. This is not the * recommended way to change the timers (the firmware config file is) so * these tunables are not documented. * * All the timer tunables are in microseconds. 
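 * Each override follows the same recipe: convert the tunable to hardware
 * ticks, truncate it to the width of the destination register field, and
 * read-modify-write only that field.  The field update, reduced to a
 * generic sketch (the real code uses the per-field V_/M_ macros and
 * t4_set_reg_field()):
 *
 *        static uint32_t
 *        update_field(uint32_t reg, uint32_t mask, u_int shift, uint32_t val)
 *        {
 *                reg &= ~(mask << shift);
 *                reg |= (val & mask) << shift;
 *                return (reg);
 *        }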
*/ if (t4_toe_keepalive_idle != 0) { v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle); v &= M_KEEPALIVEIDLE; t4_set_reg_field(sc, A_TP_KEEP_IDLE, V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v)); } if (t4_toe_keepalive_interval != 0) { v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval); v &= M_KEEPALIVEINTVL; t4_set_reg_field(sc, A_TP_KEEP_INTVL, V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v)); } if (t4_toe_keepalive_count != 0) { v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2; t4_set_reg_field(sc, A_TP_SHIFT_CNT, V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) | V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2), V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v)); } if (t4_toe_rexmt_min != 0) { v = us_to_tcp_ticks(sc, t4_toe_rexmt_min); v &= M_RXTMIN; t4_set_reg_field(sc, A_TP_RXT_MIN, V_RXTMIN(M_RXTMIN), V_RXTMIN(v)); } if (t4_toe_rexmt_max != 0) { v = us_to_tcp_ticks(sc, t4_toe_rexmt_max); v &= M_RXTMAX; t4_set_reg_field(sc, A_TP_RXT_MAX, V_RXTMAX(M_RXTMAX), V_RXTMAX(v)); } if (t4_toe_rexmt_count != 0) { v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2; t4_set_reg_field(sc, A_TP_SHIFT_CNT, V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) | V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2), V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v)); } for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) { if (t4_toe_rexmt_backoff[i] != -1) { v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0; shift = (i & 3) << 3; t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3), M_TIMERBACKOFFINDEX0 << shift, v << shift); } } #endif /* * Limit TOE connections to 2 reassembly "islands". This is * required to permit migrating TOE connections to either * ULP_MODE_TCPDDP or UPL_MODE_TLS. */ t4_tp_wr_bits_indirect(sc, A_TP_FRAG_CONFIG, V_PASSMODE(M_PASSMODE), V_PASSMODE(2)); #ifdef KERN_TLS if (is_ktls(sc)) { sc->tlst.inline_keys = t4_tls_inline_keys; sc->tlst.combo_wrs = t4_tls_combo_wrs; if (t4_kern_tls != 0 && is_t6(sc)) t6_config_kern_tls(sc, true); } #endif return (0); } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV static void t4_set_desc(struct adapter *sc) { struct adapter_params *p = &sc->params; device_set_descf(sc->dev, "Chelsio %s", p->vpd.id); } static inline void ifmedia_add4(struct ifmedia *ifm, int m) { ifmedia_add(ifm, m, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL); } /* * This is the selected media, which is not quite the same as the active media. * The media line in ifconfig is "media: Ethernet selected (active)" if selected * and active are not the same, and "media: Ethernet selected" otherwise. */ static void set_current_media(struct port_info *pi) { struct link_config *lc; struct ifmedia *ifm; int mword; u_int speed; PORT_LOCK_ASSERT_OWNED(pi); /* Leave current media alone if it's already set to IFM_NONE. */ ifm = &pi->media; if (ifm->ifm_cur != NULL && IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE) return; lc = &pi->link_cfg; if (lc->requested_aneg != AUTONEG_DISABLE && lc->pcaps & FW_PORT_CAP32_ANEG) { ifmedia_set(ifm, IFM_ETHER | IFM_AUTO); return; } mword = IFM_ETHER | IFM_FDX; if (lc->requested_fc & PAUSE_TX) mword |= IFM_ETH_TXPAUSE; if (lc->requested_fc & PAUSE_RX) mword |= IFM_ETH_RXPAUSE; if (lc->requested_speed == 0) speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */ else speed = lc->requested_speed; mword |= port_mword(pi, speed_to_fwcap(speed)); ifmedia_set(ifm, mword); } /* * Returns true if the ifmedia list for the port cannot change. 
*/ static bool fixed_ifmedia(struct port_info *pi) { return (pi->port_type == FW_PORT_TYPE_BT_SGMII || pi->port_type == FW_PORT_TYPE_BT_XFI || pi->port_type == FW_PORT_TYPE_BT_XAUI || pi->port_type == FW_PORT_TYPE_KX4 || pi->port_type == FW_PORT_TYPE_KX || pi->port_type == FW_PORT_TYPE_KR || pi->port_type == FW_PORT_TYPE_BP_AP || pi->port_type == FW_PORT_TYPE_BP4_AP || pi->port_type == FW_PORT_TYPE_BP40_BA || pi->port_type == FW_PORT_TYPE_KR4_100G || pi->port_type == FW_PORT_TYPE_KR_SFP28 || pi->port_type == FW_PORT_TYPE_KR_XLAUI); } static void build_medialist(struct port_info *pi) { uint32_t ss, speed; int unknown, mword, bit; struct link_config *lc; struct ifmedia *ifm; PORT_LOCK_ASSERT_OWNED(pi); if (pi->flags & FIXED_IFMEDIA) return; /* * Rebuild the ifmedia list. */ ifm = &pi->media; ifmedia_removeall(ifm); lc = &pi->link_cfg; ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */ if (__predict_false(ss == 0)) { /* not supposed to happen. */ MPASS(ss != 0); no_media: MPASS(LIST_EMPTY(&ifm->ifm_list)); ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL); ifmedia_set(ifm, IFM_ETHER | IFM_NONE); return; } unknown = 0; for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) { speed = 1 << bit; MPASS(speed & M_FW_PORT_CAP32_SPEED); if (ss & speed) { mword = port_mword(pi, speed); if (mword == IFM_NONE) { goto no_media; } else if (mword == IFM_UNKNOWN) unknown++; else ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword); } } if (unknown > 0) /* Add one unknown for all unknown media types. */ ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN); if (lc->pcaps & FW_PORT_CAP32_ANEG) ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL); set_current_media(pi); } /* * Initialize the requested fields in the link config based on driver tunables. */ static void init_link_config(struct port_info *pi) { struct link_config *lc = &pi->link_cfg; PORT_LOCK_ASSERT_OWNED(pi); lc->requested_caps = 0; lc->requested_speed = 0; if (t4_autoneg == 0) lc->requested_aneg = AUTONEG_DISABLE; else if (t4_autoneg == 1) lc->requested_aneg = AUTONEG_ENABLE; else lc->requested_aneg = AUTONEG_AUTO; lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG); if (t4_fec & FEC_AUTO) lc->requested_fec = FEC_AUTO; else if (t4_fec == 0) lc->requested_fec = FEC_NONE; else { /* -1 is handled by the FEC_AUTO block above and not here. */ lc->requested_fec = t4_fec & (FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE); if (lc->requested_fec == 0) lc->requested_fec = FEC_AUTO; } if (t4_force_fec < 0) lc->force_fec = -1; else if (t4_force_fec > 0) lc->force_fec = 1; else lc->force_fec = 0; } /* * Makes sure that all requested settings comply with what's supported by the * port. Returns the number of settings that were invalid and had to be fixed. 
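 * Every check follows the same shape: if a requested setting has no
 * matching bit in the port's capabilities (lc->pcaps), replace it with a
 * safe fallback and count the fixup.  Sketched with an invented helper:
 *
 *        static int
 *        fixup_one(uint32_t pcaps, uint32_t needed_cap, int *setting,
 *            int fallback)
 *        {
 *                if (*setting != 0 && (pcaps & needed_cap) == 0) {
 *                        *setting = fallback;
 *                        return (1);
 *                }
 *                return (0);
 *        }
 *
 * A non-zero sum of these return values means the tunables asked for
 * something this particular port cannot do.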
*/ static int fixup_link_config(struct port_info *pi) { int n = 0; struct link_config *lc = &pi->link_cfg; uint32_t fwspeed; PORT_LOCK_ASSERT_OWNED(pi); /* Speed (when not autonegotiating) */ if (lc->requested_speed != 0) { fwspeed = speed_to_fwcap(lc->requested_speed); if ((fwspeed & lc->pcaps) == 0) { n++; lc->requested_speed = 0; } } /* Link autonegotiation */ MPASS(lc->requested_aneg == AUTONEG_ENABLE || lc->requested_aneg == AUTONEG_DISABLE || lc->requested_aneg == AUTONEG_AUTO); if (lc->requested_aneg == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) { n++; lc->requested_aneg = AUTONEG_AUTO; } /* Flow control */ MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0); if (lc->requested_fc & PAUSE_TX && !(lc->pcaps & FW_PORT_CAP32_FC_TX)) { n++; lc->requested_fc &= ~PAUSE_TX; } if (lc->requested_fc & PAUSE_RX && !(lc->pcaps & FW_PORT_CAP32_FC_RX)) { n++; lc->requested_fc &= ~PAUSE_RX; } if (!(lc->requested_fc & PAUSE_AUTONEG) && !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) { n++; lc->requested_fc |= PAUSE_AUTONEG; } /* FEC */ if ((lc->requested_fec & FEC_RS && !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) || (lc->requested_fec & FEC_BASER_RS && !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) { n++; lc->requested_fec = FEC_AUTO; } return (n); } /* * Apply the requested L1 settings, which are expected to be valid, to the * hardware. */ static int apply_link_config(struct port_info *pi) { struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; #ifdef INVARIANTS ASSERT_SYNCHRONIZED_OP(sc); PORT_LOCK_ASSERT_OWNED(pi); if (lc->requested_aneg == AUTONEG_ENABLE) MPASS(lc->pcaps & FW_PORT_CAP32_ANEG); if (!(lc->requested_fc & PAUSE_AUTONEG)) MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE); if (lc->requested_fc & PAUSE_TX) MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX); if (lc->requested_fc & PAUSE_RX) MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX); if (lc->requested_fec & FEC_RS) MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS); if (lc->requested_fec & FEC_BASER_RS) MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS); #endif if (!(sc->flags & IS_VF)) { rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); if (rc != 0) { device_printf(pi->dev, "l1cfg failed: %d\n", rc); return (rc); } } /* * An L1_CFG will almost always result in a link-change event if the * link is up, and the driver will refresh the actual fec/fc/etc. when * the notification is processed. If the link is down then the actual * settings are meaningless. * * This takes care of the case where a change in the L1 settings may not * result in a notification. 
*/ if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG)) lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX); return (0); } #define FW_MAC_EXACT_CHUNK 7 struct mcaddr_ctx { if_t ifp; const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; uint64_t hash; int i; int del; int rc; }; static u_int add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct mcaddr_ctx *ctx = arg; struct vi_info *vi = if_getsoftc(ctx->ifp); struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; if (ctx->rc < 0) return (0); ctx->mcaddr[ctx->i] = LLADDR(sdl); MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i])); ctx->i++; if (ctx->i == FW_MAC_EXACT_CHUNK) { ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del, ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0); if (ctx->rc < 0) { int j; for (j = 0; j < ctx->i; j++) { if_printf(ctx->ifp, "failed to add mc address" " %02x:%02x:%02x:" "%02x:%02x:%02x rc=%d\n", ctx->mcaddr[j][0], ctx->mcaddr[j][1], ctx->mcaddr[j][2], ctx->mcaddr[j][3], ctx->mcaddr[j][4], ctx->mcaddr[j][5], -ctx->rc); } return (0); } ctx->del = 0; ctx->i = 0; } return (1); } /* * Program the port's XGMAC based on parameters in ifnet. The caller also * indicates which parameters should be programmed (the rest are left alone). */ int update_mac_settings(if_t ifp, int flags) { int rc = 0; struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; uint8_t match_all_mac[ETHER_ADDR_LEN] = {0}; ASSERT_SYNCHRONIZED_OP(sc); KASSERT(flags, ("%s: not told what to update.", __func__)); if (flags & XGMAC_MTU) mtu = if_getmtu(ifp); if (flags & XGMAC_PROMISC) promisc = if_getflags(ifp) & IFF_PROMISC ? 1 : 0; if (flags & XGMAC_ALLMULTI) allmulti = if_getflags(ifp) & IFF_ALLMULTI ? 1 : 0; if (flags & XGMAC_VLANEX) vlanex = if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING ? 1 : 0; if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, allmulti, 1, vlanex, false); if (rc) { if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc); return (rc); } } if (flags & XGMAC_UCADDR) { uint8_t ucaddr[ETHER_ADDR_LEN]; bcopy(if_getlladdr(ifp), ucaddr, sizeof(ucaddr)); rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, ucaddr, true, &vi->smt_idx); if (rc < 0) { rc = -rc; if_printf(ifp, "change_mac failed: %d\n", rc); return (rc); } else { vi->xact_addr_filt = rc; rc = 0; } } if (flags & XGMAC_MCADDRS) { struct epoch_tracker et; struct mcaddr_ctx ctx; int j; ctx.ifp = ifp; ctx.hash = 0; ctx.i = 0; ctx.del = 1; ctx.rc = 0; /* * Unlike other drivers, we accumulate list of pointers into * interface address lists and we need to keep it safe even * after if_foreach_llmaddr() returns, thus we must enter the * network epoch. 
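 * The accumulation itself is plain batching: pointers are collected into
 * a small array and flushed to the firmware every FW_MAC_EXACT_CHUNK (7)
 * entries, with any remainder flushed after the walk.  A standalone
 * sketch of that pattern (flush_chunk() is hypothetical):
 *
 *        static void
 *        program_all(const uint8_t **addrs, int n)
 *        {
 *                const uint8_t *chunk[FW_MAC_EXACT_CHUNK];
 *                int i, k = 0;
 *
 *                for (i = 0; i < n; i++) {
 *                        chunk[k++] = addrs[i];
 *                        if (k == FW_MAC_EXACT_CHUNK) {
 *                                flush_chunk(chunk, k);
 *                                k = 0;
 *                        }
 *                }
 *                if (k > 0)
 *                        flush_chunk(chunk, k);
 *        }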
*/ NET_EPOCH_ENTER(et); if_foreach_llmaddr(ifp, add_maddr, &ctx); if (ctx.rc < 0) { NET_EPOCH_EXIT(et); rc = -ctx.rc; return (rc); } if (ctx.i > 0) { rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx.del, ctx.i, ctx.mcaddr, NULL, &ctx.hash, 0); NET_EPOCH_EXIT(et); if (rc < 0) { rc = -rc; for (j = 0; j < ctx.i; j++) { if_printf(ifp, "failed to add mcast address" " %02x:%02x:%02x:" "%02x:%02x:%02x rc=%d\n", ctx.mcaddr[j][0], ctx.mcaddr[j][1], ctx.mcaddr[j][2], ctx.mcaddr[j][3], ctx.mcaddr[j][4], ctx.mcaddr[j][5], rc); } return (rc); } ctx.del = 0; } else NET_EPOCH_EXIT(et); rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0); if (rc != 0) if_printf(ifp, "failed to set mcast address hash: %d\n", rc); if (ctx.del == 0) { /* We clobbered the VXLAN entry if there was one. */ pi->vxlan_tcam_entry = false; } } if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 && pi->vxlan_tcam_entry == false) { rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac, match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, true); if (rc < 0) { rc = -rc; if_printf(ifp, "failed to add VXLAN TCAM entry: %d.\n", rc); } else { MPASS(rc == sc->rawf_base + pi->port_id); rc = 0; pi->vxlan_tcam_entry = true; } } return (rc); } /* * {begin|end}_synchronized_op must be called from the same thread. */ int begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, char *wmesg) { int rc, pri; #ifdef WITNESS /* the caller thinks it's ok to sleep, but is it really? */ if (flags & SLEEP_OK) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "begin_synchronized_op"); #endif if (INTR_OK) pri = PCATCH; else pri = 0; ADAPTER_LOCK(sc); for (;;) { if (vi && IS_DETACHING(vi)) { rc = ENXIO; goto done; } if (!IS_BUSY(sc)) { rc = 0; break; } if (!(flags & SLEEP_OK)) { rc = EBUSY; goto done; } if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { rc = EINTR; goto done; } } KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); SET_BUSY(sc); #ifdef INVARIANTS sc->last_op = wmesg; sc->last_op_thr = curthread; sc->last_op_flags = flags; #endif done: if (!(flags & HOLD_LOCK) || rc) ADAPTER_UNLOCK(sc); return (rc); } /* * Tell if_ioctl and if_init that the VI is going away. This is * special variant of begin_synchronized_op and must be paired with a * call to end_vi_detach. */ void begin_vi_detach(struct adapter *sc, struct vi_info *vi) { ADAPTER_LOCK(sc); SET_DETACHING(vi); wakeup(&sc->flags); while (IS_BUSY(sc)) mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); SET_BUSY(sc); #ifdef INVARIANTS sc->last_op = "t4detach"; sc->last_op_thr = curthread; sc->last_op_flags = 0; #endif ADAPTER_UNLOCK(sc); } void end_vi_detach(struct adapter *sc, struct vi_info *vi) { ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); CLR_DETACHING(vi); wakeup(&sc->flags); ADAPTER_UNLOCK(sc); } /* * {begin|end}_synchronized_op must be called from the same thread. 
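 * Together the pair implements a busy flag protected by the adapter lock:
 * begin sleeps on the flag word until the flag clears and then sets it,
 * end clears it and wakes any waiters.  Reduced to the essentials (no
 * interrupt, error, or detach handling; the op_ names are invented):
 *
 *        static void
 *        op_begin(struct adapter *sc)
 *        {
 *                ADAPTER_LOCK(sc);
 *                while (IS_BUSY(sc))
 *                        mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4op", 0);
 *                SET_BUSY(sc);
 *                ADAPTER_UNLOCK(sc);
 *        }
 *
 *        static void
 *        op_end(struct adapter *sc)
 *        {
 *                ADAPTER_LOCK(sc);
 *                CLR_BUSY(sc);
 *                wakeup(&sc->flags);
 *                ADAPTER_UNLOCK(sc);
 *        }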
*/ void end_synchronized_op(struct adapter *sc, int flags) { if (flags & LOCK_HELD) ADAPTER_LOCK_ASSERT_OWNED(sc); else ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); wakeup(&sc->flags); ADAPTER_UNLOCK(sc); } static int cxgbe_init_synchronized(struct vi_info *vi) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; if_t ifp = vi->ifp; int rc = 0, i; struct sge_txq *txq; ASSERT_SYNCHRONIZED_OP(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return (0); /* already running */ if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0)) return (rc); /* error message displayed already */ if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) return (rc); /* error message displayed already */ rc = update_mac_settings(ifp, XGMAC_ALL); if (rc) goto done; /* error message displayed already */ PORT_LOCK(pi); if (pi->up_vis == 0) { t4_update_port_info(pi); fixup_link_config(pi); build_medialist(pi); apply_link_config(pi); } rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); if (rc != 0) { if_printf(ifp, "enable_vi failed: %d\n", rc); PORT_UNLOCK(pi); goto done; } /* * Can't fail from this point onwards. Review cxgbe_uninit_synchronized * if this changes. */ for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_ENABLED; TXQ_UNLOCK(txq); } /* * The first iq of the first port to come up is used for tracing. */ if (sc->traceq < 0 && IS_MAIN_VI(vi)) { sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | V_QUEUENUMBER(sc->traceq)); pi->flags |= HAS_TRACEQ; } /* all ok */ pi->up_vis++; if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if (pi->link_cfg.link_ok) t4_os_link_changed(pi); PORT_UNLOCK(pi); mtx_lock(&vi->tick_mtx); if (vi->pi->nvi > 1 || sc->flags & IS_VF) callout_reset(&vi->tick, hz, vi_tick, vi); else callout_reset(&vi->tick, hz, cxgbe_tick, vi); mtx_unlock(&vi->tick_mtx); done: if (rc != 0) cxgbe_uninit_synchronized(vi); return (rc); } /* * Idempotent. */ static int cxgbe_uninit_synchronized(struct vi_info *vi) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; if_t ifp = vi->ifp; int rc, i; struct sge_txq *txq; ASSERT_SYNCHRONIZED_OP(sc); if (!(vi->flags & VI_INIT_DONE)) { if (__predict_false(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { KASSERT(0, ("uninited VI is running")); if_printf(ifp, "uninited VI with running ifnet. " "vi->flags 0x%016lx, if_flags 0x%08x, " "if_drv_flags 0x%08x\n", vi->flags, if_getflags(ifp), if_getdrvflags(ifp)); } return (0); } /* * Disable the VI so that all its data in either direction is discarded * by the MPS. Leave everything else (the queues, interrupts, and 1Hz * tick) intact as the TP can deliver negative advice or data that it's * holding in its RAM (for an offloaded connection) even after the VI is * disabled. 
*/ rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); if (rc) { if_printf(ifp, "disable_vi failed: %d\n", rc); return (rc); } for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags &= ~EQ_ENABLED; TXQ_UNLOCK(txq); } mtx_lock(&vi->tick_mtx); callout_stop(&vi->tick); mtx_unlock(&vi->tick_mtx); PORT_LOCK(pi); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { PORT_UNLOCK(pi); return (0); } if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); pi->up_vis--; if (pi->up_vis > 0) { PORT_UNLOCK(pi); return (0); } pi->link_cfg.link_ok = false; pi->link_cfg.speed = 0; pi->link_cfg.link_down_rc = 255; t4_os_link_changed(pi); PORT_UNLOCK(pi); return (0); } /* * It is ok for this function to fail midway and return right away. t4_detach * will walk the entire sc->irq list and clean up whatever is valid. */ int t4_setup_intr_handlers(struct adapter *sc) { int rc, rid, p, q, v; char s[8]; struct irq *irq; struct port_info *pi; struct vi_info *vi; struct sge *sge = &sc->sge; struct sge_rxq *rxq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #ifdef DEV_NETMAP struct sge_nm_rxq *nm_rxq; #endif #ifdef RSS int nbuckets = rss_getnumbuckets(); #endif /* * Setup interrupts. */ irq = &sc->irq[0]; rid = sc->intr_type == INTR_INTX ? 0 : 1; if (forwarding_intr_to_fwq(sc)) return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); /* Multiple interrupts. */ if (sc->flags & IS_VF) KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, ("%s: too few intr.", __func__)); else KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, ("%s: too few intr.", __func__)); /* The first one is always error intr on PFs */ if (!(sc->flags & IS_VF)) { rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); if (rc != 0) return (rc); irq++; rid++; } /* The second one is always the firmware event queue (first on VFs) */ rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); if (rc != 0) return (rc); irq++; rid++; for_each_port(sc, p) { pi = sc->port[p]; for_each_vi(pi, v, vi) { vi->first_intr = rid - 1; if (vi->nnmrxq > 0) { int n = max(vi->nrxq, vi->nnmrxq); rxq = &sge->rxq[vi->first_rxq]; #ifdef DEV_NETMAP nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; #endif for (q = 0; q < n; q++) { snprintf(s, sizeof(s), "%x%c%x", p, 'a' + v, q); if (q < vi->nrxq) irq->rxq = rxq++; #ifdef DEV_NETMAP if (q < vi->nnmrxq) irq->nm_rxq = nm_rxq++; if (irq->nm_rxq != NULL && irq->rxq == NULL) { /* Netmap rx only */ rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr, irq->nm_rxq, s); } if (irq->nm_rxq != NULL && irq->rxq != NULL) { /* NIC and Netmap rx */ rc = t4_alloc_irq(sc, irq, rid, t4_vi_intr, irq, s); } #endif if (irq->rxq != NULL && irq->nm_rxq == NULL) { /* NIC rx only */ rc = t4_alloc_irq(sc, irq, rid, t4_intr, irq->rxq, s); } if (rc != 0) return (rc); #ifdef RSS if (q < vi->nrxq) { bus_bind_intr(sc->dev, irq->res, rss_getcpu(q % nbuckets)); } #endif irq++; rid++; vi->nintr++; } } else { for_each_rxq(vi, q, rxq) { snprintf(s, sizeof(s), "%x%c%x", p, 'a' + v, q); rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq, s); if (rc != 0) return (rc); #ifdef RSS bus_bind_intr(sc->dev, irq->res, rss_getcpu(q % nbuckets)); #endif irq++; rid++; vi->nintr++; } } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, q, ofld_rxq) { snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q); rc = t4_alloc_irq(sc, irq, rid, t4_intr, ofld_rxq, s); if (rc != 0) return (rc); irq++; rid++; vi->nintr++; } #endif } } MPASS(irq == &sc->irq[sc->intr_count]); return (0); } static void write_global_rss_key(struct adapter *sc) { #ifdef RSS int i; uint32_t raw_rss_key[RSS_KEYSIZE / 
sizeof(uint32_t)]; uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; CTASSERT(RSS_KEYSIZE == 40); rss_getkey((void *)&raw_rss_key[0]); for (i = 0; i < nitems(rss_key); i++) { rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); } t4_write_rss_key(sc, &rss_key[0], -1, 1); #endif } /* * Idempotent. */ static int adapter_full_init(struct adapter *sc) { int rc, i; ASSERT_SYNCHRONIZED_OP(sc); /* * queues that belong to the adapter (not any particular port). */ rc = t4_setup_adapter_queues(sc); if (rc != 0) return (rc); MPASS(sc->params.nports <= nitems(sc->tq)); for (i = 0; i < sc->params.nports; i++) { if (sc->tq[i] != NULL) continue; sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->tq[i]); if (sc->tq[i] == NULL) { CH_ERR(sc, "failed to allocate task queue %d\n", i); return (ENOMEM); } taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", device_get_nameunit(sc->dev), i); } if (!(sc->flags & IS_VF)) { write_global_rss_key(sc); t4_intr_enable(sc); } return (0); } int adapter_init(struct adapter *sc) { int rc; ASSERT_SYNCHRONIZED_OP(sc); ADAPTER_LOCK_ASSERT_NOTOWNED(sc); KASSERT((sc->flags & FULL_INIT_DONE) == 0, ("%s: FULL_INIT_DONE already", __func__)); rc = adapter_full_init(sc); if (rc != 0) adapter_full_uninit(sc); else sc->flags |= FULL_INIT_DONE; return (rc); } /* * Idempotent. */ static void adapter_full_uninit(struct adapter *sc) { int i; t4_teardown_adapter_queues(sc); for (i = 0; i < nitems(sc->tq); i++) { if (sc->tq[i] == NULL) continue; taskqueue_free(sc->tq[i]); sc->tq[i] = NULL; } sc->flags &= ~FULL_INIT_DONE; } #ifdef RSS #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ RSS_HASHTYPE_RSS_UDP_IPV6) /* Translates kernel hash types to hardware. */ static int hashconfig_to_hashen(int hashconfig) { int hashen = 0; if (hashconfig & RSS_HASHTYPE_RSS_IPV4) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_IPV6) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; } if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; } if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; return (hashen); } /* Translates hardware hash types to kernel. */ static int hashen_to_hashconfig(int hashen) { int hashconfig = 0; if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { /* * If UDP hashing was enabled it must have been enabled for * either IPv4 or IPv6 (inclusive or). Enabling UDP without * enabling any 4-tuple hash is nonsense configuration. */ MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; } if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) hashconfig |= RSS_HASHTYPE_RSS_IPV4; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) hashconfig |= RSS_HASHTYPE_RSS_IPV6; return (hashconfig); } #endif /* * Idempotent. 
*/ static int vi_full_init(struct vi_info *vi) { struct adapter *sc = vi->adapter; struct sge_rxq *rxq; int rc, i, j; #ifdef RSS int nbuckets = rss_getnumbuckets(); int hashconfig = rss_gethashconfig(); int extra; #endif ASSERT_SYNCHRONIZED_OP(sc); /* * Allocate tx/rx/fl queues for this VI. */ rc = t4_setup_vi_queues(vi); if (rc != 0) return (rc); /* * Setup RSS for this VI. Save a copy of the RSS table for later use. */ if (vi->nrxq > vi->rss_size) { CH_ALERT(vi, "nrxq (%d) > hw RSS table size (%d); " "some queues will never receive traffic.\n", vi->nrxq, vi->rss_size); } else if (vi->rss_size % vi->nrxq) { CH_ALERT(vi, "nrxq (%d), hw RSS table size (%d); " "expect uneven traffic distribution.\n", vi->nrxq, vi->rss_size); } #ifdef RSS if (vi->nrxq != nbuckets) { CH_ALERT(vi, "nrxq (%d) != kernel RSS buckets (%d);" "performance will be impacted.\n", vi->nrxq, nbuckets); } #endif if (vi->rss == NULL) vi->rss = malloc(vi->rss_size * sizeof (*vi->rss), M_CXGBE, M_ZERO | M_WAITOK); for (i = 0; i < vi->rss_size;) { #ifdef RSS j = rss_get_indirection_to_bucket(i); j %= vi->nrxq; rxq = &sc->sge.rxq[vi->first_rxq + j]; vi->rss[i++] = rxq->iq.abs_id; #else for_each_rxq(vi, j, rxq) { vi->rss[i++] = rxq->iq.abs_id; if (i == vi->rss_size) break; } #endif } rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, vi->rss, vi->rss_size); if (rc != 0) { CH_ERR(vi, "rss_config failed: %d\n", rc); return (rc); } #ifdef RSS vi->hashen = hashconfig_to_hashen(hashconfig); /* * We may have had to enable some hashes even though the global config * wants them disabled. This is a potential problem that must be * reported to the user. */ extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig; /* * If we consider only the supported hash types, then the enabled hashes * are a superset of the requested hashes. In other words, there cannot * be any supported hash that was requested but not enabled, but there * can be hashes that were not requested but had to be enabled. */ extra &= SUPPORTED_RSS_HASHTYPES; MPASS((extra & hashconfig) == 0); if (extra) { CH_ALERT(vi, "global RSS config (0x%x) cannot be accommodated.\n", hashconfig); } if (extra & RSS_HASHTYPE_RSS_IPV4) CH_ALERT(vi, "IPv4 2-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) CH_ALERT(vi, "TCP/IPv4 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_IPV6) CH_ALERT(vi, "IPv6 2-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) CH_ALERT(vi, "TCP/IPv6 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) CH_ALERT(vi, "UDP/IPv4 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) CH_ALERT(vi, "UDP/IPv6 4-tuple hashing forced on.\n"); #else vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; #endif rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0], 0, 0); if (rc != 0) { CH_ERR(vi, "rss hash/defaultq config failed: %d\n", rc); return (rc); } return (0); } int vi_init(struct vi_info *vi) { int rc; ASSERT_SYNCHRONIZED_OP(vi->adapter); KASSERT((vi->flags & VI_INIT_DONE) == 0, ("%s: VI_INIT_DONE already", __func__)); rc = vi_full_init(vi); if (rc != 0) vi_full_uninit(vi); else vi->flags |= VI_INIT_DONE; return (rc); } /* * Idempotent. 
*/ static void vi_full_uninit(struct vi_info *vi) { if (vi->flags & VI_INIT_DONE) { quiesce_vi(vi); free(vi->rss, M_CXGBE); free(vi->nm_rss, M_CXGBE); } t4_teardown_vi_queues(vi); vi->flags &= ~VI_INIT_DONE; } static void quiesce_txq(struct sge_txq *txq) { struct sge_eq *eq = &txq->eq; struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; MPASS(eq->flags & EQ_SW_ALLOCATED); MPASS(!(eq->flags & EQ_ENABLED)); /* Wait for the mp_ring to empty. */ while (!mp_ring_is_idle(txq->r)) { mp_ring_check_drainage(txq->r, 4096); pause("rquiesce", 1); } MPASS(txq->txp.npkt == 0); if (eq->flags & EQ_HW_ALLOCATED) { /* * Hardware is alive and working normally. Wait for it to * finish and then wait for the driver to catch up and reclaim * all descriptors. */ while (spg->cidx != htobe16(eq->pidx)) pause("equiesce", 1); while (eq->cidx != eq->pidx) pause("dquiesce", 1); } else { /* * Hardware is unavailable. Discard all pending tx and reclaim * descriptors directly. */ TXQ_LOCK(txq); while (eq->cidx != eq->pidx) { struct mbuf *m, *nextpkt; struct tx_sdesc *txsd; txsd = &txq->sdesc[eq->cidx]; for (m = txsd->m; m != NULL; m = nextpkt) { nextpkt = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } IDXINCR(eq->cidx, txsd->desc_used, eq->sidx); } spg->pidx = spg->cidx = htobe16(eq->cidx); TXQ_UNLOCK(txq); } } static void quiesce_wrq(struct sge_wrq *wrq) { struct wrqe *wr; TXQ_LOCK(wrq); while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) { STAILQ_REMOVE_HEAD(&wrq->wr_list, link); #ifdef INVARIANTS wrq->nwr_pending--; wrq->ndesc_needed -= howmany(wr->wr_len, EQ_ESIZE); #endif free(wr, M_CXGBE); } MPASS(wrq->nwr_pending == 0); MPASS(wrq->ndesc_needed == 0); wrq->nwr_pending = 0; wrq->ndesc_needed = 0; TXQ_UNLOCK(wrq); } static void quiesce_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl) { /* Synchronize with the interrupt handler */ while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) pause("iqfree", 1); if (fl != NULL) { MPASS(iq->flags & IQ_HAS_FL); mtx_lock(&sc->sfl_lock); FL_LOCK(fl); fl->flags |= FL_DOOMED; FL_UNLOCK(fl); callout_stop(&sc->sfl_callout); mtx_unlock(&sc->sfl_lock); KASSERT((fl->flags & FL_STARVING) == 0, ("%s: still starving", __func__)); /* Release all buffers if hardware is no longer available. */ if (!(iq->flags & IQ_HW_ALLOCATED)) free_fl_buffers(sc, fl); } } /* * Wait for all activity on all the queues of the VI to complete. It is assumed * that no new work is being enqueued by the hardware or the driver. That part * should be arranged before calling this function. 
*/ static void quiesce_vi(struct vi_info *vi) { int i; struct adapter *sc = vi->adapter; struct sge_rxq *rxq; struct sge_txq *txq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct sge_ofld_txq *ofld_txq; #endif if (!(vi->flags & VI_INIT_DONE)) return; for_each_txq(vi, i, txq) { quiesce_txq(txq); } #if defined(TCP_OFFLOAD) || defined(RATELIMIT) for_each_ofld_txq(vi, i, ofld_txq) { quiesce_wrq(&ofld_txq->wrq); } #endif for_each_rxq(vi, i, rxq) { quiesce_iq_fl(sc, &rxq->iq, &rxq->fl); } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { quiesce_iq_fl(sc, &ofld_rxq->iq, &ofld_rxq->fl); } #endif } static int t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, driver_intr_t *handler, void *arg, char *name) { int rc; irq->rid = rid; irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, RF_SHAREABLE | RF_ACTIVE); if (irq->res == NULL) { device_printf(sc->dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, NULL, handler, arg, &irq->tag); if (rc != 0) { device_printf(sc->dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name, rc); } else if (name) bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); return (rc); } static int t4_free_irq(struct adapter *sc, struct irq *irq) { if (irq->tag) bus_teardown_intr(sc->dev, irq->res, irq->tag); if (irq->res) bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); bzero(irq, sizeof(*irq)); return (0); } static void get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) { regs->version = chip_id(sc) | chip_rev(sc) << 10; t4_get_regs(sc, buf, regs->len); } #define A_PL_INDIR_CMD 0x1f8 #define S_PL_AUTOINC 31 #define M_PL_AUTOINC 0x1U #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) #define S_PL_VFID 20 #define M_PL_VFID 0xffU #define V_PL_VFID(x) ((x) << S_PL_VFID) #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) #define S_PL_ADDR 0 #define M_PL_ADDR 0xfffffU #define V_PL_ADDR(x) ((x) << S_PL_ADDR) #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) #define A_PL_INDIR_DATA 0x1fc static uint64_t read_vf_stat(struct adapter *sc, u_int vin, int reg) { u32 stats[2]; if (sc->flags & IS_VF) { stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); } else { mtx_assert(&sc->reg_lock, MA_OWNED); t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg))); stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); } return (((uint64_t)stats[1]) << 32 | stats[0]); } static void t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats) { #define GET_STAT(name) \ read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L) if (!(sc->flags & IS_VF)) mtx_lock(&sc->reg_lock); stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 
stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); if (!(sc->flags & IS_VF)) mtx_unlock(&sc->reg_lock); #undef GET_STAT } static void t4_clr_vi_stats(struct adapter *sc, u_int vin) { int reg; t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) t4_write_reg(sc, A_PL_INDIR_DATA, 0); } static void vi_refresh_stats(struct vi_info *vi) { struct timeval tv; const struct timeval interval = {0, 250000}; /* 250ms */ mtx_assert(&vi->tick_mtx, MA_OWNED); if (vi->flags & VI_SKIP_STATS) return; getmicrotime(&tv); timevalsub(&tv, &interval); if (timevalcmp(&tv, &vi->last_refreshed, <)) return; t4_get_vi_stats(vi->adapter, vi->vin, &vi->stats); getmicrotime(&vi->last_refreshed); } static void cxgbe_refresh_stats(struct vi_info *vi) { u_int i, v, tnl_cong_drops, chan_map; struct timeval tv; const struct timeval interval = {0, 250000}; /* 250ms */ struct port_info *pi; struct adapter *sc; mtx_assert(&vi->tick_mtx, MA_OWNED); if (vi->flags & VI_SKIP_STATS) return; getmicrotime(&tv); timevalsub(&tv, &interval); if (timevalcmp(&tv, &vi->last_refreshed, <)) return; pi = vi->pi; sc = vi->adapter; tnl_cong_drops = 0; t4_get_port_stats(sc, pi->port_id, &pi->stats); chan_map = pi->rx_e_chan_map; while (chan_map) { i = ffs(chan_map) - 1; mtx_lock(&sc->reg_lock); t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, A_TP_MIB_TNL_CNG_DROP_0 + i); mtx_unlock(&sc->reg_lock); tnl_cong_drops += v; chan_map &= ~(1 << i); } pi->tnl_cong_drops = tnl_cong_drops; getmicrotime(&vi->last_refreshed); } static void cxgbe_tick(void *arg) { struct vi_info *vi = arg; MPASS(IS_MAIN_VI(vi)); mtx_assert(&vi->tick_mtx, MA_OWNED); cxgbe_refresh_stats(vi); callout_schedule(&vi->tick, hz); } static void vi_tick(void *arg) { struct vi_info *vi = arg; mtx_assert(&vi->tick_mtx, MA_OWNED); vi_refresh_stats(vi); callout_schedule(&vi->tick, hz); } /* * Should match fw_caps_config_ enums in t4fw_interface.h */ static char *caps_decoder[] = { "\20\001IPMI\002NCSI", /* 0: NBM */ "\20\001PPP\002QFC\003DCBX", /* 1: link */ "\20\001INGRESS\002EGRESS", /* 2: switch */ "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ "\006HASHFILTER\007ETHOFLD", "\20\001TOE", /* 4: TOE */ "\20\001RDDP\002RDMAC", /* 5: RDMA */ "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" "\007T10DIF" "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", "\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE" /* 7: Crypto */ "\004TLS_HW", "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ "\004PO_INITIATOR\005PO_TARGET", }; void t4_sysctls(struct adapter *sc) { struct sysctl_ctx_list *ctx = &sc->ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children, *c0; static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; /* * dev.t4nex.X. 
*/ oid = device_get_sysctl_tree(sc->dev); c0 = children = SYSCTL_CHILDREN(oid); sc->sc_do_rxcopy = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, sc->params.nports, "# of ports"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, doorbells, (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A", "available doorbells"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, sc->params.vpd.cclk, "core clock frequency (in KHz)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", "interrupt holdoff timer values (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", "interrupt holdoff packet counter values"); t4_sge_sysctls(sc, ctx, children); sc->lro_timeout = 100; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, &sc->debug_flags, 0, "flags to enable runtime debugging"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, 0, "firmware version"); if (sc->flags & IS_VF) return; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, NULL, chip_rev(sc), "chip hardware revision"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version", CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, sc->er_version, 0, "expansion ROM version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, sc->bs_version, 0, "bootstrap firmware version"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, NULL, sc->params.scfg_vers, "serial config version"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, NULL, sc->params.vpd_vers, "VPD version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, sc->cfcsum, "config file checksum"); #define SYSCTL_CAP(name, n, text) \ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, caps_decoder[n], \ (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \ "available " text " capabilities") SYSCTL_CAP(nbmcaps, 0, "NBM"); SYSCTL_CAP(linkcaps, 1, "link"); SYSCTL_CAP(switchcaps, 2, "switch"); SYSCTL_CAP(niccaps, 3, "NIC"); SYSCTL_CAP(toecaps, 4, "TCP offload"); SYSCTL_CAP(rdmacaps, 5, "RDMA"); SYSCTL_CAP(iscsicaps, 6, "iSCSI"); SYSCTL_CAP(cryptocaps, 7, "crypto"); SYSCTL_CAP(fcoecaps, 8, "FCoE"); #undef SYSCTL_CAP 
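	/*
	 * Everything registered from here on is PF-only; the VF driver
	 * returned earlier in this function.  These nodes are read from
	 * userland with sysctl(8), e.g. "sysctl -n dev.t4nex.0.temperature"
	 * (illustrative; the nexus is named t4nex/t5nex/t6nex by chip).
	 */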
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, NULL, sc->tids.nftids, "number of filters"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_temperature, "I", "chip temperature (in Celsius)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_reset_sensor, "I", "reset the chip's temperature sensor."); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_loadavg, "A", "microprocessor load averages (debug firmwares only)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd, "I", "core Vdd (in mV)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, LOCAL_CPUS, sysctl_cpus, "A", "local CPUs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, INTR_CPUS, sysctl_cpus, "A", "preferred CPUs for interrupts"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW, &sc->swintr, 0, "software triggered interrupts"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_reset, "I", "1 = reset adapter, 0 = zero reset counter"); /* * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. */ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, "logs and miscellaneous information"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cctrl, "A", "congestion control"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1, sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2, sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3, sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4, sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5, sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_la, "A", "CIM logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); if (chip_id(sc) > CHELSIO_T4) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_qcfg, "A", "CIM queue configuration"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cpl_stats, "A", "CPL statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_ddp_stats, "A", "non-TCP DDP statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tid_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tid_stats, "A", "tid stats"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_devlog, "A", "firmware's device log"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_fcoe_stats, "A", "FCoE statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_hw_sched, "A", "hardware scheduler "); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_l2t, "A", "hardware L2 table"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_smt, "A", "hardware source MAC table"); #ifdef INET6 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_clip, "A", "active CLIP table entries"); #endif SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_lb_stats, "A", "loopback statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_meminfo, "A", "memory regions"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, "A", "MPS TCAM entries"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_path_mtus, "A", "path MTUs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_pm_stats, "A", "PM statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_rdma_stats, "A", "RDMA statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tcp_stats, "A", "TCP statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tids, "A", "TID information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_err_stats, "A", "TP error statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tnl_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tnl_stats, "A", "TP tunnel statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_la, "A", "TP logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tx_rate, "A", "Tx rate"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_ulprx_la, "A", "ULPRX logic analyzer"); if (chip_id(sc) >= CHELSIO_T5) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_wcwr_stats, "A", "write combined work requests"); } #ifdef KERN_TLS if (is_ktls(sc)) { /* * dev.t4nex.0.tls. */ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "tls", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "KERN_TLS parameters"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "inline_keys", CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS " "keys in work requests (1) or attempt to store TLS keys " "in card memory."); if (is_t6(sc)) SYSCTL_ADD_INT(ctx, children, OID_AUTO, "combo_wrs", CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to " "combine TCB field updates with TLS record work " "requests."); } #endif #ifdef TCP_OFFLOAD if (is_offload(sc)) { int i; char s[4]; /* * dev.t4nex.X.toe. 
*/ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE parameters"); children = SYSCTL_CHILDREN(oid); sc->tt.cong_algorithm = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm", CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control " "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, " "3 = highspeed)"); sc->tt.sndbuf = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, &sc->tt.sndbuf, 0, "hardware send buffer"); sc->tt.ddp = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, ""); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW, &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)"); sc->tt.rx_coalesce = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); sc->tt.tls = 1; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I", "Inline TLS allowed"); sc->tt.tx_align = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); sc->tt.tx_zcopy = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", CTLFLAG_RW, &sc->tt.tx_zcopy, 0, "Enable zero-copy aio_write(2)"); sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cop_managed_offloading", CTLFLAG_RW, &sc->tt.cop_managed_offloading, 0, "COP (Connection Offload Policy) controls all TOE offload"); sc->tt.autorcvbuf_inc = 16 * 1024; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc", CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0, "autorcvbuf increment"); sc->tt.update_hc_on_pmtu_change = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "update_hc_on_pmtu_change", CTLFLAG_RW, &sc->tt.update_hc_on_pmtu_change, 0, "Update hostcache entry if the PMTU changes"); sc->tt.iso = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "iso", CTLFLAG_RW, &sc->tt.iso, 0, "Enable iSCSI segmentation offload"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_tick, "A", "TP timer tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1, sysctl_tp_tick, "A", "TCP timestamp tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2, sysctl_tp_tick, "A", "DACK tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_dack_timer, "IU", "DACK timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_RXT_MIN, sysctl_tp_timer, "LU", "Minimum retransmit interval (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_RXT_MAX, sysctl_tp_timer, "LU", "Maximum retransmit interval (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_PERS_MIN, sysctl_tp_timer, "LU", "Persist timer min (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_PERS_MAX, sysctl_tp_timer, "LU", "Persist timer max (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_KEEP_IDLE, sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval", 
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_KEEP_INTVL, sysctl_tp_timer, "LU",
		    "Keepalive interval timer (us)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_INIT_SRTT, sysctl_tp_timer, "LU",
		    "Initial SRTT (us)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
		    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU",
		    "FINWAIT2 timer (us)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU",
		    "Number of SYN retransmissions before abort");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU",
		    "Number of retransmissions before abort");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
		    S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU",
		    "Number of keepalive probes before abort");

		oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "TOE retransmit backoffs");
		children = SYSCTL_CHILDREN(oid);
		for (i = 0; i < 16; i++) {
			snprintf(s, sizeof(s), "%u", i);
			SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
			    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
			    sysctl_tp_backoff, "IU", "TOE retransmit backoff");
		}
	}
#endif
}

void
vi_sysctls(struct vi_info *vi)
{
	struct sysctl_ctx_list *ctx = &vi->ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	/*
	 * dev.v?(cxgbe|cxl).X.
	 */
	oid = device_get_sysctl_tree(vi->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
	    vi->viid, "VI identifier");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &vi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &vi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &vi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &vi->first_txq, 0, "index of first tx queue");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL,
	    vi->rss_base, "start of RSS indirection table");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
	    vi->rss_size, "size of RSS indirection table");

	if (IS_MAIN_VI(vi)) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
		    sysctl_noflowq, "IU",
		    "Reserve queue 0 for non-flowid packets");
	}

	if (vi->adapter->flags & IS_VF) {
		MPASS(vi->flags & TX_USES_VM_WR);
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_vm_wr",
		    CTLFLAG_RD, NULL, 1, "use VM work requests for transmit");
	} else {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_vm_wr",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
		    sysctl_tx_vm_wr, "I",
		    "use VM work requests for transmit");
	}

#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &vi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		    "holdoff_tmr_idx_ofld",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
		    sysctl_holdoff_tmr_idx_ofld, "I",
		    "holdoff timer index for TOE queues");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
		    "holdoff_pktc_idx_ofld", CTLTYPE_INT | CTLFLAG_RW |
CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_pktc_idx_ofld, "I", "holdoff packet counter index for TOE queues"); } #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) if (vi->nofldtxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, &vi->nofldtxq, 0, "# of tx queues for TOE/ETHOFLD"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", CTLFLAG_RD, &vi->first_ofld_txq, 0, "index of first TOE/ETHOFLD tx queue"); } #endif #ifdef DEV_NETMAP if (vi->nnmrxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, &vi->nnmrxq, 0, "# of netmap rx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, &vi->nnmtxq, 0, "# of netmap tx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", CTLFLAG_RD, &vi->first_nm_rxq, 0, "index of first netmap rx queue"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", CTLFLAG_RD, &vi->first_nm_txq, 0, "index of first netmap tx queue"); } #endif SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_tmr_idx, "I", "holdoff timer index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_pktc_idx, "I", "holdoff packet counter index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_qsize_rxq, "I", "rx queue size"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_qsize_txq, "I", "tx queue size"); } static void cxgbe_sysctls(struct port_info *pi) { struct sysctl_ctx_list *ctx = &pi->ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children, *children2; struct adapter *sc = pi->adapter; int i; char name[16]; static char *tc_flags = {"\20\1USER"}; /* * dev.cxgbe.X. 
*/ oid = device_get_sysctl_tree(pi->dev); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0, sysctl_btphy, "I", "PHY temperature (in Celsius)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 1, sysctl_btphy, "I", "PHY firmware version"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_pause_settings, "A", "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "link_fec", CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_link_fec, "A", "FEC in use on the link"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "requested_fec", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_requested_fec, "A", "FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec", CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_module_fec, "A", "FEC recommended by the cable/transceiver"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_autoneg, "I", "autonegotiation (-1 = not supported)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "force_fec", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_force_fec, "I", "when to use FORCE_FEC bit for link config"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rcaps", CTLFLAG_RD, &pi->link_cfg.requested_caps, 0, "L1 config requested by driver"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD, &pi->link_cfg.pcaps, 0, "port capabilities"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD, &pi->link_cfg.acaps, 0, "advertised capabilities"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD, &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, port_top_speed(pi), "max speed (in Gbps)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL, pi->mps_bg_map, "MPS buffer group map"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD, NULL, pi->rx_e_chan_map, "TP rx e-channel map"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_chan", CTLFLAG_RD, NULL, pi->tx_chan, "TP tx c-channel"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_chan", CTLFLAG_RD, NULL, pi->rx_chan, "TP rx c-channel"); if (sc->flags & IS_VF) return; /* * dev.(cxgbe|cxl).X.tc. 
*/ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx scheduler traffic classes (cl_rl)"); children2 = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize", CTLFLAG_RW, &pi->sched_params->pktsize, 0, "pktsize for per-flow cl-rl (0 means up to the driver )"); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize", CTLFLAG_RW, &pi->sched_params->burstsize, 0, "burstsize for per-flow cl-rl (0 means up to the driver)"); for (i = 0; i < sc->params.nsched_cls; i++) { struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; snprintf(name, sizeof(name), "%d", i); children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "traffic class")); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "state", CTLFLAG_RD, &tc->state, 0, "current state"); SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, tc_flags, (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags"); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", CTLFLAG_RD, &tc->refcount, 0, "references to this class"); SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, (pi->port_id << 16) | i, sysctl_tc_params, "A", "traffic class parameters"); } /* * dev.cxgbe.X.stats. */ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port statistics"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, &pi->tx_parse_error, 0, "# of tx packets with invalid length or # of segments"); #define T4_REGSTAT(name, stat, desc) \ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \ t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \ sysctl_handle_t4_reg64, "QU", desc) /* We get these from port_stats and they may be stale by up to 1s */ #define T4_PORTSTAT(name, desc) \ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ &pi->stats.name, desc) T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames"); T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames"); T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames"); T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames"); T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames"); T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames"); T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range"); T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames"); T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted"); T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted"); T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted"); T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted"); T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted"); T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted"); T4_REGSTAT(tx_ppp5, 
TX_PORT_PPP5, "# of PPP prio 5 frames transmitted"); T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted"); T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted"); T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames"); T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames"); T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames"); T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames"); T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames"); T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU"); T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames"); if (is_t6(sc)) { T4_PORTSTAT(rx_fcs_err, "# of frames received with bad FCS since last link up"); } else { T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR, "# of frames received with bad FCS"); } T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error"); T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors"); T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received"); T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range"); T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received"); T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received"); T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received"); T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received"); T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received"); T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received"); T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received"); T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received"); T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received"); T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows"); T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows"); T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows"); T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows"); T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets"); T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets"); T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets"); T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets"); #undef T4_REGSTAT #undef T4_PORTSTAT } static int sysctl_int_array(SYSCTL_HANDLER_ARGS) { int rc, *i, space = 0; struct sbuf sb; sbuf_new_for_sysctl(&sb, NULL, 64, req); for (i = arg1; arg2; arg2 -= sizeof(int), i++) { if (space) sbuf_printf(&sb, " "); sbuf_printf(&sb, "%d", *i); space = 1; } rc = sbuf_finish(&sb); sbuf_delete(&sb); return (rc); } static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS) { int rc; struct sbuf *sb; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS) { int rc; struct sbuf *sb; sb = sbuf_new_for_sysctl(NULL, 
NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_btphy(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; int op = arg2; struct adapter *sc = pi->adapter; u_int v; int rc; rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { /* XXX: magic numbers */ rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820, &v); } end_synchronized_op(sc, 0); if (rc) return (rc); if (op == 0) v /= 256; rc = sysctl_handle_int(oidp, &v, 0, req); return (rc); } static int sysctl_noflowq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; int rc, val; val = vi->rsrv_noflowq; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if ((val >= 1) && (vi->ntxq > 1)) vi->rsrv_noflowq = 1; else vi->rsrv_noflowq = 0; return (rc); } static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int rc, val, i; MPASS(!(sc->flags & IS_VF)); val = vi->flags & TX_USES_VM_WR ? 1 : 0; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (val != 0 && val != 1) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4txvm"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else if (if_getdrvflags(vi->ifp) & IFF_DRV_RUNNING) { /* * We don't want parse_pkt to run with one setting (VF or PF) * and then eth_tx to see a different setting but still use * stale information calculated by parse_pkt. */ rc = EBUSY; } else { struct port_info *pi = vi->pi; struct sge_txq *txq; uint32_t ctrl0; uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr; if (val) { vi->flags |= TX_USES_VM_WR; if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO); ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | V_TXPKT_INTF(pi->tx_chan)); if (!(sc->flags & IS_VF)) npkt--; } else { vi->flags &= ~TX_USES_VM_WR; if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO); ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); } for_each_txq(vi, i, txq) { txq->cpl_ctrl0 = ctrl0; txq->txp.max_npkt = npkt; } } end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc, i; struct sge_rxq *rxq; uint8_t v; idx = vi->tmr_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < 0 || idx >= SGE_NTIMERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4tmr"); if (rc) return (rc); v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); for_each_rxq(vi, i, rxq) { #ifdef atomic_store_rel_8 atomic_store_rel_8(&rxq->iq.intr_params, v); #else rxq->iq.intr_params = v; #endif } vi->tmr_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (0); } static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc; idx = vi->pktc_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < -1 || idx >= SGE_NCOUNTERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4pktc"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed 
once the queues are created */ else vi->pktc_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int qsize, rc; qsize = vi->qsize_rxq; rc = sysctl_handle_int(oidp, &qsize, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (qsize < 128 || (qsize & 7)) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4rxqs"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->qsize_rxq = qsize; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int qsize, rc; qsize = vi->qsize_txq; rc = sysctl_handle_int(oidp, &qsize, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (qsize < 128 || qsize > 65536) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4txqs"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->qsize_txq = qsize; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; if (req->newptr == NULL) { struct sbuf *sb; static char *bits = "\20\1RX\2TX\3AUTO"; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok) { sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) | (lc->requested_fc & PAUSE_AUTONEG), bits); } else { sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG), bits); } rc = sbuf_finish(sb); sbuf_delete(sb); } else { char s[2]; int n; s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)); s[1] = 0; rc = sysctl_handle_string(oidp, s, sizeof(s), req); if (rc != 0) return(rc); if (s[1] != 0) return (EINVAL); if (s[0] < '0' || s[0] > '9') return (EINVAL); /* not a number */ n = s[0] - '0'; if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) return (EINVAL); /* some other bit is set too */ rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4PAUSE"); if (rc) return (rc); if (!hw_off_limits(sc)) { PORT_LOCK(pi); lc->requested_fc = n; fixup_link_config(pi); if (pi->up_vis > 0) rc = apply_link_config(pi); set_current_media(pi); PORT_UNLOCK(pi); } end_synchronized_op(sc, 0); } return (rc); } static int sysctl_link_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct link_config *lc = &pi->link_cfg; int rc; struct sbuf *sb; static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2"; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok) sbuf_printf(sb, "%b", lc->fec, bits); else sbuf_printf(sb, "no link"); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; int8_t old; if (req->newptr == NULL) { struct sbuf *sb; static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2" "\5RSVD3\6auto\7module"; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", lc->requested_fec, bits); rc = sbuf_finish(sb); sbuf_delete(sb); } else { char s[8]; int n; snprintf(s, sizeof(s), "%d", lc->requested_fec == FEC_AUTO ? 
-1 : lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE)); rc = sysctl_handle_string(oidp, s, sizeof(s), req); if (rc != 0) return(rc); n = strtol(&s[0], NULL, 0); if (n < 0 || n & FEC_AUTO) n = FEC_AUTO; else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE)) return (EINVAL);/* some other bit is set too */ rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4reqf"); if (rc) return (rc); PORT_LOCK(pi); old = lc->requested_fec; if (n == FEC_AUTO) lc->requested_fec = FEC_AUTO; else if (n == 0 || n == FEC_NONE) lc->requested_fec = FEC_NONE; else { if ((lc->pcaps | V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) != lc->pcaps) { rc = ENOTSUP; goto done; } lc->requested_fec = n & (M_FW_PORT_CAP32_FEC | FEC_MODULE); } if (!hw_off_limits(sc)) { fixup_link_config(pi); if (pi->up_vis > 0) { rc = apply_link_config(pi); if (rc != 0) { lc->requested_fec = old; if (rc == FW_EPROTO) rc = ENOTSUP; } } } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); } return (rc); } static int sysctl_module_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; int8_t fec; struct sbuf *sb; static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3"; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) { rc = EBUSY; goto done; } if (hw_off_limits(sc)) { rc = ENXIO; goto done; } PORT_LOCK(pi); if (pi->up_vis == 0) { /* * If all the interfaces are administratively down the firmware * does not report transceiver changes. Refresh port info here. * This is the only reason we have a synchronized op in this * function. Just PORT_LOCK would have been enough otherwise. */ t4_update_port_info(pi); } fec = lc->fec_hint; if (pi->mod_type == FW_PORT_MOD_TYPE_NONE || !fec_supported(lc->pcaps)) { PORT_UNLOCK(pi); sbuf_printf(sb, "n/a"); } else { if (fec == 0) fec = FEC_NONE; PORT_UNLOCK(pi); sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits); } rc = sbuf_finish(sb); done: sbuf_delete(sb); end_synchronized_op(sc, 0); return (rc); } static int sysctl_autoneg(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc, val; if (lc->pcaps & FW_PORT_CAP32_ANEG) val = lc->requested_aneg == AUTONEG_DISABLE ? 
	    0 : 1;
	else
		val = -1;
	rc = sysctl_handle_int(oidp, &val, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (val == 0)
		val = AUTONEG_DISABLE;
	else if (val == 1)
		val = AUTONEG_ENABLE;
	else
		val = AUTONEG_AUTO;

	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
	    "t4aneg");
	if (rc)
		return (rc);
	PORT_LOCK(pi);
	if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
		rc = ENOTSUP;
		goto done;
	}
	lc->requested_aneg = val;
	if (!hw_off_limits(sc)) {
		fixup_link_config(pi);
		if (pi->up_vis > 0)
			rc = apply_link_config(pi);
		set_current_media(pi);
	}
done:
	PORT_UNLOCK(pi);
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
sysctl_force_fec(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc, val;

	val = lc->force_fec;
	MPASS(val >= -1 && val <= 1);
	rc = sysctl_handle_int(oidp, &val, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);
	if (!(lc->pcaps & FW_PORT_CAP32_FORCE_FEC))
		return (ENOTSUP);
	if (val < -1 || val > 1)
		return (EINVAL);

	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4ff");
	if (rc)
		return (rc);
	PORT_LOCK(pi);
	lc->force_fec = val;
	if (!hw_off_limits(sc)) {
		fixup_link_config(pi);
		if (pi->up_vis > 0)
			rc = apply_link_config(pi);
	}
	PORT_UNLOCK(pi);
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, reg = arg2;
	uint64_t val;

	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else {
		rc = 0;
		val = t4_read_reg64(sc, reg);
	}
	mtx_unlock(&sc->reg_lock);
	if (rc == 0)
		rc = sysctl_handle_64(oidp, &val, 0, req);
	return (rc);
}

static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, t;
	uint32_t param, val;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
	if (rc)
		return (rc);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else {
		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
		    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	}
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	/* unknown is returned as 0 but we display -1 in that case */
	t = val == 0 ? -1 : val;

	rc = sysctl_handle_int(oidp, &t, 0, req);
	return (rc);
}

static int
sysctl_vdd(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc;
	uint32_t param, val;

	if (sc->params.core_vdd == 0) {
		rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
		    "t4vdd");
		if (rc)
			return (rc);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else {
			param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
			    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
			rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1,
			    &param, &val);
		}
		end_synchronized_op(sc, 0);
		if (rc)
			return (rc);
		sc->params.core_vdd = val;
	}

	return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req));
}

static int
sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, v;
	uint32_t param, val;

	v = sc->sensor_resets;
	rc = sysctl_handle_int(oidp, &v, 0, req);
	if (rc != 0 || req->newptr == NULL || v <= 0)
		return (rc);

	if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) ||
	    chip_id(sc) < CHELSIO_T5)
		return (ENOTSUP);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
	if (rc)
		return (rc);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else {
		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
		    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR));
		val = 1;
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	}
	end_synchronized_op(sc, 0);
	if (rc == 0)
		sc->sensor_resets++;

	return (rc);
}

static int
sysctl_loadavg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	uint32_t param, val;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
	if (rc)
		return (rc);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else {
		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	}
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	if (val == 0xffffffff) {
		/* Only debug and custom firmwares report load averages.
*/ sbuf_printf(sb, "not available"); } else { sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cctrl(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint16_t incr[NMTUS][NCCTRL_WIN]; static const char *dec_fac[] = { "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", "0.9375" }; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_read_cong_tbl(sc, incr); mtx_unlock(&sc->reg_lock); if (rc) goto done; for (i = 0; i < NCCTRL_WIN; ++i) { sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], incr[5][i], incr[6][i], incr[7][i]); sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", incr[8][i], incr[9][i], incr[10][i], incr[11][i], incr[12][i], incr[13][i], incr[14][i], incr[15][i], sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); return (rc); } static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ }; static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, n, qid = arg2; uint32_t *buf, *p; char *qtype; u_int cim_num_obq = sc->chip_params->cim_num_obq; KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, ("%s: bad qid %d\n", __func__, qid)); if (qid < CIM_NUM_IBQ) { /* inbound queue */ qtype = "IBQ"; n = 4 * CIM_IBQ_SIZE; buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = -ENXIO; else rc = t4_read_cim_ibq(sc, qid, buf, n); mtx_unlock(&sc->reg_lock); } else { /* outbound queue */ qtype = "OBQ"; qid -= CIM_NUM_IBQ; n = 4 * cim_num_obq * CIM_OBQ_SIZE; buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = -ENXIO; else rc = t4_read_cim_obq(sc, qid, buf, n); mtx_unlock(&sc->reg_lock); } if (rc < 0) { rc = -rc; goto done; } n = rc * sizeof(uint32_t); /* rc has # of words actually read */ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); if (sb == NULL) { rc = ENOMEM; goto done; } sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); for (i = 0, p = buf; i < n; i += 16, p += 4) sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], p[2], p[3]); rc = sbuf_finish(sb); sbuf_delete(sb); done: free(buf, M_CXGBE); return (rc); } static void sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) { uint32_t *p; sbuf_printf(sb, "Status Data PC%s", cfg & F_UPDBGLACAPTPCONLY ? 
"" : " LS0Stat LS0Addr LS0Data"); for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { if (cfg & F_UPDBGLACAPTPCONLY) { sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, p[6], p[7]); sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, p[4] & 0xff, p[5] >> 8); sbuf_printf(sb, "\n %02x %x%07x %x%07x", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4); } else { sbuf_printf(sb, "\n %02x %x%07x %x%07x %08x %08x " "%08x%08x%08x%08x", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], p[6], p[7]); } } } static void sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) { uint32_t *p; sbuf_printf(sb, "Status Inst Data PC%s", cfg & F_UPDBGLACAPTPCONLY ? "" : " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { if (cfg & F_UPDBGLACAPTPCONLY) { sbuf_printf(sb, "\n %02x %08x %08x %08x", p[3] & 0xff, p[2], p[1], p[0]); sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, p[6] >> 16); } else { sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " "%08x %08x %08x %08x %08x %08x", (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, p[6] >> 16, p[2], p[1], p[0], p[5], p[4], p[3]); } } } static int sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags) { uint32_t cfg, *buf; int rc; MPASS(flags == M_WAITOK || flags == M_NOWAIT); buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, M_ZERO | flags); if (buf == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); if (rc == 0) rc = -t4_cim_read_la(sc, buf, NULL); } mtx_unlock(&sc->reg_lock); if (rc == 0) { if (chip_id(sc) < CHELSIO_T6) sbuf_cim_la4(sc, sb, buf, cfg); else sbuf_cim_la6(sc, sb, buf, cfg); } free(buf, M_CXGBE); return (rc); } static int sysctl_cim_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = sbuf_cim_la(sc, sb, M_WAITOK); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static void dump_cim_regs(struct adapter *sc) { log(LOG_DEBUG, "%s: CIM debug regs1 %08x %08x %08x %08x %08x\n", device_get_nameunit(sc->dev), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA2), t4_read_reg(sc, A_EDC_H_BIST_DATA_PATTERN), t4_read_reg(sc, A_EDC_H_BIST_STATUS_RDATA)); log(LOG_DEBUG, "%s: CIM debug regs2 %08x %08x %08x %08x %08x\n", device_get_nameunit(sc->dev), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0 + 0x800), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1 + 0x800), t4_read_reg(sc, A_EDC_H_BIST_CMD_LEN)); } static void dump_cimla(struct adapter *sc) { struct sbuf sb; int rc; if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) { log(LOG_DEBUG, "%s: failed to generate CIM LA dump.\n", device_get_nameunit(sc->dev)); return; } rc = sbuf_cim_la(sc, &sb, M_WAITOK); if (rc == 0) { rc = sbuf_finish(&sb); if (rc == 0) { log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s\n", device_get_nameunit(sc->dev), sbuf_data(&sb)); } 
} sbuf_delete(&sb); } void t4_os_cim_err(struct adapter *sc) { atomic_set_int(&sc->error_flags, ADAP_CIM_ERR); } static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int i; struct sbuf *sb; uint32_t *buf, *p; int rc; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); mtx_unlock(&sc->reg_lock); if (rc) goto done; p = buf; for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], p[1], p[0]); } sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, (p[1] >> 2) | ((p[2] & 3) << 30), (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, p[0] & 1); } rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int i; struct sbuf *sb; uint32_t *buf, *p; int rc; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); mtx_unlock(&sc->reg_lock); if (rc) goto done; p = buf; sbuf_printf(sb, "Cntl ID DataBE Addr Data"); for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]); } sbuf_printf(sb, "\n\nCntl ID Data"); for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; uint16_t thres[CIM_NUM_IBQ]; uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; cim_num_obq = sc->chip_params->cim_num_obq; if (is_t4(sc)) { ibq_rdaddr = A_UP_IBQ_0_RDADDR; obq_rdaddr = A_UP_OBQ_0_REALADDR; } else { ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; } nq = CIM_NUM_IBQ + cim_num_obq; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); if (rc == 0) { rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); if (rc == 0) t4_read_cimq_cfg(sc, base, size, thres); } } mtx_unlock(&sc->reg_lock); if (rc) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), G_QUEREMFLITS(p[2]) * 16); for ( ; i < nq; i++, p += 4, wr += 2) sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 
base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), G_QUEREMFLITS(p[2]) * 16); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_cpl_stats stats; sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_cpl_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (rc) goto done; if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3"); sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", stats.req[0], stats.req[1], stats.req[2], stats.req[3]); sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); } else { sbuf_printf(sb, " channel 0 channel 1"); sbuf_printf(sb, "\nCPL requests: %10u %10u", stats.req[0], stats.req[1]); sbuf_printf(sb, "\nCPL responses: %10u %10u", stats.rsp[0], stats.rsp[1]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); return (rc); } static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_usm_stats stats; sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_get_usm_stats(sc, &stats, 1); mtx_unlock(&sc->reg_lock); if (rc == 0) { sbuf_printf(sb, "Frames: %u\n", stats.frames); sbuf_printf(sb, "Octets: %ju\n", stats.octets); sbuf_printf(sb, "Drops: %u", stats.drops); rc = sbuf_finish(sb); } sbuf_delete(sb); return (rc); } static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tid_stats stats; sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tid_stats(sc, &stats, 1); mtx_unlock(&sc->reg_lock); if (rc == 0) { sbuf_printf(sb, "Delete: %u\n", stats.del); sbuf_printf(sb, "Invalidate: %u\n", stats.inv); sbuf_printf(sb, "Active: %u\n", stats.act); sbuf_printf(sb, "Passive: %u", stats.pas); rc = sbuf_finish(sb); } sbuf_delete(sb); return (rc); } static const char * const devlog_level_strings[] = { [FW_DEVLOG_LEVEL_EMERG] = "EMERG", [FW_DEVLOG_LEVEL_CRIT] = "CRIT", [FW_DEVLOG_LEVEL_ERR] = "ERR", [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", [FW_DEVLOG_LEVEL_INFO] = "INFO", [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" }; static const char * const devlog_facility_strings[] = { [FW_DEVLOG_FACILITY_CORE] = "CORE", [FW_DEVLOG_FACILITY_CF] = "CF", [FW_DEVLOG_FACILITY_SCHED] = "SCHED", [FW_DEVLOG_FACILITY_TIMER] = "TIMER", [FW_DEVLOG_FACILITY_RES] = "RES", [FW_DEVLOG_FACILITY_HW] = "HW", [FW_DEVLOG_FACILITY_FLR] = "FLR", [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", [FW_DEVLOG_FACILITY_PHY] = "PHY", [FW_DEVLOG_FACILITY_MAC] = "MAC", [FW_DEVLOG_FACILITY_PORT] = "PORT", [FW_DEVLOG_FACILITY_VI] = "VI", [FW_DEVLOG_FACILITY_FILTER] = "FILTER", [FW_DEVLOG_FACILITY_ACL] = "ACL", [FW_DEVLOG_FACILITY_TM] = "TM", [FW_DEVLOG_FACILITY_QFC] = "QFC", [FW_DEVLOG_FACILITY_DCB] = "DCB", [FW_DEVLOG_FACILITY_ETH] = "ETH", [FW_DEVLOG_FACILITY_OFLD] = "OFLD", [FW_DEVLOG_FACILITY_RI] = "RI", [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", [FW_DEVLOG_FACILITY_FCOE] = "FCOE", [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", [FW_DEVLOG_FACILITY_CHNET] = "CHNET", }; static int sbuf_devlog(struct adapter *sc, struct sbuf *sb, int 
flags) { int i, j, rc, nentries, first = 0; struct devlog_params *dparams = &sc->params.devlog; struct fw_devlog_e *buf, *e; uint64_t ftstamp = UINT64_MAX; if (dparams->addr == 0) return (ENXIO); MPASS(flags == M_WAITOK || flags == M_NOWAIT); buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags); if (buf == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; nentries = dparams->size / sizeof(struct fw_devlog_e); for (i = 0; i < nentries; i++) { e = &buf[i]; if (e->timestamp == 0) break; /* end */ e->timestamp = be64toh(e->timestamp); e->seqno = be32toh(e->seqno); for (j = 0; j < 8; j++) e->params[j] = be32toh(e->params[j]); if (e->timestamp < ftstamp) { ftstamp = e->timestamp; first = i; } } if (buf[first].timestamp == 0) goto done; /* nothing in the log */ sbuf_printf(sb, "%10s %15s %8s %8s %s\n", "Seq#", "Tstamp", "Level", "Facility", "Message"); i = first; do { e = &buf[i]; if (e->timestamp == 0) break; /* end */ sbuf_printf(sb, "%10d %15ju %8s %8s ", e->seqno, e->timestamp, (e->level < nitems(devlog_level_strings) ? devlog_level_strings[e->level] : "UNKNOWN"), (e->facility < nitems(devlog_facility_strings) ? devlog_facility_strings[e->facility] : "UNKNOWN")); sbuf_printf(sb, e->fmt, e->params[0], e->params[1], e->params[2], e->params[3], e->params[4], e->params[5], e->params[6], e->params[7]); if (++i == nentries) i = 0; } while (i != first); done: free(buf, M_CXGBE); return (rc); } static int sysctl_devlog(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc; struct sbuf *sb; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = sbuf_devlog(sc, sb, M_WAITOK); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static void dump_devlog(struct adapter *sc) { int rc; struct sbuf sb; if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) { log(LOG_DEBUG, "%s: failed to generate devlog dump.\n", device_get_nameunit(sc->dev)); return; } rc = sbuf_devlog(sc, &sb, M_WAITOK); if (rc == 0) { rc = sbuf_finish(&sb); if (rc == 0) { log(LOG_DEBUG, "%s: device log follows.\n%s", device_get_nameunit(sc->dev), sbuf_data(&sb)); } } sbuf_delete(&sb); } static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_fcoe_stats stats[MAX_NCHAN]; int i, nchan = sc->chip_params->nchan; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { for (i = 0; i < nchan; i++) t4_get_fcoe_stats(sc, i, &stats[i], 1); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3"); sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", stats[0].octets_ddp, stats[1].octets_ddp, stats[2].octets_ddp, stats[3].octets_ddp); sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", stats[0].frames_ddp, stats[1].frames_ddp, stats[2].frames_ddp, stats[3].frames_ddp); sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", stats[0].frames_drop, stats[1].frames_drop, stats[2].frames_drop, stats[3].frames_drop); } else { sbuf_printf(sb, " channel 0 channel 1"); sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", stats[0].octets_ddp, stats[1].octets_ddp); sbuf_printf(sb, "\nframesDDP: %16u %16u", stats[0].frames_ddp, stats[1].frames_ddp); sbuf_printf(sb, "\nframesDrop: %16u %16u", stats[0].frames_drop, 
stats[1].frames_drop); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; unsigned int map, kbps, ipg, mode; unsigned int pace_tab[NTX_SCHED]; sb = sbuf_new_for_sysctl(NULL, NULL, 512, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) { mtx_unlock(&sc->reg_lock); rc = ENXIO; goto done; } map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); t4_read_pace_tbl(sc, pace_tab); mtx_unlock(&sc->reg_lock); sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " "Class IPG (0.1 ns) Flow IPG (us)"); for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { t4_get_tx_sched(sc, i, &kbps, &ipg, 1); sbuf_printf(sb, "\n %u %-5s %u ", i, (mode & (1 << i)) ? "flow" : "class", map & 3); if (kbps) sbuf_printf(sb, "%9u ", kbps); else sbuf_printf(sb, " disabled "); if (ipg) sbuf_printf(sb, "%13u ", ipg); else sbuf_printf(sb, " disabled "); if (pace_tab[i]) sbuf_printf(sb, "%10u", pace_tab[i]); else sbuf_printf(sb, " disabled"); } rc = sbuf_finish(sb); done: sbuf_delete(sb); return (rc); } static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, j; uint64_t *p0, *p1; struct lb_port_stats s[2]; static const char *stat_name[] = { "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", "Frames128To255:", "Frames256To511:", "Frames512To1023:", "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", "BG2FramesTrunc:", "BG3FramesTrunc:" }; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); memset(s, 0, sizeof(s)); rc = 0; for (i = 0; i < sc->chip_params->nchan; i += 2) { mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_get_lb_stats(sc, i, &s[0]); t4_get_lb_stats(sc, i + 1, &s[1]); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; p0 = &s[0].octets; p1 = &s[1].octets; sbuf_printf(sb, "%s Loopback %u" " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); for (j = 0; j < nitems(stat_name); j++) sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], *p0++, *p1++); } if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) { int rc = 0; struct port_info *pi = arg1; struct link_config *lc = &pi->link_cfg; struct sbuf *sb; sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok || lc->link_down_rc == 255) sbuf_printf(sb, "n/a"); else sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } struct mem_desc { u_int base; u_int limit; u_int idx; }; static int mem_desc_cmp(const void *a, const void *b) { const u_int v1 = ((const struct mem_desc *)a)->base; const u_int v2 = ((const struct mem_desc *)b)->base; if (v1 < v2) return (-1); else if (v1 > v2) return (1); return (0); } static void mem_region_show(struct sbuf *sb, const char *name, unsigned int from, unsigned int to) { unsigned int size; if (from == to) return; size = to - from + 1; if (size == 0) return; /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); } static int sysctl_meminfo(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, n; uint32_t lo, hi, used, free, alloc; static const char *memory[] = { "EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:" }; static const char *region[] = { "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", "TDDP region:", "TPT region:", "STAG region:", "RQ region:", "RQUDP region:", "PBL region:", "TXPBL region:", "TLSKey region:", "DBVFIFO region:", "ULPRX state:", "ULPTX state:", "On-chip queues:", }; struct mem_desc avail[4]; struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ struct mem_desc *md = mem; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); for (i = 0; i < nitems(mem); i++) { mem[i].limit = 0; mem[i].idx = i; } mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } /* Find and sort the populated memory ranges */ i = 0; lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); if (lo & F_EDRAM0_ENABLE) { hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); avail[i].base = G_EDRAM0_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); avail[i].idx = 0; i++; } if (lo & F_EDRAM1_ENABLE) { hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); avail[i].base = G_EDRAM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); avail[i].idx = 1; i++; } if (lo & F_EXT_MEM_ENABLE) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); avail[i].base = G_EXT_MEM_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20); avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */ i++; } if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); avail[i].base = G_EXT_MEM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20); avail[i].idx = 4; i++; } if (is_t6(sc) && lo & F_HMA_MUX) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); avail[i].base = G_EXT_MEM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20); avail[i].idx = 5; i++; } MPASS(i <= nitems(avail)); if (!i) /* no memory available */ goto done; qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); /* the next few have explicit upper bounds */ md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); md->limit = md->base - 1 + t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); md++; md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); md->limit = md->base - 1 + t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); md++; if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { if (chip_id(sc) <= CHELSIO_T5) md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); else md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); md->limit = 0; } else { md->base = 0; md->idx = nitems(region); /* hide it */ } md++; #define ulp_region(reg) \ md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) ulp_region(RX_ISCSI); ulp_region(RX_TDDP); ulp_region(TX_TPT); ulp_region(RX_STAG); ulp_region(RX_RQ); ulp_region(RX_RQUDP); ulp_region(RX_PBL); ulp_region(TX_PBL); if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { ulp_region(RX_TLS_KEY); } #undef ulp_region md->base = 0; if (is_t4(sc)) md->idx = nitems(region); else { uint32_t size = 0; uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); if (is_t5(sc)) { if (sge_ctrl & F_VFIFO_ENABLE) size = fifo_size << 2; } else size = G_T6_DBVFIFO_SIZE(fifo_size) << 6; if (size) { md->base = t4_read_reg(sc, A_SGE_DBVFIFO_BADDR); md->limit = md->base + size - 1; } else md->idx = nitems(region); } md++; md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); md->limit = 0; md++; md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); md->limit = 0; md++; md->base = sc->vres.ocq.start; if (sc->vres.ocq.size) md->limit = md->base + sc->vres.ocq.size - 1; else md->idx = nitems(region); /* hide it */ md++; /* add any address-space holes, there can be up to 3 */ for (n = 0; n < i - 1; n++) if (avail[n].limit < avail[n + 1].base) (md++)->base = avail[n].limit; if (avail[n].limit) (md++)->base = avail[n].limit; n = md - mem; MPASS(n <= nitems(mem)); qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); for (lo = 0; lo < i; lo++) mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, avail[lo].limit - 1); sbuf_printf(sb, "\n"); for (i = 0; i < n; i++) { if (mem[i].idx >= nitems(region)) continue; /* skip holes */ if (!mem[i].limit) mem[i].limit = i < n - 1 ? 
mem[i + 1].base - 1 : ~0; mem_region_show(sb, region[mem[i].idx], mem[i].base, mem[i].limit); } sbuf_printf(sb, "\n"); lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; mem_region_show(sb, "uP RAM:", lo, hi); lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; mem_region_show(sb, "uP Extmem2:", lo, hi); lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); for (i = 0, free = 0; i < 2; i++) free += G_FREERXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_RX_CNT)); sbuf_printf(sb, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n", G_PMRXMAXPAGE(lo), free, t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, (lo & F_PMRXNUMCHN) ? 2 : 1); lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); for (i = 0, free = 0; i < 4; i++) free += G_FREETXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT)); sbuf_printf(sb, "%u Tx pages (%u free) of size %u%ciB for %u channels\n", G_PMTXMAXPAGE(lo), free, hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); sbuf_printf(sb, "%u p-structs (%u free)\n", t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT), G_FREEPSTRUCTCOUNT(t4_read_reg(sc, A_TP_FLM_FREE_PS_CNT))); for (i = 0; i < 4; i++) { if (chip_id(sc) > CHELSIO_T5) lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); else lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); if (is_t5(sc)) { used = G_T5_USED(lo); alloc = G_T5_ALLOC(lo); } else { used = G_USED(lo); alloc = G_ALLOC(lo); } /* For T6 these are MAC buffer groups */ sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", i, used, alloc); } for (i = 0; i < sc->chip_params->nchan; i++) { if (chip_id(sc) > CHELSIO_T5) lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); else lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); if (is_t5(sc)) { used = G_T5_USED(lo); alloc = G_T5_ALLOC(lo); } else { used = G_USED(lo); alloc = G_ALLOC(lo); } /* For T6 these are MAC buffer groups */ sbuf_printf(sb, "\nLoopback %d using %u pages out of %u allocated", i, used, alloc); } done: mtx_unlock(&sc->reg_lock); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static inline void tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) { *mask = x | y; y = htobe64(y); memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); } static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; MPASS(chip_id(sc) <= CHELSIO_T5); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "Idx Ethernet address Mask Vld Ports PF" " VF Replication P0 P1 P2 P3 ML"); rc = 0; for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { uint64_t tcamx, tcamy, mask; uint32_t cls_lo, cls_hi; uint8_t addr[ETHER_ADDR_LEN]; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; if (tcamx & tcamy) continue; tcamxy2valmask(tcamx, tcamy, addr, &mask); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask, (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', G_PORTMAP(cls_hi), G_PF(cls_lo), (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); if (cls_lo & F_REPLICATE) { struct fw_ldst_cmd ldst_cmd; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = htobe32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); ldst_cmd.u.mps.rplc.fid_idx = htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | V_FW_LDST_CMD_IDX(i)); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mps"); if (rc) break; if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); end_synchronized_op(sc, 0); if (rc != 0) break; else { sbuf_printf(sb, " %08x %08x %08x %08x", be32toh(ldst_cmd.u.mps.rplc.rplc127_96), be32toh(ldst_cmd.u.mps.rplc.rplc95_64), be32toh(ldst_cmd.u.mps.rplc.rplc63_32), be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); } } else sbuf_printf(sb, "%36s", ""); sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); } if (rc) (void) sbuf_finish(sb); else rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; MPASS(chip_id(sc) > CHELSIO_T5); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" " Replication" " P0 P1 P2 P3 ML\n"); rc = 0; for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { uint8_t dip_hit, vlan_vld, lookup_type, port_num; uint16_t ivlan; uint64_t tcamx, tcamy, val, mask; uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; uint8_t addr[ETHER_ADDR_LEN]; ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); if (i < 256) ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); else ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); tcamy = G_DMACH(val) << 32; tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; lookup_type = G_DATALKPTYPE(data2); port_num = G_DATAPORTNUM(data2); if (lookup_type && lookup_type != M_DATALKPTYPE) { /* Inner header VNI */ vniy = ((data2 & F_DATAVIDH2) << 23) | (G_DATAVIDH1(data2) << 16) | G_VIDL(val); dip_hit = data2 & F_DATADIPHIT; vlan_vld = 0; } else { vniy = 0; dip_hit = 0; vlan_vld = data2 & F_DATAVIDH2; ivlan = G_VIDL(val); } ctl |= V_CTLXYBITSEL(1); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); tcamx = G_DMACH(val) << 32; tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; if (lookup_type && lookup_type != M_DATALKPTYPE) { /* Inner header VNI mask */ vnix = ((data2 & F_DATAVIDH2) << 23) | (G_DATAVIDH1(data2) << 16) | G_VIDL(val); } else vnix = 0; if (tcamx & tcamy) continue; tcamxy2valmask(tcamx, tcamy, addr, &mask); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; if (lookup_type && lookup_type != M_DATALKPTYPE) { sbuf_printf(sb, 
"\n%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012jx %06x %06x - - %3c" " I %4x %3c %#x%4u%4d", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', G_PORTMAP(cls_hi), G_T6_PF(cls_lo), cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); } else { sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012jx - - ", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask); if (vlan_vld) sbuf_printf(sb, "%4u Y ", ivlan); else sbuf_printf(sb, " - N "); sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", lookup_type ? 'I' : 'O', port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', G_PORTMAP(cls_hi), G_T6_PF(cls_lo), cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); } if (cls_lo & F_T6_REPLICATE) { struct fw_ldst_cmd ldst_cmd; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = htobe32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); ldst_cmd.u.mps.rplc.fid_idx = htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | V_FW_LDST_CMD_IDX(i)); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t6mps"); if (rc) break; if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); end_synchronized_op(sc, 0); if (rc != 0) break; else { sbuf_printf(sb, " %08x %08x %08x %08x" " %08x %08x %08x %08x", be32toh(ldst_cmd.u.mps.rplc.rplc255_224), be32toh(ldst_cmd.u.mps.rplc.rplc223_192), be32toh(ldst_cmd.u.mps.rplc.rplc191_160), be32toh(ldst_cmd.u.mps.rplc.rplc159_128), be32toh(ldst_cmd.u.mps.rplc.rplc127_96), be32toh(ldst_cmd.u.mps.rplc.rplc95_64), be32toh(ldst_cmd.u.mps.rplc.rplc63_32), be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); } } else sbuf_printf(sb, "%72s", ""); sbuf_printf(sb, "%4u%3u%3u%3u %#x", G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), (cls_lo >> S_T6_MULTILISTEN0) & 0xf); } if (rc) (void) sbuf_finish(sb); else rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint16_t mtus[NMTUS]; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_read_mtu_tbl(sc, mtus, NULL); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], mtus[14], mtus[15]); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; static const char *tx_stats[MAX_PM_NSTATS] = { "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", "Tx FIFO wait", NULL, "Tx latency" }; static const char *rx_stats[MAX_PM_NSTATS] = { "Read:", "Write bypass:", "Write mem:", "Flush:", "Rx FIFO wait", NULL, "Rx latency" }; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " Tx pcmds Tx 
bytes"); for (i = 0; i < 4; i++) { sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); } sbuf_printf(sb, "\n Rx pcmds Rx bytes"); for (i = 0; i < 4; i++) { sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); } if (chip_id(sc) > CHELSIO_T5) { sbuf_printf(sb, "\n Total wait Total occupancy"); sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); i += 2; MPASS(i < nitems(tx_stats)); sbuf_printf(sb, "\n Reads Total wait"); sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_rdma_stats stats; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_rdma_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tcp_stats v4, v6; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tcp_stats(sc, &v4, &v6, 0); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " IP IPv6\n"); sbuf_printf(sb, "OutRsts: %20u %20u\n", v4.tcp_out_rsts, v6.tcp_out_rsts); sbuf_printf(sb, "InSegs: %20ju %20ju\n", v4.tcp_in_segs, v6.tcp_in_segs); sbuf_printf(sb, "OutSegs: %20ju %20ju\n", v4.tcp_out_segs, v6.tcp_out_segs); sbuf_printf(sb, "RetransSegs: %20ju %20ju", v4.tcp_retrans_segs, v6.tcp_retrans_segs); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tids(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint32_t x, y; struct tid_info *t = &sc->tids; rc = 0; sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (t->natids) { sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, t->atids_in_use); } if (t->nhpftids) { sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n", t->hpftid_base, t->hpftid_end, t->hpftids_in_use); } if (t->ntids) { bool hashen = false; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { hashen = true; if (chip_id(sc) <= CHELSIO_T5) { x = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; y = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; } else { x = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); y = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); } } mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; sbuf_printf(sb, "TID range: "); if (hashen) { if (x) sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1); sbuf_printf(sb, "%u-%u", y, t->ntids - 1); } else { sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base + t->ntids - 1); } sbuf_printf(sb, ", in use: %u\n", atomic_load_acq_int(&t->tids_in_use)); } if (t->nstids) { sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, t->stid_base + t->nstids - 1, t->stids_in_use); } if (t->nftids) { sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base, t->ftid_end, t->ftids_in_use); } if 
(t->netids) { sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base, t->etid_base + t->netids - 1, t->etids_in_use); } mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { x = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4); y = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6); } mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", x, y); done: if (rc == 0) rc = sbuf_finish(sb); else (void)sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_err_stats stats; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_err_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", stats.mac_in_errs[0], stats.mac_in_errs[1], stats.mac_in_errs[2], stats.mac_in_errs[3]); sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", stats.hdr_in_errs[0], stats.hdr_in_errs[1], stats.hdr_in_errs[2], stats.hdr_in_errs[3]); sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", stats.tcp_in_errs[0], stats.tcp_in_errs[1], stats.tcp_in_errs[2], stats.tcp_in_errs[3]); sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "macInErrs: %10u %10u\n", stats.mac_in_errs[0], stats.mac_in_errs[1]); sbuf_printf(sb, "hdrInErrs: %10u %10u\n", stats.hdr_in_errs[0], stats.hdr_in_errs[1]); sbuf_printf(sb, "tcpInErrs: %10u %10u\n", stats.tcp_in_errs[0], stats.tcp_in_errs[1]); sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); } sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", stats.ofld_no_neigh, stats.ofld_cong_defer); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tnl_stats stats; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tnl_stats(sc, &stats, 1); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 
channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "OutPkts: %10u %10u %10u %10u\n", stats.out_pkt[0], stats.out_pkt[1], stats.out_pkt[2], stats.out_pkt[3]); sbuf_printf(sb, "InPkts: %10u %10u %10u %10u", stats.in_pkt[0], stats.in_pkt[1], stats.in_pkt[2], stats.in_pkt[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "OutPkts: %10u %10u\n", stats.out_pkt[0], stats.out_pkt[1]); sbuf_printf(sb, "InPkts: %10u %10u", stats.in_pkt[0], stats.in_pkt[1]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct tp_params *tpp = &sc->params.tp; u_int mask; int rc; mask = tpp->la_mask >> 16; rc = sysctl_handle_int(oidp, &mask, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (mask > 0xffff) return (EINVAL); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { tpp->la_mask = mask << 16; t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); } mtx_unlock(&sc->reg_lock); return (rc); } struct field_desc { const char *name; u_int start; u_int width; }; static void field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) { char buf[32]; int line_size = 0; while (f->name) { uint64_t mask = (1ULL << f->width) - 1; int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, ((uintmax_t)v >> f->start) & mask); if (line_size + len >= 79) { line_size = 8; sbuf_printf(sb, "\n "); } sbuf_printf(sb, "%s ", buf); line_size += len + 1; f++; } sbuf_printf(sb, "\n"); } static const struct field_desc tp_la0[] = { { "RcfOpCodeOut", 60, 4 }, { "State", 56, 4 }, { "WcfState", 52, 4 }, { "RcfOpcSrcOut", 50, 2 }, { "CRxError", 49, 1 }, { "ERxError", 48, 1 }, { "SanityFailed", 47, 1 }, { "SpuriousMsg", 46, 1 }, { "FlushInputMsg", 45, 1 }, { "FlushInputCpl", 44, 1 }, { "RssUpBit", 43, 1 }, { "RssFilterHit", 42, 1 }, { "Tid", 32, 10 }, { "InitTcb", 31, 1 }, { "LineNumber", 24, 7 }, { "Emsg", 23, 1 }, { "EdataOut", 22, 1 }, { "Cmsg", 21, 1 }, { "CdataOut", 20, 1 }, { "EreadPdu", 19, 1 }, { "CreadPdu", 18, 1 }, { "TunnelPkt", 17, 1 }, { "RcfPeerFin", 16, 1 }, { "RcfReasonOut", 12, 4 }, { "TxCchannel", 10, 2 }, { "RcfTxChannel", 8, 2 }, { "RxEchannel", 6, 2 }, { "RcfRxChannel", 5, 1 }, { "RcfDataOutSrdy", 4, 1 }, { "RxDvld", 3, 1 }, { "RxOoDvld", 2, 1 }, { "RxCongestion", 1, 1 }, { "TxCongestion", 0, 1 }, { NULL } }; static const struct field_desc tp_la1[] = { { "CplCmdIn", 56, 8 }, { "CplCmdOut", 48, 8 }, { "ESynOut", 47, 1 }, { "EAckOut", 46, 1 }, { "EFinOut", 45, 1 }, { "ERstOut", 44, 1 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { "SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; static const struct field_desc tp_la2[] = { { 
"CplCmdIn", 56, 8 }, { "MpsVfVld", 55, 1 }, { "MpsPf", 52, 3 }, { "MpsVf", 44, 8 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { "SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; static void tp_la_show(struct sbuf *sb, uint64_t *p, int idx) { field_desc_show(sb, *p, tp_la0); } static void tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) { if (idx) sbuf_printf(sb, "\n"); field_desc_show(sb, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(sb, p[1], tp_la0); } static void tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) { if (idx) sbuf_printf(sb, "\n"); field_desc_show(sb, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1); } static int sysctl_tp_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; uint64_t *buf, *p; int rc; u_int i, inc; void (*show_func)(struct sbuf *, uint64_t *, int); rc = 0; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_tp_read_la(sc, buf, NULL); switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { case 2: inc = 2; show_func = tp_la_show2; break; case 3: inc = 2; show_func = tp_la_show3; break; default: inc = 1; show_func = tp_la_show; } } mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; p = buf; for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) (*show_func)(sb, p, i); rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_get_chan_txrate(sc, nrate, orate); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", nrate[0], nrate[1], nrate[2], nrate[3]); sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", orate[0], orate[1], orate[2], orate[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", nrate[0], nrate[1]); sbuf_printf(sb, "Offload B/s: %10ju %10ju", orate[0], orate[1]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; uint32_t *buf, *p; int rc, i; rc = 0; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == 
NULL) return (ENOMEM); buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_ulprx_read_la(sc, buf); mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; p = buf; sbuf_printf(sb, " Pcmd Type Message" " Data"); for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint32_t cfg, s1, s2; MPASS(chip_id(sc) >= CHELSIO_T5); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { cfg = t4_read_reg(sc, A_SGE_STAT_CFG); s1 = t4_read_reg(sc, A_SGE_STAT_TOTAL); s2 = t4_read_reg(sc, A_SGE_STAT_MATCH); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); if (G_STATSOURCE_T5(cfg) == 7) { int mode; mode = is_t5(sc) ? G_STATMODE(cfg) : G_T6_STATMODE(cfg); if (mode == 0) sbuf_printf(sb, "total %d, incomplete %d", s1, s2); else if (mode == 1) sbuf_printf(sb, "total %d, data overflow %d", s1, s2); else sbuf_printf(sb, "unknown mode %d", mode); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cpus(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; enum cpu_sets op = arg2; cpuset_t cpuset; struct sbuf *sb; int i, rc; MPASS(op == LOCAL_CPUS || op == INTR_CPUS); CPU_ZERO(&cpuset); rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); CPU_FOREACH(i) sbuf_printf(sb, "%d ", i); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_reset(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int val; int rc; val = atomic_load_int(&sc->num_resets); rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (val == 0) { /* Zero out the counter that tracks reset. 
*/ atomic_store_int(&sc->num_resets, 0); return (0); } if (val != 1) return (EINVAL); /* 0 or 1 are the only legal values */ if (hw_off_limits(sc)) /* harmless race */ return (EALREADY); taskqueue_enqueue(reset_tq, &sc->reset_task); return (0); } #ifdef TCP_OFFLOAD static int sysctl_tls(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int i, j, v, rc; struct vi_info *vi; v = sc->tt.tls; rc = sysctl_handle_int(oidp, &v, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS)) return (ENOTSUP); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { sc->tt.tls = !!v; for_each_port(sc, i) { for_each_vi(sc->port[i], j, vi) { if (vi->flags & VI_INIT_DONE) t4_update_fl_bufsize(vi->ifp); } } } end_synchronized_op(sc, 0); return (rc); } static void unit_conv(char *buf, size_t len, u_int val, u_int factor) { u_int rem = val % factor; if (rem == 0) snprintf(buf, len, "%u", val / factor); else { while (rem % 10 == 0) rem /= 10; snprintf(buf, len, "%u.%u", val / factor, rem); } } static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; char buf[16]; u_int res, re; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) res = (u_int)-1; else res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); mtx_unlock(&sc->reg_lock); if (res == (u_int)-1) return (ENXIO); switch (arg2) { case 0: /* timer_tick */ re = G_TIMERRESOLUTION(res); break; case 1: /* TCP timestamp tick */ re = G_TIMESTAMPRESOLUTION(res); break; case 2: /* DACK tick */ re = G_DELAYEDACKRESOLUTION(res); break; default: return (EDOOFUS); } unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc; u_int dack_tmr, dack_re, v; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; dack_re = G_DELAYEDACKRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); dack_tmr = t4_read_reg(sc, A_TP_DACK_TIMER); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); v = ((cclk_ps << dack_re) / 1000000) * dack_tmr; return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, reg = arg2; u_int tre; u_long tp_tick_us, v; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); tp_tick_us = (cclk_ps << tre) / 1000000; if (reg == A_TP_INIT_SRTT) v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); else v = tp_tick_us * t4_read_reg(sc, reg); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); else return (sysctl_handle_long(oidp, &v, 0, req)); } /* * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is * passed to this function. 
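* For example, a start of 12 selects bits 15:12, since the handler below * computes (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf.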
*/ static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, idx = arg2; u_int v; MPASS(idx >= 0 && idx <= 24); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf; } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); else return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, idx = arg2; u_int shift, v, r; MPASS(idx >= 0 && idx < 16); r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3); shift = (idx & 3) << 3; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0; } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); else return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc, i; struct sge_ofld_rxq *ofld_rxq; uint8_t v; idx = vi->ofld_tmr_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < 0 || idx >= SGE_NTIMERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4otmr"); if (rc) return (rc); v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1); for_each_ofld_rxq(vi, i, ofld_rxq) { #ifdef atomic_store_rel_8 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); #else ofld_rxq->iq.intr_params = v; #endif } vi->ofld_tmr_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (0); } static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc; idx = vi->ofld_pktc_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < -1 || idx >= SGE_NCOUNTERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4opktc"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->ofld_pktc_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (rc); } #endif static int get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) { int rc; if (cntxt->cid > M_CTXTQID) return (EINVAL); if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) return (EINVAL); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (sc->flags & FW_OK) { rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); if (rc == 0) goto done; } /* * Read via firmware failed or wasn't even attempted. Read directly via * the backdoor. */ rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); done: end_synchronized_op(sc, 0); return (rc); } static int load_fw(struct adapter *sc, struct t4_data *fw) { int rc; uint8_t *fw_data; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } /* * The firmware, with the sole exception of the memory parity error * handler, runs from memory and not flash. It is almost always safe to * install a new firmware on a running system. Just set bit 1 in * hw.cxgbe.dflags or dev...dflags first. 
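* (That bit corresponds to the DF_LOAD_FW_ANYTIME debug flag tested just * below.)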
*/ if (sc->flags & FULL_INIT_DONE && (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) { rc = EBUSY; goto done; } fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); rc = copyin(fw->data, fw_data, fw->len); if (rc == 0) rc = -t4_load_fw(sc, fw_data, fw->len); free(fw_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_cfg(struct adapter *sc, struct t4_data *cfg) { int rc; uint8_t *cfg_data = NULL; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (cfg->len == 0) { /* clear */ rc = -t4_load_cfg(sc, NULL, 0); goto done; } cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); rc = copyin(cfg->data, cfg_data, cfg->len); if (rc == 0) rc = -t4_load_cfg(sc, cfg_data, cfg->len); free(cfg_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_boot(struct adapter *sc, struct t4_bootrom *br) { int rc; uint8_t *br_data = NULL; u_int offset; if (br->len > 1024 * 1024) return (EFBIG); if (br->pf_offset == 0) { /* pfidx */ if (br->pfidx_addr > 7) return (EINVAL); offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, A_PCIE_PF_EXPROM_OFST))); } else if (br->pf_offset == 1) { /* offset */ offset = G_OFFSET(br->pfidx_addr); } else { return (EINVAL); } rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (br->len == 0) { /* clear */ rc = -t4_load_boot(sc, NULL, offset, 0); goto done; } br_data = malloc(br->len, M_CXGBE, M_WAITOK); rc = copyin(br->data, br_data, br->len); if (rc == 0) rc = -t4_load_boot(sc, br_data, offset, br->len); free(br_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_bootcfg(struct adapter *sc, struct t4_data *bc) { int rc; uint8_t *bc_data = NULL; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (bc->len == 0) { /* clear */ rc = -t4_load_bootcfg(sc, NULL, 0); goto done; } bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); rc = copyin(bc->data, bc_data, bc->len); if (rc == 0) rc = -t4_load_bootcfg(sc, bc_data, bc->len); free(bc_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump) { int rc; struct cudbg_init *cudbg; void *handle, *buf; /* buf is large, don't block if no memory is available */ buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO); if (buf == NULL) return (ENOMEM); handle = cudbg_alloc_handle(); if (handle == NULL) { rc = ENOMEM; goto done; } cudbg = cudbg_get_init(handle); cudbg->adap = sc; cudbg->print = (cudbg_print_cb)printf; #ifndef notyet device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", __func__, dump->wr_flash, dump->len, dump->data); #endif if (dump->wr_flash) cudbg->use_flash = 1; MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); rc = cudbg_collect(handle, buf, &dump->len); if (rc != 0) goto done; rc = copyout(buf, dump->data, dump->len); done: cudbg_free_handle(handle); free(buf, M_CXGBE); return (rc); } static void free_offload_policy(struct t4_offload_policy *op) { struct offload_rule *r; int i; if (op == NULL) return; r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { free(r->bpf_prog.bf_insns, M_CXGBE); } free(op->rule, M_CXGBE); free(op, M_CXGBE); } static int set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop) { int i, rc, len; 
struct t4_offload_policy *op, *old; struct bpf_program *bf; const struct offload_settings *s; struct offload_rule *r; void *u; if (!is_offload(sc)) return (ENODEV); if (uop->nrules == 0) { /* Delete installed policies. */ op = NULL; goto set_policy; } else if (uop->nrules > 256) { /* arbitrary */ return (E2BIG); } /* Copy userspace offload policy to kernel */ op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK); op->nrules = uop->nrules; len = op->nrules * sizeof(struct offload_rule); op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); rc = copyin(uop->rule, op->rule, len); if (rc) { free(op->rule, M_CXGBE); free(op, M_CXGBE); return (rc); } r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { /* Validate open_type */ if (r->open_type != OPEN_TYPE_LISTEN && r->open_type != OPEN_TYPE_ACTIVE && r->open_type != OPEN_TYPE_PASSIVE && r->open_type != OPEN_TYPE_DONTCARE) { error: /* * Rules 0 to i have malloc'd filters that need to be * freed. Rules i+1 to nrules have userspace pointers * and should be left alone. */ op->nrules = i; free_offload_policy(op); return (rc); } /* Validate settings */ s = &r->settings; if ((s->offload != 0 && s->offload != 1) || s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED || s->sched_class < -1 || s->sched_class >= sc->params.nsched_cls) { rc = EINVAL; goto error; } bf = &r->bpf_prog; u = bf->bf_insns; /* userspace ptr */ bf->bf_insns = NULL; if (bf->bf_len == 0) { /* legal, matches everything */ continue; } len = bf->bf_len * sizeof(*bf->bf_insns); bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); rc = copyin(u, bf->bf_insns, len); if (rc != 0) goto error; if (!bpf_validate(bf->bf_insns, bf->bf_len)) { rc = EINVAL; goto error; } } set_policy: rw_wlock(&sc->policy_lock); old = sc->policy; sc->policy = op; rw_wunlock(&sc->policy_lock); free_offload_policy(old); return (0); } #define MAX_READ_BUF_SIZE (128 * 1024) static int read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) { uint32_t addr, remaining, n; uint32_t *buf; int rc; uint8_t *dst; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else rc = validate_mem_range(sc, mr->addr, mr->len); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); addr = mr->addr; remaining = mr->len; dst = (void *)mr->data; while (remaining) { n = min(remaining, MAX_READ_BUF_SIZE); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else read_via_memwin(sc, 2, addr, buf, n); mtx_unlock(&sc->reg_lock); if (rc != 0) break; rc = copyout(buf, dst, n); if (rc != 0) break; dst += n; remaining -= n; addr += n; } free(buf, M_CXGBE); return (rc); } #undef MAX_READ_BUF_SIZE static int read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) { int rc; if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) return (EINVAL); if (i2cd->len > sizeof(i2cd->data)) return (EFBIG); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, i2cd->offset, i2cd->len, &i2cd->data[0]); end_synchronized_op(sc, 0); return (rc); } static int clear_stats(struct adapter *sc, u_int port_id) { int i, v, chan_map; struct port_info *pi; struct vi_info *vi; struct sge_rxq *rxq; struct sge_txq *txq; struct sge_wrq *wrq; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct sge_ofld_txq *ofld_txq; #endif #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif if (port_id >= sc->params.nports) return (EINVAL); pi = 
sc->port[port_id]; if (pi == NULL) return (EIO); mtx_lock(&sc->reg_lock); if (!hw_off_limits(sc)) { /* MAC stats */ t4_clr_port_stats(sc, pi->tx_chan); if (is_t6(sc)) { if (pi->fcs_reg != -1) pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); else pi->stats.rx_fcs_err = 0; } for_each_vi(pi, v, vi) { if (vi->flags & VI_INIT_DONE) t4_clr_vi_stats(sc, vi->vin); } chan_map = pi->rx_e_chan_map; v = 0; /* reuse */ while (chan_map) { i = ffs(chan_map) - 1; t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, A_TP_MIB_TNL_CNG_DROP_0 + i); chan_map &= ~(1 << i); } } mtx_unlock(&sc->reg_lock); pi->tx_parse_error = 0; pi->tnl_cong_drops = 0; /* * Since this command accepts a port, clear stats for * all VIs on this port. */ for_each_vi(pi, v, vi) { if (vi->flags & VI_INIT_DONE) { for_each_rxq(vi, i, rxq) { #if defined(INET) || defined(INET6) rxq->lro.lro_queued = 0; rxq->lro.lro_flushed = 0; #endif rxq->rxcsum = 0; rxq->vlan_extraction = 0; rxq->vxlan_rxcsum = 0; rxq->fl.cl_allocated = 0; rxq->fl.cl_recycled = 0; rxq->fl.cl_fast_recycled = 0; } for_each_txq(vi, i, txq) { txq->txcsum = 0; txq->tso_wrs = 0; txq->vlan_insertion = 0; txq->imm_wrs = 0; txq->sgl_wrs = 0; txq->txpkt_wrs = 0; txq->txpkts0_wrs = 0; txq->txpkts1_wrs = 0; txq->txpkts0_pkts = 0; txq->txpkts1_pkts = 0; txq->txpkts_flush = 0; txq->raw_wrs = 0; txq->vxlan_tso_wrs = 0; txq->vxlan_txcsum = 0; txq->kern_tls_records = 0; txq->kern_tls_short = 0; txq->kern_tls_partial = 0; txq->kern_tls_full = 0; txq->kern_tls_octets = 0; txq->kern_tls_waste = 0; txq->kern_tls_options = 0; txq->kern_tls_header = 0; txq->kern_tls_fin = 0; txq->kern_tls_fin_short = 0; txq->kern_tls_cbc = 0; txq->kern_tls_gcm = 0; mp_ring_reset_stats(txq->r); } #if defined(TCP_OFFLOAD) || defined(RATELIMIT) for_each_ofld_txq(vi, i, ofld_txq) { ofld_txq->wrq.tx_wrs_direct = 0; ofld_txq->wrq.tx_wrs_copied = 0; counter_u64_zero(ofld_txq->tx_iscsi_pdus); counter_u64_zero(ofld_txq->tx_iscsi_octets); counter_u64_zero(ofld_txq->tx_iscsi_iso_wrs); counter_u64_zero(ofld_txq->tx_aio_jobs); counter_u64_zero(ofld_txq->tx_aio_octets); counter_u64_zero(ofld_txq->tx_toe_tls_records); counter_u64_zero(ofld_txq->tx_toe_tls_octets); } #endif #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { ofld_rxq->fl.cl_allocated = 0; ofld_rxq->fl.cl_recycled = 0; ofld_rxq->fl.cl_fast_recycled = 0; counter_u64_zero( ofld_rxq->rx_iscsi_ddp_setup_ok); counter_u64_zero( ofld_rxq->rx_iscsi_ddp_setup_error); ofld_rxq->rx_iscsi_ddp_pdus = 0; ofld_rxq->rx_iscsi_ddp_octets = 0; ofld_rxq->rx_iscsi_fl_pdus = 0; ofld_rxq->rx_iscsi_fl_octets = 0; ofld_rxq->rx_aio_ddp_jobs = 0; ofld_rxq->rx_aio_ddp_octets = 0; ofld_rxq->rx_toe_tls_records = 0; ofld_rxq->rx_toe_tls_octets = 0; ofld_rxq->rx_toe_ddp_octets = 0; counter_u64_zero(ofld_rxq->ddp_buffer_alloc); counter_u64_zero(ofld_rxq->ddp_buffer_reuse); counter_u64_zero(ofld_rxq->ddp_buffer_free); } #endif if (IS_MAIN_VI(vi)) { wrq = &sc->sge.ctrlq[pi->port_id]; wrq->tx_wrs_direct = 0; wrq->tx_wrs_copied = 0; } } } return (0); } static int hold_clip_addr(struct adapter *sc, struct t4_clip_addr *ca) { #ifdef INET6 struct in6_addr in6; bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); if (t4_get_clip_entry(sc, &in6, true) != NULL) return (0); else return (EIO); #else return (ENOTSUP); #endif } static int release_clip_addr(struct adapter *sc, struct t4_clip_addr *ca) { #ifdef INET6 struct in6_addr in6; bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); return (t4_release_clip_addr(sc, &in6)); #else return (ENOTSUP); #endif } int 
t4_os_find_pci_capability(struct adapter *sc, int cap) { int i; return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); } int t4_os_pci_save_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); return (0); } int t4_os_pci_restore_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); return (0); } void t4_os_portmod_changed(struct port_info *pi) { struct adapter *sc = pi->adapter; struct vi_info *vi; if_t ifp; static const char *mod_str[] = { NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" }; KASSERT((pi->flags & FIXED_IFMEDIA) == 0, ("%s: port_type %u", __func__, pi->port_type)); vi = &pi->vi[0]; if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) { PORT_LOCK(pi); build_medialist(pi); if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) { fixup_link_config(pi); apply_link_config(pi); } PORT_UNLOCK(pi); end_synchronized_op(sc, LOCK_HELD); } ifp = vi->ifp; if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) if_printf(ifp, "transceiver unplugged.\n"); else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) if_printf(ifp, "unknown transceiver inserted.\n"); else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) if_printf(ifp, "unsupported transceiver inserted.\n"); else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { if_printf(ifp, "%dGbps %s transceiver inserted.\n", port_top_speed(pi), mod_str[pi->mod_type]); } else { if_printf(ifp, "transceiver (type %d) inserted.\n", pi->mod_type); } } void t4_os_link_changed(struct port_info *pi) { struct vi_info *vi; if_t ifp; struct link_config *lc = &pi->link_cfg; struct adapter *sc = pi->adapter; int v; PORT_LOCK_ASSERT_OWNED(pi); if (is_t6(sc)) { if (lc->link_ok) { if (lc->speed > 25000 || (lc->speed == 25000 && lc->fec == FEC_RS)) { pi->fcs_reg = T5_PORT_REG(pi->tx_chan, A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS); } else { pi->fcs_reg = T5_PORT_REG(pi->tx_chan, A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS); } pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); pi->stats.rx_fcs_err = 0; } else { pi->fcs_reg = -1; } } else { MPASS(pi->fcs_reg != -1); MPASS(pi->fcs_base == 0); } for_each_vi(pi, v, vi) { ifp = vi->ifp; if (ifp == NULL || IS_DETACHING(vi)) continue; if (lc->link_ok) { if_setbaudrate(ifp, IF_Mbps(lc->speed)); if_link_state_change(ifp, LINK_STATE_UP); } else { if_link_state_change(ifp, LINK_STATE_DOWN); } } } void t4_iterate(void (*func)(struct adapter *, void *), void *arg) { struct adapter *sc; sx_slock(&t4_list_lock); SLIST_FOREACH(sc, &t4_list, link) { /* * func should not make any assumptions about what state sc is * in - the only guarantee is that sc->sc_lock is a valid lock. 
*/ func(sc, arg); } sx_sunlock(&t4_list_lock); } static int t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, struct thread *td) { int rc; struct adapter *sc = dev->si_drv1; rc = priv_check(td, PRIV_DRIVER); if (rc != 0) return (rc); switch (cmd) { case CHELSIO_T4_GETREG: { struct t4_reg *edata = (struct t4_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else if (edata->size == 4) edata->val = t4_read_reg(sc, edata->addr); else if (edata->size == 8) edata->val = t4_read_reg64(sc, edata->addr); else rc = EINVAL; mtx_unlock(&sc->reg_lock); break; } case CHELSIO_T4_SETREG: { struct t4_reg *edata = (struct t4_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else if (edata->size == 4) { if (edata->val & 0xffffffff00000000) rc = EINVAL; t4_write_reg(sc, edata->addr, (uint32_t) edata->val); } else if (edata->size == 8) t4_write_reg64(sc, edata->addr, edata->val); else rc = EINVAL; mtx_unlock(&sc->reg_lock); break; } case CHELSIO_T4_REGDUMP: { struct t4_regdump *regs = (struct t4_regdump *)data; int reglen = t4_get_regs_len(sc); uint8_t *buf; if (regs->len < reglen) { regs->len = reglen; /* hint to the caller */ return (ENOBUFS); } regs->len = reglen; buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else get_regs(sc, regs, buf); mtx_unlock(&sc->reg_lock); if (rc == 0) rc = copyout(buf, regs->data, reglen); free(buf, M_CXGBE); break; } case CHELSIO_T4_GET_FILTER_MODE: rc = get_filter_mode(sc, (uint32_t *)data); break; case CHELSIO_T4_SET_FILTER_MODE: rc = set_filter_mode(sc, *(uint32_t *)data); break; case CHELSIO_T4_SET_FILTER_MASK: rc = set_filter_mask(sc, *(uint32_t *)data); break; case CHELSIO_T4_GET_FILTER: rc = get_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_SET_FILTER: rc = set_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_DEL_FILTER: rc = del_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_GET_SGE_CONTEXT: rc = get_sge_context(sc, (struct t4_sge_context *)data); break; case CHELSIO_T4_LOAD_FW: rc = load_fw(sc, (struct t4_data *)data); break; case CHELSIO_T4_GET_MEM: rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); break; case CHELSIO_T4_GET_I2C: rc = read_i2c(sc, (struct t4_i2c_data *)data); break; case CHELSIO_T4_CLEAR_STATS: rc = clear_stats(sc, *(uint32_t *)data); break; case CHELSIO_T4_SCHED_CLASS: rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); break; case CHELSIO_T4_SCHED_QUEUE: rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); break; case CHELSIO_T4_GET_TRACER: rc = t4_get_tracer(sc, (struct t4_tracer *)data); break; case CHELSIO_T4_SET_TRACER: rc = t4_set_tracer(sc, (struct t4_tracer *)data); break; case CHELSIO_T4_LOAD_CFG: rc = load_cfg(sc, (struct t4_data *)data); break; case CHELSIO_T4_LOAD_BOOT: rc = load_boot(sc, (struct t4_bootrom *)data); break; case CHELSIO_T4_LOAD_BOOTCFG: rc = load_bootcfg(sc, (struct t4_data *)data); break; case CHELSIO_T4_CUDBG_DUMP: rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data); break; case CHELSIO_T4_SET_OFLD_POLICY: rc = set_offload_policy(sc, (struct t4_offload_policy *)data); break; case CHELSIO_T4_HOLD_CLIP_ADDR: rc = hold_clip_addr(sc, (struct t4_clip_addr *)data); break; case CHELSIO_T4_RELEASE_CLIP_ADDR: rc = release_clip_addr(sc, (struct t4_clip_addr *)data); break; default: rc = 
ENOTTY; } return (rc); } #ifdef TCP_OFFLOAD int toe_capability(struct vi_info *vi, bool enable) { int rc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; ASSERT_SYNCHRONIZED_OP(sc); if (!is_offload(sc)) return (ENODEV); if (hw_off_limits(sc)) return (ENXIO); if (enable) { #ifdef KERN_TLS if (sc->flags & KERN_TLS_ON && is_t6(sc)) { int i, j, n; struct port_info *p; struct vi_info *v; /* * Reconfigure hardware for TOE if TXTLS is not enabled * on any ifnet. */ n = 0; for_each_port(sc, i) { p = sc->port[i]; for_each_vi(p, j, v) { if (if_getcapenable(v->ifp) & IFCAP_TXTLS) { CH_WARN(sc, "%s has NIC TLS enabled.\n", device_get_nameunit(v->dev)); n++; } } } if (n > 0) { CH_WARN(sc, "Disable NIC TLS on all interfaces " "associated with this adapter before " "trying to enable TOE.\n"); return (EAGAIN); } rc = t6_config_kern_tls(sc, false); if (rc) return (rc); } #endif if ((if_getcapenable(vi->ifp) & IFCAP_TOE) != 0) { /* TOE is already enabled. */ return (0); } /* * We need the port's queues around so that we're able to send * and receive CPLs to/from the TOE even if the ifnet for this * port has never been UP'd administratively. */ if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) return (rc); if (!(pi->vi[0].flags & VI_INIT_DONE) && ((rc = vi_init(&pi->vi[0])) != 0)) return (rc); if (isset(&sc->offload_map, pi->port_id)) { /* TOE is enabled on another VI of this port. */ MPASS(pi->uld_vis > 0); pi->uld_vis++; return (0); } if (!uld_active(sc, ULD_TOM)) { rc = t4_activate_uld(sc, ULD_TOM); if (rc == EAGAIN) { log(LOG_WARNING, "You must kldload t4_tom.ko before trying " "to enable TOE on a cxgbe interface.\n"); } if (rc != 0) return (rc); KASSERT(sc->tom_softc != NULL, ("%s: TOM activated but softc NULL", __func__)); KASSERT(uld_active(sc, ULD_TOM), ("%s: TOM activated but flag not set", __func__)); } /* Activate iWARP and iSCSI too, if the modules are loaded. */ if (!uld_active(sc, ULD_IWARP)) (void) t4_activate_uld(sc, ULD_IWARP); if (!uld_active(sc, ULD_ISCSI)) (void) t4_activate_uld(sc, ULD_ISCSI); if (pi->uld_vis++ == 0) setbit(&sc->offload_map, pi->port_id); } else { if ((if_getcapenable(vi->ifp) & IFCAP_TOE) == 0) { /* TOE is already disabled. */ return (0); } MPASS(isset(&sc->offload_map, pi->port_id)); MPASS(pi->uld_vis > 0); if (--pi->uld_vis == 0) clrbit(&sc->offload_map, pi->port_id); } return (0); } /* * Add an upper layer driver to the global list. */ int t4_register_uld(struct uld_info *ui, int id) { int rc; if (id < 0 || id > ULD_MAX) return (EINVAL); sx_xlock(&t4_uld_list_lock); if (t4_uld_list[id] != NULL) rc = EEXIST; else { t4_uld_list[id] = ui; rc = 0; } sx_xunlock(&t4_uld_list_lock); return (rc); } int t4_unregister_uld(struct uld_info *ui, int id) { if (id < 0 || id > ULD_MAX) return (EINVAL); sx_xlock(&t4_uld_list_lock); MPASS(t4_uld_list[id] == ui); t4_uld_list[id] = NULL; sx_xunlock(&t4_uld_list_lock); return (0); } int t4_activate_uld(struct adapter *sc, int id) { int rc; ASSERT_SYNCHRONIZED_OP(sc); if (id < 0 || id > ULD_MAX) return (EINVAL); /* Adapter needs to be initialized before any ULD can be activated. */ if (!(sc->flags & FULL_INIT_DONE)) { rc = adapter_init(sc); if (rc != 0) return (rc); } sx_slock(&t4_uld_list_lock); if (t4_uld_list[id] == NULL) rc = EAGAIN; /* load the KLD with this ULD and try again. 
*/ else { rc = t4_uld_list[id]->uld_activate(sc); if (rc == 0) setbit(&sc->active_ulds, id); } sx_sunlock(&t4_uld_list_lock); return (rc); } int t4_deactivate_uld(struct adapter *sc, int id) { int rc; ASSERT_SYNCHRONIZED_OP(sc); if (id < 0 || id > ULD_MAX) return (EINVAL); sx_slock(&t4_uld_list_lock); if (t4_uld_list[id] == NULL) rc = ENXIO; else { rc = t4_uld_list[id]->uld_deactivate(sc); if (rc == 0) clrbit(&sc->active_ulds, id); } sx_sunlock(&t4_uld_list_lock); return (rc); } static int deactivate_all_uld(struct adapter *sc) { int i, rc; rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4detuld"); if (rc != 0) return (ENXIO); sx_slock(&t4_uld_list_lock); for (i = 0; i <= ULD_MAX; i++) { if (t4_uld_list[i] == NULL || !uld_active(sc, i)) continue; rc = t4_uld_list[i]->uld_deactivate(sc); if (rc != 0) break; clrbit(&sc->active_ulds, i); } sx_sunlock(&t4_uld_list_lock); end_synchronized_op(sc, 0); return (rc); } static void stop_all_uld(struct adapter *sc) { int i; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4uldst") != 0) return; sx_slock(&t4_uld_list_lock); for (i = 0; i <= ULD_MAX; i++) { if (t4_uld_list[i] == NULL || !uld_active(sc, i) || t4_uld_list[i]->uld_stop == NULL) continue; (void) t4_uld_list[i]->uld_stop(sc); } sx_sunlock(&t4_uld_list_lock); end_synchronized_op(sc, 0); } static void restart_all_uld(struct adapter *sc) { int i; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4uldre") != 0) return; sx_slock(&t4_uld_list_lock); for (i = 0; i <= ULD_MAX; i++) { if (t4_uld_list[i] == NULL || !uld_active(sc, i) || t4_uld_list[i]->uld_restart == NULL) continue; (void) t4_uld_list[i]->uld_restart(sc); } sx_sunlock(&t4_uld_list_lock); end_synchronized_op(sc, 0); } int uld_active(struct adapter *sc, int id) { MPASS(id >= 0 && id <= ULD_MAX); return (isset(&sc->active_ulds, id)); } #endif #ifdef KERN_TLS static int ktls_capability(struct adapter *sc, bool enable) { ASSERT_SYNCHRONIZED_OP(sc); if (!is_ktls(sc)) return (ENODEV); if (!is_t6(sc)) return (0); if (hw_off_limits(sc)) return (ENXIO); if (enable) { if (sc->flags & KERN_TLS_ON) return (0); /* already on */ if (sc->offload_map != 0) { CH_WARN(sc, "Disable TOE on all interfaces associated with " "this adapter before trying to enable NIC TLS.\n"); return (EAGAIN); } return (t6_config_kern_tls(sc, true)); } else { /* * Nothing to do for disable. If TOE is enabled sometime later * then toe_capability will reconfigure the hardware. */ return (0); } } #endif /* * t = ptr to tunable. * nc = number of CPUs. * c = compiled in default for that tunable. */ static void calculate_nqueues(int *t, int nc, const int c) { int nq; if (*t > 0) return; nq = *t < 0 ? -*t : c; *t = min(nc, nq); } /* * Come up with reasonable defaults for some of the tunables, provided they're * not set by the user (in which case we'll use the values as is). 
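* For the queue-count tunables handled by calculate_nqueues() above, a value * left at 0 falls back to the compiled-in default and a negative value * requests abs(value) queues, both capped at the number of CPUs; a positive * value is used as is.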
*/ static void tweak_tunables(void) { int nc = mp_ncpus; /* our snapshot of the number of CPUs */ if (t4_ntxq < 1) { #ifdef RSS t4_ntxq = rss_getnumbuckets(); #else calculate_nqueues(&t4_ntxq, nc, NTXQ); #endif } calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI); if (t4_nrxq < 1) { #ifdef RSS t4_nrxq = rss_getnumbuckets(); #else calculate_nqueues(&t4_nrxq, nc, NRXQ); #endif } calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI); #if defined(TCP_OFFLOAD) || defined(RATELIMIT) calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ); calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI); #endif #ifdef TCP_OFFLOAD calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ); calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI); #endif #if defined(TCP_OFFLOAD) || defined(KERN_TLS) if (t4_toecaps_allowed == -1) t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; #else if (t4_toecaps_allowed == -1) t4_toecaps_allowed = 0; #endif #ifdef TCP_OFFLOAD if (t4_rdmacaps_allowed == -1) { t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | FW_CAPS_CONFIG_RDMA_RDMAC; } if (t4_iscsicaps_allowed == -1) { t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | FW_CAPS_CONFIG_ISCSI_TARGET_PDU | FW_CAPS_CONFIG_ISCSI_T10DIF; } if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS) t4_tmr_idx_ofld = TMR_IDX_OFLD; if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS) t4_pktc_idx_ofld = PKTC_IDX_OFLD; #else if (t4_rdmacaps_allowed == -1) t4_rdmacaps_allowed = 0; if (t4_iscsicaps_allowed == -1) t4_iscsicaps_allowed = 0; #endif #ifdef DEV_NETMAP calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ); calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ); calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI); calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI); #endif if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS) t4_tmr_idx = TMR_IDX; if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS) t4_pktc_idx = PKTC_IDX; if (t4_qsize_txq < 128) t4_qsize_txq = 128; if (t4_qsize_rxq < 128) t4_qsize_rxq = 128; while (t4_qsize_rxq & 7) t4_qsize_rxq++; t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; /* * Number of VIs to create per-port. The first VI is the "main" regular * VI for the port. The rest are additional virtual interfaces on the * same physical port. Note that the main VI does not have native * netmap support but the extra VIs do. * * Limit the number of VIs per port to the number of available * MAC addresses per port. 
*/ if (t4_num_vis < 1) t4_num_vis = 1; if (t4_num_vis > nitems(vi_mac_funcs)) { t4_num_vis = nitems(vi_mac_funcs); printf("cxgbe: number of VIs limited to %d\n", t4_num_vis); } if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) { pcie_relaxed_ordering = 1; #if defined(__i386__) || defined(__amd64__) if (cpu_vendor_id == CPU_VENDOR_INTEL) pcie_relaxed_ordering = 0; #endif } } #ifdef DDB static void t4_dump_mem(struct adapter *sc, u_int addr, u_int len) { uint32_t base, j, off, pf, reg, save, win_pos; reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); save = t4_read_reg(sc, reg); base = sc->memwin[2].mw_base; if (is_t4(sc)) { pf = 0; win_pos = addr & ~0xf; /* start must be 16B aligned */ } else { pf = V_PFNUM(sc->pf); win_pos = addr & ~0x7f; /* start must be 128B aligned */ } off = addr - win_pos; t4_write_reg(sc, reg, win_pos | pf); t4_read_reg(sc, reg); while (len > 0 && !db_pager_quit) { uint32_t buf[8]; for (j = 0; j < 8; j++, off += 4) buf[j] = htonl(t4_read_reg(sc, base + off)); db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); if (len <= sizeof(buf)) len = 0; else len -= sizeof(buf); } t4_write_reg(sc, reg, save); t4_read_reg(sc, reg); } static void t4_dump_tcb(struct adapter *sc, int tid) { uint32_t tcb_addr; /* Dump TCB for the tid */ tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); tcb_addr += tid * TCB_SIZE; t4_dump_mem(sc, tcb_addr, TCB_SIZE); } static void t4_dump_devlog(struct adapter *sc) { struct devlog_params *dparams = &sc->params.devlog; struct fw_devlog_e e; int i, first, j, m, nentries, rc; uint64_t ftstamp = UINT64_MAX; if (dparams->start == 0) { db_printf("devlog params not valid\n"); return; } nentries = dparams->size / sizeof(struct fw_devlog_e); m = fwmtype_to_hwmtype(dparams->memtype); /* Find the first entry. */ first = -1; for (i = 0; i < nentries && !db_pager_quit; i++) { rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), sizeof(e), (void *)&e); if (rc != 0) break; if (e.timestamp == 0) break; e.timestamp = be64toh(e.timestamp); if (e.timestamp < ftstamp) { ftstamp = e.timestamp; first = i; } } if (first == -1) return; i = first; do { rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), sizeof(e), (void *)&e); if (rc != 0) return; if (e.timestamp == 0) return; e.timestamp = be64toh(e.timestamp); e.seqno = be32toh(e.seqno); for (j = 0; j < 8; j++) e.params[j] = be32toh(e.params[j]); db_printf("%10d %15ju %8s %8s ", e.seqno, e.timestamp, (e.level < nitems(devlog_level_strings) ? devlog_level_strings[e.level] : "UNKNOWN"), (e.facility < nitems(devlog_facility_strings) ? 
devlog_facility_strings[e.facility] : "UNKNOWN")); db_printf(e.fmt, e.params[0], e.params[1], e.params[2], e.params[3], e.params[4], e.params[5], e.params[6], e.params[7]); if (++i == nentries) i = 0; } while (i != first && !db_pager_quit); } static DB_DEFINE_TABLE(show, t4, show_t4); DB_TABLE_COMMAND_FLAGS(show_t4, devlog, db_show_devlog, CS_OWN) { device_t dev; int t; bool valid; valid = false; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); valid = true; } db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 devlog \n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } t4_dump_devlog(device_get_softc(dev)); } DB_TABLE_COMMAND_FLAGS(show_t4, tcb, db_show_t4tcb, CS_OWN) { device_t dev; int radix, tid, t; bool valid; valid = false; radix = db_radix; db_radix = 10; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); t = db_read_token(); if (t == tNUMBER) { tid = db_tok_number; valid = true; } } db_radix = radix; db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 tcb \n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } if (tid < 0) { db_printf("invalid tid\n"); return; } t4_dump_tcb(device_get_softc(dev), tid); } DB_TABLE_COMMAND_FLAGS(show_t4, memdump, db_show_memdump, CS_OWN) { device_t dev; int radix, t; bool valid; valid = false; radix = db_radix; db_radix = 10; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); t = db_read_token(); if (t == tNUMBER) { addr = db_tok_number; t = db_read_token(); if (t == tNUMBER) { count = db_tok_number; valid = true; } } } db_radix = radix; db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 memdump \n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } if (addr < 0) { db_printf("invalid address\n"); return; } if (count <= 0) { db_printf("invalid length\n"); return; } t4_dump_mem(device_get_softc(dev), addr, count); } #endif static eventhandler_tag vxlan_start_evtag; static eventhandler_tag vxlan_stop_evtag; struct vxlan_evargs { if_t ifp; uint16_t port; }; static void enable_vxlan_rx(struct adapter *sc) { int i, rc; struct port_info *pi; uint8_t match_all_mac[ETHER_ADDR_LEN] = {0}; ASSERT_SYNCHRONIZED_OP(sc); t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, V_VXLAN(sc->vxlan_port) | F_VXLAN_EN); for_each_port(sc, i) { pi = sc->port[i]; if (pi->vxlan_tcam_entry == true) continue; rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, match_all_mac, match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, true); if (rc < 0) { rc = -rc; CH_ERR(&pi->vi[0], "failed to add VXLAN TCAM entry: %d.\n", rc); } else { MPASS(rc == sc->rawf_base + pi->port_id); pi->vxlan_tcam_entry = true; } } } static void t4_vxlan_start(struct adapter *sc, void *arg) { struct vxlan_evargs *v = arg; if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) return; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxst") != 0) return; if (sc->vxlan_refcount == 0) { sc->vxlan_port = v->port; sc->vxlan_refcount = 1; if (!hw_off_limits(sc)) enable_vxlan_rx(sc); } else if (sc->vxlan_port == v->port) { sc->vxlan_refcount++; } else { CH_ERR(sc, "VXLAN already configured on port %d; " "ignoring attempt to configure it on port %d\n", sc->vxlan_port, v->port); } end_synchronized_op(sc, 0); } static void t4_vxlan_stop(struct adapter *sc, void *arg) { struct vxlan_evargs *v = arg; if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) return; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxsp") != 0) return; /* * 
VXLANs may have been configured before the driver was loaded so we * may see more stops than starts. This is not handled cleanly but at * least we keep the refcount sane. */ if (sc->vxlan_port != v->port) goto done; if (sc->vxlan_refcount == 0) { CH_ERR(sc, "VXLAN operation on port %d was stopped earlier; " "ignoring attempt to stop it again.\n", sc->vxlan_port); } else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc)) t4_set_reg_field(sc, A_MPS_RX_VXLAN_TYPE, F_VXLAN_EN, 0); done: end_synchronized_op(sc, 0); } static void t4_vxlan_start_handler(void *arg __unused, if_t ifp, sa_family_t family, u_int port) { struct vxlan_evargs v; MPASS(family == AF_INET || family == AF_INET6); v.ifp = ifp; v.port = port; t4_iterate(t4_vxlan_start, &v); } static void t4_vxlan_stop_handler(void *arg __unused, if_t ifp, sa_family_t family, u_int port) { struct vxlan_evargs v; MPASS(family == AF_INET || family == AF_INET6); v.ifp = ifp; v.port = port; t4_iterate(t4_vxlan_stop, &v); } static struct sx mlu; /* mod load unload */ SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); static int mod_event(module_t mod, int cmd, void *arg) { int rc = 0; static int loaded = 0; switch (cmd) { case MOD_LOAD: sx_xlock(&mlu); if (loaded++ == 0) { t4_sge_modload(); t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, t4_filter_rpl, CPL_COOKIE_FILTER); t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl, CPL_COOKIE_FILTER); t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER); t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER); t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER); t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); t4_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl); sx_init(&t4_list_lock, "T4/T5 adapters"); SLIST_INIT(&t4_list); callout_init(&fatal_callout, 1); #ifdef TCP_OFFLOAD sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); #endif #ifdef INET6 t4_clip_modload(); #endif #ifdef KERN_TLS t6_ktls_modload(); #endif t4_tracer_modload(); tweak_tunables(); vxlan_start_evtag = EVENTHANDLER_REGISTER(vxlan_start, t4_vxlan_start_handler, NULL, EVENTHANDLER_PRI_ANY); vxlan_stop_evtag = EVENTHANDLER_REGISTER(vxlan_stop, t4_vxlan_stop_handler, NULL, EVENTHANDLER_PRI_ANY); reset_tq = taskqueue_create("t4_rst_tq", M_WAITOK, taskqueue_thread_enqueue, &reset_tq); taskqueue_start_threads(&reset_tq, 1, PI_SOFT, "t4_rst_thr"); } sx_xunlock(&mlu); break; case MOD_UNLOAD: sx_xlock(&mlu); if (--loaded == 0) { #ifdef TCP_OFFLOAD int i; #endif int tries; taskqueue_free(reset_tq); tries = 0; while (tries++ < 5 && t4_sge_extfree_refs() != 0) { uprintf("%ju clusters with custom free routine " "still in use.\n", t4_sge_extfree_refs()); pause("t4unload", 2 * hz); } sx_slock(&t4_list_lock); if (!SLIST_EMPTY(&t4_list)) { rc = EBUSY; sx_sunlock(&t4_list_lock); goto done_unload; } #ifdef TCP_OFFLOAD sx_slock(&t4_uld_list_lock); for (i = 0; i <= ULD_MAX; i++) { if (t4_uld_list[i] != NULL) { rc = EBUSY; sx_sunlock(&t4_uld_list_lock); sx_sunlock(&t4_list_lock); goto done_unload; } } sx_sunlock(&t4_uld_list_lock); #endif sx_sunlock(&t4_list_lock); if (t4_sge_extfree_refs() == 0) { EVENTHANDLER_DEREGISTER(vxlan_start, vxlan_start_evtag); EVENTHANDLER_DEREGISTER(vxlan_stop, vxlan_stop_evtag); t4_tracer_modunload(); #ifdef KERN_TLS t6_ktls_modunload(); #endif #ifdef INET6 t4_clip_modunload(); #endif #ifdef TCP_OFFLOAD 
sx_destroy(&t4_uld_list_lock); #endif sx_destroy(&t4_list_lock); t4_sge_modunload(); loaded = 0; } else { rc = EBUSY; loaded++; /* undo earlier decrement */ } } done_unload: sx_xunlock(&mlu); break; } return (rc); } DRIVER_MODULE(t4nex, pci, t4_driver, mod_event, 0); MODULE_VERSION(t4nex, 1); MODULE_DEPEND(t4nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t4nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(t5nex, pci, t5_driver, mod_event, 0); MODULE_VERSION(t5nex, 1); MODULE_DEPEND(t5nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t5nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(t6nex, pci, t6_driver, mod_event, 0); MODULE_VERSION(t6nex, 1); MODULE_DEPEND(t6nex, crypto, 1, 1, 1); MODULE_DEPEND(t6nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t6nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, 0, 0); MODULE_VERSION(cxgbe, 1); DRIVER_MODULE(cxl, t5nex, cxl_driver, 0, 0); MODULE_VERSION(cxl, 1); DRIVER_MODULE(cc, t6nex, cc_driver, 0, 0); MODULE_VERSION(cc, 1); DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, 0, 0); MODULE_VERSION(vcxgbe, 1); DRIVER_MODULE(vcxl, cxl, vcxl_driver, 0, 0); MODULE_VERSION(vcxl, 1); DRIVER_MODULE(vcc, cc, vcc_driver, 0, 0); MODULE_VERSION(vcc, 1); diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c index 1d0275127ced..c84b22d1d74d 100644 --- a/sys/dev/dpaa2/dpaa2_mc.c +++ b/sys/dev/dpaa2/dpaa2_mc.c @@ -1,882 +1,880 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Management Complex (MC) bus driver. * * MC is a hardware resource manager which can be found in several NXP * SoCs (LX2160A, for example) and provides an access to the specialized * hardware objects used in network-oriented packet processing applications. 
*/ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #include #include #endif #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" /* Macros to read/write MC registers */ #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) #define IORT_DEVICE_NAME "MCE" /* MC Registers */ #define MC_REG_GCR1 0x0000u #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */ #define MC_REG_GSR 0x0008u #define MC_REG_FAPR 0x0028u /* General Control Register 1 (GCR1) */ #define GCR1_P1_STOP 0x80000000u #define GCR1_P2_STOP 0x40000000u /* General Status Register (GSR) */ #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) #define GSR_MCS(v) (((v) & 0xFFu) >> 0) /* Timeouts to wait for the MC status. */ #define MC_STAT_TIMEOUT 1000u /* us */ #define MC_STAT_ATTEMPTS 100u /** * @brief Structure to describe a DPAA2 device as a managed resource. */ struct dpaa2_mc_devinfo { STAILQ_ENTRY(dpaa2_mc_devinfo) link; device_t dpaa2_dev; uint32_t flags; uint32_t owners; }; MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); static struct resource_spec dpaa2_mc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, RESOURCE_SPEC_END }; static u_int dpaa2_mc_get_xref(device_t, device_t); static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, uint32_t *); /* * For device interface. */ int dpaa2_mc_attach(device_t dev) { struct dpaa2_mc_softc *sc; struct resource_map_request req; uint32_t val; int error; sc = device_get_softc(dev); sc->dev = dev; sc->msi_allocated = false; sc->msi_owner = NULL; error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources\n", __func__); return (ENXIO); } if (sc->res[1]) { resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], &req, &sc->map[1]); if (error) { device_printf(dev, "%s: failed to map control " "registers\n", __func__); dpaa2_mc_detach(dev); return (ENXIO); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ val = mcreg_read_4(sc, MC_REG_GCR1) & ~(GCR1_P1_STOP | GCR1_P2_STOP); mcreg_write_4(sc, MC_REG_GCR1, val); /* Poll MC status. */ if (bootverbose) device_printf(dev, "polling MC status...\n"); for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { val = mcreg_read_4(sc, MC_REG_GSR); if (GSR_MCS(val) != 0u) break; DELAY(MC_STAT_TIMEOUT); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); } /* At least 64 bytes of the command portal should be available. 
*/ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { device_printf(dev, "%s: MC portal memory region too small: " "%jd\n", __func__, rman_get_size(sc->res[0])); dpaa2_mc_detach(dev); return (ENXIO); } /* Map MC portal memory resource. */ resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req, &sc->map[0]); if (error) { device_printf(dev, "Failed to map MC portal memory\n"); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 I/O objects. */ sc->dpio_rman.rm_type = RMAN_ARRAY; sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; error = rman_init(&sc->dpio_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 I/O objects: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 buffer pools. */ sc->dpbp_rman.rm_type = RMAN_ARRAY; sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; error = rman_init(&sc->dpbp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 buffer pools: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 concentrators. */ sc->dpcon_rman.rm_type = RMAN_ARRAY; sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; error = rman_init(&sc->dpcon_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 concentrators: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 MC portals. */ sc->dpmcp_rman.rm_type = RMAN_ARRAY; sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; error = rman_init(&sc->dpmcp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 MC portals: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a list of non-allocatable DPAA2 devices. */ mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); STAILQ_INIT(&sc->mdev_list); mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); /* * Add a root resource container as the only child of the bus. All of * the direct descendant containers will be attached to the root one * instead of the MC device. */ sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); if (sc->rcdev == NULL) { dpaa2_mc_detach(dev); return (ENXIO); } bus_identify_children(dev); bus_attach_children(dev); return (0); } int dpaa2_mc_detach(device_t dev) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo = NULL; int error; error = bus_generic_detach(dev); if (error != 0) return (error); sc = device_get_softc(dev); - if (sc->rcdev) - device_delete_child(dev, sc->rcdev); bus_release_resources(dev, dpaa2_mc_spec, sc->res); dinfo = device_get_ivars(dev); if (dinfo) free(dinfo, M_DPAA2_MC); return (device_delete_children(dev)); } /* * For bus interface. */ struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; struct rman *rm; int error; rm = dpaa2_mc_rman(mcdev, type, flags); if (rm == NULL) return (bus_generic_alloc_resource(mcdev, child, type, rid, start, end, count, flags)); /* * Skip managing DPAA2-specific resource. It must be provided to MC by * calling DPAA2_MC_MANAGE_DEV() beforehand. 
*/ if (type <= DPAA2_DEV_MC) { error = rman_manage_region(rm, start, end); if (error) { device_printf(mcdev, "rman_manage_region() failed: " "start=%#jx, end=%#jx, error=%d\n", start, end, error); goto fail; } } res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start, end, count, flags); if (res == NULL) goto fail; return (res); fail: device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); return (NULL); } int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_adjust_resource(mcdev, child, r, start, end)); return (bus_generic_adjust_resource(mcdev, child, r, start, end)); } int dpaa2_mc_release_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_release_resource(mcdev, child, r)); return (bus_generic_release_resource(mcdev, child, r)); } int dpaa2_mc_activate_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_activate_resource(mcdev, child, r)); return (bus_generic_activate_resource(mcdev, child, r)); } int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_deactivate_resource(mcdev, child, r)); return (bus_generic_deactivate_resource(mcdev, child, r)); } /* * For pseudo-pcib interface. */ int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { #if defined(INTRNG) return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); #else return (ENXIO); #endif } int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, uintptr_t *id) { struct dpaa2_devinfo *dinfo; dinfo = device_get_ivars(child); if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (ENXIO); if (type == PCI_ID_MSI) return (dpaa2_mc_map_id(mcdev, child, id)); *id = dinfo->icid; return (0); } /* * For DPAA2 Management Complex bus driver interface. */ int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; struct rman *rm; int error; sc = device_get_softc(mcdev); dinfo = device_get_ivars(dpaa2_dev); if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); di->dpaa2_dev = dpaa2_dev; di->flags = flags; di->owners = 0; /* Append a new managed DPAA2 device to the queue. */ mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); mtx_unlock(&sc->mdev_lock); if (flags & DPAA2_MC_DEV_ALLOCATABLE) { /* Select rman based on a type of the DPAA2 device. 
*/ rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0); if (!rm) return (ENOENT); /* Manage DPAA2 device as an allocatable resource. */ error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev); if (error) return (error); } return (0); } int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct rman *rm; rman_res_t start, end; int error; if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); /* Select resource manager based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, devtype, 0); if (!rm) return (ENOENT); /* Find first free DPAA2 device of the given type. */ error = rman_first_free_region(rm, &start, &end); if (error) return (error); KASSERT(start == end, ("start != end, but should be the same pointer " "to the DPAA2 device: start=%jx, end=%jx", start, end)); *dpaa2_dev = (device_t) start; return (0); } int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype, uint32_t obj_id) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if (dinfo->dtype == devtype && dinfo->id == obj_id) { *dpaa2_dev = di->dpaa2_dev; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; device_t dev = NULL; uint32_t owners = UINT32_MAX; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if ((dinfo->dtype == devtype) && (di->flags & DPAA2_MC_DEV_SHAREABLE) && (di->owners < owners)) { dev = di->dpaa2_dev; owners = di->owners; } } if (dev) { *dpaa2_dev = dev; error = 0; } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners++; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners -= di->owners > 0 ? 
1 : 0; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } /** * @internal */ static u_int dpaa2_mc_get_xref(device_t mcdev, device_t child) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); #ifdef DEV_ACPI u_int xref, devid; #endif #ifdef FDT phandle_t msi_parent; #endif int error; if (sc && dinfo) { #ifdef DEV_ACPI if (sc->acpi_based) { /* * NOTE: The first named component from the IORT table * with the given name (as a substring) will be used. */ error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error) return (0); return (xref); } #endif #ifdef FDT if (!sc->acpi_based) { /* FDT-based driver. */ error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, &msi_parent, NULL); if (error) return (0); return ((u_int) msi_parent); } #endif } return (0); } /** * @internal */ static u_int dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id) { struct dpaa2_devinfo *dinfo; #ifdef DEV_ACPI u_int xref, devid; int error; #endif dinfo = device_get_ivars(child); if (dinfo) { /* * The first named components from IORT table with the given * name (as a substring) will be used. */ #ifdef DEV_ACPI error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error == 0) *id = devid; else #endif *id = dinfo->icid; /* RID not in IORT, likely FW bug */ return (0); } return (ENXIO); } /** * @internal * @brief Obtain a resource manager based on the given type of the resource. */ struct rman * dpaa2_mc_rman(device_t mcdev, int type, u_int flags) { struct dpaa2_mc_softc *sc; sc = device_get_softc(mcdev); switch (type) { case DPAA2_DEV_IO: return (&sc->dpio_rman); case DPAA2_DEV_BP: return (&sc->dpbp_rman); case DPAA2_DEV_CON: return (&sc->dpcon_rman); case DPAA2_DEV_MCP: return (&sc->dpmcp_rman); default: break; } return (NULL); } #if defined(INTRNG) && !defined(IOMMU) /** * @internal * @brief Allocates requested number of MSIs. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int msi_irqs[DPAA2_MC_MSI_COUNT]; int error; /* Pre-allocate a bunch of MSIs for MC to be used by its children. */ if (!sc->msi_allocated) { error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); if (error) { device_printf(mcdev, "failed to pre-allocate %d MSIs: " "error=%d\n", DPAA2_MC_MSI_COUNT, error); return (error); } mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { sc->msi[i].child = NULL; sc->msi[i].irq = msi_irqs[i]; } sc->msi_owner = child; sc->msi_allocated = true; mtx_unlock(&sc->msi_lock); } error = ENOENT; /* Find the first free MSIs from the pre-allocated pool. */ mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != NULL) continue; error = 0; for (int j = 0; j < count; j++) { if (i + j >= DPAA2_MC_MSI_COUNT) { device_printf(mcdev, "requested %d MSIs exceed " "limit of %d available\n", count, DPAA2_MC_MSI_COUNT); error = E2BIG; break; } sc->msi[i + j].child = child; irqs[j] = sc->msi[i + j].irq; } break; } mtx_unlock(&sc->msi_lock); return (error); } /** * @internal * @brief Marks IRQs as free in the pre-allocated pool of MSIs. 
* * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. * NOTE: MSIs are kept allocated in the kernel as a part of the pool. */ static int dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != child) continue; for (int j = 0; j < count; j++) { if (sc->msi[i].irq == irqs[j]) { sc->msi[i].child = NULL; break; } } } mtx_unlock(&sc->msi_lock); return (0); } /** * @internal * @brief Provides address to write to and data according to the given MSI from * the pre-allocated pool. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int error = EINVAL; mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child == child && sc->msi[i].irq == irq) { error = 0; break; } } mtx_unlock(&sc->msi_lock); if (error) return (error); return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, sc->msi_owner), irq, addr, data)); } #endif /* defined(INTRNG) && !defined(IOMMU) */ static device_method_t dpaa2_mc_methods[] = { DEVMETHOD_END }; DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, sizeof(struct dpaa2_mc_softc)); diff --git a/sys/dev/etherswitch/felix/felix.c b/sys/dev/etherswitch/felix/felix.c index 622cf3bca140..b6fb8cbb67c8 100644 --- a/sys/dev/etherswitch/felix/felix.c +++ b/sys/dev/etherswitch/felix/felix.c @@ -1,1006 +1,1004 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alstom Group. * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "etherswitch_if.h" #include "miibus_if.h" MALLOC_DECLARE(M_FELIX); MALLOC_DEFINE(M_FELIX, "felix", "felix switch"); static device_probe_t felix_probe; static device_attach_t felix_attach; static device_detach_t felix_detach; static etherswitch_info_t* felix_getinfo(device_t); static int felix_getconf(device_t, etherswitch_conf_t *); static int felix_setconf(device_t, etherswitch_conf_t *); static void felix_lock(device_t); static void felix_unlock(device_t); static int felix_getport(device_t, etherswitch_port_t *); static int felix_setport(device_t, etherswitch_port_t *); static int felix_readreg_wrapper(device_t, int); static int felix_writereg_wrapper(device_t, int, int); static int felix_readphy(device_t, int, int); static int felix_writephy(device_t, int, int, int); static int felix_setvgroup(device_t, etherswitch_vlangroup_t *); static int felix_getvgroup(device_t, etherswitch_vlangroup_t *); static int felix_parse_port_fdt(felix_softc_t, phandle_t, int *); static int felix_setup(felix_softc_t); static void felix_setup_port(felix_softc_t, int); static void felix_tick(void *); static int felix_ifmedia_upd(if_t); static void felix_ifmedia_sts(if_t, struct ifmediareq *); static void felix_get_port_cfg(felix_softc_t, etherswitch_port_t *); static void felix_set_port_cfg(felix_softc_t, etherswitch_port_t *); static bool felix_is_phyport(felix_softc_t, int); static struct mii_data *felix_miiforport(felix_softc_t, unsigned int); static struct felix_pci_id felix_pci_ids[] = { {PCI_VENDOR_FREESCALE, FELIX_DEV_ID, FELIX_DEV_NAME}, {0, 0, NULL} }; static device_method_t felix_methods[] = { /* device interface */ DEVMETHOD(device_probe, felix_probe), DEVMETHOD(device_attach, felix_attach), DEVMETHOD(device_detach, felix_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, felix_getinfo), DEVMETHOD(etherswitch_getconf, felix_getconf), DEVMETHOD(etherswitch_setconf, felix_setconf), DEVMETHOD(etherswitch_lock, felix_lock), DEVMETHOD(etherswitch_unlock, felix_unlock), DEVMETHOD(etherswitch_getport, felix_getport), DEVMETHOD(etherswitch_setport, felix_setport), DEVMETHOD(etherswitch_readreg, felix_readreg_wrapper), DEVMETHOD(etherswitch_writereg, felix_writereg_wrapper), DEVMETHOD(etherswitch_readphyreg, felix_readphy), DEVMETHOD(etherswitch_writephyreg, felix_writephy), DEVMETHOD(etherswitch_setvgroup, felix_setvgroup), DEVMETHOD(etherswitch_getvgroup, felix_getvgroup), /* miibus interface */ DEVMETHOD(miibus_readreg, felix_readphy), DEVMETHOD(miibus_writereg, felix_writephy), DEVMETHOD_END }; DEFINE_CLASS_0(felix, felix_driver, felix_methods, sizeof(struct felix_softc)); DRIVER_MODULE_ORDERED(felix, pci, felix_driver, NULL, NULL, SI_ORDER_ANY); DRIVER_MODULE(miibus, felix, miibus_fdt_driver, NULL, NULL); DRIVER_MODULE(etherswitch, felix, etherswitch_driver, NULL, NULL); 
MODULE_VERSION(felix, 1); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, felix, felix_pci_ids, nitems(felix_pci_ids) - 1); static int felix_probe(device_t dev) { struct felix_pci_id *id; felix_softc_t sc; sc = device_get_softc(dev); sc->dev = dev; for (id = felix_pci_ids; id->vendor != 0; ++id) { if (pci_get_device(dev) != id->device || pci_get_vendor(dev) != id->vendor) continue; device_set_desc(dev, id->desc); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int felix_parse_port_fdt(felix_softc_t sc, phandle_t child, int *pport) { uint32_t port, status; phandle_t node; if (OF_getencprop(child, "reg", (void *)&port, sizeof(port)) < 0) { device_printf(sc->dev, "Port node doesn't have reg property\n"); return (ENXIO); } *pport = port; node = OF_getproplen(child, "ethernet"); if (node <= 0) sc->ports[port].cpu_port = false; else sc->ports[port].cpu_port = true; node = ofw_bus_find_child(child, "fixed-link"); if (node <= 0) { sc->ports[port].fixed_port = false; return (0); } sc->ports[port].fixed_port = true; if (OF_getencprop(node, "speed", &status, sizeof(status)) <= 0) { device_printf(sc->dev, "Port has fixed-link node without link speed specified\n"); return (ENXIO); } switch (status) { case 2500: status = IFM_2500_T; break; case 1000: status = IFM_1000_T; break; case 100: status = IFM_100_T; break; case 10: status = IFM_10_T; break; default: device_printf(sc->dev, "Unsupported link speed value of %d\n", status); return (ENXIO); } if (OF_hasprop(node, "full-duplex")) status |= IFM_FDX; else status |= IFM_HDX; status |= IFM_ETHER; sc->ports[port].fixed_link_status = status; return (0); } static int felix_init_interface(felix_softc_t sc, int port) { char name[IFNAMSIZ]; snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev)); sc->ports[port].ifp = if_alloc(IFT_ETHER); if_setsoftc(sc->ports[port].ifp, sc); if_setflags(sc->ports[port].ifp, IFF_UP | IFF_BROADCAST | IFF_MULTICAST | IFF_DRV_RUNNING | IFF_SIMPLEX); sc->ports[port].ifname = malloc(strlen(name) + 1, M_FELIX, M_NOWAIT); if (sc->ports[port].ifname == NULL) { if_free(sc->ports[port].ifp); return (ENOMEM); } memcpy(sc->ports[port].ifname, name, strlen(name) + 1); if_initname(sc->ports[port].ifp, sc->ports[port].ifname, port); return (0); } static void felix_setup_port(felix_softc_t sc, int port) { /* Link speed has to be always set to 1000 in the clock register. */ FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_CLK_CFG, FELIX_DEVGMII_CLK_CFG_SPEED_1000); FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_MAC_CFG, FELIX_DEVGMII_MAC_CFG_TX_ENA | FELIX_DEVGMII_MAC_CFG_RX_ENA); FELIX_WR4(sc, FELIX_QSYS_PORT_MODE(port), FELIX_QSYS_PORT_MODE_PORT_ENA); /* * Enable "VLANMTU". Each port has a configurable MTU. * Accept frames that are 8 and 4 bytes longer than it * for double and single tagged frames respectively. * Since etherswitch API doesn't provide an option to change * MTU don't touch it for now. */ FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_VLAN_CFG, FELIX_DEVGMII_VLAN_CFG_ENA | FELIX_DEVGMII_VLAN_CFG_LEN_ENA | FELIX_DEVGMII_VLAN_CFG_DOUBLE_ENA); } static int felix_setup(felix_softc_t sc) { int timeout, i; uint32_t reg; /* Trigger soft reset, bit is self-clearing, with 5s timeout. 
*/ FELIX_WR4(sc, FELIX_DEVCPU_GCB_RST, FELIX_DEVCPU_GCB_RST_EN); timeout = FELIX_INIT_TIMEOUT; do { DELAY(1000); reg = FELIX_RD4(sc, FELIX_DEVCPU_GCB_RST); if ((reg & FELIX_DEVCPU_GCB_RST_EN) == 0) break; } while (timeout-- > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout while waiting for switch to reset\n"); return (ETIMEDOUT); } FELIX_WR4(sc, FELIX_SYS_RAM_CTRL, FELIX_SYS_RAM_CTRL_INIT); timeout = FELIX_INIT_TIMEOUT; do { DELAY(1000); reg = FELIX_RD4(sc, FELIX_SYS_RAM_CTRL); if ((reg & FELIX_SYS_RAM_CTRL_INIT) == 0) break; } while (timeout-- > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout while waiting for switch RAM init.\n"); return (ETIMEDOUT); } FELIX_WR4(sc, FELIX_SYS_CFG, FELIX_SYS_CFG_CORE_EN); for (i = 0; i < sc->info.es_nports; i++) felix_setup_port(sc, i); return (0); } static int felix_timer_rate(SYSCTL_HANDLER_ARGS) { felix_softc_t sc; int error, value, old; sc = arg1; old = value = sc->timer_ticks; error = sysctl_handle_int(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 0) return (EINVAL); if (value == old) return (0); FELIX_LOCK(sc); sc->timer_ticks = value; callout_reset(&sc->tick_callout, sc->timer_ticks, felix_tick, sc); FELIX_UNLOCK(sc); return (0); } static int felix_attach(device_t dev) { phandle_t child, ports, node; int error, port, rid; felix_softc_t sc; uint32_t phy_addr; ssize_t size; sc = device_get_softc(dev); sc->info.es_nports = 0; sc->info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q; strlcpy(sc->info.es_name, "Felix TSN Switch", sizeof(sc->info.es_name)); rid = PCIR_BAR(FELIX_BAR_MDIO); sc->mdio = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mdio == NULL) { device_printf(dev, "Failed to allocate MDIO registers.\n"); return (ENXIO); } rid = PCIR_BAR(FELIX_BAR_REGS); sc->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->regs == NULL) { device_printf(dev, "Failed to allocate registers BAR.\n"); error = ENXIO; goto out_fail; } mtx_init(&sc->mtx, "felix lock", NULL, MTX_DEF); callout_init_mtx(&sc->tick_callout, &sc->mtx, 0); node = ofw_bus_get_node(dev); if (node <= 0) { error = ENXIO; goto out_fail; } ports = ofw_bus_find_child(node, "ports"); if (ports == 0) { device_printf(dev, "Failed to find \"ports\" property in DTS.\n"); error = ENXIO; goto out_fail; } for (child = OF_child(ports); child != 0; child = OF_peer(child)) { /* Do not parse disabled ports. */ if (ofw_bus_node_status_okay(child) == 0) continue; error = felix_parse_port_fdt(sc, child, &port); if (error != 0) goto out_fail; error = felix_init_interface(sc, port); if (error != 0) { device_printf(sc->dev, "Failed to initialize interface.\n"); goto out_fail; } if (sc->ports[port].fixed_port) { sc->info.es_nports++; continue; } size = OF_getencprop(child, "phy-handle", &node, sizeof(node)); if (size <= 0) { device_printf(sc->dev, "Failed to acquire PHY handle from FDT.\n"); error = ENXIO; goto out_fail; } node = OF_node_from_xref(node); size = OF_getencprop(node, "reg", &phy_addr, sizeof(phy_addr)); if (size <= 0) { device_printf(sc->dev, "Failed to obtain PHY address.\n"); error = ENXIO; goto out_fail; } sc->ports[port].phyaddr = phy_addr; sc->ports[port].miibus = NULL; error = mii_attach(dev, &sc->ports[port].miibus, sc->ports[port].ifp, felix_ifmedia_upd, felix_ifmedia_sts, BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, 0); if (error != 0) goto out_fail; sc->info.es_nports++; } error = felix_setup(sc); if (error != 0) goto out_fail; sc->timer_ticks = hz; /* Default to 1s. 
*/ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "timer_ticks", CTLTYPE_INT | CTLFLAG_RW, sc, 0, felix_timer_rate, "I", "Number of ticks between timer invocations"); /* The tick routine has to be called with the lock held. */ FELIX_LOCK(sc); felix_tick(sc); FELIX_UNLOCK(sc); /* Allow etherswitch to attach as our child. */ bus_identify_children(dev); bus_attach_children(dev); return (0); out_fail: felix_detach(dev); return (error); } static int felix_detach(device_t dev) { felix_softc_t sc; int error; int i; error = 0; sc = device_get_softc(dev); bus_generic_detach(dev); mtx_lock(&sc->mtx); callout_stop(&sc->tick_callout); mtx_unlock(&sc->mtx); mtx_destroy(&sc->mtx); /* * If we have been fully attached do a soft reset. * This way after when driver is unloaded switch is left in unmanaged mode. */ if (device_is_attached(dev)) felix_setup(sc); for (i = 0; i < sc->info.es_nports; i++) { - if (sc->ports[i].miibus != NULL) - device_delete_child(dev, sc->ports[i].miibus); if (sc->ports[i].ifp != NULL) if_free(sc->ports[i].ifp); if (sc->ports[i].ifname != NULL) free(sc->ports[i].ifname, M_FELIX); } if (sc->regs != NULL) error = bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->regs), sc->regs); if (sc->mdio != NULL) error = bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->mdio), sc->mdio); return (error); } static etherswitch_info_t* felix_getinfo(device_t dev) { felix_softc_t sc; sc = device_get_softc(dev); return (&sc->info); } static int felix_getconf(device_t dev, etherswitch_conf_t *conf) { felix_softc_t sc; sc = device_get_softc(dev); /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->vlan_mode; return (0); } static int felix_init_vlan(felix_softc_t sc) { int timeout = FELIX_INIT_TIMEOUT; uint32_t reg; int i; /* Flush VLAN table in hardware. */ FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_RESET); do { DELAY(1000); reg = FELIX_RD4(sc, FELIX_ANA_VT); if ((reg & FELIX_ANA_VT_STS) == FELIX_ANA_VT_IDLE) break; } while (timeout-- > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout during VLAN table reset.\n"); return (ETIMEDOUT); } /* Flush VLAN table in sc. */ for (i = 0; i < sc->info.es_nvlangroups; i++) sc->vlans[i] = 0; /* * Make all ports VLAN aware. * Read VID from incoming frames and use it for port grouping * purposes. * Don't set this if pvid is set. */ for (i = 0; i < sc->info.es_nports; i++) { reg = FELIX_ANA_PORT_RD4(sc, i, FELIX_ANA_PORT_VLAN_CFG); if ((reg & FELIX_ANA_PORT_VLAN_CFG_VID_MASK) != 0) continue; reg |= FELIX_ANA_PORT_VLAN_CFG_VID_AWARE; FELIX_ANA_PORT_WR4(sc, i, FELIX_ANA_PORT_VLAN_CFG, reg); } return (0); } static int felix_setconf(device_t dev, etherswitch_conf_t *conf) { felix_softc_t sc; int error; error = 0; /* Set the VLAN mode. 
*/ sc = device_get_softc(dev); FELIX_LOCK(sc); if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) { switch (conf->vlan_mode) { case ETHERSWITCH_VLAN_DOT1Q: sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; sc->info.es_nvlangroups = FELIX_NUM_VLANS; error = felix_init_vlan(sc); break; default: error = EINVAL; } } FELIX_UNLOCK(sc); return (error); } static void felix_lock(device_t dev) { felix_softc_t sc; sc = device_get_softc(dev); FELIX_LOCK_ASSERT(sc, MA_NOTOWNED); FELIX_LOCK(sc); } static void felix_unlock(device_t dev) { felix_softc_t sc; sc = device_get_softc(dev); FELIX_LOCK_ASSERT(sc, MA_OWNED); FELIX_UNLOCK(sc); } static void felix_get_port_cfg(felix_softc_t sc, etherswitch_port_t *p) { uint32_t reg; p->es_flags = 0; reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG); if (reg & FELIX_ANA_PORT_DROP_CFG_TAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPTAGGED; if (reg & FELIX_ANA_PORT_DROP_CFG_UNTAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; reg = FELIX_DEVGMII_PORT_RD4(sc, p->es_port, FELIX_DEVGMII_VLAN_CFG); if (reg & FELIX_DEVGMII_VLAN_CFG_DOUBLE_ENA) p->es_flags |= ETHERSWITCH_PORT_DOUBLE_TAG; reg = FELIX_REW_PORT_RD4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG); if (reg & FELIX_REW_PORT_TAG_CFG_ALL) p->es_flags |= ETHERSWITCH_PORT_ADDTAG; reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG); if (reg & FELIX_ANA_PORT_VLAN_CFG_POP) p->es_flags |= ETHERSWITCH_PORT_STRIPTAGINGRESS; p->es_pvid = reg & FELIX_ANA_PORT_VLAN_CFG_VID_MASK; } static int felix_getport(device_t dev, etherswitch_port_t *p) { struct ifmediareq *ifmr; struct mii_data *mii; felix_softc_t sc; int error; error = 0; sc = device_get_softc(dev); FELIX_LOCK_ASSERT(sc, MA_NOTOWNED); if (p->es_port >= sc->info.es_nports || p->es_port < 0) return (EINVAL); FELIX_LOCK(sc); felix_get_port_cfg(sc, p); if (sc->ports[p->es_port].fixed_port) { ifmr = &p->es_ifmr; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; ifmr->ifm_active = sc->ports[p->es_port].fixed_link_status; ifmr->ifm_current = ifmr->ifm_active; ifmr->ifm_mask = 0; } else { mii = felix_miiforport(sc, p->es_port); error = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); } FELIX_UNLOCK(sc); return (error); } static void felix_set_port_cfg(felix_softc_t sc, etherswitch_port_t *p) { uint32_t reg; reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG); if (p->es_flags & ETHERSWITCH_PORT_DROPTAGGED) reg |= FELIX_ANA_PORT_DROP_CFG_TAGGED; else reg &= ~FELIX_ANA_PORT_DROP_CFG_TAGGED; if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) reg |= FELIX_ANA_PORT_DROP_CFG_UNTAGGED; else reg &= ~FELIX_ANA_PORT_DROP_CFG_UNTAGGED; FELIX_ANA_PORT_WR4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG, reg); reg = FELIX_REW_PORT_RD4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG); if (p->es_flags & ETHERSWITCH_PORT_ADDTAG) reg |= FELIX_REW_PORT_TAG_CFG_ALL; else reg &= ~FELIX_REW_PORT_TAG_CFG_ALL; FELIX_REW_PORT_WR4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG, reg); reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG); if (p->es_flags & ETHERSWITCH_PORT_STRIPTAGINGRESS) reg |= FELIX_ANA_PORT_VLAN_CFG_POP; else reg &= ~FELIX_ANA_PORT_VLAN_CFG_POP; reg &= ~FELIX_ANA_PORT_VLAN_CFG_VID_MASK; reg |= p->es_pvid & FELIX_ANA_PORT_VLAN_CFG_VID_MASK; /* * If port VID is set use it for VLAN classification, * instead of frame VID. * By default the frame tag takes precedence. * Force the switch to ignore it. 
*/ if (p->es_pvid != 0) reg &= ~FELIX_ANA_PORT_VLAN_CFG_VID_AWARE; else reg |= FELIX_ANA_PORT_VLAN_CFG_VID_AWARE; FELIX_ANA_PORT_WR4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG, reg); } static int felix_setport(device_t dev, etherswitch_port_t *p) { felix_softc_t sc; struct mii_data *mii; int error; error = 0; sc = device_get_softc(dev); FELIX_LOCK_ASSERT(sc, MA_NOTOWNED); if (p->es_port >= sc->info.es_nports || p->es_port < 0) return (EINVAL); FELIX_LOCK(sc); felix_set_port_cfg(sc, p); if (felix_is_phyport(sc, p->es_port)) { mii = felix_miiforport(sc, p->es_port); error = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCSIFMEDIA); } FELIX_UNLOCK(sc); return (error); } static int felix_readreg_wrapper(device_t dev, int addr_reg) { felix_softc_t sc; sc = device_get_softc(dev); if (addr_reg > rman_get_size(sc->regs)) return (UINT32_MAX); /* Can't return errors here. */ return (FELIX_RD4(sc, addr_reg)); } static int felix_writereg_wrapper(device_t dev, int addr_reg, int val) { felix_softc_t sc; sc = device_get_softc(dev); if (addr_reg > rman_get_size(sc->regs)) return (EINVAL); FELIX_WR4(sc, addr_reg, val); return (0); } static int felix_readphy(device_t dev, int phy, int reg) { felix_softc_t sc; sc = device_get_softc(dev); return (enetc_mdio_read(sc->mdio, FELIX_MDIO_BASE, phy, reg)); } static int felix_writephy(device_t dev, int phy, int reg, int data) { felix_softc_t sc; sc = device_get_softc(dev); return (enetc_mdio_write(sc->mdio, FELIX_MDIO_BASE, phy, reg, data)); } static int felix_set_dot1q_vlan(felix_softc_t sc, etherswitch_vlangroup_t *vg) { uint32_t reg; int i, vid; vid = vg->es_vid & ETHERSWITCH_VID_MASK; /* Tagged mode is not supported. */ if (vg->es_member_ports != vg->es_untagged_ports) return (EINVAL); /* * Hardware support 4096 groups, but we can't do group_id == vid. * Note that hw_group_id == vid. */ if (vid == 0) { /* Clear VLAN table entry using old VID. */ FELIX_WR4(sc, FELIX_ANA_VTIDX, sc->vlans[vg->es_vlangroup]); FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_WRITE); sc->vlans[vg->es_vlangroup] = 0; return (0); } /* The VID is already used in a different group. */ for (i = 0; i < sc->info.es_nvlangroups; i++) if (i != vg->es_vlangroup && vid == sc->vlans[i]) return (EINVAL); /* This group already uses a different VID. */ if (sc->vlans[vg->es_vlangroup] != 0 && sc->vlans[vg->es_vlangroup] != vid) return (EINVAL); sc->vlans[vg->es_vlangroup] = vid; /* Assign members to the given group. */ reg = vg->es_member_ports & FELIX_ANA_VT_PORTMASK_MASK; reg <<= FELIX_ANA_VT_PORTMASK_SHIFT; reg |= FELIX_ANA_VT_WRITE; FELIX_WR4(sc, FELIX_ANA_VTIDX, vid); FELIX_WR4(sc, FELIX_ANA_VT, reg); /* * According to documentation read and write commands * are instant. * Add a small delay just to be safe. */ mb(); DELAY(100); reg = FELIX_RD4(sc, FELIX_ANA_VT); if ((reg & FELIX_ANA_VT_STS) != FELIX_ANA_VT_IDLE) return (ENXIO); return (0); } static int felix_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { felix_softc_t sc; int error; sc = device_get_softc(dev); FELIX_LOCK(sc); if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) error = felix_set_dot1q_vlan(sc, vg); else error = EINVAL; FELIX_UNLOCK(sc); return (error); } static int felix_get_dot1q_vlan(felix_softc_t sc, etherswitch_vlangroup_t *vg) { uint32_t reg; int vid; vid = sc->vlans[vg->es_vlangroup]; if (vid == 0) return (0); FELIX_WR4(sc, FELIX_ANA_VTIDX, vid); FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_READ); /* * According to documentation read and write commands * are instant. * Add a small delay just to be safe. 
*/ mb(); DELAY(100); reg = FELIX_RD4(sc, FELIX_ANA_VT); if ((reg & FELIX_ANA_VT_STS) != FELIX_ANA_VT_IDLE) return (ENXIO); reg >>= FELIX_ANA_VT_PORTMASK_SHIFT; reg &= FELIX_ANA_VT_PORTMASK_MASK; vg->es_untagged_ports = vg->es_member_ports = reg; vg->es_fid = 0; vg->es_vid = vid | ETHERSWITCH_VID_VALID; return (0); } static int felix_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { felix_softc_t sc; int error; sc = device_get_softc(dev); FELIX_LOCK(sc); if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) error = felix_get_dot1q_vlan(sc, vg); else error = EINVAL; FELIX_UNLOCK(sc); return (error); } static void felix_tick(void *arg) { struct mii_data *mii; felix_softc_t sc; int port; sc = arg; FELIX_LOCK_ASSERT(sc, MA_OWNED); for (port = 0; port < sc->info.es_nports; port++) { if (!felix_is_phyport(sc, port)) continue; mii = felix_miiforport(sc, port); MPASS(mii != NULL); mii_tick(mii); } if (sc->timer_ticks != 0) callout_reset(&sc->tick_callout, sc->timer_ticks, felix_tick, sc); } static int felix_ifmedia_upd(if_t ifp) { struct mii_data *mii; felix_softc_t sc; sc = if_getsoftc(ifp); mii = felix_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void felix_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { felix_softc_t sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = felix_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static bool felix_is_phyport(felix_softc_t sc, int port) { return (!sc->ports[port].fixed_port); } static struct mii_data* felix_miiforport(felix_softc_t sc, unsigned int port) { if (!felix_is_phyport(sc, port)) return (NULL); return (device_get_softc(sc->ports[port].miibus)); } diff --git a/sys/dev/firewire/fwohci_pci.c b/sys/dev/firewire/fwohci_pci.c index 591503728093..a6f9f50701f0 100644 --- a/sys/dev/firewire/fwohci_pci.c +++ b/sys/dev/firewire/fwohci_pci.c @@ -1,470 +1,465 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. SHimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include #define BOUNCE_BUFFER_TEST 0 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int fwohci_pci_attach(device_t self); static int fwohci_pci_detach(device_t self); /* * The probe routine. */ static int fwohci_pci_probe(device_t dev) { uint32_t id; id = pci_get_devid(dev); if (id == (FW_VENDORID_NATSEMI | FW_DEVICE_CS4210)) { device_set_desc(dev, "National Semiconductor CS4210"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD861)) { device_set_desc(dev, "NEC uPD72861"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD871)) { device_set_desc(dev, "NEC uPD72871/2"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD72870)) { device_set_desc(dev, "NEC uPD72870"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD72873)) { device_set_desc(dev, "NEC uPD72873"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD72874)) { device_set_desc(dev, "NEC uPD72874"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_SIS | FW_DEVICE_7007)) { /* It has no real identifier, using device id. */ device_set_desc(dev, "SiS 7007"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB22)) { device_set_desc(dev, "Texas Instruments TSB12LV22"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB23)) { device_set_desc(dev, "Texas Instruments TSB12LV23"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB26)) { device_set_desc(dev, "Texas Instruments TSB12LV26"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB43)) { device_set_desc(dev, "Texas Instruments TSB43AA22"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB43A)) { device_set_desc(dev, "Texas Instruments TSB43AB22/A"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB43AB21)) { device_set_desc(dev, "Texas Instruments TSB43AB21/A/AI/A-EP"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB43AB23)) { device_set_desc(dev, "Texas Instruments TSB43AB23"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB82AA2)) { device_set_desc(dev, "Texas Instruments TSB82AA2"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TIPCI4450)) { device_set_desc(dev, "Texas Instruments PCI4450"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TIPCI4410A)) { device_set_desc(dev, "Texas Instruments PCI4410A"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TIPCI4451)) { device_set_desc(dev, "Texas Instruments PCI4451"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_SONY | FW_DEVICE_CXD1947)) { device_printf(dev, "Sony i.LINK (CXD1947) not supported\n"); return ENXIO; } if (id == (FW_VENDORID_SONY | FW_DEVICE_CXD3222)) { device_set_desc(dev, "Sony i.LINK (CXD3222)"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_VIA | 
FW_DEVICE_VT6306)) { device_set_desc(dev, "VIA Fire II (VT6306)"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_RICOH | FW_DEVICE_R5C551)) { device_set_desc(dev, "Ricoh R5C551"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_RICOH | FW_DEVICE_R5C552)) { device_set_desc(dev, "Ricoh R5C552"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_APPLE | FW_DEVICE_PANGEA)) { device_set_desc(dev, "Apple Pangea"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_APPLE | FW_DEVICE_UNINORTH2)) { device_set_desc(dev, "Apple UniNorth 2"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_LUCENT | FW_DEVICE_FW322)) { device_set_desc(dev, "Lucent FW322/323"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_INTEL | FW_DEVICE_82372FB)) { device_set_desc(dev, "Intel 82372FB"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_ADAPTEC | FW_DEVICE_AIC5800)) { device_set_desc(dev, "Adaptec AHA-894x/AIC-5800"); return BUS_PROBE_DEFAULT; } if (pci_get_class(dev) == PCIC_SERIALBUS && pci_get_subclass(dev) == PCIS_SERIALBUS_FW && pci_get_progif(dev) == PCI_INTERFACE_OHCI) { if (bootverbose) device_printf(dev, "vendor=%x, dev=%x\n", pci_get_vendor(dev), pci_get_device(dev)); device_set_desc(dev, "1394 Open Host Controller Interface"); return BUS_PROBE_DEFAULT; } return ENXIO; } static int fwohci_pci_init(device_t self) { int olatency, latency, ocache_line, cache_line; uint16_t cmd; cmd = pci_read_config(self, PCIR_COMMAND, 2); cmd |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN; #if 1 /* for broken hardware */ cmd &= ~PCIM_CMD_MWRICEN; #endif pci_write_config(self, PCIR_COMMAND, cmd, 2); latency = olatency = pci_read_config(self, PCIR_LATTIMER, 1); #define DEF_LATENCY 0x20 if (olatency < DEF_LATENCY) { latency = DEF_LATENCY; pci_write_config(self, PCIR_LATTIMER, latency, 1); } cache_line = ocache_line = pci_read_config(self, PCIR_CACHELNSZ, 1); #define DEF_CACHE_LINE 8 if (ocache_line < DEF_CACHE_LINE) { cache_line = DEF_CACHE_LINE; pci_write_config(self, PCIR_CACHELNSZ, cache_line, 1); } if (firewire_debug) { device_printf(self, "latency timer %d -> %d.\n", olatency, latency); device_printf(self, "cache size %d -> %d.\n", ocache_line, cache_line); } return 0; } static int fwohci_pci_attach(device_t self) { fwohci_softc_t *sc = device_get_softc(self); int err; int rid; #if 0 if (bootverbose) firewire_debug = bootverbose; #endif mtx_init(FW_GMTX(&sc->fc), "firewire", NULL, MTX_DEF); fwohci_pci_init(self); rid = PCI_CBMEM; sc->bsr = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->bsr) { device_printf(self, "Could not map memory\n"); return ENXIO; } sc->bst = rman_get_bustag(sc->bsr); sc->bsh = rman_get_bushandle(sc->bsr); rid = 0; sc->irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(self, "Could not allocate irq\n"); fwohci_pci_detach(self); return ENXIO; } err = bus_setup_intr(self, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, (driver_intr_t *) fwohci_intr, sc, &sc->ih); if (err) { device_printf(self, "Could not setup irq, %d\n", err); fwohci_pci_detach(self); return ENXIO; } err = bus_dma_tag_create( /*parent*/bus_get_dma_tag(self), /*alignment*/1, /*boundary*/0, #if BOUNCE_BUFFER_TEST /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, #else /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, #endif /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/0x100000, /*nsegments*/0x20, /*maxsegsz*/0x8000, /*flags*/BUS_DMA_ALLOCNOW, /*lockfunc*/busdma_lock_mutex, /*lockarg*/FW_GMTX(&sc->fc), &sc->fc.dmat); if 
(err != 0) { device_printf(self, "fwohci_pci_attach: Could not allocate DMA " "tag - error %d\n", err); fwohci_pci_detach(self); return (ENOMEM); } err = fwohci_init(sc, self); if (err != 0) { device_printf(self, "fwohci_init failed with err=%d\n", err); fwohci_pci_detach(self); return EIO; } /* probe and attach a child device(firewire) */ bus_identify_children(self); bus_attach_children(self); return 0; } static int fwohci_pci_detach(device_t self) { fwohci_softc_t *sc = device_get_softc(self); int s; s = splfw(); if (sc->bsr) fwohci_stop(sc, self); bus_generic_detach(self); - if (sc->fc.bdev) { - device_delete_child(self, sc->fc.bdev); - sc->fc.bdev = NULL; - } - /* disable interrupts that might have been switched on */ if (sc->bst && sc->bsh) bus_space_write_4(sc->bst, sc->bsh, FWOHCI_INTMASKCLR, OHCI_INT_EN); if (sc->irq_res) { int err; if (sc->ih) { err = bus_teardown_intr(self, sc->irq_res, sc->ih); if (err) device_printf(self, "Could not tear down irq, %d\n", err); sc->ih = NULL; } bus_release_resource(self, SYS_RES_IRQ, 0, sc->irq_res); sc->irq_res = NULL; } if (sc->bsr) { bus_release_resource(self, SYS_RES_MEMORY, PCI_CBMEM, sc->bsr); sc->bsr = NULL; sc->bst = 0; sc->bsh = 0; } fwohci_detach(sc, self); mtx_destroy(FW_GMTX(&sc->fc)); splx(s); return 0; } static int fwohci_pci_suspend(device_t dev) { fwohci_softc_t *sc = device_get_softc(dev); int err; device_printf(dev, "fwohci_pci_suspend\n"); err = bus_generic_suspend(dev); if (err) return err; fwohci_stop(sc, dev); return 0; } static int fwohci_pci_resume(device_t dev) { fwohci_softc_t *sc = device_get_softc(dev); fwohci_pci_init(dev); fwohci_resume(sc, dev); return 0; } static int fwohci_pci_shutdown(device_t dev) { fwohci_softc_t *sc = device_get_softc(dev); bus_generic_shutdown(dev); fwohci_stop(sc, dev); return 0; } static device_t fwohci_pci_add_child(device_t dev, u_int order, const char *name, int unit) { struct fwohci_softc *sc; device_t child; int err = 0; sc = (struct fwohci_softc *)device_get_softc(dev); child = device_add_child(dev, name, unit); if (child == NULL) return (child); sc->fc.bdev = child; device_set_ivars(child, &sc->fc); err = device_probe_and_attach(child); if (err) { device_printf(dev, "probe_and_attach failed with err=%d\n", err); fwohci_pci_detach(dev); device_delete_child(dev, child); return NULL; } /* XXX * Clear the bus reset event flag to start transactions even when * interrupt is disabled during the boot process. */ if (cold) { int s; DELAY(250); /* 2 cycles */ s = splfw(); fwohci_poll(&sc->fc, 0, -1); splx(s); } return (child); } static device_method_t fwohci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fwohci_pci_probe), DEVMETHOD(device_attach, fwohci_pci_attach), DEVMETHOD(device_detach, fwohci_pci_detach), DEVMETHOD(device_suspend, fwohci_pci_suspend), DEVMETHOD(device_resume, fwohci_pci_resume), DEVMETHOD(device_shutdown, fwohci_pci_shutdown), /* Bus interface */ DEVMETHOD(bus_add_child, fwohci_pci_add_child), DEVMETHOD_END }; static driver_t fwohci_driver = { "fwohci", fwohci_methods, sizeof(fwohci_softc_t), }; #ifdef FWOHCI_MODULE MODULE_DEPEND(fwohci, firewire, 1, 1, 1); #endif DRIVER_MODULE(fwohci, pci, fwohci_driver, 0, 0); diff --git a/sys/dev/glxiic/glxiic.c b/sys/dev/glxiic/glxiic.c index 5dfea9acbe4a..ddaa77b6b73c 100644 --- a/sys/dev/glxiic/glxiic.c +++ b/sys/dev/glxiic/glxiic.c @@ -1,1080 +1,1077 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 Henrik Brix Andersen * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include /* * AMD Geode LX CS5536 System Management Bus controller. * * Although AMD refers to this device as an SMBus controller, it * really is an I2C controller (It lacks SMBus ALERT# and Alert * Response support). * * The driver is implemented as an interrupt-driven state machine, * supporting both master and slave mode. */ #include #include #include #include #include #include #include #include #ifdef GLXIIC_DEBUG #include #endif #include #include #include #include #include #include #include #include "iicbus_if.h" /* CS5536 PCI-ISA ID. */ #define GLXIIC_CS5536_DEV_ID 0x20901022 /* MSRs. */ #define GLXIIC_MSR_PIC_YSEL_HIGH 0x51400021 /* Bus speeds. */ #define GLXIIC_SLOW 0x0258 /* 10 kHz. */ #define GLXIIC_FAST 0x0078 /* 50 kHz. */ #define GLXIIC_FASTEST 0x003c /* 100 kHz. */ /* Default bus activity timeout in milliseconds. */ #define GLXIIC_DEFAULT_TIMEOUT 35 /* GPIO register offsets. */ #define GLXIIC_GPIOL_OUT_AUX1_SEL 0x10 #define GLXIIC_GPIOL_IN_AUX1_SEL 0x34 /* GPIO 14 (SMB_CLK) and 15 (SMB_DATA) bitmasks. */ #define GLXIIC_GPIO_14_15_ENABLE 0x0000c000 #define GLXIIC_GPIO_14_15_DISABLE 0xc0000000 /* SMB register offsets. 
*/ #define GLXIIC_SMB_SDA 0x00 #define GLXIIC_SMB_STS 0x01 #define GLXIIC_SMB_STS_SLVSTP_BIT (1 << 7) #define GLXIIC_SMB_STS_SDAST_BIT (1 << 6) #define GLXIIC_SMB_STS_BER_BIT (1 << 5) #define GLXIIC_SMB_STS_NEGACK_BIT (1 << 4) #define GLXIIC_SMB_STS_STASTR_BIT (1 << 3) #define GLXIIC_SMB_STS_NMATCH_BIT (1 << 2) #define GLXIIC_SMB_STS_MASTER_BIT (1 << 1) #define GLXIIC_SMB_STS_XMIT_BIT (1 << 0) #define GLXIIC_SMB_CTRL_STS 0x02 #define GLXIIC_SMB_CTRL_STS_TGSCL_BIT (1 << 5) #define GLXIIC_SMB_CTRL_STS_TSDA_BIT (1 << 4) #define GLXIIC_SMB_CTRL_STS_GCMTCH_BIT (1 << 3) #define GLXIIC_SMB_CTRL_STS_MATCH_BIT (1 << 2) #define GLXIIC_SMB_CTRL_STS_BB_BIT (1 << 1) #define GLXIIC_SMB_CTRL_STS_BUSY_BIT (1 << 0) #define GLXIIC_SMB_CTRL1 0x03 #define GLXIIC_SMB_CTRL1_STASTRE_BIT (1 << 7) #define GLXIIC_SMB_CTRL1_NMINTE_BIT (1 << 6) #define GLXIIC_SMB_CTRL1_GCMEN_BIT (1 << 5) #define GLXIIC_SMB_CTRL1_ACK_BIT (1 << 4) #define GLXIIC_SMB_CTRL1_INTEN_BIT (1 << 2) #define GLXIIC_SMB_CTRL1_STOP_BIT (1 << 1) #define GLXIIC_SMB_CTRL1_START_BIT (1 << 0) #define GLXIIC_SMB_ADDR 0x04 #define GLXIIC_SMB_ADDR_SAEN_BIT (1 << 7) #define GLXIIC_SMB_CTRL2 0x05 #define GLXIIC_SMB_CTRL2_EN_BIT (1 << 0) #define GLXIIC_SMB_CTRL3 0x06 typedef enum { GLXIIC_STATE_IDLE, GLXIIC_STATE_SLAVE_TX, GLXIIC_STATE_SLAVE_RX, GLXIIC_STATE_MASTER_ADDR, GLXIIC_STATE_MASTER_TX, GLXIIC_STATE_MASTER_RX, GLXIIC_STATE_MASTER_STOP, GLXIIC_STATE_MAX, } glxiic_state_t; struct glxiic_softc { device_t dev; /* Myself. */ device_t iicbus; /* IIC bus. */ struct mtx mtx; /* Lock. */ glxiic_state_t state; /* Driver state. */ struct callout callout; /* Driver state timeout callout. */ int timeout; /* Driver state timeout (ms). */ int smb_rid; /* SMB controller resource ID. */ struct resource *smb_res; /* SMB controller resource. */ int gpio_rid; /* GPIO resource ID. */ struct resource *gpio_res; /* GPIO resource. */ int irq_rid; /* IRQ resource ID. */ struct resource *irq_res; /* IRQ resource. */ void *irq_handler; /* IRQ handler cookie. */ int old_irq; /* IRQ mapped by board firmware. */ struct iic_msg *msg; /* Current master mode message. */ uint32_t nmsgs; /* Number of messages remaining. */ uint8_t *data; /* Current master mode data byte. */ uint16_t ndata; /* Number of data bytes remaining. */ int error; /* Last master mode error. */ uint8_t addr; /* Own address. */ uint16_t sclfrq; /* Bus frequency. */ }; #ifdef GLXIIC_DEBUG #define GLXIIC_DEBUG_LOG(fmt, args...) \ log(LOG_DEBUG, "%s: " fmt "\n" , __func__ , ## args) #else #define GLXIIC_DEBUG_LOG(fmt, args...) 
#endif #define GLXIIC_SCLFRQ(n) ((n << 1)) #define GLXIIC_SMBADDR(n) ((n >> 1)) #define GLXIIC_SMB_IRQ_TO_MAP(n) ((n << 16)) #define GLXIIC_MAP_TO_SMB_IRQ(n) ((n >> 16) & 0xf) #define GLXIIC_LOCK(_sc) mtx_lock(&_sc->mtx) #define GLXIIC_UNLOCK(_sc) mtx_unlock(&_sc->mtx) #define GLXIIC_LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "glxiic", MTX_DEF) #define GLXIIC_SLEEP(_sc) \ mtx_sleep(_sc, &_sc->mtx, IICPRI, "glxiic", 0) #define GLXIIC_WAKEUP(_sc) wakeup(_sc); #define GLXIIC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx); #define GLXIIC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED); typedef int (glxiic_state_callback_t)(struct glxiic_softc *sc, uint8_t status); static glxiic_state_callback_t glxiic_state_idle_callback; static glxiic_state_callback_t glxiic_state_slave_tx_callback; static glxiic_state_callback_t glxiic_state_slave_rx_callback; static glxiic_state_callback_t glxiic_state_master_addr_callback; static glxiic_state_callback_t glxiic_state_master_tx_callback; static glxiic_state_callback_t glxiic_state_master_rx_callback; static glxiic_state_callback_t glxiic_state_master_stop_callback; struct glxiic_state_table_entry { glxiic_state_callback_t *callback; boolean_t master; }; typedef struct glxiic_state_table_entry glxiic_state_table_entry_t; static glxiic_state_table_entry_t glxiic_state_table[GLXIIC_STATE_MAX] = { [GLXIIC_STATE_IDLE] = { .callback = &glxiic_state_idle_callback, .master = FALSE, }, [GLXIIC_STATE_SLAVE_TX] = { .callback = &glxiic_state_slave_tx_callback, .master = FALSE, }, [GLXIIC_STATE_SLAVE_RX] = { .callback = &glxiic_state_slave_rx_callback, .master = FALSE, }, [GLXIIC_STATE_MASTER_ADDR] = { .callback = &glxiic_state_master_addr_callback, .master = TRUE, }, [GLXIIC_STATE_MASTER_TX] = { .callback = &glxiic_state_master_tx_callback, .master = TRUE, }, [GLXIIC_STATE_MASTER_RX] = { .callback = &glxiic_state_master_rx_callback, .master = TRUE, }, [GLXIIC_STATE_MASTER_STOP] = { .callback = &glxiic_state_master_stop_callback, .master = TRUE, }, }; static void glxiic_identify(driver_t *driver, device_t parent); static int glxiic_probe(device_t dev); static int glxiic_attach(device_t dev); static int glxiic_detach(device_t dev); static uint8_t glxiic_read_status_locked(struct glxiic_softc *sc); static void glxiic_stop_locked(struct glxiic_softc *sc); static void glxiic_timeout(void *arg); static void glxiic_start_timeout_locked(struct glxiic_softc *sc); static void glxiic_set_state_locked(struct glxiic_softc *sc, glxiic_state_t state); static int glxiic_handle_slave_match_locked(struct glxiic_softc *sc, uint8_t status); static void glxiic_intr(void *arg); static int glxiic_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr); static int glxiic_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs); static void glxiic_smb_map_interrupt(int irq); static void glxiic_gpio_enable(struct glxiic_softc *sc); static void glxiic_gpio_disable(struct glxiic_softc *sc); static void glxiic_smb_enable(struct glxiic_softc *sc, uint8_t speed, uint8_t addr); static void glxiic_smb_disable(struct glxiic_softc *sc); static device_method_t glxiic_methods[] = { DEVMETHOD(device_identify, glxiic_identify), DEVMETHOD(device_probe, glxiic_probe), DEVMETHOD(device_attach, glxiic_attach), DEVMETHOD(device_detach, glxiic_detach), DEVMETHOD(iicbus_reset, glxiic_reset), DEVMETHOD(iicbus_transfer, glxiic_transfer), DEVMETHOD(iicbus_callback, iicbus_null_callback), { 0, 0 } }; static driver_t glxiic_driver = { "glxiic", glxiic_methods, sizeof(struct 
glxiic_softc), }; DRIVER_MODULE(glxiic, isab, glxiic_driver, 0, 0); DRIVER_MODULE(iicbus, glxiic, iicbus_driver, 0, 0); MODULE_DEPEND(glxiic, iicbus, 1, 1, 1); static void glxiic_identify(driver_t *driver, device_t parent) { /* Prevent child from being added more than once. */ if (device_find_child(parent, driver->name, -1) != NULL) return; if (pci_get_devid(parent) == GLXIIC_CS5536_DEV_ID) { if (device_add_child(parent, driver->name, -1) == NULL) device_printf(parent, "Could not add glxiic child\n"); } } static int glxiic_probe(device_t dev) { if (resource_disabled("glxiic", device_get_unit(dev))) return (ENXIO); device_set_desc(dev, "AMD Geode CS5536 SMBus controller"); return (BUS_PROBE_DEFAULT); } static int glxiic_attach(device_t dev) { struct glxiic_softc *sc; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; int error, irq, unit; uint32_t irq_map; sc = device_get_softc(dev); sc->dev = dev; sc->state = GLXIIC_STATE_IDLE; error = 0; GLXIIC_LOCK_INIT(sc); callout_init_mtx(&sc->callout, &sc->mtx, 0); sc->smb_rid = PCIR_BAR(0); sc->smb_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->smb_rid, RF_ACTIVE); if (sc->smb_res == NULL) { device_printf(dev, "Could not allocate SMBus I/O port\n"); error = ENXIO; goto out; } sc->gpio_rid = PCIR_BAR(1); sc->gpio_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->gpio_rid, RF_SHAREABLE | RF_ACTIVE); if (sc->gpio_res == NULL) { device_printf(dev, "Could not allocate GPIO I/O port\n"); error = ENXIO; goto out; } /* Ensure the controller is not enabled by firmware. */ glxiic_smb_disable(sc); /* Read the existing IRQ map. */ irq_map = rdmsr(GLXIIC_MSR_PIC_YSEL_HIGH); sc->old_irq = GLXIIC_MAP_TO_SMB_IRQ(irq_map); unit = device_get_unit(dev); if (resource_int_value("glxiic", unit, "irq", &irq) == 0) { if (irq < 1 || irq > 15) { device_printf(dev, "Bad value %d for glxiic.%d.irq\n", irq, unit); error = ENXIO; goto out; } if (bootverbose) device_printf(dev, "Using irq %d set by hint\n", irq); } else if (sc->old_irq != 0) { if (bootverbose) device_printf(dev, "Using irq %d set by firmware\n", irq); irq = sc->old_irq; } else { device_printf(dev, "No irq mapped by firmware"); printf(" and no glxiic.%d.irq hint provided\n", unit); error = ENXIO; goto out; } /* Map the SMBus interrupt to the requested legacy IRQ. */ glxiic_smb_map_interrupt(irq); sc->irq_rid = 0; sc->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irq_rid, irq, irq, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Could not allocate IRQ %d\n", irq); error = ENXIO; goto out; } error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, glxiic_intr, sc, &(sc->irq_handler)); if (error != 0) { device_printf(dev, "Could not setup IRQ handler\n"); error = ENXIO; goto out; } if ((sc->iicbus = device_add_child(dev, "iicbus", -1)) == NULL) { device_printf(dev, "Could not allocate iicbus instance\n"); error = ENXIO; goto out; } ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); sc->timeout = GLXIIC_DEFAULT_TIMEOUT; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "timeout", CTLFLAG_RWTUN, &sc->timeout, 0, "activity timeout in ms"); glxiic_gpio_enable(sc); glxiic_smb_enable(sc, IIC_FASTEST, 0); /* Probe and attach the iicbus when interrupts are available. 
*/ bus_delayed_attach_children(dev); out: if (error != 0) { callout_drain(&sc->callout); if (sc->iicbus != NULL) device_delete_child(dev, sc->iicbus); if (sc->smb_res != NULL) { glxiic_smb_disable(sc); bus_release_resource(dev, SYS_RES_IOPORT, sc->smb_rid, sc->smb_res); } if (sc->gpio_res != NULL) { glxiic_gpio_disable(sc); bus_release_resource(dev, SYS_RES_IOPORT, sc->gpio_rid, sc->gpio_res); } if (sc->irq_handler != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_handler); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); /* Restore the old SMBus interrupt mapping. */ glxiic_smb_map_interrupt(sc->old_irq); GLXIIC_LOCK_DESTROY(sc); } return (error); } static int glxiic_detach(device_t dev) { struct glxiic_softc *sc; int error; sc = device_get_softc(dev); error = bus_generic_detach(dev); if (error != 0) - goto out; - if (sc->iicbus != NULL) - error = device_delete_child(dev, sc->iicbus); + return (error); -out: callout_drain(&sc->callout); if (sc->smb_res != NULL) { glxiic_smb_disable(sc); bus_release_resource(dev, SYS_RES_IOPORT, sc->smb_rid, sc->smb_res); } if (sc->gpio_res != NULL) { glxiic_gpio_disable(sc); bus_release_resource(dev, SYS_RES_IOPORT, sc->gpio_rid, sc->gpio_res); } if (sc->irq_handler != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_handler); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); /* Restore the old SMBus interrupt mapping. */ glxiic_smb_map_interrupt(sc->old_irq); GLXIIC_LOCK_DESTROY(sc); - return (error); + return (0); } static uint8_t glxiic_read_status_locked(struct glxiic_softc *sc) { uint8_t status; GLXIIC_ASSERT_LOCKED(sc); status = bus_read_1(sc->smb_res, GLXIIC_SMB_STS); /* Clear all status flags except SDAST and STASTR after reading. */ bus_write_1(sc->smb_res, GLXIIC_SMB_STS, (GLXIIC_SMB_STS_SLVSTP_BIT | GLXIIC_SMB_STS_BER_BIT | GLXIIC_SMB_STS_NEGACK_BIT | GLXIIC_SMB_STS_NMATCH_BIT)); return (status); } static void glxiic_stop_locked(struct glxiic_softc *sc) { uint8_t status, ctrl1; GLXIIC_ASSERT_LOCKED(sc); status = glxiic_read_status_locked(sc); ctrl1 = bus_read_1(sc->smb_res, GLXIIC_SMB_CTRL1); bus_write_1(sc->smb_res, GLXIIC_SMB_CTRL1, ctrl1 | GLXIIC_SMB_CTRL1_STOP_BIT); /* * Perform a dummy read of SDA in master receive mode to clear * SDAST if set. 
*/ if ((status & GLXIIC_SMB_STS_XMIT_BIT) == 0 && (status & GLXIIC_SMB_STS_SDAST_BIT) != 0) bus_read_1(sc->smb_res, GLXIIC_SMB_SDA); /* Check stall after start bit and clear if needed */ if ((status & GLXIIC_SMB_STS_STASTR_BIT) != 0) { bus_write_1(sc->smb_res, GLXIIC_SMB_STS, GLXIIC_SMB_STS_STASTR_BIT); } } static void glxiic_timeout(void *arg) { struct glxiic_softc *sc; uint8_t error; sc = (struct glxiic_softc *)arg; GLXIIC_DEBUG_LOG("timeout in state %d", sc->state); if (glxiic_state_table[sc->state].master) { sc->error = IIC_ETIMEOUT; GLXIIC_WAKEUP(sc); } else { error = IIC_ETIMEOUT; iicbus_intr(sc->iicbus, INTR_ERROR, &error); } glxiic_smb_disable(sc); glxiic_smb_enable(sc, IIC_UNKNOWN, sc->addr); glxiic_set_state_locked(sc, GLXIIC_STATE_IDLE); } static void glxiic_start_timeout_locked(struct glxiic_softc *sc) { GLXIIC_ASSERT_LOCKED(sc); callout_reset_sbt(&sc->callout, SBT_1MS * sc->timeout, 0, glxiic_timeout, sc, 0); } static void glxiic_set_state_locked(struct glxiic_softc *sc, glxiic_state_t state) { GLXIIC_ASSERT_LOCKED(sc); if (state == GLXIIC_STATE_IDLE) callout_stop(&sc->callout); else if (sc->timeout > 0) glxiic_start_timeout_locked(sc); sc->state = state; } static int glxiic_handle_slave_match_locked(struct glxiic_softc *sc, uint8_t status) { uint8_t ctrl_sts, addr; GLXIIC_ASSERT_LOCKED(sc); ctrl_sts = bus_read_1(sc->smb_res, GLXIIC_SMB_CTRL_STS); if ((ctrl_sts & GLXIIC_SMB_CTRL_STS_MATCH_BIT) != 0) { if ((status & GLXIIC_SMB_STS_XMIT_BIT) != 0) { addr = sc->addr | LSB; glxiic_set_state_locked(sc, GLXIIC_STATE_SLAVE_TX); } else { addr = sc->addr & ~LSB; glxiic_set_state_locked(sc, GLXIIC_STATE_SLAVE_RX); } iicbus_intr(sc->iicbus, INTR_START, &addr); } else if ((ctrl_sts & GLXIIC_SMB_CTRL_STS_GCMTCH_BIT) != 0) { addr = 0; glxiic_set_state_locked(sc, GLXIIC_STATE_SLAVE_RX); iicbus_intr(sc->iicbus, INTR_GENERAL, &addr); } else { GLXIIC_DEBUG_LOG("unknown slave match"); return (IIC_ESTATUS); } return (IIC_NOERR); } static int glxiic_state_idle_callback(struct glxiic_softc *sc, uint8_t status) { GLXIIC_ASSERT_LOCKED(sc); if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) { GLXIIC_DEBUG_LOG("bus error in idle"); return (IIC_EBUSERR); } if ((status & GLXIIC_SMB_STS_NMATCH_BIT) != 0) { return (glxiic_handle_slave_match_locked(sc, status)); } return (IIC_NOERR); } static int glxiic_state_slave_tx_callback(struct glxiic_softc *sc, uint8_t status) { uint8_t data; GLXIIC_ASSERT_LOCKED(sc); if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) { GLXIIC_DEBUG_LOG("bus error in slave tx"); return (IIC_EBUSERR); } if ((status & GLXIIC_SMB_STS_SLVSTP_BIT) != 0) { iicbus_intr(sc->iicbus, INTR_STOP, NULL); glxiic_set_state_locked(sc, GLXIIC_STATE_IDLE); return (IIC_NOERR); } if ((status & GLXIIC_SMB_STS_NEGACK_BIT) != 0) { iicbus_intr(sc->iicbus, INTR_NOACK, NULL); return (IIC_NOERR); } if ((status & GLXIIC_SMB_STS_NMATCH_BIT) != 0) { /* Handle repeated start in slave mode. 
*/ return (glxiic_handle_slave_match_locked(sc, status)); } if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) { GLXIIC_DEBUG_LOG("not awaiting data in slave tx"); return (IIC_ESTATUS); } iicbus_intr(sc->iicbus, INTR_TRANSMIT, &data); bus_write_1(sc->smb_res, GLXIIC_SMB_SDA, data); glxiic_start_timeout_locked(sc); return (IIC_NOERR); } static int glxiic_state_slave_rx_callback(struct glxiic_softc *sc, uint8_t status) { uint8_t data; GLXIIC_ASSERT_LOCKED(sc); if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) { GLXIIC_DEBUG_LOG("bus error in slave rx"); return (IIC_EBUSERR); } if ((status & GLXIIC_SMB_STS_SLVSTP_BIT) != 0) { iicbus_intr(sc->iicbus, INTR_STOP, NULL); glxiic_set_state_locked(sc, GLXIIC_STATE_IDLE); return (IIC_NOERR); } if ((status & GLXIIC_SMB_STS_NMATCH_BIT) != 0) { /* Handle repeated start in slave mode. */ return (glxiic_handle_slave_match_locked(sc, status)); } if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) { GLXIIC_DEBUG_LOG("no pending data in slave rx"); return (IIC_ESTATUS); } data = bus_read_1(sc->smb_res, GLXIIC_SMB_SDA); iicbus_intr(sc->iicbus, INTR_RECEIVE, &data); glxiic_start_timeout_locked(sc); return (IIC_NOERR); } static int glxiic_state_master_addr_callback(struct glxiic_softc *sc, uint8_t status) { uint8_t slave; uint8_t ctrl1; GLXIIC_ASSERT_LOCKED(sc); if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) { GLXIIC_DEBUG_LOG("bus error after master start"); return (IIC_EBUSERR); } if ((status & GLXIIC_SMB_STS_MASTER_BIT) == 0) { GLXIIC_DEBUG_LOG("not bus master after master start"); return (IIC_ESTATUS); } if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) { GLXIIC_DEBUG_LOG("not awaiting address in master addr"); return (IIC_ESTATUS); } if ((sc->msg->flags & IIC_M_RD) != 0) { slave = sc->msg->slave | LSB; glxiic_set_state_locked(sc, GLXIIC_STATE_MASTER_RX); } else { slave = sc->msg->slave & ~LSB; glxiic_set_state_locked(sc, GLXIIC_STATE_MASTER_TX); } sc->data = sc->msg->buf; sc->ndata = sc->msg->len; /* Handle address-only transfer. */ if (sc->ndata == 0) glxiic_set_state_locked(sc, GLXIIC_STATE_MASTER_STOP); bus_write_1(sc->smb_res, GLXIIC_SMB_SDA, slave); if ((sc->msg->flags & IIC_M_RD) != 0 && sc->ndata == 1) { /* Last byte from slave, set NACK. 
*/ ctrl1 = bus_read_1(sc->smb_res, GLXIIC_SMB_CTRL1); bus_write_1(sc->smb_res, GLXIIC_SMB_CTRL1, ctrl1 | GLXIIC_SMB_CTRL1_ACK_BIT); } return (IIC_NOERR); } static int glxiic_state_master_tx_callback(struct glxiic_softc *sc, uint8_t status) { GLXIIC_ASSERT_LOCKED(sc); if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) { GLXIIC_DEBUG_LOG("bus error in master tx"); return (IIC_EBUSERR); } if ((status & GLXIIC_SMB_STS_MASTER_BIT) == 0) { GLXIIC_DEBUG_LOG("not bus master in master tx"); return (IIC_ESTATUS); } if ((status & GLXIIC_SMB_STS_NEGACK_BIT) != 0) { GLXIIC_DEBUG_LOG("slave nack in master tx"); return (IIC_ENOACK); } if ((status & GLXIIC_SMB_STS_STASTR_BIT) != 0) { bus_write_1(sc->smb_res, GLXIIC_SMB_STS, GLXIIC_SMB_STS_STASTR_BIT); } if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) { GLXIIC_DEBUG_LOG("not awaiting data in master tx"); return (IIC_ESTATUS); } bus_write_1(sc->smb_res, GLXIIC_SMB_SDA, *sc->data++); if (--sc->ndata == 0) glxiic_set_state_locked(sc, GLXIIC_STATE_MASTER_STOP); else glxiic_start_timeout_locked(sc); return (IIC_NOERR); } static int glxiic_state_master_rx_callback(struct glxiic_softc *sc, uint8_t status) { uint8_t ctrl1; GLXIIC_ASSERT_LOCKED(sc); if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) { GLXIIC_DEBUG_LOG("bus error in master rx"); return (IIC_EBUSERR); } if ((status & GLXIIC_SMB_STS_MASTER_BIT) == 0) { GLXIIC_DEBUG_LOG("not bus master in master rx"); return (IIC_ESTATUS); } if ((status & GLXIIC_SMB_STS_NEGACK_BIT) != 0) { GLXIIC_DEBUG_LOG("slave nack in rx"); return (IIC_ENOACK); } if ((status & GLXIIC_SMB_STS_STASTR_BIT) != 0) { /* Bus is stalled, clear and wait for data. */ bus_write_1(sc->smb_res, GLXIIC_SMB_STS, GLXIIC_SMB_STS_STASTR_BIT); return (IIC_NOERR); } if ((status & GLXIIC_SMB_STS_SDAST_BIT) == 0) { GLXIIC_DEBUG_LOG("no pending data in master rx"); return (IIC_ESTATUS); } *sc->data++ = bus_read_1(sc->smb_res, GLXIIC_SMB_SDA); if (--sc->ndata == 0) { /* Proceed with stop on reading last byte. */ glxiic_set_state_locked(sc, GLXIIC_STATE_MASTER_STOP); return (glxiic_state_table[sc->state].callback(sc, status)); } if (sc->ndata == 1) { /* Last byte from slave, set NACK. */ ctrl1 = bus_read_1(sc->smb_res, GLXIIC_SMB_CTRL1); bus_write_1(sc->smb_res, GLXIIC_SMB_CTRL1, ctrl1 | GLXIIC_SMB_CTRL1_ACK_BIT); } glxiic_start_timeout_locked(sc); return (IIC_NOERR); } static int glxiic_state_master_stop_callback(struct glxiic_softc *sc, uint8_t status) { uint8_t ctrl1; GLXIIC_ASSERT_LOCKED(sc); if ((status & GLXIIC_SMB_STS_BER_BIT) != 0) { GLXIIC_DEBUG_LOG("bus error in master stop"); return (IIC_EBUSERR); } if ((status & GLXIIC_SMB_STS_MASTER_BIT) == 0) { GLXIIC_DEBUG_LOG("not bus master in master stop"); return (IIC_ESTATUS); } if ((status & GLXIIC_SMB_STS_NEGACK_BIT) != 0) { GLXIIC_DEBUG_LOG("slave nack in master stop"); return (IIC_ENOACK); } if (--sc->nmsgs > 0) { /* Start transfer of next message. */ if ((sc->msg->flags & IIC_M_NOSTOP) == 0) { glxiic_stop_locked(sc); } ctrl1 = bus_read_1(sc->smb_res, GLXIIC_SMB_CTRL1); bus_write_1(sc->smb_res, GLXIIC_SMB_CTRL1, ctrl1 | GLXIIC_SMB_CTRL1_START_BIT); glxiic_set_state_locked(sc, GLXIIC_STATE_MASTER_ADDR); sc->msg++; } else { /* Last message. */ glxiic_stop_locked(sc); glxiic_set_state_locked(sc, GLXIIC_STATE_IDLE); sc->error = IIC_NOERR; GLXIIC_WAKEUP(sc); } return (IIC_NOERR); } static void glxiic_intr(void *arg) { struct glxiic_softc *sc; int error; uint8_t status, data; sc = (struct glxiic_softc *)arg; GLXIIC_LOCK(sc); status = glxiic_read_status_locked(sc); /* Check if this interrupt originated from the SMBus. 
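 * (The MASTER and XMIT bits indicate controller state, i.e. whether the
 * controller currently owns the bus and the transfer direction, rather than
 * a pending event, so they are excluded from the test below when deciding
 * whether there is anything to handle.)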
*/ if ((status & ~(GLXIIC_SMB_STS_MASTER_BIT | GLXIIC_SMB_STS_XMIT_BIT)) != 0) { error = glxiic_state_table[sc->state].callback(sc, status); if (error != IIC_NOERR) { if (glxiic_state_table[sc->state].master) { glxiic_stop_locked(sc); glxiic_set_state_locked(sc, GLXIIC_STATE_IDLE); sc->error = error; GLXIIC_WAKEUP(sc); } else { data = error & 0xff; iicbus_intr(sc->iicbus, INTR_ERROR, &data); glxiic_set_state_locked(sc, GLXIIC_STATE_IDLE); } } } GLXIIC_UNLOCK(sc); } static int glxiic_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct glxiic_softc *sc; sc = device_get_softc(dev); GLXIIC_LOCK(sc); if (oldaddr != NULL) *oldaddr = sc->addr; sc->addr = addr; /* A disable/enable cycle resets the controller. */ glxiic_smb_disable(sc); glxiic_smb_enable(sc, speed, addr); if (glxiic_state_table[sc->state].master) { sc->error = IIC_ESTATUS; GLXIIC_WAKEUP(sc); } glxiic_set_state_locked(sc, GLXIIC_STATE_IDLE); GLXIIC_UNLOCK(sc); return (IIC_NOERR); } static int glxiic_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { struct glxiic_softc *sc; int error; uint8_t ctrl1; sc = device_get_softc(dev); GLXIIC_LOCK(sc); if (sc->state != GLXIIC_STATE_IDLE) { error = IIC_EBUSBSY; goto out; } sc->msg = msgs; sc->nmsgs = nmsgs; glxiic_set_state_locked(sc, GLXIIC_STATE_MASTER_ADDR); /* Set start bit and let glxiic_intr() handle the transfer. */ ctrl1 = bus_read_1(sc->smb_res, GLXIIC_SMB_CTRL1); bus_write_1(sc->smb_res, GLXIIC_SMB_CTRL1, ctrl1 | GLXIIC_SMB_CTRL1_START_BIT); GLXIIC_SLEEP(sc); error = sc->error; out: GLXIIC_UNLOCK(sc); return (error); } static void glxiic_smb_map_interrupt(int irq) { uint32_t irq_map; int old_irq; /* Protect the read-modify-write operation. */ critical_enter(); irq_map = rdmsr(GLXIIC_MSR_PIC_YSEL_HIGH); old_irq = GLXIIC_MAP_TO_SMB_IRQ(irq_map); if (irq != old_irq) { irq_map &= ~GLXIIC_SMB_IRQ_TO_MAP(old_irq); irq_map |= GLXIIC_SMB_IRQ_TO_MAP(irq); wrmsr(GLXIIC_MSR_PIC_YSEL_HIGH, irq_map); } critical_exit(); } static void glxiic_gpio_enable(struct glxiic_softc *sc) { bus_write_4(sc->gpio_res, GLXIIC_GPIOL_IN_AUX1_SEL, GLXIIC_GPIO_14_15_ENABLE); bus_write_4(sc->gpio_res, GLXIIC_GPIOL_OUT_AUX1_SEL, GLXIIC_GPIO_14_15_ENABLE); } static void glxiic_gpio_disable(struct glxiic_softc *sc) { bus_write_4(sc->gpio_res, GLXIIC_GPIOL_OUT_AUX1_SEL, GLXIIC_GPIO_14_15_DISABLE); bus_write_4(sc->gpio_res, GLXIIC_GPIOL_IN_AUX1_SEL, GLXIIC_GPIO_14_15_DISABLE); } static void glxiic_smb_enable(struct glxiic_softc *sc, uint8_t speed, uint8_t addr) { uint8_t ctrl1; ctrl1 = 0; switch (speed) { case IIC_SLOW: sc->sclfrq = GLXIIC_SLOW; break; case IIC_FAST: sc->sclfrq = GLXIIC_FAST; break; case IIC_FASTEST: sc->sclfrq = GLXIIC_FASTEST; break; case IIC_UNKNOWN: default: /* Reuse last frequency. */ break; } /* Set bus speed and enable controller. */ bus_write_2(sc->smb_res, GLXIIC_SMB_CTRL2, GLXIIC_SCLFRQ(sc->sclfrq) | GLXIIC_SMB_CTRL2_EN_BIT); if (addr != 0) { /* Enable new match and global call match interrupts. */ ctrl1 |= GLXIIC_SMB_CTRL1_NMINTE_BIT | GLXIIC_SMB_CTRL1_GCMEN_BIT; bus_write_1(sc->smb_res, GLXIIC_SMB_ADDR, GLXIIC_SMB_ADDR_SAEN_BIT | GLXIIC_SMBADDR(addr)); } else { bus_write_1(sc->smb_res, GLXIIC_SMB_ADDR, 0); } /* Enable stall after start and interrupt. 
*/ bus_write_1(sc->smb_res, GLXIIC_SMB_CTRL1, ctrl1 | GLXIIC_SMB_CTRL1_STASTRE_BIT | GLXIIC_SMB_CTRL1_INTEN_BIT); } static void glxiic_smb_disable(struct glxiic_softc *sc) { uint16_t sclfrq; sclfrq = bus_read_2(sc->smb_res, GLXIIC_SMB_CTRL2); bus_write_2(sc->smb_res, GLXIIC_SMB_CTRL2, sclfrq & ~GLXIIC_SMB_CTRL2_EN_BIT); } diff --git a/sys/dev/ichiic/ig4_iic.c b/sys/dev/ichiic/ig4_iic.c index 806b406af326..c9346ff12350 100644 --- a/sys/dev/ichiic/ig4_iic.c +++ b/sys/dev/ichiic/ig4_iic.c @@ -1,1228 +1,1224 @@ /* * Copyright (c) 2014 The DragonFly Project. All rights reserved. * * This code is derived from software contributed to The DragonFly Project * by Matthew Dillon and was subsequently ported * to FreeBSD by Michael Gmelin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name of The DragonFly Project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific, prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Intel fourth generation mobile cpus integrated I2C device. * * See ig4_reg.h for datasheet reference and notes. * See ig4_var.h for locking semantics. */ #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ACPI #include #include #include #endif #include #include #include #include #define DO_POLL(sc) (cold || kdb_active || SCHEDULER_STOPPED() || sc->poll) /* * tLOW, tHIGH periods of the SCL clock and maximal falling time of both * lines are taken from I2C specifications. 
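 * For reference: in standard mode tHIGH + tLOW is 4000 + 4700 = 8700 nsec,
 * which together with the allowed rise and fall times roughly fills out the
 * 10000 nsec bit period of a 100 kHz bus; the fast-mode values
 * (600 + 1300 nsec) correspond to a 400 kHz bus in the same way.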
*/ #define IG4_SPEED_STD_THIGH 4000 /* nsec */ #define IG4_SPEED_STD_TLOW 4700 /* nsec */ #define IG4_SPEED_STD_TF_MAX 300 /* nsec */ #define IG4_SPEED_FAST_THIGH 600 /* nsec */ #define IG4_SPEED_FAST_TLOW 1300 /* nsec */ #define IG4_SPEED_FAST_TF_MAX 300 /* nsec */ /* * Ig4 hardware parameters except Haswell are taken from intel_lpss driver */ static const struct ig4_hw ig4iic_hw[] = { [IG4_EMAG] = { .ic_clock_rate = 100, /* MHz */ }, [IG4_HASWELL] = { .ic_clock_rate = 100, /* MHz */ .sda_hold_time = 90, /* nsec */ .txfifo_depth = 32, .rxfifo_depth = 32, }, [IG4_ATOM] = { .ic_clock_rate = 100, .sda_fall_time = 280, .scl_fall_time = 240, .sda_hold_time = 60, .txfifo_depth = 32, .rxfifo_depth = 32, }, [IG4_SKYLAKE] = { .ic_clock_rate = 120, .sda_hold_time = 230, }, [IG4_APL] = { .ic_clock_rate = 133, .sda_fall_time = 171, .scl_fall_time = 208, .sda_hold_time = 207, }, [IG4_CANNONLAKE] = { .ic_clock_rate = 216, .sda_hold_time = 230, }, [IG4_TIGERLAKE] = { .ic_clock_rate = 133, .sda_fall_time = 171, .scl_fall_time = 208, .sda_hold_time = 42, }, [IG4_GEMINILAKE] = { .ic_clock_rate = 133, .sda_fall_time = 171, .scl_fall_time = 290, .sda_hold_time = 313, }, }; static int ig4iic_set_config(ig4iic_softc_t *sc, bool reset); static driver_filter_t ig4iic_intr; static void ig4iic_dump(ig4iic_softc_t *sc); static int ig4_dump; SYSCTL_INT(_debug, OID_AUTO, ig4_dump, CTLFLAG_RW, &ig4_dump, 0, "Dump controller registers"); /* * Clock registers initialization control * 0 - Try read clock registers from ACPI and fallback to p.1. * 1 - Calculate values based on controller type (IC clock rate). * 2 - Use values inherited from DragonflyBSD driver (old behavior). * 3 - Keep clock registers intact. */ static int ig4_timings; SYSCTL_INT(_debug, OID_AUTO, ig4_timings, CTLFLAG_RDTUN, &ig4_timings, 0, "Controller timings 0=ACPI, 1=predefined, 2=legacy, 3=do not change"); /* * Low-level inline support functions */ static __inline void reg_write(ig4iic_softc_t *sc, uint32_t reg, uint32_t value) { bus_write_4(sc->regs_res, reg, value); bus_barrier(sc->regs_res, reg, 4, BUS_SPACE_BARRIER_WRITE); } static __inline uint32_t reg_read(ig4iic_softc_t *sc, uint32_t reg) { uint32_t value; bus_barrier(sc->regs_res, reg, 4, BUS_SPACE_BARRIER_READ); value = bus_read_4(sc->regs_res, reg); return (value); } static void ig4iic_set_intr_mask(ig4iic_softc_t *sc, uint32_t val) { if (sc->intr_mask != val) { reg_write(sc, IG4_REG_INTR_MASK, val); sc->intr_mask = val; } } static int intrstat2iic(ig4iic_softc_t *sc, uint32_t val) { uint32_t src; if (val & IG4_INTR_RX_UNDER) reg_read(sc, IG4_REG_CLR_RX_UNDER); if (val & IG4_INTR_RX_OVER) reg_read(sc, IG4_REG_CLR_RX_OVER); if (val & IG4_INTR_TX_OVER) reg_read(sc, IG4_REG_CLR_TX_OVER); if (val & IG4_INTR_TX_ABRT) { src = reg_read(sc, IG4_REG_TX_ABRT_SOURCE); reg_read(sc, IG4_REG_CLR_TX_ABORT); /* User-requested abort. 
Not really an error */ if (src & IG4_ABRTSRC_TRANSFER) return (IIC_ESTATUS); /* Master has lost arbitration */ if (src & IG4_ABRTSRC_ARBLOST) return (IIC_EBUSBSY); /* Did not receive an acknowledge from the remote slave */ if (src & (IG4_ABRTSRC_TXNOACK_ADDR7 | IG4_ABRTSRC_TXNOACK_ADDR10_1 | IG4_ABRTSRC_TXNOACK_ADDR10_2 | IG4_ABRTSRC_TXNOACK_DATA | IG4_ABRTSRC_GENCALL_NOACK)) return (IIC_ENOACK); /* Programming errors */ if (src & (IG4_ABRTSRC_GENCALL_READ | IG4_ABRTSRC_NORESTART_START | IG4_ABRTSRC_NORESTART_10)) return (IIC_ENOTSUPP); /* Other errors */ if (src & IG4_ABRTSRC_ACKED_START) return (IIC_EBUSERR); } /* * TX_OVER, RX_OVER and RX_UNDER are caused by wrong RX/TX FIFO depth * detection or driver's read/write pipelining errors. */ if (val & (IG4_INTR_TX_OVER | IG4_INTR_RX_OVER)) return (IIC_EOVERFLOW); if (val & IG4_INTR_RX_UNDER) return (IIC_EUNDERFLOW); return (IIC_NOERR); } /* * Enable or disable the controller and wait for the controller to acknowledge * the state change. */ static int set_controller(ig4iic_softc_t *sc, uint32_t ctl) { int retry; int error; uint32_t v; /* * When the controller is enabled, interrupt on STOP detect * or receive character ready and clear pending interrupts. */ ig4iic_set_intr_mask(sc, 0); if (ctl & IG4_I2C_ENABLE) reg_read(sc, IG4_REG_CLR_INTR); reg_write(sc, IG4_REG_I2C_EN, ctl); error = IIC_ETIMEOUT; for (retry = 100; retry > 0; --retry) { v = reg_read(sc, IG4_REG_ENABLE_STATUS); if (((v ^ ctl) & IG4_I2C_ENABLE) == 0) { error = 0; break; } pause("i2cslv", 1); } return (error); } /* * Wait for the requested interrupt, using a 25uS polling loop or 10ms sleep * intervals, for up to 1 second. */ static int wait_intr(ig4iic_softc_t *sc, uint32_t intr) { uint32_t v; int error; int txlvl = -1; u_int count_us = 0; u_int limit_us = 1000000; /* 1sec */ for (;;) { /* * Check requested status */ v = reg_read(sc, IG4_REG_RAW_INTR_STAT); error = intrstat2iic(sc, v & IG4_INTR_ERR_MASK); if (error || (v & intr)) break; /* * When waiting for the transmit FIFO to become empty, * reset the timeout if we see a change in the transmit * FIFO level as progress is being made. */ if (intr & (IG4_INTR_TX_EMPTY | IG4_INTR_STOP_DET)) { v = reg_read(sc, IG4_REG_TXFLR) & IG4_FIFOLVL_MASK; if (txlvl != v) { txlvl = v; count_us = 0; } } /* * Stop if we've run out of time. */ if (count_us >= limit_us) { error = IIC_ETIMEOUT; break; } /* * When polling is not requested, let the interrupt do its work. */ if (!DO_POLL(sc)) { mtx_lock_spin(&sc->io_lock); ig4iic_set_intr_mask(sc, intr | IG4_INTR_ERR_MASK); msleep_spin(sc, &sc->io_lock, "i2cwait", (hz + 99) / 100); /* sleep up to 10ms */ ig4iic_set_intr_mask(sc, 0); mtx_unlock_spin(&sc->io_lock); count_us += 10000; } else { DELAY(25); count_us += 25; } } return (error); } /* * Set the slave address. The controller must be disabled when * changing the address. * * This operation does not issue anything to the I2C bus but sets * the target address for when the controller later issues a START. */ static void set_slave_addr(ig4iic_softc_t *sc, uint8_t slave) { uint32_t tar; uint32_t ctl; bool use_10bit; use_10bit = false; if (sc->slave_valid && sc->last_slave == slave && sc->use_10bit == use_10bit) { return; } sc->use_10bit = use_10bit; /* * Wait for TXFIFO to drain before disabling the controller.
*/ reg_write(sc, IG4_REG_TX_TL, 0); wait_intr(sc, IG4_INTR_TX_EMPTY); set_controller(sc, 0); ctl = reg_read(sc, IG4_REG_CTL); ctl &= ~IG4_CTL_10BIT; ctl |= IG4_CTL_RESTARTEN; tar = slave; if (sc->use_10bit) { tar |= IG4_TAR_10BIT; ctl |= IG4_CTL_10BIT; } reg_write(sc, IG4_REG_CTL, ctl); reg_write(sc, IG4_REG_TAR_ADD, tar); set_controller(sc, IG4_I2C_ENABLE); sc->slave_valid = true; sc->last_slave = slave; } /* * IICBUS API FUNCTIONS */ static int ig4iic_xfer_start(ig4iic_softc_t *sc, uint16_t slave, bool repeated_start) { set_slave_addr(sc, slave >> 1); if (!repeated_start) { /* * Clear any previous TX/RX FIFOs overflow/underflow bits * and I2C bus STOP condition. */ reg_read(sc, IG4_REG_CLR_INTR); } return (0); } static bool ig4iic_xfer_is_started(ig4iic_softc_t *sc) { /* * It requires that no IG4_REG_CLR_INTR or IG4_REG_CLR_START/STOP_DET * register reads is issued after START condition. */ return ((reg_read(sc, IG4_REG_RAW_INTR_STAT) & (IG4_INTR_START_DET | IG4_INTR_STOP_DET)) == IG4_INTR_START_DET); } static int ig4iic_xfer_abort(ig4iic_softc_t *sc) { int error; /* Request send of STOP condition and flush of TX FIFO */ set_controller(sc, IG4_I2C_ABORT | IG4_I2C_ENABLE); /* * Wait for the TX_ABRT interrupt with ABRTSRC_TRANSFER * bit set in TX_ABRT_SOURCE register. */ error = wait_intr(sc, IG4_INTR_STOP_DET); set_controller(sc, IG4_I2C_ENABLE); return (error == IIC_ESTATUS ? 0 : error); } /* * Amount of unread data before next burst to get better I2C bus utilization. * 2 bytes is enough in FAST mode. 8 bytes is better in FAST+ and HIGH modes. * Intel-recommended value is 16 for DMA transfers with 64-byte depth FIFOs. */ #define IG4_FIFO_LOWAT 2 static int ig4iic_read(ig4iic_softc_t *sc, uint8_t *buf, uint16_t len, bool repeated_start, bool stop) { uint32_t cmd; int requested = 0; int received = 0; int burst, target, lowat = 0; int error; if (len == 0) return (0); while (received < len) { /* Ensure we have some free space in TXFIFO */ burst = sc->cfg.txfifo_depth - (reg_read(sc, IG4_REG_TXFLR) & IG4_FIFOLVL_MASK); if (burst <= 0) { reg_write(sc, IG4_REG_TX_TL, IG4_FIFO_LOWAT); error = wait_intr(sc, IG4_INTR_TX_EMPTY); if (error) break; burst = sc->cfg.txfifo_depth - (reg_read(sc, IG4_REG_TXFLR) & IG4_FIFOLVL_MASK); } /* Ensure we have enough free space in RXFIFO */ burst = MIN(burst, sc->cfg.rxfifo_depth - (requested - received)); target = MIN(requested + burst, (int)len); while (requested < target) { cmd = IG4_DATA_COMMAND_RD; if (repeated_start && requested == 0) cmd |= IG4_DATA_RESTART; if (stop && requested == len - 1) cmd |= IG4_DATA_STOP; reg_write(sc, IG4_REG_DATA_CMD, cmd); requested++; } /* Leave some data queued to maintain the hardware pipeline */ lowat = 0; if (requested != len && requested - received > IG4_FIFO_LOWAT) lowat = IG4_FIFO_LOWAT; /* After TXFLR fills up, clear it by reading available data */ while (received < requested - lowat) { burst = MIN(requested - received, reg_read(sc, IG4_REG_RXFLR) & IG4_FIFOLVL_MASK); if (burst > 0) { while (burst--) buf[received++] = 0xFF & reg_read(sc, IG4_REG_DATA_CMD); } else { reg_write(sc, IG4_REG_RX_TL, requested - received - lowat - 1); error = wait_intr(sc, IG4_INTR_RX_FULL); if (error) goto out; } } } out: return (error); } static int ig4iic_write(ig4iic_softc_t *sc, uint8_t *buf, uint16_t len, bool repeated_start, bool stop) { uint32_t cmd; int sent = 0; int burst, target; int error, lowat; if (len == 0) return (0); while (sent < len) { burst = sc->cfg.txfifo_depth - (reg_read(sc, IG4_REG_TXFLR) & IG4_FIFOLVL_MASK); target = 
MIN(sent + burst, (int)len); while (sent < target) { cmd = buf[sent]; if (repeated_start && sent == 0) cmd |= IG4_DATA_RESTART; if (stop && sent == len - 1) cmd |= IG4_DATA_STOP; reg_write(sc, IG4_REG_DATA_CMD, cmd); sent++; } if (sent < len) { if (len - sent <= sc->cfg.txfifo_depth) lowat = sc->cfg.txfifo_depth - (len - sent); else lowat = IG4_FIFO_LOWAT; reg_write(sc, IG4_REG_TX_TL, lowat); error = wait_intr(sc, IG4_INTR_TX_EMPTY); if (error) break; } } return (error); } int ig4iic_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { ig4iic_softc_t *sc = device_get_softc(dev); const char *reason = NULL; uint32_t i; int error; int unit; bool rpstart; bool stop; bool allocated; /* * The hardware interface imposes limits on allowed I2C messages. * It is not possible to explicitly send a start or stop. * They are automatically sent (or not sent, depending on the * configuration) when a data byte is transferred. * For this reason it's impossible to send a message with no data * at all (like an SMBus quick message). * The start condition is automatically generated after the stop * condition, so it's impossible to not have a start after a stop. * The repeated start condition is automatically sent if a change * of the transfer direction happens, so it's impossible to have * a change of direction without a (repeated) start. * The repeated start can be forced even without the change of * direction. * Changing the target slave address requires resetting the hardware * state, so it's impossible to do that without the stop followed * by the start. */ for (i = 0; i < nmsgs; i++) { #if 0 if (i == 0 && (msgs[i].flags & IIC_M_NOSTART) != 0) { reason = "first message without start"; break; } if (i == nmsgs - 1 && (msgs[i].flags & IIC_M_NOSTOP) != 0) { reason = "last message without stop"; break; } #endif if (msgs[i].len == 0) { reason = "message with no data"; break; } if (i > 0) { if ((msgs[i].flags & IIC_M_NOSTART) != 0 && (msgs[i - 1].flags & IIC_M_NOSTOP) == 0) { reason = "stop not followed by start"; break; } if ((msgs[i - 1].flags & IIC_M_NOSTOP) != 0 && msgs[i].slave != msgs[i - 1].slave) { reason = "change of slave without stop"; break; } if ((msgs[i].flags & IIC_M_NOSTART) != 0 && (msgs[i].flags & IIC_M_RD) != (msgs[i - 1].flags & IIC_M_RD)) { reason = "change of direction without repeated" " start"; break; } } } if (reason != NULL) { if (bootverbose) device_printf(dev, "%s\n", reason); return (IIC_ENOTSUPP); } /* Check if device is already allocated with iicbus_request_bus() */ allocated = sx_xlocked(&sc->call_lock) != 0; if (!allocated) sx_xlock(&sc->call_lock); /* Debugging - dump registers. */ if (ig4_dump) { unit = device_get_unit(dev); if (ig4_dump & (1 << unit)) { ig4_dump &= ~(1 << unit); ig4iic_dump(sc); } } /* * Clear any previous abort condition that may have been holding * the txfifo in reset. 
*/ reg_read(sc, IG4_REG_CLR_TX_ABORT); rpstart = false; error = 0; for (i = 0; i < nmsgs; i++) { if ((msgs[i].flags & IIC_M_NOSTART) == 0) { error = ig4iic_xfer_start(sc, msgs[i].slave, rpstart); } else { if (!sc->slave_valid || (msgs[i].slave >> 1) != sc->last_slave) { device_printf(dev, "start condition suppressed " "but slave address is not set up\n"); error = EINVAL; break; } rpstart = false; } if (error != 0) break; stop = (msgs[i].flags & IIC_M_NOSTOP) == 0; if (msgs[i].flags & IIC_M_RD) error = ig4iic_read(sc, msgs[i].buf, msgs[i].len, rpstart, stop); else error = ig4iic_write(sc, msgs[i].buf, msgs[i].len, rpstart, stop); /* Wait for an error or a stop condition to occur on the I2C bus */ if (stop && error == 0) { error = wait_intr(sc, IG4_INTR_STOP_DET); if (error == 0) reg_read(sc, IG4_REG_CLR_INTR); } if (error != 0) { /* * Send STOP condition if it's not done yet and flush * both FIFOs. Do a controller soft reset if the transfer * abort fails. */ if (ig4iic_xfer_is_started(sc) && ig4iic_xfer_abort(sc) != 0) { device_printf(sc->dev, "Failed to abort " "transfer; resetting the controller.\n"); ig4iic_set_config(sc, true); } else { while (reg_read(sc, IG4_REG_I2C_STA) & IG4_STATUS_RX_NOTEMPTY) reg_read(sc, IG4_REG_DATA_CMD); reg_read(sc, IG4_REG_TX_ABRT_SOURCE); reg_read(sc, IG4_REG_CLR_INTR); } break; } rpstart = !stop; } if (error == IIC_ENOACK && bootverbose) device_printf(dev, "Warning: NACK for slave address 0x%x\n", msgs[i].slave >> 1); if (!allocated) sx_unlock(&sc->call_lock); return (error); } int ig4iic_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { ig4iic_softc_t *sc = device_get_softc(dev); bool allocated; allocated = sx_xlocked(&sc->call_lock) != 0; if (!allocated) sx_xlock(&sc->call_lock); /* TODO handle speed configuration? */ if (oldaddr != NULL) *oldaddr = sc->last_slave << 1; set_slave_addr(sc, addr >> 1); if (addr == IIC_UNKNOWN) sc->slave_valid = false; if (!allocated) sx_unlock(&sc->call_lock); return (0); } int ig4iic_callback(device_t dev, int index, caddr_t data) { ig4iic_softc_t *sc = device_get_softc(dev); int error = 0; int how; switch (index) { case IIC_REQUEST_BUS: /* force polling if ig4iic is requested with IIC_DONTWAIT */ how = *(int *)data; if ((how & IIC_WAIT) == 0) { if (sx_try_xlock(&sc->call_lock) == 0) error = IIC_EBUSBSY; else sc->poll = true; } else sx_xlock(&sc->call_lock); break; case IIC_RELEASE_BUS: sc->poll = false; sx_unlock(&sc->call_lock); break; default: error = errno2iic(EINVAL); } return (error); } /* * Clock register values can be calculated with the following rough equations: * SCL_HCNT = ceil(IC clock rate * tHIGH) * SCL_LCNT = ceil(IC clock rate * tLOW) * SDA_HOLD = ceil(IC clock rate * SDA hold time) * Precise equations take the signal's falling, rising and spike suppression * times into account. They can be found in Synopsys or Intel documentation. * * Here we snarf the formulas and defaults from the Linux driver to be able to * use the timing values provided by the Intel LPSS driver "as is".
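 * As a worked example, plugging the IG4_APL parameters above into these
 * formulas (133 MHz IC clock, fast mode, 171/208 nsec SDA/SCL fall times,
 * 207 nsec SDA hold time) gives:
 *   fs_scl_hcnt = (133 * (600 + 171) + 500) / 1000 - 3 = 100
 *   fs_scl_lcnt = (133 * (1300 + 208) + 500) / 1000 - 1 = 200
 *   fs_sda_hold = (133 * 207 + 500) / 1000 = 28
 * (integer arithmetic, as done in ig4iic_clk_params() below).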
*/ static int ig4iic_clk_params(const struct ig4_hw *hw, int speed, uint16_t *scl_hcnt, uint16_t *scl_lcnt, uint16_t *sda_hold) { uint32_t thigh, tlow, tf_max; /* nsec */ uint32_t sda_fall_time; /* nsec */ uint32_t scl_fall_time; /* nsec */ switch (speed) { case IG4_CTL_SPEED_STD: thigh = IG4_SPEED_STD_THIGH; tlow = IG4_SPEED_STD_TLOW; tf_max = IG4_SPEED_STD_TF_MAX; break; case IG4_CTL_SPEED_FAST: thigh = IG4_SPEED_FAST_THIGH; tlow = IG4_SPEED_FAST_TLOW; tf_max = IG4_SPEED_FAST_TF_MAX; break; default: return (EINVAL); } /* Use slowest falling time defaults to be on the safe side */ sda_fall_time = hw->sda_fall_time == 0 ? tf_max : hw->sda_fall_time; *scl_hcnt = (uint16_t) ((hw->ic_clock_rate * (thigh + sda_fall_time) + 500) / 1000 - 3); scl_fall_time = hw->scl_fall_time == 0 ? tf_max : hw->scl_fall_time; *scl_lcnt = (uint16_t) ((hw->ic_clock_rate * (tlow + scl_fall_time) + 500) / 1000 - 1); /* * There is no "known good" default value for tHD;DAT so keep SDA_HOLD * intact if sda_hold_time value is not provided. */ if (hw->sda_hold_time != 0) *sda_hold = (uint16_t) ((hw->ic_clock_rate * hw->sda_hold_time + 500) / 1000); return (0); } #ifdef DEV_ACPI static ACPI_STATUS ig4iic_acpi_params(ACPI_HANDLE handle, char *method, uint16_t *scl_hcnt, uint16_t *scl_lcnt, uint16_t *sda_hold) { ACPI_BUFFER buf; ACPI_OBJECT *obj, *elems; ACPI_STATUS status; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(handle, method, NULL, &buf); if (ACPI_FAILURE(status)) return (status); status = AE_TYPE; obj = (ACPI_OBJECT *)buf.Pointer; if (obj->Type == ACPI_TYPE_PACKAGE && obj->Package.Count == 3) { elems = obj->Package.Elements; *scl_hcnt = elems[0].Integer.Value & IG4_SCL_CLOCK_MASK; *scl_lcnt = elems[1].Integer.Value & IG4_SCL_CLOCK_MASK; *sda_hold = elems[2].Integer.Value & IG4_SDA_TX_HOLD_MASK; status = AE_OK; } AcpiOsFree(obj); return (status); } #endif /* DEV_ACPI */ static void ig4iic_get_config(ig4iic_softc_t *sc) { const struct ig4_hw *hw; uint32_t v; #ifdef DEV_ACPI ACPI_HANDLE handle; #endif /* Fetch default hardware config from controller */ sc->cfg.version = reg_read(sc, IG4_REG_COMP_VER); sc->cfg.bus_speed = reg_read(sc, IG4_REG_CTL) & IG4_CTL_SPEED_MASK; sc->cfg.ss_scl_hcnt = reg_read(sc, IG4_REG_SS_SCL_HCNT) & IG4_SCL_CLOCK_MASK; sc->cfg.ss_scl_lcnt = reg_read(sc, IG4_REG_SS_SCL_LCNT) & IG4_SCL_CLOCK_MASK; sc->cfg.fs_scl_hcnt = reg_read(sc, IG4_REG_FS_SCL_HCNT) & IG4_SCL_CLOCK_MASK; sc->cfg.fs_scl_lcnt = reg_read(sc, IG4_REG_FS_SCL_LCNT) & IG4_SCL_CLOCK_MASK; sc->cfg.ss_sda_hold = sc->cfg.fs_sda_hold = reg_read(sc, IG4_REG_SDA_HOLD) & IG4_SDA_TX_HOLD_MASK; if (sc->cfg.bus_speed != IG4_CTL_SPEED_STD) sc->cfg.bus_speed = IG4_CTL_SPEED_FAST; /* REG_COMP_PARAM1 is not documented in latest Intel specs */ if (sc->version == IG4_HASWELL || sc->version == IG4_ATOM) { v = reg_read(sc, IG4_REG_COMP_PARAM1); if (IG4_PARAM1_TXFIFO_DEPTH(v) != 0) sc->cfg.txfifo_depth = IG4_PARAM1_TXFIFO_DEPTH(v); if (IG4_PARAM1_RXFIFO_DEPTH(v) != 0) sc->cfg.rxfifo_depth = IG4_PARAM1_RXFIFO_DEPTH(v); } /* Override hardware config with IC_clock-based counter values */ if (ig4_timings < 2 && sc->version < nitems(ig4iic_hw)) { hw = &ig4iic_hw[sc->version]; sc->cfg.bus_speed = IG4_CTL_SPEED_FAST; ig4iic_clk_params(hw, IG4_CTL_SPEED_STD, &sc->cfg.ss_scl_hcnt, &sc->cfg.ss_scl_lcnt, &sc->cfg.ss_sda_hold); ig4iic_clk_params(hw, IG4_CTL_SPEED_FAST, &sc->cfg.fs_scl_hcnt, &sc->cfg.fs_scl_lcnt, &sc->cfg.fs_sda_hold); if (hw->txfifo_depth != 0) sc->cfg.txfifo_depth = hw->txfifo_depth; if (hw->rxfifo_depth != 
0) sc->cfg.rxfifo_depth = hw->rxfifo_depth; } else if (ig4_timings == 2) { /* * Timings of original ig4 driver: * Program based on a 25000 Hz clock. This is a bit of a * hack (obviously). The defaults are 400 and 470 for standard * and 60 and 130 for fast. The defaults for standard fail * utterly (presumably cause an abort) because the clock time * is ~18.8ms by default. This brings it down to ~4ms. */ sc->cfg.bus_speed = IG4_CTL_SPEED_STD; sc->cfg.ss_scl_hcnt = sc->cfg.fs_scl_hcnt = 100; sc->cfg.ss_scl_lcnt = sc->cfg.fs_scl_lcnt = 125; if (sc->version == IG4_SKYLAKE) sc->cfg.ss_sda_hold = sc->cfg.fs_sda_hold = 28; } #ifdef DEV_ACPI /* Evaluate SSCN and FMCN ACPI methods to fetch timings */ if (ig4_timings == 0 && (handle = acpi_get_handle(sc->dev)) != NULL) { ig4iic_acpi_params(handle, "SSCN", &sc->cfg.ss_scl_hcnt, &sc->cfg.ss_scl_lcnt, &sc->cfg.ss_sda_hold); ig4iic_acpi_params(handle, "FMCN", &sc->cfg.fs_scl_hcnt, &sc->cfg.fs_scl_lcnt, &sc->cfg.fs_sda_hold); } #endif if (bootverbose) { device_printf(sc->dev, "Controller parameters:\n"); printf(" Speed: %s\n", sc->cfg.bus_speed == IG4_CTL_SPEED_STD ? "Std" : "Fast"); printf(" Regs: HCNT :LCNT :SDAHLD\n"); printf(" Std: 0x%04hx:0x%04hx:0x%04hx\n", sc->cfg.ss_scl_hcnt, sc->cfg.ss_scl_lcnt, sc->cfg.ss_sda_hold); printf(" Fast: 0x%04hx:0x%04hx:0x%04hx\n", sc->cfg.fs_scl_hcnt, sc->cfg.fs_scl_lcnt, sc->cfg.fs_sda_hold); } } static int ig4iic_set_config(ig4iic_softc_t *sc, bool reset) { uint32_t v; v = reg_read(sc, IG4_REG_DEVIDLE_CTRL); if (IG4_HAS_ADDREGS(sc->version) && (v & IG4_RESTORE_REQUIRED)) { reg_write(sc, IG4_REG_DEVIDLE_CTRL, IG4_DEVICE_IDLE | IG4_RESTORE_REQUIRED); reg_write(sc, IG4_REG_DEVIDLE_CTRL, 0); pause("i2crst", 1); reset = true; } if ((sc->version == IG4_HASWELL || sc->version == IG4_ATOM) && reset) { reg_write(sc, IG4_REG_RESETS_HSW, IG4_RESETS_ASSERT_HSW); reg_write(sc, IG4_REG_RESETS_HSW, IG4_RESETS_DEASSERT_HSW); } else if (IG4_HAS_ADDREGS(sc->version) && reset) { reg_write(sc, IG4_REG_RESETS_SKL, IG4_RESETS_ASSERT_SKL); reg_write(sc, IG4_REG_RESETS_SKL, IG4_RESETS_DEASSERT_SKL); } if (sc->version == IG4_ATOM) v = reg_read(sc, IG4_REG_COMP_TYPE); if (sc->version == IG4_HASWELL || sc->version == IG4_ATOM) { v = reg_read(sc, IG4_REG_COMP_PARAM1); v = reg_read(sc, IG4_REG_GENERAL); /* * The content of IG4_REG_GENERAL is different for each * controller version. */ if (sc->version == IG4_HASWELL && (v & IG4_GENERAL_SWMODE) == 0) { v |= IG4_GENERAL_SWMODE; reg_write(sc, IG4_REG_GENERAL, v); v = reg_read(sc, IG4_REG_GENERAL); } } if (sc->version == IG4_HASWELL) { v = reg_read(sc, IG4_REG_SW_LTR_VALUE); v = reg_read(sc, IG4_REG_AUTO_LTR_VALUE); } else if (IG4_HAS_ADDREGS(sc->version)) { v = reg_read(sc, IG4_REG_ACTIVE_LTR_VALUE); v = reg_read(sc, IG4_REG_IDLE_LTR_VALUE); } if (sc->version == IG4_HASWELL || sc->version == IG4_ATOM) { v = reg_read(sc, IG4_REG_COMP_VER); if (v < IG4_COMP_MIN_VER) return(ENXIO); } if (set_controller(sc, 0)) { device_printf(sc->dev, "controller error during attach-1\n"); return (ENXIO); } reg_read(sc, IG4_REG_CLR_INTR); reg_write(sc, IG4_REG_INTR_MASK, 0); sc->intr_mask = 0; reg_write(sc, IG4_REG_SS_SCL_HCNT, sc->cfg.ss_scl_hcnt); reg_write(sc, IG4_REG_SS_SCL_LCNT, sc->cfg.ss_scl_lcnt); reg_write(sc, IG4_REG_FS_SCL_HCNT, sc->cfg.fs_scl_hcnt); reg_write(sc, IG4_REG_FS_SCL_LCNT, sc->cfg.fs_scl_lcnt); reg_write(sc, IG4_REG_SDA_HOLD, (sc->cfg.bus_speed & IG4_CTL_SPEED_MASK) == IG4_CTL_SPEED_STD ? 
sc->cfg.ss_sda_hold : sc->cfg.fs_sda_hold); reg_write(sc, IG4_REG_RX_TL, 0); reg_write(sc, IG4_REG_TX_TL, 0); reg_write(sc, IG4_REG_CTL, IG4_CTL_MASTER | IG4_CTL_SLAVE_DISABLE | IG4_CTL_RESTARTEN | (sc->cfg.bus_speed & IG4_CTL_SPEED_MASK)); /* Force setting of the target address on the next transfer */ sc->slave_valid = false; return (0); } static void ig4iic_get_fifo(ig4iic_softc_t *sc) { uint32_t v; /* * Hardware does not allow FIFO Threshold Levels value to be set larger * than the depth of the buffer. If an attempt is made to do that, the * actual value set will be the maximum depth of the buffer. */ if (sc->cfg.txfifo_depth == 0) { v = reg_read(sc, IG4_REG_TX_TL); reg_write(sc, IG4_REG_TX_TL, v | IG4_FIFO_MASK); sc->cfg.txfifo_depth = (reg_read(sc, IG4_REG_TX_TL) & IG4_FIFO_MASK) + 1; reg_write(sc, IG4_REG_TX_TL, v); } if (sc->cfg.rxfifo_depth == 0) { v = reg_read(sc, IG4_REG_RX_TL); reg_write(sc, IG4_REG_RX_TL, v | IG4_FIFO_MASK); sc->cfg.rxfifo_depth = (reg_read(sc, IG4_REG_RX_TL) & IG4_FIFO_MASK) + 1; reg_write(sc, IG4_REG_RX_TL, v); } if (bootverbose) { printf(" FIFO: RX:0x%04x: TX:0x%04x\n", sc->cfg.rxfifo_depth, sc->cfg.txfifo_depth); } } /* * Called from ig4iic_pci_attach/detach() */ int ig4iic_attach(ig4iic_softc_t *sc) { int error; mtx_init(&sc->io_lock, "IG4 I/O lock", NULL, MTX_SPIN); sx_init(&sc->call_lock, "IG4 call lock"); ig4iic_get_config(sc); error = ig4iic_set_config(sc, IG4_HAS_ADDREGS(sc->version)); if (error) goto done; ig4iic_get_fifo(sc); sc->iicbus = device_add_child(sc->dev, "iicbus", DEVICE_UNIT_ANY); if (sc->iicbus == NULL) { device_printf(sc->dev, "iicbus driver not found\n"); error = ENXIO; goto done; } if (set_controller(sc, IG4_I2C_ENABLE)) { device_printf(sc->dev, "controller error during attach-2\n"); error = ENXIO; goto done; } if (set_controller(sc, 0)) { device_printf(sc->dev, "controller error during attach-3\n"); error = ENXIO; goto done; } error = bus_setup_intr(sc->dev, sc->intr_res, INTR_TYPE_MISC | INTR_MPSAFE, ig4iic_intr, NULL, sc, &sc->intr_handle); if (error) { device_printf(sc->dev, "Unable to setup irq: error %d\n", error); } bus_attach_children(sc->dev); done: return (error); } int ig4iic_detach(ig4iic_softc_t *sc) { int error; - if (device_is_attached(sc->dev)) { - error = bus_generic_detach(sc->dev); - if (error) - return (error); - } - if (sc->iicbus) - device_delete_child(sc->dev, sc->iicbus); + error = bus_generic_detach(sc->dev); + if (error) + return (error); if (sc->intr_handle) bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_handle); sx_xlock(&sc->call_lock); sc->iicbus = NULL; sc->intr_handle = NULL; reg_write(sc, IG4_REG_INTR_MASK, 0); set_controller(sc, 0); sx_xunlock(&sc->call_lock); mtx_destroy(&sc->io_lock); sx_destroy(&sc->call_lock); return (0); } int ig4iic_suspend(ig4iic_softc_t *sc) { int error; /* suspend all children */ error = bus_generic_suspend(sc->dev); sx_xlock(&sc->call_lock); set_controller(sc, 0); if (IG4_HAS_ADDREGS(sc->version)) { /* * Place the device in the idle state, just to be safe */ reg_write(sc, IG4_REG_DEVIDLE_CTRL, IG4_DEVICE_IDLE); /* * Controller can become dysfunctional if I2C lines are pulled * down when suspend procedure turns off power to I2C device. * Place device in the reset state to avoid this. 
*/ reg_write(sc, IG4_REG_RESETS_SKL, IG4_RESETS_ASSERT_SKL); } sx_xunlock(&sc->call_lock); return (error); } int ig4iic_resume(ig4iic_softc_t *sc) { int error; sx_xlock(&sc->call_lock); if (ig4iic_set_config(sc, IG4_HAS_ADDREGS(sc->version))) device_printf(sc->dev, "controller error during resume\n"); sx_xunlock(&sc->call_lock); error = bus_generic_resume(sc->dev); return (error); } /* * Interrupt Operation, see ig4_var.h for locking semantics. */ static int ig4iic_intr(void *cookie) { ig4iic_softc_t *sc = cookie; int retval = FILTER_STRAY; mtx_lock_spin(&sc->io_lock); /* Ignore stray interrupts */ if (sc->intr_mask != 0 && reg_read(sc, IG4_REG_INTR_STAT) != 0) { /* Interrupt bits are cleared in wait_intr() loop */ ig4iic_set_intr_mask(sc, 0); wakeup(sc); retval = FILTER_HANDLED; } mtx_unlock_spin(&sc->io_lock); return (retval); } #define REGDUMP(sc, reg) \ device_printf(sc->dev, " %-23s %08x\n", #reg, reg_read(sc, reg)) static void ig4iic_dump(ig4iic_softc_t *sc) { device_printf(sc->dev, "ig4iic register dump:\n"); REGDUMP(sc, IG4_REG_CTL); REGDUMP(sc, IG4_REG_TAR_ADD); REGDUMP(sc, IG4_REG_SS_SCL_HCNT); REGDUMP(sc, IG4_REG_SS_SCL_LCNT); REGDUMP(sc, IG4_REG_FS_SCL_HCNT); REGDUMP(sc, IG4_REG_FS_SCL_LCNT); REGDUMP(sc, IG4_REG_INTR_STAT); REGDUMP(sc, IG4_REG_INTR_MASK); REGDUMP(sc, IG4_REG_RAW_INTR_STAT); REGDUMP(sc, IG4_REG_RX_TL); REGDUMP(sc, IG4_REG_TX_TL); REGDUMP(sc, IG4_REG_I2C_EN); REGDUMP(sc, IG4_REG_I2C_STA); REGDUMP(sc, IG4_REG_TXFLR); REGDUMP(sc, IG4_REG_RXFLR); REGDUMP(sc, IG4_REG_SDA_HOLD); REGDUMP(sc, IG4_REG_TX_ABRT_SOURCE); REGDUMP(sc, IG4_REG_SLV_DATA_NACK); REGDUMP(sc, IG4_REG_DMA_CTRL); REGDUMP(sc, IG4_REG_DMA_TDLR); REGDUMP(sc, IG4_REG_DMA_RDLR); REGDUMP(sc, IG4_REG_SDA_SETUP); REGDUMP(sc, IG4_REG_ENABLE_STATUS); REGDUMP(sc, IG4_REG_COMP_PARAM1); REGDUMP(sc, IG4_REG_COMP_VER); if (sc->version == IG4_ATOM) { REGDUMP(sc, IG4_REG_COMP_TYPE); REGDUMP(sc, IG4_REG_CLK_PARMS); } if (sc->version == IG4_HASWELL || sc->version == IG4_ATOM) { REGDUMP(sc, IG4_REG_RESETS_HSW); REGDUMP(sc, IG4_REG_GENERAL); } else if (sc->version == IG4_SKYLAKE) { REGDUMP(sc, IG4_REG_RESETS_SKL); } if (sc->version == IG4_HASWELL) { REGDUMP(sc, IG4_REG_SW_LTR_VALUE); REGDUMP(sc, IG4_REG_AUTO_LTR_VALUE); } else if (IG4_HAS_ADDREGS(sc->version)) { REGDUMP(sc, IG4_REG_ACTIVE_LTR_VALUE); REGDUMP(sc, IG4_REG_IDLE_LTR_VALUE); } } #undef REGDUMP DRIVER_MODULE(iicbus, ig4iic, iicbus_driver, NULL, NULL); #ifdef DEV_ACPI DRIVER_MODULE(acpi_iicbus, ig4iic, acpi_iicbus_driver, NULL, NULL); #endif MODULE_DEPEND(ig4iic, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER); MODULE_VERSION(ig4iic, 1); diff --git a/sys/dev/ichsmb/ichsmb.c b/sys/dev/ichsmb/ichsmb.c index 063fbf66a75a..28503b9e574d 100644 --- a/sys/dev/ichsmb/ichsmb.c +++ b/sys/dev/ichsmb/ichsmb.c @@ -1,706 +1,705 @@ /*- * ichsmb.c * * Author: Archie Cobbs * Copyright (c) 2000 Whistle Communications, Inc. * All rights reserved. * * Subject to the following obligations and disclaimer of warranty, use and * redistribution of this software, in source or object code forms, with or * without modifications are expressly permitted by Whistle Communications; * provided, however, that: * 1. Any and all reproductions of the source or object code must include the * copyright notice above and the following disclaimer of warranties; and * 2. No rights are granted, in any manner or form, to use Whistle * Communications, Inc. 
trademarks, including the mark "WHISTLE * COMMUNICATIONS" on advertising, endorsements, or otherwise except as * such appears in the above copyright notice or in the software. * * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE. * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ #include /* * Support for the SMBus controller logical device which is part of the * Intel 81801AA (ICH) and 81801AB (ICH0) I/O controller hub chips. * * This driver assumes that the generic SMBus code will ensure that * at most one process at a time calls into the SMBus methods below. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Enable debugging by defining ICHSMB_DEBUG to a non-zero value. */ #define ICHSMB_DEBUG 0 #if ICHSMB_DEBUG != 0 #define DBG(fmt, args...) \ do { printf("%s: " fmt, __func__ , ## args); } while (0) #else #define DBG(fmt, args...) do { } while (0) #endif /* * Our child device driver name */ #define DRIVER_SMBUS "smbus" /* * Internal functions */ static int ichsmb_wait(sc_p sc); /******************************************************************** BUS-INDEPENDENT BUS METHODS ********************************************************************/ /* * Handle probe-time duties that are independent of the bus * our device lives on. */ int ichsmb_probe(device_t dev) { return (BUS_PROBE_DEFAULT); } /* * Handle attach-time duties that are independent of the bus * our device lives on. 
*/ int ichsmb_attach(device_t dev) { const sc_p sc = device_get_softc(dev); int error; /* Create mutex */ mtx_init(&sc->mutex, device_get_nameunit(dev), "ichsmb", MTX_DEF); /* Add child: an instance of the "smbus" device */ if ((sc->smb = device_add_child(dev, DRIVER_SMBUS, -1)) == NULL) { device_printf(dev, "no \"%s\" child found\n", DRIVER_SMBUS); error = ENXIO; goto fail; } /* Clear interrupt conditions */ bus_write_1(sc->io_res, ICH_HST_STA, 0xff); /* Set up interrupt handler */ error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, ichsmb_device_intr, sc, &sc->irq_handle); if (error != 0) { device_printf(dev, "can't setup irq\n"); goto fail; } /* Attach children when interrupts are available */ bus_delayed_attach_children(dev); return (0); fail: mtx_destroy(&sc->mutex); return (error); } /******************************************************************** SMBUS METHODS ********************************************************************/ int ichsmb_callback(device_t dev, int index, void *data) { int smb_error = 0; DBG("index=%d how=%d\n", index, data ? *(int *)data : -1); switch (index) { case SMB_REQUEST_BUS: break; case SMB_RELEASE_BUS: break; default: smb_error = SMB_EABORT; /* XXX */ break; } DBG("smb_error=%d\n", smb_error); return (smb_error); } int ichsmb_quick(device_t dev, u_char slave, int how) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x how=%d\n", slave, how); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); switch (how) { case SMB_QREAD: case SMB_QWRITE: mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_QUICK; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | (how == SMB_QREAD ? ICH_XMIT_SLVA_READ : ICH_XMIT_SLVA_WRITE)); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); mtx_unlock(&sc->mutex); break; default: smb_error = SMB_ENOTSUPP; } DBG("smb_error=%d\n", smb_error); return (smb_error); } int ichsmb_sendb(device_t dev, u_char slave, char byte) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x byte=0x%02x\n", slave, (u_char)byte); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_WRITE); bus_write_1(sc->io_res, ICH_HST_CMD, byte); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); return (smb_error); } int ichsmb_recvb(device_t dev, u_char slave, char *byte) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x\n", slave); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_READ); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR) *byte = bus_read_1(sc->io_res, ICH_D0); mtx_unlock(&sc->mutex); DBG("smb_error=%d byte=0x%02x\n", smb_error, (u_char)*byte); return (smb_error); } int ichsmb_writeb(device_t dev, u_char slave, char cmd, char byte) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x cmd=0x%02x byte=0x%02x\n", slave, (u_char)cmd, (u_char)byte); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); mtx_lock(&sc->mutex); 
sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE_DATA; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_WRITE); bus_write_1(sc->io_res, ICH_HST_CMD, cmd); bus_write_1(sc->io_res, ICH_D0, byte); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); return (smb_error); } int ichsmb_writew(device_t dev, u_char slave, char cmd, short word) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x cmd=0x%02x word=0x%04x\n", slave, (u_char)cmd, (u_int16_t)word); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_WORD_DATA; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_WRITE); bus_write_1(sc->io_res, ICH_HST_CMD, cmd); bus_write_1(sc->io_res, ICH_D0, word & 0xff); bus_write_1(sc->io_res, ICH_D1, word >> 8); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); return (smb_error); } int ichsmb_readb(device_t dev, u_char slave, char cmd, char *byte) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x cmd=0x%02x\n", slave, (u_char)cmd); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE_DATA; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_READ); bus_write_1(sc->io_res, ICH_HST_CMD, cmd); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR) *byte = bus_read_1(sc->io_res, ICH_D0); mtx_unlock(&sc->mutex); DBG("smb_error=%d byte=0x%02x\n", smb_error, (u_char)*byte); return (smb_error); } int ichsmb_readw(device_t dev, u_char slave, char cmd, short *word) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x cmd=0x%02x\n", slave, (u_char)cmd); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_WORD_DATA; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_READ); bus_write_1(sc->io_res, ICH_HST_CMD, cmd); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR) { *word = (bus_read_1(sc->io_res, ICH_D0) & 0xff) | (bus_read_1(sc->io_res, ICH_D1) << 8); } mtx_unlock(&sc->mutex); DBG("smb_error=%d word=0x%04x\n", smb_error, (u_int16_t)*word); return (smb_error); } int ichsmb_pcall(device_t dev, u_char slave, char cmd, short sdata, short *rdata) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x cmd=0x%02x sdata=0x%04x\n", slave, (u_char)cmd, (u_int16_t)sdata); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_PROC_CALL; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_WRITE); bus_write_1(sc->io_res, ICH_HST_CMD, cmd); bus_write_1(sc->io_res, ICH_D0, sdata & 0xff); bus_write_1(sc->io_res, ICH_D1, sdata >> 8); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR) { *rdata = (bus_read_1(sc->io_res, ICH_D0) & 0xff) | (bus_read_1(sc->io_res, ICH_D1) << 8); } mtx_unlock(&sc->mutex); DBG("smb_error=%d rdata=0x%04x\n", smb_error, (u_int16_t)*rdata); return (smb_error); } 
int ichsmb_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x cmd=0x%02x count=%d\n", slave, (u_char)cmd, count); #if ICHSMB_DEBUG #define DISP(ch) (((ch) < 0x20 || (ch) >= 0x7e) ? '.' : (ch)) { u_char *p; for (p = (u_char *)buf; p - (u_char *)buf < 32; p += 8) { DBG("%02x: %02x %02x %02x %02x %02x %02x %02x %02x" " %c%c%c%c%c%c%c%c", (p - (u_char *)buf), p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], DISP(p[0]), DISP(p[1]), DISP(p[2]), DISP(p[3]), DISP(p[4]), DISP(p[5]), DISP(p[6]), DISP(p[7])); } } #undef DISP #endif KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); if (count < 1 || count > 32) return (SMB_EINVAL); bcopy(buf, sc->block_data, count); sc->block_count = count; sc->block_index = 1; /* buf[0] is written here */ sc->block_write = true; mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BLOCK; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_WRITE); bus_write_1(sc->io_res, ICH_HST_CMD, cmd); bus_write_1(sc->io_res, ICH_D0, count); bus_write_1(sc->io_res, ICH_BLOCK_DB, buf[0]); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); return (smb_error); } int ichsmb_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { const sc_p sc = device_get_softc(dev); int smb_error; DBG("slave=0x%02x cmd=0x%02x\n", slave, (u_char)cmd); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); bzero(sc->block_data, sizeof(sc->block_data)); sc->block_count = 0; sc->block_index = 0; sc->block_write = false; mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BLOCK; bus_write_1(sc->io_res, ICH_XMIT_SLVA, slave | ICH_XMIT_SLVA_READ); bus_write_1(sc->io_res, ICH_HST_CMD, cmd); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR) { bcopy(sc->block_data, buf, sc->block_count); *count = sc->block_count; } mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); #if ICHSMB_DEBUG #define DISP(ch) (((ch) < 0x20 || (ch) >= 0x7e) ? '.' : (ch)) { u_char *p; for (p = (u_char *)buf; p - (u_char *)buf < 32; p += 8) { DBG("%02x: %02x %02x %02x %02x %02x %02x %02x %02x" " %c%c%c%c%c%c%c%c", (p - (u_char *)buf), p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], DISP(p[0]), DISP(p[1]), DISP(p[2]), DISP(p[3]), DISP(p[4]), DISP(p[5]), DISP(p[6]), DISP(p[7])); } } #undef DISP #endif return (smb_error); } /******************************************************************** OTHER FUNCTIONS ********************************************************************/ /* * This table describes what interrupts we should ever expect to * see after each ICH command, not including the SMBALERT interrupt. 
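 * The table is indexed by the ICH_HST_CNT_SMB_CMD_* value shifted right by
 * two bits, which is how ichsmb_device_intr() below looks up the expected
 * status bits for the command in progress.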
*/ static const u_int8_t ichsmb_state_irqs[] = { /* quick */ (ICH_HST_STA_BUS_ERR | ICH_HST_STA_DEV_ERR | ICH_HST_STA_INTR), /* byte */ (ICH_HST_STA_BUS_ERR | ICH_HST_STA_DEV_ERR | ICH_HST_STA_INTR), /* byte data */ (ICH_HST_STA_BUS_ERR | ICH_HST_STA_DEV_ERR | ICH_HST_STA_INTR), /* word data */ (ICH_HST_STA_BUS_ERR | ICH_HST_STA_DEV_ERR | ICH_HST_STA_INTR), /* process call */ (ICH_HST_STA_BUS_ERR | ICH_HST_STA_DEV_ERR | ICH_HST_STA_INTR), /* block */ (ICH_HST_STA_BUS_ERR | ICH_HST_STA_DEV_ERR | ICH_HST_STA_INTR | ICH_HST_STA_BYTE_DONE_STS), /* i2c read (not used) */ (ICH_HST_STA_BUS_ERR | ICH_HST_STA_DEV_ERR | ICH_HST_STA_INTR | ICH_HST_STA_BYTE_DONE_STS) }; /* * Interrupt handler. This handler is bus-independent. Note that our * interrupt may be shared, so we must handle "false" interrupts. */ void ichsmb_device_intr(void *cookie) { const sc_p sc = cookie; const device_t dev = sc->dev; const int maxloops = 16; u_int8_t status; u_int8_t ok_bits; int cmd_index; int count; mtx_lock(&sc->mutex); for (count = 0; count < maxloops; count++) { /* Get and reset status bits */ status = bus_read_1(sc->io_res, ICH_HST_STA); #if ICHSMB_DEBUG if ((status & ~(ICH_HST_STA_INUSE_STS | ICH_HST_STA_HOST_BUSY)) || count > 0) { DBG("%d stat=0x%02x\n", count, status); } #endif status &= ~(ICH_HST_STA_INUSE_STS | ICH_HST_STA_HOST_BUSY | ICH_HST_STA_SMBALERT_STS); if (status == 0) break; /* Check for unexpected interrupt */ ok_bits = ICH_HST_STA_SMBALERT_STS; if (sc->killed) { sc->killed = 0; ok_bits |= ICH_HST_STA_FAILED; bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_INTREN); } if (sc->ich_cmd != -1) { cmd_index = sc->ich_cmd >> 2; KASSERT(cmd_index < sizeof(ichsmb_state_irqs), ("%s: ich_cmd=%d", device_get_nameunit(dev), sc->ich_cmd)); ok_bits |= ichsmb_state_irqs[cmd_index]; } if ((status & ~ok_bits) != 0) { device_printf(dev, "irq 0x%02x during 0x%02x\n", status, sc->ich_cmd); bus_write_1(sc->io_res, ICH_HST_STA, (status & ~ok_bits)); continue; } /* Check for killed / aborted command */ if (status & ICH_HST_STA_FAILED) { sc->smb_error = SMB_EABORT; goto finished; } /* Check for bus error */ if (status & ICH_HST_STA_BUS_ERR) { sc->smb_error = SMB_ECOLLI; /* XXX SMB_EBUSERR? */ goto finished; } /* Check for device error */ if (status & ICH_HST_STA_DEV_ERR) { sc->smb_error = SMB_ENOACK; /* or SMB_ETIMEOUT? 
*/ goto finished; } /* Check for byte completion in block transfer */ if (status & ICH_HST_STA_BYTE_DONE_STS) { if (sc->block_write) { if (sc->block_index < sc->block_count) { /* Write next byte */ bus_write_1(sc->io_res, ICH_BLOCK_DB, sc->block_data[sc->block_index++]); } } else { /* First interrupt, get the count also */ if (sc->block_index == 0) { sc->block_count = bus_read_1( sc->io_res, ICH_D0); if (sc->block_count < 1 || sc->block_count > 32) { device_printf(dev, "block read " "wrong length: %d\n", sc->block_count); bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_KILL | ICH_HST_CNT_INTREN); sc->block_count = 0; sc->killed = true; } } /* Get next byte, if any */ if (sc->block_index < sc->block_count) { /* Read next byte */ sc->block_data[sc->block_index++] = bus_read_1(sc->io_res, ICH_BLOCK_DB); /* * Set "LAST_BYTE" bit before reading * the last byte of block data */ if (sc->block_index == sc->block_count - 1) { bus_write_1(sc->io_res, ICH_HST_CNT, ICH_HST_CNT_LAST_BYTE | ICH_HST_CNT_INTREN | sc->ich_cmd); } } } } /* Check command completion */ if (status & ICH_HST_STA_INTR) { sc->smb_error = SMB_ENOERR; finished: sc->ich_cmd = -1; bus_write_1(sc->io_res, ICH_HST_STA, status); wakeup(sc); break; } /* Clear status bits and try again */ bus_write_1(sc->io_res, ICH_HST_STA, status); } mtx_unlock(&sc->mutex); /* Too many loops? */ if (count == maxloops) { device_printf(dev, "interrupt loop, status=0x%02x\n", bus_read_1(sc->io_res, ICH_HST_STA)); } } /* * Wait for command completion. Assumes mutex is held. * Returns an SMB_* error code. */ static int ichsmb_wait(sc_p sc) { const device_t dev = sc->dev; int error, smb_error; KASSERT(sc->ich_cmd != -1, ("%s: ich_cmd=%d\n", __func__ , sc->ich_cmd)); mtx_assert(&sc->mutex, MA_OWNED); error = msleep(sc, &sc->mutex, PZERO, "ichsmb", hz / 4); DBG("msleep -> %d\n", error); switch (error) { case 0: smb_error = sc->smb_error; break; case EWOULDBLOCK: device_printf(dev, "device timeout, status=0x%02x\n", bus_read_1(sc->io_res, ICH_HST_STA)); sc->ich_cmd = -1; smb_error = SMB_ETIMEOUT; break; default: smb_error = SMB_EABORT; break; } return (smb_error); } /* * Release resources associated with device. */ void ichsmb_release_resources(sc_p sc) { const device_t dev = sc->dev; if (sc->irq_handle != NULL) { bus_teardown_intr(dev, sc->irq_res, sc->irq_handle); sc->irq_handle = NULL; } if (sc->irq_res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = NULL; } if (sc->io_res != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); sc->io_res = NULL; } } int ichsmb_detach(device_t dev) { const sc_p sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error) return (error); - device_delete_child(dev, sc->smb); ichsmb_release_resources(sc); mtx_destroy(&sc->mutex); return 0; } DRIVER_MODULE(smbus, ichsmb, smbus_driver, 0, 0); diff --git a/sys/dev/iicbus/controller/cadence/cdnc_i2c.c b/sys/dev/iicbus/controller/cadence/cdnc_i2c.c index 3bed24c36070..bf1f88ec9ca5 100644 --- a/sys/dev/iicbus/controller/cadence/cdnc_i2c.c +++ b/sys/dev/iicbus/controller/cadence/cdnc_i2c.c @@ -1,707 +1,702 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019-2020 Thomas Skibo * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Cadence / Zynq i2c driver. * * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. * (v1.12.2) July 1, 2018. Xilinx doc UG585. I2C Controller is documented * in Chapter 20. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #ifdef I2CDEBUG #define DPRINTF(...) do { printf(__VA_ARGS__); } while (0) #else #define DPRINTF(...) do { } while (0) #endif #if 0 #define HWTYPE_CDNS_R1P10 1 #endif #define HWTYPE_CDNS_R1P14 2 static struct ofw_compat_data compat_data[] = { #if 0 {"cdns,i2c-r1p10", HWTYPE_CDNS_R1P10}, #endif {"cdns,i2c-r1p14", HWTYPE_CDNS_R1P14}, {NULL, 0} }; struct cdnc_i2c_softc { device_t dev; device_t iicbus; struct mtx sc_mtx; struct resource *mem_res; struct resource *irq_res; void *intrhandle; uint16_t cfg_reg_shadow; uint16_t istat; clk_t ref_clk; uint32_t ref_clock_freq; uint32_t i2c_clock_freq; int hwtype; int hold; /* sysctls */ unsigned int i2c_clk_real_freq; unsigned int interrupts; unsigned int timeout_ints; }; #define I2C_SC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define I2C_SC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define I2C_SC_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), NULL, MTX_DEF) #define I2C_SC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) #define I2C_SC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define RD2(sc, off) (bus_read_2((sc)->mem_res, (off))) #define WR2(sc, off, val) (bus_write_2((sc)->mem_res, (off), (val))) #define RD1(sc, off) (bus_read_1((sc)->mem_res, (off))) #define WR1(sc, off, val) (bus_write_1((sc)->mem_res, (off), (val))) /* Cadence I2C controller device registers. */ #define CDNC_I2C_CR 0x0000 /* Config register. */ #define CDNC_I2C_CR_DIV_A_MASK (3 << 14) #define CDNC_I2C_CR_DIV_A_SHIFT 14 #define CDNC_I2C_CR_DIV_A(a) ((a) << 14) #define CDNC_I2C_CR_DIV_A_MAX 3 #define CDNC_I2C_CR_DIV_B_MASK (0x3f << 8) #define CDNC_I2C_CR_DIV_B_SHIFT 8 #define CDNC_I2C_CR_DIV_B(b) ((b) << 8) #define CDNC_I2C_CR_DIV_B_MAX 63 #define CDNC_I2C_CR_CLR_FIFO (1 << 6) #define CDNC_I2C_CR_SLVMON_MODE (1 << 5) #define CDNC_I2C_CR_HOLD (1 << 4) #define CDNC_I2C_CR_ACKEN (1 << 3) #define CDNC_I2C_CR_NEA (1 << 2) #define CDNC_I2C_CR_MAST (1 << 1) #define CDNC_I2C_CR_RNW (1 << 0) #define CDNC_I2C_SR 0x0004 /* Status register. 
*/ #define CDNC_I2C_SR_BUS_ACTIVE (1 << 8) #define CDNC_I2C_SR_RX_OVF (1 << 7) #define CDNC_I2C_SR_TX_VALID (1 << 6) #define CDNC_I2C_SR_RX_VALID (1 << 5) #define CDNC_I2C_SR_RXRW (1 << 3) #define CDNC_I2C_ADDR 0x0008 /* i2c address register. */ #define CDNC_I2C_DATA 0x000C /* i2c data register. */ #define CDNC_I2C_ISR 0x0010 /* Int status register. */ #define CDNC_I2C_ISR_ARB_LOST (1 << 9) #define CDNC_I2C_ISR_RX_UNDF (1 << 7) #define CDNC_I2C_ISR_TX_OVF (1 << 6) #define CDNC_I2C_ISR_RX_OVF (1 << 5) #define CDNC_I2C_ISR_SLV_RDY (1 << 4) #define CDNC_I2C_ISR_XFER_TMOUT (1 << 3) #define CDNC_I2C_ISR_XFER_NACK (1 << 2) #define CDNC_I2C_ISR_XFER_DATA (1 << 1) #define CDNC_I2C_ISR_XFER_DONE (1 << 0) #define CDNC_I2C_ISR_ALL 0x2ff #define CDNC_I2C_TRANS_SIZE 0x0014 /* Transfer size. */ #define CDNC_I2C_PAUSE 0x0018 /* Slv Monitor Pause reg. */ #define CDNC_I2C_TIME_OUT 0x001C /* Time-out register. */ #define CDNC_I2C_TIME_OUT_MIN 31 #define CDNC_I2C_TIME_OUT_MAX 255 #define CDNC_I2C_IMR 0x0020 /* Int mask register. */ #define CDNC_I2C_IER 0x0024 /* Int enable register. */ #define CDNC_I2C_IDR 0x0028 /* Int disable register. */ #define CDNC_I2C_FIFO_SIZE 16 #define CDNC_I2C_DEFAULT_I2C_CLOCK 400000 /* 400Khz default */ #define CDNC_I2C_ISR_ERRS (CDNC_I2C_ISR_ARB_LOST | CDNC_I2C_ISR_RX_UNDF | \ CDNC_I2C_ISR_TX_OVF | CDNC_I2C_ISR_RX_OVF | CDNC_I2C_ISR_XFER_TMOUT | \ CDNC_I2C_ISR_XFER_NACK) /* Configure clock dividers. */ static int cdnc_i2c_set_freq(struct cdnc_i2c_softc *sc) { uint32_t div_a, div_b, err, clk_out; uint32_t best_div_a, best_div_b, best_err; best_div_a = 0; best_div_b = 0; best_err = ~0U; /* * The i2c controller has a two-stage clock divider to create * the "clock enable" signal used to sample the incoming SCL and * SDA signals. The Clock Enable signal is divided by 22 to create * the outgoing SCL signal. * * Try all div_a values and pick best match. */ for (div_a = 0; div_a <= CDNC_I2C_CR_DIV_A_MAX; div_a++) { div_b = sc->ref_clock_freq / (22 * sc->i2c_clock_freq * (div_a + 1)); if (div_b > CDNC_I2C_CR_DIV_B_MAX) continue; clk_out = sc->ref_clock_freq / (22 * (div_a + 1) * (div_b + 1)); err = clk_out > sc->i2c_clock_freq ? clk_out - sc->i2c_clock_freq : sc->i2c_clock_freq - clk_out; if (err < best_err) { best_err = err; best_div_a = div_a; best_div_b = div_b; } } if (best_err == ~0U) { device_printf(sc->dev, "cannot configure clock divider.\n"); return (EINVAL); /* out of range */ } clk_out = sc->ref_clock_freq / (22 * (best_div_a + 1) * (best_div_b + 1)); DPRINTF("%s: ref_clock_freq=%d i2c_clock_freq=%d\n", __func__, sc->ref_clock_freq, sc->i2c_clock_freq); DPRINTF("%s: div_a=%d div_b=%d real-freq=%d\n", __func__, best_div_a, best_div_b, clk_out); sc->cfg_reg_shadow &= ~(CDNC_I2C_CR_DIV_A_MASK | CDNC_I2C_CR_DIV_B_MASK); sc->cfg_reg_shadow |= CDNC_I2C_CR_DIV_A(best_div_a) | CDNC_I2C_CR_DIV_B(best_div_b); WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow); sc->i2c_clk_real_freq = clk_out; return (0); } /* Initialize hardware. */ static int cdnc_i2c_init_hw(struct cdnc_i2c_softc *sc) { /* Reset config register and clear FIFO. */ sc->cfg_reg_shadow = 0; WR2(sc, CDNC_I2C_CR, CDNC_I2C_CR_CLR_FIFO); sc->hold = 0; /* Clear and disable all interrupts. */ WR2(sc, CDNC_I2C_ISR, CDNC_I2C_ISR_ALL); WR2(sc, CDNC_I2C_IDR, CDNC_I2C_ISR_ALL); /* Max out bogus time-out register. */ WR1(sc, CDNC_I2C_TIME_OUT, CDNC_I2C_TIME_OUT_MAX); /* Set up clock dividers. 
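 * cdnc_i2c_set_freq() picks div_a/div_b so that
 * ref_clock_freq / (22 * (div_a + 1) * (div_b + 1)) comes as close as
 * possible to the requested bus frequency.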
*/ return (cdnc_i2c_set_freq(sc)); } static int cdnc_i2c_errs(struct cdnc_i2c_softc *sc, uint16_t istat) { DPRINTF("%s: istat=0x%x\n", __func__, istat); /* XXX: clean up after errors. */ /* Reset config register and clear FIFO. */ sc->cfg_reg_shadow &= CDNC_I2C_CR_DIV_A_MASK | CDNC_I2C_CR_DIV_B_MASK; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow | CDNC_I2C_CR_CLR_FIFO); sc->hold = 0; if (istat & CDNC_I2C_ISR_XFER_TMOUT) return (IIC_ETIMEOUT); else if (istat & CDNC_I2C_ISR_RX_UNDF) return (IIC_EUNDERFLOW); else if (istat & (CDNC_I2C_ISR_RX_OVF | CDNC_I2C_ISR_TX_OVF)) return (IIC_EOVERFLOW); else if (istat & CDNC_I2C_ISR_XFER_NACK) return (IIC_ENOACK); else if (istat & CDNC_I2C_ISR_ARB_LOST) return (IIC_EBUSERR); /* XXX: ???? */ else /* Should not happen */ return (IIC_NOERR); } static int cdnc_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct cdnc_i2c_softc *sc = device_get_softc(dev); int error; DPRINTF("%s: speed=%d addr=0x%x\n", __func__, speed, addr); I2C_SC_LOCK(sc); sc->i2c_clock_freq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); error = cdnc_i2c_init_hw(sc); I2C_SC_UNLOCK(sc); return (error ? IIC_ENOTSUPP : IIC_NOERR); } static void cdnc_i2c_intr(void *arg) { struct cdnc_i2c_softc *sc = (struct cdnc_i2c_softc *)arg; uint16_t status; I2C_SC_LOCK(sc); sc->interrupts++; /* Read active interrupts. */ status = RD2(sc, CDNC_I2C_ISR) & ~RD2(sc, CDNC_I2C_IMR); /* Clear interrupts. */ WR2(sc, CDNC_I2C_ISR, status); if (status & CDNC_I2C_ISR_XFER_TMOUT) sc->timeout_ints++; sc->istat |= status; if (status) wakeup(sc); I2C_SC_UNLOCK(sc); } static int cdnc_i2c_xfer_rd(struct cdnc_i2c_softc *sc, struct iic_msg *msg) { int error = IIC_NOERR; uint16_t flags = msg->flags; uint16_t len = msg->len; int idx = 0, nbytes, last, first = 1; uint16_t statr; DPRINTF("%s: flags=0x%x len=%d\n", __func__, flags, len); #if 0 if (sc->hwtype == HWTYPE_CDNS_R1P10 && (flags & IIC_M_NOSTOP)) return (IIC_ENOTSUPP); #endif I2C_SC_ASSERT_LOCKED(sc); /* Program config register. */ sc->cfg_reg_shadow &= CDNC_I2C_CR_DIV_A_MASK | CDNC_I2C_CR_DIV_B_MASK; sc->cfg_reg_shadow |= CDNC_I2C_CR_HOLD | CDNC_I2C_CR_ACKEN | CDNC_I2C_CR_NEA | CDNC_I2C_CR_MAST | CDNC_I2C_CR_RNW; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow | CDNC_I2C_CR_CLR_FIFO); sc->hold = 1; while (len > 0) { nbytes = MIN(CDNC_I2C_FIFO_SIZE - 2, len); WR1(sc, CDNC_I2C_TRANS_SIZE, nbytes); last = nbytes == len && !(flags & IIC_M_NOSTOP); if (last) { /* Clear HOLD bit on last transfer. */ sc->cfg_reg_shadow &= ~CDNC_I2C_CR_HOLD; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow); sc->hold = 0; } /* Writing slv address for a start or repeated start. */ if (first && !(flags & IIC_M_NOSTART)) WR2(sc, CDNC_I2C_ADDR, msg->slave >> 1); first = 0; /* Enable FIFO interrupts and wait. */ if (last) WR2(sc, CDNC_I2C_IER, CDNC_I2C_ISR_XFER_DONE | CDNC_I2C_ISR_ERRS); else WR2(sc, CDNC_I2C_IER, CDNC_I2C_ISR_XFER_DATA | CDNC_I2C_ISR_ERRS); error = mtx_sleep(sc, &sc->sc_mtx, 0, "cdi2c", hz); /* Disable FIFO interrupts. */ WR2(sc, CDNC_I2C_IDR, CDNC_I2C_ISR_XFER_DATA | CDNC_I2C_ISR_XFER_DONE | CDNC_I2C_ISR_ERRS); if (error == EWOULDBLOCK) error = cdnc_i2c_errs(sc, CDNC_I2C_ISR_XFER_TMOUT); else if (sc->istat & CDNC_I2C_ISR_ERRS) error = cdnc_i2c_errs(sc, sc->istat); sc->istat = 0; if (error != IIC_NOERR) break; /* Read nbytes from FIFO. 
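 * Each byte is popped from CDNC_I2C_DATA only while the status register
 * reports RX_VALID; an empty FIFO at this point is treated as an underflow.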
*/ while (nbytes-- > 0) { statr = RD2(sc, CDNC_I2C_SR); if (!(statr & CDNC_I2C_SR_RX_VALID)) { printf("%s: RX FIFO underflow?\n", __func__); break; } msg->buf[idx++] = RD2(sc, CDNC_I2C_DATA); len--; } } return (error); } static int cdnc_i2c_xfer_wr(struct cdnc_i2c_softc *sc, struct iic_msg *msg) { int error = IIC_NOERR; uint16_t flags = msg->flags; uint16_t len = msg->len; int idx = 0, nbytes, last, first = 1; DPRINTF("%s: flags=0x%x len=%d\n", __func__, flags, len); I2C_SC_ASSERT_LOCKED(sc); /* Program config register. */ sc->cfg_reg_shadow &= CDNC_I2C_CR_DIV_A_MASK | CDNC_I2C_CR_DIV_B_MASK; sc->cfg_reg_shadow |= CDNC_I2C_CR_HOLD | CDNC_I2C_CR_ACKEN | CDNC_I2C_CR_NEA | CDNC_I2C_CR_MAST; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow | CDNC_I2C_CR_CLR_FIFO); sc->hold = 1; while (len > 0) { /* Put as much data into fifo as you can. */ nbytes = MIN(len, CDNC_I2C_FIFO_SIZE - RD1(sc, CDNC_I2C_TRANS_SIZE) - 1); len -= nbytes; while (nbytes-- > 0) WR2(sc, CDNC_I2C_DATA, msg->buf[idx++]); last = len == 0 && !(flags & IIC_M_NOSTOP); if (last) { /* Clear HOLD bit on last transfer. */ sc->cfg_reg_shadow &= ~CDNC_I2C_CR_HOLD; WR2(sc, CDNC_I2C_CR, sc->cfg_reg_shadow); sc->hold = 0; } /* Perform START if this is start or repeated start. */ if (first && !(flags & IIC_M_NOSTART)) WR2(sc, CDNC_I2C_ADDR, msg->slave >> 1); first = 0; /* Enable FIFO interrupts. */ WR2(sc, CDNC_I2C_IER, CDNC_I2C_ISR_XFER_DONE | CDNC_I2C_ISR_ERRS); /* Wait for end of data transfer. */ error = mtx_sleep(sc, &sc->sc_mtx, 0, "cdi2c", hz); /* Disable FIFO interrupts. */ WR2(sc, CDNC_I2C_IDR, CDNC_I2C_ISR_XFER_DONE | CDNC_I2C_ISR_ERRS); if (error == EWOULDBLOCK) error = cdnc_i2c_errs(sc, CDNC_I2C_ISR_XFER_TMOUT); else if (sc->istat & CDNC_I2C_ISR_ERRS) error = cdnc_i2c_errs(sc, sc->istat); sc->istat = 0; if (error) break; } return (error); } static int cdnc_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { struct cdnc_i2c_softc *sc = device_get_softc(dev); int i, error = IIC_NOERR; DPRINTF("%s: nmsgs=%d\n", __func__, nmsgs); I2C_SC_LOCK(sc); for (i = 0; i < nmsgs; i++) { DPRINTF("%s: msg[%d]: hold=%d slv=0x%x flags=0x%x len=%d\n", __func__, i, sc->hold, msgs[i].slave, msgs[i].flags, msgs[i].len); if (!sc->hold && (msgs[i].flags & IIC_M_NOSTART)) return (IIC_ENOTSUPP); if (msgs[i].flags & IIC_M_RD) { error = cdnc_i2c_xfer_rd(sc, &msgs[i]); if (error != IIC_NOERR) break; } else { error = cdnc_i2c_xfer_wr(sc, &msgs[i]); if (error != IIC_NOERR) break; } } I2C_SC_UNLOCK(sc); return (error); } static void cdnc_i2c_add_sysctls(device_t dev) { struct cdnc_i2c_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "i2c_clk_real_freq", CTLFLAG_RD, &sc->i2c_clk_real_freq, 0, "i2c clock real frequency"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_interrupts", CTLFLAG_RD, &sc->interrupts, 0, "interrupt calls"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_timeouts", CTLFLAG_RD, &sc->timeout_ints, 0, "hardware timeout interrupts"); } static int cdnc_i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Cadence I2C Controller"); return (BUS_PROBE_DEFAULT); } static int cdnc_i2c_detach(device_t); static int cdnc_i2c_attach(device_t dev) { struct cdnc_i2c_softc *sc; int rid, err; phandle_t node; pcell_t cell; uint64_t freq; sc = device_get_softc(dev); sc->dev = 
dev; sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data; I2C_SC_LOCK_INIT(sc); /* Get ref-clock and i2c-clock properties. */ node = ofw_bus_get_node(dev); if (OF_getprop(node, "ref-clock", &cell, sizeof(cell)) > 0) sc->ref_clock_freq = fdt32_to_cpu(cell); else if (clk_get_by_ofw_index(dev, node, 0, &sc->ref_clk) == 0) { if ((err = clk_enable(sc->ref_clk)) != 0) device_printf(dev, "Cannot enable clock. err=%d\n", err); else if ((err = clk_get_freq(sc->ref_clk, &freq)) != 0) device_printf(dev, "Cannot get clock frequency. err=%d\n", err); else sc->ref_clock_freq = freq; } else { device_printf(dev, "must have ref-clock property\n"); return (ENXIO); } if (OF_getprop(node, "clock-frequency", &cell, sizeof(cell)) > 0) sc->i2c_clock_freq = fdt32_to_cpu(cell); else sc->i2c_clock_freq = CDNC_I2C_DEFAULT_I2C_CLOCK; /* Get memory resource. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resources.\n"); cdnc_i2c_detach(dev); return (ENOMEM); } /* Allocate IRQ. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "could not allocate IRQ resource.\n"); cdnc_i2c_detach(dev); return (ENOMEM); } /* Activate the interrupt. */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, cdnc_i2c_intr, sc, &sc->intrhandle); if (err) { device_printf(dev, "could not setup IRQ.\n"); cdnc_i2c_detach(dev); return (err); } /* Configure the device. */ err = cdnc_i2c_init_hw(sc); if (err) { cdnc_i2c_detach(dev); return (err); } sc->iicbus = device_add_child(dev, "iicbus", -1); cdnc_i2c_add_sysctls(dev); /* Probe and attach iicbus when interrupts work. */ bus_delayed_attach_children(dev); return (0); } static int cdnc_i2c_detach(device_t dev) { struct cdnc_i2c_softc *sc = device_get_softc(dev); - if (device_is_attached(dev)) - bus_generic_detach(dev); + bus_generic_detach(dev); if (sc->ref_clk != NULL) { clk_release(sc->ref_clk); sc->ref_clk = NULL; } - /* Delete iic bus. */ - if (sc->iicbus) - device_delete_child(dev, sc->iicbus); - /* Disable hardware. */ if (sc->mem_res != NULL) { sc->cfg_reg_shadow = 0; WR2(sc, CDNC_I2C_CR, CDNC_I2C_CR_CLR_FIFO); /* Clear and disable all interrupts. */ WR2(sc, CDNC_I2C_ISR, CDNC_I2C_ISR_ALL); WR2(sc, CDNC_I2C_IDR, CDNC_I2C_ISR_ALL); } /* Teardown and release interrupt. */ if (sc->irq_res != NULL) { if (sc->intrhandle) bus_teardown_intr(dev, sc->irq_res, sc->intrhandle); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); sc->irq_res = NULL; } /* Release memory resource. 
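 * This is done after the hardware-disable writes above, which still need
 * the register window to be mapped.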
*/ if (sc->mem_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); sc->mem_res = NULL; } I2C_SC_LOCK_DESTROY(sc); return (0); } static phandle_t cdnc_i2c_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static device_method_t cdnc_i2c_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cdnc_i2c_probe), DEVMETHOD(device_attach, cdnc_i2c_attach), DEVMETHOD(device_detach, cdnc_i2c_detach), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, cdnc_i2c_get_node), /* iicbus methods */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_reset, cdnc_i2c_reset), DEVMETHOD(iicbus_transfer, cdnc_i2c_transfer), DEVMETHOD_END }; static driver_t cdnc_i2c_driver = { "cdnc_i2c", cdnc_i2c_methods, sizeof(struct cdnc_i2c_softc), }; DRIVER_MODULE(cdnc_i2c, simplebus, cdnc_i2c_driver, NULL, NULL); DRIVER_MODULE(ofw_iicbus, cdnc_i2c, ofw_iicbus_driver, NULL, NULL); MODULE_DEPEND(cdnc_i2c, iicbus, 1, 1, 1); MODULE_DEPEND(cdnc_i2c, ofw_iicbus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/iicbus/controller/rockchip/rk_i2c.c b/sys/dev/iicbus/controller/rockchip/rk_i2c.c index 0ef65d1121f5..9317adbcfd98 100644 --- a/sys/dev/iicbus/controller/rockchip/rk_i2c.c +++ b/sys/dev/iicbus/controller/rockchip/rk_i2c.c @@ -1,741 +1,737 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #define RK_I2C_CON 0x00 #define RK_I2C_CON_EN (1 << 0) #define RK_I2C_CON_MODE_SHIFT 1 #define RK_I2C_CON_MODE_TX 0 #define RK_I2C_CON_MODE_RRX 1 #define RK_I2C_CON_MODE_RX 2 #define RK_I2C_CON_MODE_RTX 3 #define RK_I2C_CON_MODE_MASK 0x6 #define RK_I2C_CON_START (1 << 3) #define RK_I2C_CON_STOP (1 << 4) #define RK_I2C_CON_LASTACK (1 << 5) #define RK_I2C_CON_NAKSTOP (1 << 6) #define RK_I2C_CON_CTRL_MASK 0xFF #define RK_I2C_CLKDIV 0x04 #define RK_I2C_CLKDIVL_MASK 0xFFFF #define RK_I2C_CLKDIVL_SHIFT 0 #define RK_I2C_CLKDIVH_MASK 0xFFFF0000 #define RK_I2C_CLKDIVH_SHIFT 16 #define RK_I2C_CLKDIV_MUL 8 #define RK_I2C_MRXADDR 0x08 #define RK_I2C_MRXADDR_SADDR_MASK 0xFFFFFF #define RK_I2C_MRXADDR_VALID(x) (1 << (24 + x)) #define RK_I2C_MRXRADDR 0x0C #define RK_I2C_MRXRADDR_SRADDR_MASK 0xFFFFFF #define RK_I2C_MRXRADDR_VALID(x) (1 << (24 + x)) #define RK_I2C_MTXCNT 0x10 #define RK_I2C_MTXCNT_MASK 0x3F #define RK_I2C_MRXCNT 0x14 #define RK_I2C_MRXCNT_MASK 0x3F #define RK_I2C_IEN 0x18 #define RK_I2C_IEN_BTFIEN (1 << 0) #define RK_I2C_IEN_BRFIEN (1 << 1) #define RK_I2C_IEN_MBTFIEN (1 << 2) #define RK_I2C_IEN_MBRFIEN (1 << 3) #define RK_I2C_IEN_STARTIEN (1 << 4) #define RK_I2C_IEN_STOPIEN (1 << 5) #define RK_I2C_IEN_NAKRCVIEN (1 << 6) #define RK_I2C_IEN_ALL (RK_I2C_IEN_MBTFIEN | RK_I2C_IEN_MBRFIEN | \ RK_I2C_IEN_STARTIEN | RK_I2C_IEN_STOPIEN | RK_I2C_IEN_NAKRCVIEN) #define RK_I2C_IPD 0x1C #define RK_I2C_IPD_BTFIPD (1 << 0) #define RK_I2C_IPD_BRFIPD (1 << 1) #define RK_I2C_IPD_MBTFIPD (1 << 2) #define RK_I2C_IPD_MBRFIPD (1 << 3) #define RK_I2C_IPD_STARTIPD (1 << 4) #define RK_I2C_IPD_STOPIPD (1 << 5) #define RK_I2C_IPD_NAKRCVIPD (1 << 6) #define RK_I2C_IPD_ALL (RK_I2C_IPD_MBTFIPD | RK_I2C_IPD_MBRFIPD | \ RK_I2C_IPD_STARTIPD | RK_I2C_IPD_STOPIPD | RK_I2C_IPD_NAKRCVIPD) #define RK_I2C_FNCT 0x20 #define RK_I2C_FNCT_MASK 0x3F #define RK_I2C_TXDATA_BASE 0x100 #define RK_I2C_RXDATA_BASE 0x200 /* 8 data registers, 4 bytes each. 
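 * i.e. a 32-byte window per chunk, which is where RK_I2C_MAX_RXTX_LEN
 * below comes from.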
*/ #define RK_I2C_MAX_RXTX_LEN 32 enum rk_i2c_state { STATE_IDLE = 0, STATE_START, STATE_READ, STATE_WRITE, STATE_STOP }; struct rk_i2c_softc { device_t dev; struct resource *res[2]; struct mtx mtx; clk_t sclk; clk_t pclk; int busy; void * intrhand; uint32_t intr; uint32_t ipd; struct iic_msg *msg; size_t cnt; bool transfer_done; bool nak_recv; bool tx_slave_addr; uint8_t mode; uint8_t state; device_t iicbus; }; static struct ofw_compat_data compat_data[] = { {"rockchip,rk3288-i2c", 1}, {"rockchip,rk3328-i2c", 1}, {"rockchip,rk3399-i2c", 1}, {NULL, 0} }; static struct resource_spec rk_i2c_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static int rk_i2c_probe(device_t dev); static int rk_i2c_attach(device_t dev); static int rk_i2c_detach(device_t dev); #define RK_I2C_LOCK(sc) mtx_lock(&(sc)->mtx) #define RK_I2C_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define RK_I2C_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define RK_I2C_READ(sc, reg) bus_read_4((sc)->res[0], (reg)) #define RK_I2C_WRITE(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static uint32_t rk_i2c_get_clkdiv(struct rk_i2c_softc *sc, uint32_t speed) { uint64_t sclk_freq; uint32_t clkdiv; int err; err = clk_get_freq(sc->sclk, &sclk_freq); if (err != 0) return (err); clkdiv = (sclk_freq / speed / RK_I2C_CLKDIV_MUL / 2) - 1; clkdiv &= RK_I2C_CLKDIVL_MASK; clkdiv = clkdiv << RK_I2C_CLKDIVH_SHIFT | clkdiv; return (clkdiv); } static int rk_i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct rk_i2c_softc *sc; uint32_t clkdiv; u_int busfreq; sc = device_get_softc(dev); busfreq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); clkdiv = rk_i2c_get_clkdiv(sc, busfreq); RK_I2C_LOCK(sc); /* Set the clock divider */ RK_I2C_WRITE(sc, RK_I2C_CLKDIV, clkdiv); /* Disable the module */ RK_I2C_WRITE(sc, RK_I2C_CON, 0); RK_I2C_UNLOCK(sc); return (0); } static uint8_t rk_i2c_fill_tx(struct rk_i2c_softc *sc) { uint32_t buf32; uint8_t buf; int i, j, len; len = sc->msg->len - sc->cnt; if (sc->tx_slave_addr) { KASSERT(sc->cnt == 0, ("tx_slave_addr in the middle of data")); len++; } if (len > RK_I2C_MAX_RXTX_LEN) len = RK_I2C_MAX_RXTX_LEN; for (i = 0; i < len; ) { buf32 = 0; /* Process next 4 bytes or whatever remains. */ for (j = 0; j < MIN(len - i, 4); j++) { /* Fill the addr if needed */ if (sc->tx_slave_addr) { buf = sc->msg->slave; sc->tx_slave_addr = false; } else { KASSERT(sc->cnt < sc->msg->len, ("%s: data buffer overrun", __func__)); buf = sc->msg->buf[sc->cnt]; sc->cnt++; } buf32 |= (uint32_t)buf << (j * 8); } KASSERT(i % 4 == 0, ("%s: misaligned write offset", __func__)); RK_I2C_WRITE(sc, RK_I2C_TXDATA_BASE + i, buf32); i += j; } return (len); } static void rk_i2c_drain_rx(struct rk_i2c_softc *sc) { uint32_t buf32 = 0; uint8_t buf8; int len; int i; if (sc->msg == NULL) { device_printf(sc->dev, "No current iic msg\n"); return; } len = sc->msg->len - sc->cnt; if (len > RK_I2C_MAX_RXTX_LEN) len = RK_I2C_MAX_RXTX_LEN; for (i = 0; i < len; i++) { if (i % 4 == 0) buf32 = RK_I2C_READ(sc, RK_I2C_RXDATA_BASE + i); buf8 = (buf32 >> ((i % 4) * 8)) & 0xFF; sc->msg->buf[sc->cnt++] = buf8; } } static void rk_i2c_send_stop(struct rk_i2c_softc *sc) { uint32_t reg; if (!(sc->msg->flags & IIC_M_NOSTOP)) { RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_STOPIEN); sc->state = STATE_STOP; reg = RK_I2C_READ(sc, RK_I2C_CON); reg |= RK_I2C_CON_STOP; RK_I2C_WRITE(sc, RK_I2C_CON, reg); } else { /* * Do not actually set stop bit, set up conditions to * emulate repeated start by clearing all state. 
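 * No STOP is driven on the wire, so the bus stays busy and the START of
 * the next message is seen by the target as a repeated start.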
*/ sc->state = STATE_IDLE; sc->transfer_done = 1; reg = RK_I2C_READ(sc, RK_I2C_CON); reg &= ~RK_I2C_CON_CTRL_MASK; RK_I2C_WRITE(sc, RK_I2C_CON, reg); } } static void rk_i2c_intr_locked(struct rk_i2c_softc *sc) { uint32_t reg; int transfer_len; sc->ipd = RK_I2C_READ(sc, RK_I2C_IPD); /* Something to handle? */ if ((sc->ipd & RK_I2C_IPD_ALL) == 0) return; RK_I2C_WRITE(sc, RK_I2C_IPD, sc->ipd); sc->ipd &= RK_I2C_IPD_ALL; if (sc->ipd & RK_I2C_IPD_NAKRCVIPD) { /* NACK received */ sc->ipd &= ~RK_I2C_IPD_NAKRCVIPD; sc->nak_recv = true; /* XXXX last byte !!!, signal error !!! */ sc->transfer_done = true; sc->state = STATE_IDLE; goto err; } switch (sc->state) { case STATE_START: /* Disable start bit */ reg = RK_I2C_READ(sc, RK_I2C_CON); reg &= ~RK_I2C_CON_START; RK_I2C_WRITE(sc, RK_I2C_CON, reg); if (sc->mode == RK_I2C_CON_MODE_RRX || sc->mode == RK_I2C_CON_MODE_RX) { sc->state = STATE_READ; RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBRFIEN | RK_I2C_IEN_NAKRCVIEN); if ((sc->msg->len - sc->cnt) > 32) transfer_len = 32; else { transfer_len = sc->msg->len - sc->cnt; reg = RK_I2C_READ(sc, RK_I2C_CON); reg |= RK_I2C_CON_LASTACK; RK_I2C_WRITE(sc, RK_I2C_CON, reg); } RK_I2C_WRITE(sc, RK_I2C_MRXCNT, transfer_len); } else { sc->state = STATE_WRITE; RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBTFIEN | RK_I2C_IEN_NAKRCVIEN); transfer_len = rk_i2c_fill_tx(sc); RK_I2C_WRITE(sc, RK_I2C_MTXCNT, transfer_len); } break; case STATE_READ: rk_i2c_drain_rx(sc); if (sc->cnt == sc->msg->len) { rk_i2c_send_stop(sc); } else { sc->mode = RK_I2C_CON_MODE_RX; reg = RK_I2C_READ(sc, RK_I2C_CON) & \ ~RK_I2C_CON_CTRL_MASK; reg |= sc->mode << RK_I2C_CON_MODE_SHIFT; reg |= RK_I2C_CON_EN; if ((sc->msg->len - sc->cnt) > 32) transfer_len = 32; else { transfer_len = sc->msg->len - sc->cnt; reg |= RK_I2C_CON_LASTACK; } RK_I2C_WRITE(sc, RK_I2C_CON, reg); RK_I2C_WRITE(sc, RK_I2C_MRXCNT, transfer_len); } break; case STATE_WRITE: if (sc->cnt < sc->msg->len) { /* Keep writing. 
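 * Refill the TX FIFO (up to 32 more bytes) via rk_i2c_fill_tx() and
 * re-arm the transmit-finished and NAK interrupts.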
*/ RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBTFIEN | RK_I2C_IEN_NAKRCVIEN); transfer_len = rk_i2c_fill_tx(sc); RK_I2C_WRITE(sc, RK_I2C_MTXCNT, transfer_len); } else { rk_i2c_send_stop(sc); } break; case STATE_STOP: /* Disable stop bit */ reg = RK_I2C_READ(sc, RK_I2C_CON); reg &= ~RK_I2C_CON_STOP; RK_I2C_WRITE(sc, RK_I2C_CON, reg); sc->transfer_done = 1; sc->state = STATE_IDLE; break; case STATE_IDLE: break; } err: wakeup(sc); } static void rk_i2c_intr(void *arg) { struct rk_i2c_softc *sc; sc = (struct rk_i2c_softc *)arg; RK_I2C_LOCK(sc); rk_i2c_intr_locked(sc); RK_I2C_UNLOCK(sc); } static void rk_i2c_start_xfer(struct rk_i2c_softc *sc, struct iic_msg *msg, boolean_t last) { uint32_t reg; uint8_t len; sc->transfer_done = false; sc->nak_recv = false; sc->tx_slave_addr = false; sc->cnt = 0; sc->state = STATE_IDLE; sc->msg = msg; reg = RK_I2C_READ(sc, RK_I2C_CON) & ~RK_I2C_CON_CTRL_MASK; if (!(sc->msg->flags & IIC_M_NOSTART)) { /* Stadard message */ if (sc->mode == RK_I2C_CON_MODE_TX) { sc->tx_slave_addr = true; } sc->state = STATE_START; reg |= RK_I2C_CON_START; RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_STARTIEN); } else { /* Continuation message */ if (sc->mode == RK_I2C_CON_MODE_RX) { sc->state = STATE_READ; if (last) reg |= RK_I2C_CON_LASTACK; RK_I2C_WRITE(sc, RK_I2C_MRXCNT, sc->msg->len); RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBRFIEN | RK_I2C_IEN_NAKRCVIEN); } else { sc->state = STATE_WRITE; len = rk_i2c_fill_tx(sc); RK_I2C_WRITE(sc, RK_I2C_MTXCNT, len); RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_MBTFIEN | RK_I2C_IEN_NAKRCVIEN); } } reg |= RK_I2C_CON_NAKSTOP; reg |= sc->mode << RK_I2C_CON_MODE_SHIFT; reg |= RK_I2C_CON_EN; RK_I2C_WRITE(sc, RK_I2C_CON, reg); } static int rk_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { struct rk_i2c_softc *sc; uint32_t reg; bool last_msg; int i, j, timeout, err; sc = device_get_softc(dev); RK_I2C_LOCK(sc); while (sc->busy) mtx_sleep(sc, &sc->mtx, 0, "i2cbuswait", 0); sc->busy = 1; /* Disable the module and interrupts */ RK_I2C_WRITE(sc, RK_I2C_CON, 0); RK_I2C_WRITE(sc, RK_I2C_IEN, 0); /* Clean stale interrupts */ RK_I2C_WRITE(sc, RK_I2C_IPD, RK_I2C_IPD_ALL); err = 0; for (i = 0; i < nmsgs; i++) { /* Validate parameters. */ if (msgs == NULL || msgs[i].buf == NULL || msgs[i].len == 0) { err = IIC_ENOTSUPP; break; } /* * If next message have NOSTART flag, then they both * should be same type (read/write) and same address. */ if (i < nmsgs - 1) { if ((msgs[i + 1].flags & IIC_M_NOSTART) && ((msgs[i].flags & IIC_M_RD) != (msgs[i + 1].flags & IIC_M_RD) || (msgs[i].slave != msgs[i + 1].slave))) { err = IIC_ENOTSUPP; break; } } /* * Detect simple register read case. * The first message should be IIC_M_WR | IIC_M_NOSTOP, * next pure IIC_M_RD (no other flags allowed). Both * messages should have same slave address. 
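 * In that case the write payload (at most three bytes) is loaded into
 * MRXRADDR and the controller performs the register read itself in RRX
 * mode, so the write message is consumed here as well (note the i++).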
*/ if (nmsgs - i >= 2 && msgs[i].len < 4 && msgs[i].flags == (IIC_M_WR | IIC_M_NOSTOP) && (msgs[i + 1].flags & IIC_M_RD) == IIC_M_RD && (msgs[i].slave & ~LSB) == (msgs[i + 1].slave & ~LSB)) { sc->mode = RK_I2C_CON_MODE_RRX; /* Write slave address */ reg = msgs[i].slave & ~LSB; reg |= RK_I2C_MRXADDR_VALID(0); RK_I2C_WRITE(sc, RK_I2C_MRXADDR, reg); /* Write slave register address */ reg = 0; for (j = 0; j < msgs[i].len ; j++) { reg |= (uint32_t)msgs[i].buf[j] << (j * 8); reg |= RK_I2C_MRXADDR_VALID(j); } RK_I2C_WRITE(sc, RK_I2C_MRXRADDR, reg); i++; } else { if (msgs[i].flags & IIC_M_RD) { if (msgs[i].flags & IIC_M_NOSTART) { sc->mode = RK_I2C_CON_MODE_RX; } else { sc->mode = RK_I2C_CON_MODE_RRX; reg = msgs[i].slave & ~LSB; reg |= RK_I2C_MRXADDR_VALID(0); RK_I2C_WRITE(sc, RK_I2C_MRXADDR, reg); RK_I2C_WRITE(sc, RK_I2C_MRXRADDR, 0); } } else { sc->mode = RK_I2C_CON_MODE_TX; } } /* last message ? */ last_msg = (i >= nmsgs - 1) || !(msgs[i + 1].flags & IIC_M_NOSTART); rk_i2c_start_xfer(sc, msgs + i, last_msg); if (cold) { for(timeout = 10000; timeout > 0; timeout--) { rk_i2c_intr_locked(sc); if (sc->transfer_done) break; DELAY(1000); } if (timeout <= 0) err = IIC_ETIMEOUT; } else { while (err == 0 && !sc->transfer_done) { err = msleep(sc, &sc->mtx, PZERO, "rk_i2c", 10 * hz); } } } /* Disable the module and interrupts */ RK_I2C_WRITE(sc, RK_I2C_CON, 0); RK_I2C_WRITE(sc, RK_I2C_IEN, 0); sc->busy = 0; if (sc->nak_recv) err = IIC_ENOACK; RK_I2C_UNLOCK(sc); return (err); } static int rk_i2c_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "RockChip I2C"); return (BUS_PROBE_DEFAULT); } static int rk_i2c_attach(device_t dev) { struct rk_i2c_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->mtx, device_get_nameunit(dev), "rk_i2c", MTX_DEF); if (bus_alloc_resources(dev, rk_i2c_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, rk_i2c_intr, sc, &sc->intrhand)) { bus_release_resources(dev, rk_i2c_spec, sc->res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } clk_set_assigned(dev, ofw_bus_get_node(dev)); /* Activate the module clocks. */ error = clk_get_by_ofw_name(dev, 0, "i2c", &sc->sclk); if (error != 0) { device_printf(dev, "cannot get i2c clock\n"); goto fail; } error = clk_enable(sc->sclk); if (error != 0) { device_printf(dev, "cannot enable i2c clock\n"); goto fail; } /* pclk clock is optional. 
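 * A missing clock (ENOENT) is tolerated; any other lookup failure aborts
 * the attach.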
*/ error = clk_get_by_ofw_name(dev, 0, "pclk", &sc->pclk); if (error != 0 && error != ENOENT) { device_printf(dev, "cannot get pclk clock\n"); goto fail; } if (sc->pclk != NULL) { error = clk_enable(sc->pclk); if (error != 0) { device_printf(dev, "cannot enable pclk clock\n"); goto fail; } } sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY); if (sc->iicbus == NULL) { device_printf(dev, "cannot add iicbus child device\n"); error = ENXIO; goto fail; } bus_attach_children(dev); return (0); fail: if (rk_i2c_detach(dev) != 0) device_printf(dev, "Failed to detach\n"); return (error); } static int rk_i2c_detach(device_t dev) { struct rk_i2c_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev)) != 0) return (error); - if (sc->iicbus != NULL) - if ((error = device_delete_child(dev, sc->iicbus)) != 0) - return (error); - if (sc->sclk != NULL) clk_release(sc->sclk); if (sc->pclk != NULL) clk_release(sc->pclk); if (sc->intrhand != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand); bus_release_resources(dev, rk_i2c_spec, sc->res); mtx_destroy(&sc->mtx); return (0); } static phandle_t rk_i2c_get_node(device_t bus, device_t dev) { return ofw_bus_get_node(bus); } static device_method_t rk_i2c_methods[] = { DEVMETHOD(device_probe, rk_i2c_probe), DEVMETHOD(device_attach, rk_i2c_attach), DEVMETHOD(device_detach, rk_i2c_detach), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, rk_i2c_get_node), DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_reset, rk_i2c_reset), DEVMETHOD(iicbus_transfer, rk_i2c_transfer), DEVMETHOD_END }; static driver_t rk_i2c_driver = { "rk_i2c", rk_i2c_methods, sizeof(struct rk_i2c_softc), }; EARLY_DRIVER_MODULE(rk_i2c, simplebus, rk_i2c_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); EARLY_DRIVER_MODULE(ofw_iicbus, rk_i2c, ofw_iicbus_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); MODULE_DEPEND(rk_i2c, iicbus, 1, 1, 1); MODULE_VERSION(rk_i2c, 1); diff --git a/sys/dev/iicbus/controller/twsi/twsi.c b/sys/dev/iicbus/controller/twsi/twsi.c index 20d39aa7d93b..b5aefbae7d7e 100644 --- a/sys/dev/iicbus/controller/twsi/twsi.c +++ b/sys/dev/iicbus/controller/twsi/twsi.c @@ -1,879 +1,875 @@ /*- * Copyright (C) 2008 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver for the TWSI (aka I2C, aka IIC) bus controller found on Marvell * and Allwinner SoCs. Supports master operation only. * * Calls to DELAY() are needed per Application Note AN-179 "TWSI Software * Guidelines for Discovery(TM), Horizon (TM) and Feroceon(TM) Devices". */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #define TWSI_CONTROL_ACK (1 << 2) #define TWSI_CONTROL_IFLG (1 << 3) #define TWSI_CONTROL_STOP (1 << 4) #define TWSI_CONTROL_START (1 << 5) #define TWSI_CONTROL_TWSIEN (1 << 6) #define TWSI_CONTROL_INTEN (1 << 7) #define TWSI_STATUS_BUS_ERROR 0x00 #define TWSI_STATUS_START 0x08 #define TWSI_STATUS_RPTD_START 0x10 #define TWSI_STATUS_ADDR_W_ACK 0x18 #define TWSI_STATUS_ADDR_W_NACK 0x20 #define TWSI_STATUS_DATA_WR_ACK 0x28 #define TWSI_STATUS_DATA_WR_NACK 0x30 #define TWSI_STATUS_ARBITRATION_LOST 0x38 #define TWSI_STATUS_ADDR_R_ACK 0x40 #define TWSI_STATUS_ADDR_R_NACK 0x48 #define TWSI_STATUS_DATA_RD_ACK 0x50 #define TWSI_STATUS_DATA_RD_NOACK 0x58 #define TWSI_STATUS_IDLE 0xf8 #define TWSI_DEBUG #undef TWSI_DEBUG #define debugf(sc, fmt, args...) if ((sc)->debug) \ device_printf((sc)->dev, "%s: " fmt, __func__, ##args) static struct resource_spec res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE}, { -1, 0 } }; static __inline uint32_t TWSI_READ(struct twsi_softc *sc, bus_size_t off) { uint32_t val; val = bus_read_4(sc->res[0], off); if (sc->debug > 1) debugf(sc, "read %x from %lx\n", val, off); return (val); } static __inline void TWSI_WRITE(struct twsi_softc *sc, bus_size_t off, uint32_t val) { if (sc->debug > 1) debugf(sc, "Writing %x to %lx\n", val, off); bus_write_4(sc->res[0], off, val); } static __inline void twsi_control_clear(struct twsi_softc *sc, uint32_t mask) { uint32_t val; val = TWSI_READ(sc, sc->reg_control); debugf(sc, "read val=%x\n", val); val &= ~(TWSI_CONTROL_STOP | TWSI_CONTROL_START); val &= ~mask; debugf(sc, "write val=%x\n", val); TWSI_WRITE(sc, sc->reg_control, val); } static __inline void twsi_control_set(struct twsi_softc *sc, uint32_t mask) { uint32_t val; val = TWSI_READ(sc, sc->reg_control); debugf(sc, "read val=%x\n", val); val &= ~(TWSI_CONTROL_STOP | TWSI_CONTROL_START); val |= mask; debugf(sc, "write val=%x\n", val); TWSI_WRITE(sc, sc->reg_control, val); } static __inline void twsi_clear_iflg(struct twsi_softc *sc) { DELAY(1000); /* There are two ways of clearing IFLAG. 
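 * When sc->iflag_w1c is set (newer Allwinner parts) the flag is cleared by
 * writing a one to it; otherwise it is cleared by writing the bit back as zero.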
*/ if (sc->iflag_w1c) twsi_control_set(sc, TWSI_CONTROL_IFLG); else twsi_control_clear(sc, TWSI_CONTROL_IFLG); DELAY(1000); } /* * timeout given in us * returns * 0 on successful mask change * non-zero on timeout */ static int twsi_poll_ctrl(struct twsi_softc *sc, int timeout, uint32_t mask) { timeout /= 10; debugf(sc, "Waiting for ctrl reg to match mask %x\n", mask); while (!(TWSI_READ(sc, sc->reg_control) & mask)) { DELAY(10); if (--timeout < 0) return (timeout); } debugf(sc, "done\n"); return (0); } /* * 'timeout' is given in us. Note also that timeout handling is not exact -- * twsi_locked_start() total wait can be more than 2 x timeout * (twsi_poll_ctrl() is called twice). 'mask' can be either TWSI_STATUS_START * or TWSI_STATUS_RPTD_START */ static int twsi_locked_start(device_t dev, struct twsi_softc *sc, int32_t mask, u_char slave, int timeout) { int read_access, iflg_set = 0; uint32_t status; mtx_assert(&sc->mutex, MA_OWNED); if (mask == TWSI_STATUS_RPTD_START) /* read IFLG to know if it should be cleared later; from NBSD */ iflg_set = TWSI_READ(sc, sc->reg_control) & TWSI_CONTROL_IFLG; debugf(sc, "send start\n"); twsi_control_set(sc, TWSI_CONTROL_START); if (mask == TWSI_STATUS_RPTD_START && iflg_set) { debugf(sc, "IFLG set, clearing (mask=%x)\n", mask); twsi_clear_iflg(sc); } /* * Without this delay we timeout checking IFLG if the timeout is 0. * NBSD driver always waits here too. */ DELAY(1000); if (twsi_poll_ctrl(sc, timeout, TWSI_CONTROL_IFLG)) { debugf(sc, "timeout sending %sSTART condition\n", mask == TWSI_STATUS_START ? "" : "repeated "); return (IIC_ETIMEOUT); } status = TWSI_READ(sc, sc->reg_status); debugf(sc, "status=%x\n", status); if (status != mask) { debugf(sc, "wrong status (%02x) after sending %sSTART condition\n", status, mask == TWSI_STATUS_START ? "" : "repeated "); return (IIC_ESTATUS); } TWSI_WRITE(sc, sc->reg_data, slave); twsi_clear_iflg(sc); DELAY(1000); if (twsi_poll_ctrl(sc, timeout, TWSI_CONTROL_IFLG)) { debugf(sc, "timeout sending slave address (timeout=%d)\n", timeout); return (IIC_ETIMEOUT); } read_access = (slave & 0x1) ? 1 : 0; status = TWSI_READ(sc, sc->reg_status); if (status != (read_access ? TWSI_STATUS_ADDR_R_ACK : TWSI_STATUS_ADDR_W_ACK)) { debugf(sc, "no ACK (status: %02x) after sending slave address\n", status); return (IIC_ENOACK); } return (IIC_NOERR); } #define TWSI_BAUD_RATE_RAW(C,M,N) ((C)/((10*(M+1))<<(N))) #define ABSSUB(a,b) (((a) > (b)) ? (a) - (b) : (b) - (a)) static int twsi_calc_baud_rate(struct twsi_softc *sc, const u_int target, int *param) { uint64_t clk; uint32_t cur, diff, diff0; int m, n, m0, n0; /* Calculate baud rate. 
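 * The raw output rate is clk / ((10 * (m + 1)) << n); every m/n pair is
 * tried and the one closest to the requested rate is kept.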
*/ diff0 = 0xffffffff; if (clk_get_freq(sc->clk_core, &clk) < 0) return (-1); debugf(sc, "Bus clock is at %ju\n", clk); for (n = 0; n < 8; n++) { for (m = 0; m < 16; m++) { cur = TWSI_BAUD_RATE_RAW(clk,m,n); diff = ABSSUB(target, cur); if (diff < diff0) { m0 = m; n0 = n; diff0 = diff; } } } *param = TWSI_BAUD_RATE_PARAM(m0, n0); return (0); } /* * Only slave mode supported, disregard [old]addr */ static int twsi_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { struct twsi_softc *sc; uint32_t param; u_int busfreq; sc = device_get_softc(dev); busfreq = IICBUS_GET_FREQUENCY(sc->iicbus, speed); if (twsi_calc_baud_rate(sc, busfreq, ¶m) == -1) { switch (speed) { case IIC_SLOW: case IIC_FAST: param = sc->baud_rate[speed].param; debugf(sc, "Using IIC_FAST mode with speed param=%x\n", param); break; case IIC_FASTEST: case IIC_UNKNOWN: default: param = sc->baud_rate[IIC_FAST].param; debugf(sc, "Using IIC_FASTEST/UNKNOWN mode with speed param=%x\n", param); break; } } debugf(sc, "Using clock param=%x\n", param); mtx_lock(&sc->mutex); TWSI_WRITE(sc, sc->reg_soft_reset, 0x1); TWSI_WRITE(sc, sc->reg_baud_rate, param); TWSI_WRITE(sc, sc->reg_control, TWSI_CONTROL_TWSIEN); DELAY(1000); mtx_unlock(&sc->mutex); return (0); } static int twsi_stop(device_t dev) { struct twsi_softc *sc; sc = device_get_softc(dev); debugf(sc, "%s\n", __func__); mtx_lock(&sc->mutex); twsi_control_clear(sc, TWSI_CONTROL_ACK); twsi_control_set(sc, TWSI_CONTROL_STOP); twsi_clear_iflg(sc); DELAY(1000); mtx_unlock(&sc->mutex); return (IIC_NOERR); } /* * timeout is given in us */ static int twsi_repeated_start(device_t dev, u_char slave, int timeout) { struct twsi_softc *sc; int rv; sc = device_get_softc(dev); debugf(sc, "%s: slave=%x\n", __func__, slave); mtx_lock(&sc->mutex); rv = twsi_locked_start(dev, sc, TWSI_STATUS_RPTD_START, slave, timeout); mtx_unlock(&sc->mutex); if (rv) { twsi_stop(dev); return (rv); } else return (IIC_NOERR); } /* * timeout is given in us */ static int twsi_start(device_t dev, u_char slave, int timeout) { struct twsi_softc *sc; int rv; sc = device_get_softc(dev); debugf(sc, "%s: slave=%x\n", __func__, slave); mtx_lock(&sc->mutex); rv = twsi_locked_start(dev, sc, TWSI_STATUS_START, slave, timeout); mtx_unlock(&sc->mutex); if (rv) { twsi_stop(dev); return (rv); } else return (IIC_NOERR); } static int twsi_read(device_t dev, char *buf, int len, int *read, int last, int delay) { struct twsi_softc *sc; uint32_t status; int last_byte, rv; sc = device_get_softc(dev); mtx_lock(&sc->mutex); *read = 0; while (*read < len) { /* * Check if we are reading last byte of the last buffer, * do not send ACK then, per I2C specs */ last_byte = ((*read == len - 1) && last) ? 1 : 0; if (last_byte) twsi_control_clear(sc, TWSI_CONTROL_ACK); else twsi_control_set(sc, TWSI_CONTROL_ACK); twsi_clear_iflg(sc); DELAY(1000); if (twsi_poll_ctrl(sc, delay, TWSI_CONTROL_IFLG)) { debugf(sc, "timeout reading data (delay=%d)\n", delay); rv = IIC_ETIMEOUT; goto out; } status = TWSI_READ(sc, sc->reg_status); if (status != (last_byte ? 
TWSI_STATUS_DATA_RD_NOACK : TWSI_STATUS_DATA_RD_ACK)) { debugf(sc, "wrong status (%02x) while reading\n", status); rv = IIC_ESTATUS; goto out; } *buf++ = TWSI_READ(sc, sc->reg_data); (*read)++; } rv = IIC_NOERR; out: mtx_unlock(&sc->mutex); return (rv); } static int twsi_write(device_t dev, const char *buf, int len, int *sent, int timeout) { struct twsi_softc *sc; uint32_t status; int rv; sc = device_get_softc(dev); mtx_lock(&sc->mutex); *sent = 0; while (*sent < len) { TWSI_WRITE(sc, sc->reg_data, *buf++); twsi_clear_iflg(sc); DELAY(1000); if (twsi_poll_ctrl(sc, timeout, TWSI_CONTROL_IFLG)) { debugf(sc, "timeout writing data (timeout=%d)\n", timeout); rv = IIC_ETIMEOUT; goto out; } status = TWSI_READ(sc, sc->reg_status); if (status != TWSI_STATUS_DATA_WR_ACK) { debugf(sc, "wrong status (%02x) while writing\n", status); rv = IIC_ESTATUS; goto out; } (*sent)++; } rv = IIC_NOERR; out: mtx_unlock(&sc->mutex); return (rv); } static void twsi_error(struct twsi_softc *sc, int err) { /* * Must send stop condition to abort the current transfer. */ debugf(sc, "Sending STOP condition for error %d\n", err); sc->transfer = 0; sc->error = err; sc->control_val = 0; TWSI_WRITE(sc, sc->reg_control, sc->control_val | TWSI_CONTROL_STOP); } static int twsi_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { struct twsi_softc *sc; uint32_t status; int error; sc = device_get_softc(dev); if (!sc->have_intr) return (iicbus_transfer_gen(dev, msgs, nmsgs)); mtx_lock(&sc->mutex); KASSERT(sc->transfer == 0, ("starting a transfer while another is active")); debugf(sc, "transmitting %d messages\n", nmsgs); status = TWSI_READ(sc, sc->reg_status); debugf(sc, "status=0x%x\n", status); if (status != TWSI_STATUS_IDLE) { debugf(sc, "Bad status at start of transfer\n"); twsi_error(sc, IIC_ESTATUS); goto end; } sc->nmsgs = nmsgs; sc->msgs = msgs; sc->msg_idx = 0; sc->transfer = 1; sc->error = 0; #ifdef TWSI_DEBUG for (int i = 0; i < nmsgs; i++) debugf(sc, "msg %d is %d bytes long\n", i, msgs[i].len); #endif /* Send start and re-enable interrupts */ sc->control_val = TWSI_CONTROL_TWSIEN | TWSI_CONTROL_INTEN; TWSI_WRITE(sc, sc->reg_control, sc->control_val | TWSI_CONTROL_START); msleep_sbt(sc, &sc->mutex, 0, "twsi", 3000 * SBT_1MS, SBT_1MS, 0); debugf(sc, "pause finish\n"); if (sc->error == 0 && sc->transfer != 0) { device_printf(sc->dev, "transfer timeout\n"); sc->error = IIC_ETIMEOUT; sc->transfer = 0; } if (sc->error != 0) debugf(sc, "Error: %d\n", sc->error); end: /* Disable module and interrupts */ debugf(sc, "status=0x%x\n", TWSI_READ(sc, sc->reg_status)); TWSI_WRITE(sc, sc->reg_control, 0); debugf(sc, "status=0x%x\n", TWSI_READ(sc, sc->reg_status)); error = sc->error; mtx_unlock(&sc->mutex); return (error); } static void twsi_intr(void *arg) { struct twsi_softc *sc; uint32_t status; bool message_done; bool send_start; sc = arg; send_start = false; mtx_lock(&sc->mutex); debugf(sc, "Got interrupt, current msg=%u\n", sc->msg_idx); status = TWSI_READ(sc, sc->reg_status); debugf(sc, "reg control = 0x%x, status = 0x%x\n", TWSI_READ(sc, sc->reg_control), status); if (sc->transfer == 0) { device_printf(sc->dev, "interrupt without active transfer, " "status = 0x%x\n", status); TWSI_WRITE(sc, sc->reg_control, sc->control_val | TWSI_CONTROL_STOP); goto end; } restart: message_done = false; switch (status) { case TWSI_STATUS_START: case TWSI_STATUS_RPTD_START: /* Transmit the address */ debugf(sc, "Send address 0x%x\n", sc->msgs[sc->msg_idx].slave); if (sc->msgs[sc->msg_idx].flags & IIC_M_RD) TWSI_WRITE(sc, sc->reg_data, 
sc->msgs[sc->msg_idx].slave | LSB); else TWSI_WRITE(sc, sc->reg_data, sc->msgs[sc->msg_idx].slave & ~LSB); break; case TWSI_STATUS_ADDR_W_ACK: debugf(sc, "Address ACK-ed (write)\n"); if (sc->msgs[sc->msg_idx].len > 0) { /* Directly send the first byte */ sc->sent_bytes = 1; debugf(sc, "Sending byte 0 (of %d) = %x\n", sc->msgs[sc->msg_idx].len, sc->msgs[sc->msg_idx].buf[0]); TWSI_WRITE(sc, sc->reg_data, sc->msgs[sc->msg_idx].buf[0]); } else { debugf(sc, "Zero-length write, sending STOP\n"); TWSI_WRITE(sc, sc->reg_control, sc->control_val | TWSI_CONTROL_STOP); } break; case TWSI_STATUS_ADDR_R_ACK: debugf(sc, "Address ACK-ed (read)\n"); sc->recv_bytes = 0; if (sc->msgs[sc->msg_idx].len == 0) { debugf(sc, "Zero-length read, sending STOP\n"); TWSI_WRITE(sc, sc->reg_control, sc->control_val | TWSI_CONTROL_STOP); } else if (sc->msgs[sc->msg_idx].len == 1) { sc->control_val &= ~TWSI_CONTROL_ACK; } else { sc->control_val |= TWSI_CONTROL_ACK; } break; case TWSI_STATUS_ADDR_W_NACK: case TWSI_STATUS_ADDR_R_NACK: debugf(sc, "Address NACK-ed\n"); twsi_error(sc, IIC_ENOACK); break; case TWSI_STATUS_DATA_WR_NACK: debugf(sc, "Data byte NACK-ed\n"); twsi_error(sc, IIC_ENOACK); break; case TWSI_STATUS_DATA_WR_ACK: KASSERT(sc->sent_bytes <= sc->msgs[sc->msg_idx].len, ("sent_bytes beyond message length")); debugf(sc, "ACK received after transmitting data\n"); if (sc->sent_bytes == sc->msgs[sc->msg_idx].len) { debugf(sc, "Done TX data\n"); /* Send stop, no interrupts on stop */ if (!(sc->msgs[sc->msg_idx].flags & IIC_M_NOSTOP)) { TWSI_WRITE(sc, sc->reg_control, sc->control_val | TWSI_CONTROL_STOP); } else { debugf(sc, "NOSTOP flag\n"); } message_done = true; break; } debugf(sc, "Sending byte %d (of %d) = 0x%x\n", sc->sent_bytes, sc->msgs[sc->msg_idx].len, sc->msgs[sc->msg_idx].buf[sc->sent_bytes]); TWSI_WRITE(sc, sc->reg_data, sc->msgs[sc->msg_idx].buf[sc->sent_bytes]); sc->sent_bytes++; break; case TWSI_STATUS_DATA_RD_ACK: debugf(sc, "Received and ACK-ed data\n"); KASSERT(sc->recv_bytes < sc->msgs[sc->msg_idx].len, ("receiving beyond the end of buffer")); sc->msgs[sc->msg_idx].buf[sc->recv_bytes] = TWSI_READ(sc, sc->reg_data); debugf(sc, "Received byte %d (of %d) = 0x%x\n", sc->recv_bytes, sc->msgs[sc->msg_idx].len, sc->msgs[sc->msg_idx].buf[sc->recv_bytes]); sc->recv_bytes++; /* If we only have one byte left, disable ACK */ if (sc->msgs[sc->msg_idx].len - sc->recv_bytes == 1) { sc->control_val &= ~TWSI_CONTROL_ACK; } else if (sc->msgs[sc->msg_idx].len == sc->recv_bytes) { /* * We should not have ACK-ed the last byte. * The protocol state machine is in invalid state. */ debugf(sc, "RX all but asked for more?\n"); twsi_error(sc, IIC_ESTATUS); } break; case TWSI_STATUS_DATA_RD_NOACK: debugf(sc, "Received and NACK-ed data\n"); KASSERT(sc->recv_bytes == sc->msgs[sc->msg_idx].len - 1, ("sent NACK before receiving all requested data")); sc->msgs[sc->msg_idx].buf[sc->recv_bytes] = TWSI_READ(sc, sc->reg_data); debugf(sc, "Received byte %d (of %d) = 0x%x\n", sc->recv_bytes, sc->msgs[sc->msg_idx].len, sc->msgs[sc->msg_idx].buf[sc->recv_bytes]); sc->recv_bytes++; if (sc->msgs[sc->msg_idx].len == sc->recv_bytes) { debugf(sc, "Done RX data\n"); if (!(sc->msgs[sc->msg_idx].flags & IIC_M_NOSTOP)) { debugf(sc, "Send STOP\n"); TWSI_WRITE(sc, sc->reg_control, sc->control_val | TWSI_CONTROL_STOP); } message_done = true; } else { /* * We should not have NACK-ed yet. * The protocol state machine is in invalid state. 
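 * (The ACK bit is only dropped once exactly one byte remains, so a NACK
 * status with more bytes outstanding means controller and driver disagree.)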
*/ debugf(sc, "NACK-ed before receving all bytes?\n"); twsi_error(sc, IIC_ESTATUS); } break; case TWSI_STATUS_BUS_ERROR: debugf(sc, "Bus error\n"); twsi_error(sc, IIC_EBUSERR); break; case TWSI_STATUS_ARBITRATION_LOST: debugf(sc, "Arbitration lost\n"); twsi_error(sc, IIC_EBUSBSY); break; default: debugf(sc, "unexpected status 0x%x\n", status); twsi_error(sc, IIC_ESTATUS); break; } if (message_done) { sc->msg_idx++; if (sc->msg_idx == sc->nmsgs) { debugf(sc, "All messages transmitted\n"); sc->transfer = 0; sc->error = 0; } else if ((sc->msgs[sc->msg_idx].flags & IIC_M_NOSTART) == 0) { debugf(sc, "Send (repeated) start\n"); send_start = true; } else { /* Just keep transmitting data. */ KASSERT((sc->msgs[sc->msg_idx - 1].flags & IIC_M_NOSTOP) != 0, ("NOSTART message after STOP")); KASSERT((sc->msgs[sc->msg_idx].flags & IIC_M_RD) == (sc->msgs[sc->msg_idx - 1].flags & IIC_M_RD), ("change of transfer direction without a START")); debugf(sc, "NOSTART message after NOSTOP\n"); sc->sent_bytes = 0; sc->recv_bytes = 0; if ((sc->msgs[sc->msg_idx].flags & IIC_M_RD) == 0) { status = TWSI_STATUS_ADDR_W_ACK; goto restart; } else { debugf(sc, "Read+NOSTART unsupported\n"); twsi_error(sc, IIC_ESTATUS); } } } end: /* * Newer Allwinner chips clear IFLG after writing 1 to it. */ debugf(sc, "Refresh reg_control\n"); TWSI_WRITE(sc, sc->reg_control, sc->control_val | (sc->iflag_w1c ? TWSI_CONTROL_IFLG : 0) | (send_start ? TWSI_CONTROL_START : 0)); debugf(sc, "Done with interrupt, transfer = %d\n", sc->transfer); if (sc->transfer == 0) wakeup(sc); mtx_unlock(&sc->mutex); } static void twsi_intr_start(void *pdev) { struct twsi_softc *sc; sc = device_get_softc(pdev); if ((bus_setup_intr(pdev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, twsi_intr, sc, &sc->intrhand))) device_printf(pdev, "unable to register interrupt handler\n"); sc->have_intr = true; } int twsi_attach(device_t dev) { struct twsi_softc *sc; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree_node; struct sysctl_oid_list *tree; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->mutex, device_get_nameunit(dev), "twsi", MTX_DEF); if (bus_alloc_resources(dev, res_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); twsi_detach(dev); return (ENXIO); } #ifdef TWSI_DEBUG sc->debug = 1; #endif ctx = device_get_sysctl_ctx(dev); tree_node = device_get_sysctl_tree(dev); tree = SYSCTL_CHILDREN(tree_node); SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->debug, 0, "Set debug level (zero to disable)"); /* Attach the iicbus. 
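 * The child bus is attached right away; the interrupt handler is installed
 * later from a config_intrhook, and until twsi_intr_start() sets
 * sc->have_intr, twsi_transfer() falls back to the polled
 * iicbus_transfer_gen() path.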
*/ if ((sc->iicbus = device_add_child(dev, "iicbus", -1)) == NULL) { device_printf(dev, "could not allocate iicbus instance\n"); twsi_detach(dev); return (ENXIO); } bus_attach_children(dev); config_intrhook_oneshot(twsi_intr_start, dev); return (0); } int twsi_detach(device_t dev) { struct twsi_softc *sc; int rv; sc = device_get_softc(dev); if ((rv = bus_generic_detach(dev)) != 0) return (rv); - if (sc->iicbus != NULL) - if ((rv = device_delete_child(dev, sc->iicbus)) != 0) - return (rv); - if (sc->intrhand != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand); bus_release_resources(dev, res_spec, sc->res); mtx_destroy(&sc->mutex); return (0); } static device_method_t twsi_methods[] = { /* device interface */ DEVMETHOD(device_detach, twsi_detach), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_repeated_start, twsi_repeated_start), DEVMETHOD(iicbus_start, twsi_start), DEVMETHOD(iicbus_stop, twsi_stop), DEVMETHOD(iicbus_write, twsi_write), DEVMETHOD(iicbus_read, twsi_read), DEVMETHOD(iicbus_reset, twsi_reset), DEVMETHOD(iicbus_transfer, twsi_transfer), { 0, 0 } }; DEFINE_CLASS_0(twsi, twsi_driver, twsi_methods, sizeof(struct twsi_softc)); diff --git a/sys/dev/iicbus/controller/vybrid/vf_i2c.c b/sys/dev/iicbus/controller/vybrid/vf_i2c.c index f6d0eb4c2c59..4735a95cf5cd 100644 --- a/sys/dev/iicbus/controller/vybrid/vf_i2c.c +++ b/sys/dev/iicbus/controller/vybrid/vf_i2c.c @@ -1,583 +1,577 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2014 Ruslan Bukin * Copyright (c) 2024 Pierre-Luc Drouin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Vybrid Family Inter-Integrated Circuit (I2C) * Originally based on Chapter 48, Vybrid Reference Manual, Rev. 
5, 07/2013 * Currently based on Chapter 21, LX2160A Reference Manual, Rev. 1, 10/2021 * * The current implementation is based on the original driver by Ruslan Bukin, * later modified by Dawid Górecki, and split into FDT and ACPI drivers by Val * Packett. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #include #include #include #include #define I2C_IBAD 0x0 /* I2C Bus Address Register */ #define I2C_IBFD 0x1 /* I2C Bus Frequency Divider Register */ #define I2C_IBCR 0x2 /* I2C Bus Control Register */ #define IBCR_MDIS (1 << 7) /* Module disable. */ #define IBCR_IBIE (1 << 6) /* I-Bus Interrupt Enable. */ #define IBCR_MSSL (1 << 5) /* Master/Slave mode select. */ #define IBCR_TXRX (1 << 4) /* Transmit/Receive mode select. */ #define IBCR_NOACK (1 << 3) /* Data Acknowledge disable. */ #define IBCR_RSTA (1 << 2) /* Repeat Start. */ #define IBCR_DMAEN (1 << 1) /* DMA Enable. */ #define I2C_IBSR 0x3 /* I2C Bus Status Register */ #define IBSR_TCF (1 << 7) /* Transfer complete. */ #define IBSR_IAAS (1 << 6) /* Addressed as a slave. */ #define IBSR_IBB (1 << 5) /* Bus busy. */ #define IBSR_IBAL (1 << 4) /* Arbitration Lost. */ #define IBSR_SRW (1 << 2) /* Slave Read/Write. */ #define IBSR_IBIF (1 << 1) /* I-Bus Interrupt Flag. */ #define IBSR_RXAK (1 << 0) /* Received Acknowledge. */ #define I2C_IBDR 0x4 /* I2C Bus Data I/O Register */ #define I2C_IBIC 0x5 /* I2C Bus Interrupt Config Register */ #define IBIC_BIIE (1 << 7) /* Bus Idle Interrupt Enable bit. */ #define I2C_IBDBG 0x6 /* I2C Bus Debug Register */ #define DIV_REG_UNSET 0xFF #define READ1(_sc, _reg) bus_space_read_1(_sc->bst, _sc->bsh, _reg) #define WRITE1(_sc, _reg, _val) bus_space_write_1(_sc->bst,\ _sc->bsh, _reg, _val) #ifdef DEBUG #define vf_i2c_dbg(_sc, fmt, args...) \ device_printf((_sc)->dev, fmt, ##args) #ifdef DEBUG2 #undef WRITE1 #define WRITE1(_sc, _reg, _val) ({\ vf_i2c_dbg(_sc, "WRITE1 REG 0x%02X VAL 0x%02X\n",_reg,_val);\ bus_space_write_1(_sc->bst, _sc->bsh, _reg, _val);\ }) #undef READ1 #define READ1(_sc, _reg) ({\ uint32_t ret=bus_space_read_1(_sc->bst, _sc->bsh, _reg);\ vf_i2c_dbg(_sc, "READ1 REG 0x%02X RETURNS 0x%02X\n",_reg,ret);\ ret;\ }) #endif #else #define vf_i2c_dbg(_sc, fmt, args...) 
#endif static int i2c_repeated_start(device_t, u_char, int); static int i2c_start(device_t, u_char, int); static int i2c_stop(device_t); static int i2c_reset(device_t, u_char, u_char, u_char *); static int i2c_read(device_t, char *, int, int *, int, int); static int i2c_write(device_t, const char *, int, int *, int); struct i2c_div_type { uint32_t reg_val; uint32_t div; }; static struct resource_spec i2c_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static struct i2c_div_type vf610_div_table[] = { { 0x00, 20 }, { 0x01, 22 }, { 0x02, 24 }, { 0x03, 26 }, { 0x04, 28 }, { 0x05, 30 }, { 0x09, 32 }, { 0x06, 34 }, { 0x0A, 36 }, { 0x0B, 40 }, { 0x0C, 44 }, { 0x0D, 48 }, { 0x0E, 56 }, { 0x12, 64 }, { 0x13, 72 }, { 0x14, 80 }, { 0x15, 88 }, { 0x19, 96 }, { 0x16, 104 }, { 0x1A, 112 }, { 0x17, 128 }, { 0x1D, 160 }, { 0x1E, 192 }, { 0x22, 224 }, { 0x1F, 240 }, { 0x23, 256 }, { 0x24, 288 }, { 0x25, 320 }, { 0x26, 384 }, { 0x2A, 448 }, { 0x27, 480 }, { 0x2B, 512 }, { 0x2C, 576 }, { 0x2D, 640 }, { 0x2E, 768 }, { 0x32, 896 }, { 0x2F, 960 }, { 0x33, 1024 }, { 0x34, 1152 }, { 0x35, 1280 }, { 0x36, 1536 }, { 0x3A, 1792 }, { 0x37, 1920 }, { 0x3B, 2048 }, { 0x3C, 2304 }, { 0x3D, 2560 }, { 0x3E, 3072 }, { 0x3F, 3840 }, { 0x3F, 3840 }, { 0x7B, 4096 }, { 0x7D, 5120 }, { 0x7E, 6144 }, }; int vf_i2c_attach_common(device_t dev) { struct vf_i2c_softc *sc; int error; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c attach common\n"); mtx_init(&sc->mutex, device_get_nameunit(dev), "I2C", MTX_DEF); error = bus_alloc_resources(dev, i2c_spec, sc->res); if (error != 0) { mtx_destroy(&sc->mutex); device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Memory interface */ sc->bst = rman_get_bustag(sc->res[0]); sc->bsh = rman_get_bushandle(sc->res[0]); mtx_lock(&sc->mutex); WRITE1(sc, I2C_IBIC, IBIC_BIIE); if (sc->freq == 0) { uint8_t div_reg; div_reg = READ1(sc, I2C_IBFD); if (div_reg != 0x00) { sc->freq = UINT32_MAX; device_printf(dev, "Using existing bus frequency divider register value (0x%02X).\n", div_reg); } else { device_printf(dev, "Bus frequency divider value appears unset, defaulting to low I2C bus speed.\n"); } } mtx_unlock(&sc->mutex); sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY); if (sc->iicbus == NULL) { device_printf(dev, "could not add iicbus child"); mtx_destroy(&sc->mutex); bus_release_resources(dev, i2c_spec, sc->res); return (ENXIO); } bus_attach_children(dev); return (0); } static int i2c_detach(device_t dev) { struct vf_i2c_softc *sc; int error = 0; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c detach\n"); error = bus_generic_detach(dev); if (error != 0) { device_printf(dev, "cannot detach child devices.\n"); return (error); } - error = device_delete_child(dev, sc->iicbus); - if (error != 0) { - device_printf(dev, "could not delete iicbus child.\n"); - return (error); - } - mtx_lock(&sc->mutex); if (sc->freq == 0) { vf_i2c_dbg(sc, "Writing 0x00 to clock divider register\n"); WRITE1(sc, I2C_IBFD, 0x00); } bus_release_resources(dev, i2c_spec, sc->res); mtx_unlock(&sc->mutex); mtx_destroy(&sc->mutex); return (0); } /* Wait for free bus */ static int wait_for_nibb(struct vf_i2c_softc *sc) { int retry; retry = 1000; while (retry --) { if ((READ1(sc, I2C_IBSR) & IBSR_IBB) == 0) return (IIC_NOERR); DELAY(10); } return (IIC_ETIMEOUT); } /* Wait for transfer complete+interrupt flag */ static int wait_for_icf(struct vf_i2c_softc *sc) { int retry; uint8_t ibsr; vf_i2c_dbg(sc, "i2c wait for transfer complete + interrupt flag\n"); retry = 
1000; while (retry --) { ibsr = READ1(sc, I2C_IBSR); if (ibsr & IBSR_IBIF) { WRITE1(sc, I2C_IBSR, IBSR_IBIF); if (ibsr & IBSR_IBAL) { WRITE1(sc, I2C_IBSR, IBSR_IBAL); return (IIC_EBUSBSY); } if (ibsr & IBSR_TCF) return (IIC_NOERR); } DELAY(10); } return (IIC_ETIMEOUT); } /* Get ACK bit from last write */ static bool tx_acked(struct vf_i2c_softc *sc) { vf_i2c_dbg(sc, "i2c get ACK bit from last write\n"); return (READ1(sc, I2C_IBSR) & IBSR_RXAK) ? false : true; } static int i2c_repeated_start(device_t dev, u_char slave, int timeout) { struct vf_i2c_softc *sc; int error; int reg; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c repeated start\n"); mtx_lock(&sc->mutex); if ((READ1(sc, I2C_IBSR) & IBSR_IBB) == 0) { vf_i2c_dbg(sc, "cant i2c repeat start: bus is no longer busy\n"); mtx_unlock(&sc->mutex); return (IIC_EBUSERR); } reg = READ1(sc, I2C_IBCR); reg |= (IBCR_RSTA | IBCR_IBIE); WRITE1(sc, I2C_IBCR, reg); /* Write target address - LSB is R/W bit */ WRITE1(sc, I2C_IBDR, slave); error = wait_for_icf(sc); if (!tx_acked(sc)) { mtx_unlock(&sc->mutex); vf_i2c_dbg(sc, "cant i2c repeat start: missing ACK after slave address\n"); return (IIC_ENOACK); } mtx_unlock(&sc->mutex); if (error != 0) return (error); return (IIC_NOERR); } static int i2c_start(device_t dev, u_char slave, int timeout) { struct vf_i2c_softc *sc; int error; int reg; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c start\n"); mtx_lock(&sc->mutex); error = wait_for_nibb(sc); /* Reset controller if bus is still busy. */ if (error == IIC_ETIMEOUT) { WRITE1(sc, I2C_IBCR, IBCR_MDIS); DELAY(1000); WRITE1(sc, I2C_IBCR, IBCR_NOACK); error = wait_for_nibb(sc); } if (error != 0) { mtx_unlock(&sc->mutex); vf_i2c_dbg(sc, "cant i2c start: %i\n", error); return (error); } /* Set start condition */ reg = (IBCR_MSSL | IBCR_NOACK | IBCR_IBIE | IBCR_TXRX); WRITE1(sc, I2C_IBCR, reg); WRITE1(sc, I2C_IBSR, IBSR_IBIF); /* Write target address - LSB is R/W bit */ WRITE1(sc, I2C_IBDR, slave); error = wait_for_icf(sc); if (error != 0) { mtx_unlock(&sc->mutex); vf_i2c_dbg(sc, "cant i2c start: iif error\n"); return (error); } mtx_unlock(&sc->mutex); if (!tx_acked(sc)) { vf_i2c_dbg(sc, "cant i2c start: missing ACK after slave address\n"); return (IIC_ENOACK); } return (IIC_NOERR); } static int i2c_stop(device_t dev) { struct vf_i2c_softc *sc; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c stop\n"); mtx_lock(&sc->mutex); if ((READ1(sc, I2C_IBCR) & IBCR_MSSL) != 0) WRITE1(sc, I2C_IBCR, IBCR_NOACK | IBCR_IBIE); mtx_unlock(&sc->mutex); return (IIC_NOERR); } static uint8_t i2c_get_div_val(device_t dev) { struct vf_i2c_softc *sc; uint8_t div_reg = DIV_REG_UNSET; sc = device_get_softc(dev); if (sc->freq == UINT32_MAX) return div_reg; #ifndef FDT div_reg = vf610_div_table[nitems(vf610_div_table) - 1].reg_val; #else if (sc->hwtype == HW_MVF600) div_reg = MVF600_DIV_REG; else if (sc->freq == 0) div_reg = vf610_div_table[nitems(vf610_div_table) - 1].reg_val; else { uint64_t clk_freq; int error, i; error = clk_get_freq(sc->clock, &clk_freq); if (error != 0) { device_printf(dev, "Could not get parent clock frequency. 
" "Using default divider.\n"); div_reg = vf610_div_table[nitems(vf610_div_table) - 1].reg_val; } else { for (i = 0; i < nitems(vf610_div_table) - 1; i++) if ((clk_freq / vf610_div_table[i].div) <= sc->freq) break; div_reg = vf610_div_table[i].reg_val; } } #endif vf_i2c_dbg(sc, "Writing 0x%02X to clock divider register\n", div_reg); return div_reg; } static int i2c_reset(device_t dev, u_char speed, u_char addr, u_char *oldadr) { struct vf_i2c_softc *sc; uint8_t div_reg; sc = device_get_softc(dev); div_reg = i2c_get_div_val(dev); vf_i2c_dbg(sc, "i2c reset\n"); mtx_lock(&sc->mutex); WRITE1(sc, I2C_IBCR, IBCR_MDIS); if(div_reg != DIV_REG_UNSET) WRITE1(sc, I2C_IBFD, div_reg); WRITE1(sc, I2C_IBCR, 0x0); /* Enable i2c */ mtx_unlock(&sc->mutex); return (IIC_NOERR); } static int i2c_read(device_t dev, char *buf, int len, int *read, int last, int delay) { struct vf_i2c_softc *sc; int error; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c read\n"); *read = 0; mtx_lock(&sc->mutex); if (len) { if (len == 1) WRITE1(sc, I2C_IBCR, IBCR_IBIE | IBCR_MSSL | IBCR_NOACK); else WRITE1(sc, I2C_IBCR, IBCR_IBIE | IBCR_MSSL); /* dummy read */ READ1(sc, I2C_IBDR); while (*read < len) { error = wait_for_icf(sc); if (error != 0) { mtx_unlock(&sc->mutex); return (error); } if (last) { if (*read == len - 2) { /* NO ACK on last byte */ WRITE1(sc, I2C_IBCR, IBCR_IBIE | IBCR_MSSL | IBCR_NOACK); } else if (*read == len - 1) { /* Transfer done, remove master bit */ WRITE1(sc, I2C_IBCR, IBCR_IBIE | IBCR_NOACK); } } *buf++ = READ1(sc, I2C_IBDR); (*read)++; } } mtx_unlock(&sc->mutex); return (IIC_NOERR); } static int i2c_write(device_t dev, const char *buf, int len, int *sent, int timeout) { struct vf_i2c_softc *sc; int error; sc = device_get_softc(dev); vf_i2c_dbg(sc, "i2c write\n"); *sent = 0; mtx_lock(&sc->mutex); while (*sent < len) { WRITE1(sc, I2C_IBDR, *buf++); error = wait_for_icf(sc); if (error != 0) { mtx_unlock(&sc->mutex); return (error); } if (!tx_acked(sc) && (*sent = (len - 2)) ){ mtx_unlock(&sc->mutex); vf_i2c_dbg(sc, "no ACK on %d write\n", *sent); return (IIC_ENOACK); } (*sent)++; } mtx_unlock(&sc->mutex); return (IIC_NOERR); } static device_method_t i2c_methods[] = { /* Device interface */ DEVMETHOD(device_detach, i2c_detach), /* Device interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_repeated_start, i2c_repeated_start), DEVMETHOD(iicbus_start, i2c_start), DEVMETHOD(iicbus_stop, i2c_stop), DEVMETHOD(iicbus_reset, i2c_reset), DEVMETHOD(iicbus_read, i2c_read), DEVMETHOD(iicbus_write, i2c_write), DEVMETHOD(iicbus_transfer, iicbus_transfer_gen), DEVMETHOD_END }; driver_t vf_i2c_driver = { "i2c", i2c_methods, sizeof(struct vf_i2c_softc), }; diff --git a/sys/dev/ismt/ismt.c b/sys/dev/ismt/ismt.c index 650353c75e9f..4aea93d1f435 100644 --- a/sys/dev/ismt/ismt.c +++ b/sys/dev/ismt/ismt.c @@ -1,773 +1,771 @@ /*- * Copyright (C) 2014 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #define ISMT_DESC_ENTRIES 32 /* Hardware Descriptor Constants - Control Field */ #define ISMT_DESC_CWRL 0x01 /* Command/Write Length */ #define ISMT_DESC_BLK 0X04 /* Perform Block Transaction */ #define ISMT_DESC_FAIR 0x08 /* Set fairness flag upon successful arbit. */ #define ISMT_DESC_PEC 0x10 /* Packet Error Code */ #define ISMT_DESC_I2C 0x20 /* I2C Enable */ #define ISMT_DESC_INT 0x40 /* Interrupt */ #define ISMT_DESC_SOE 0x80 /* Stop On Error */ /* Hardware Descriptor Constants - Status Field */ #define ISMT_DESC_SCS 0x01 /* Success */ #define ISMT_DESC_DLTO 0x04 /* Data Low Time Out */ #define ISMT_DESC_NAK 0x08 /* NAK Received */ #define ISMT_DESC_CRC 0x10 /* CRC Error */ #define ISMT_DESC_CLTO 0x20 /* Clock Low Time Out */ #define ISMT_DESC_COL 0x40 /* Collisions */ #define ISMT_DESC_LPR 0x80 /* Large Packet Received */ /* Macros */ #define ISMT_DESC_ADDR_RW(addr, is_read) ((addr) | (is_read)) /* iSMT General Register address offsets (SMBBAR + ) */ #define ISMT_GR_GCTRL 0x000 /* General Control */ #define ISMT_GR_SMTICL 0x008 /* SMT Interrupt Cause Location */ #define ISMT_GR_ERRINTMSK 0x010 /* Error Interrupt Mask */ #define ISMT_GR_ERRAERMSK 0x014 /* Error AER Mask */ #define ISMT_GR_ERRSTS 0x018 /* Error Status */ #define ISMT_GR_ERRINFO 0x01c /* Error Information */ /* iSMT Master Registers */ #define ISMT_MSTR_MDBA 0x100 /* Master Descriptor Base Address */ #define ISMT_MSTR_MCTRL 0x108 /* Master Control */ #define ISMT_MSTR_MSTS 0x10c /* Master Status */ #define ISMT_MSTR_MDS 0x110 /* Master Descriptor Size */ #define ISMT_MSTR_RPOLICY 0x114 /* Retry Policy */ /* iSMT Miscellaneous Registers */ #define ISMT_SPGT 0x300 /* SMBus PHY Global Timing */ /* General Control Register (GCTRL) bit definitions */ #define ISMT_GCTRL_TRST 0x04 /* Target Reset */ #define ISMT_GCTRL_KILL 0x08 /* Kill */ #define ISMT_GCTRL_SRST 0x40 /* Soft Reset */ /* Master Control Register (MCTRL) bit definitions */ #define ISMT_MCTRL_SS 0x01 /* Start/Stop */ #define ISMT_MCTRL_MEIE 0x10 /* Master Error Interrupt Enable */ #define ISMT_MCTRL_FMHP 0x00ff0000 /* Firmware 
Master Head Ptr (FMHP) */ /* Master Status Register (MSTS) bit definitions */ #define ISMT_MSTS_HMTP 0xff0000 /* HW Master Tail Pointer (HMTP) */ #define ISMT_MSTS_MIS 0x20 /* Master Interrupt Status (MIS) */ #define ISMT_MSTS_MEIS 0x10 /* Master Error Int Status (MEIS) */ #define ISMT_MSTS_IP 0x01 /* In Progress */ /* Master Descriptor Size (MDS) bit definitions */ #define ISMT_MDS_MASK 0xff /* Master Descriptor Size mask (MDS) */ /* SMBus PHY Global Timing Register (SPGT) bit definitions */ #define ISMT_SPGT_SPD_MASK 0xc0000000 /* SMBus Speed mask */ #define ISMT_SPGT_SPD_80K 0x00 /* 80 kHz */ #define ISMT_SPGT_SPD_100K (0x1 << 30) /* 100 kHz */ #define ISMT_SPGT_SPD_400K (0x2 << 30) /* 400 kHz */ #define ISMT_SPGT_SPD_1M (0x3 << 30) /* 1 MHz */ /* MSI Control Register (MSICTL) bit definitions */ #define ISMT_MSICTL_MSIE 0x01 /* MSI Enable */ #define ISMT_MAX_BLOCK_SIZE 32 /* per SMBus spec */ //#define ISMT_DEBUG device_printf #ifndef ISMT_DEBUG #define ISMT_DEBUG(...) #endif /* iSMT Hardware Descriptor */ struct ismt_desc { uint8_t tgtaddr_rw; /* target address & r/w bit */ uint8_t wr_len_cmd; /* write length in bytes or a command */ uint8_t rd_len; /* read length */ uint8_t control; /* control bits */ uint8_t status; /* status bits */ uint8_t retry; /* collision retry and retry count */ uint8_t rxbytes; /* received bytes */ uint8_t txbytes; /* transmitted bytes */ uint32_t dptr_low; /* lower 32 bit of the data pointer */ uint32_t dptr_high; /* upper 32 bit of the data pointer */ } __packed; #define DESC_SIZE (ISMT_DESC_ENTRIES * sizeof(struct ismt_desc)) #define DMA_BUFFER_SIZE 64 struct ismt_softc { device_t pcidev; device_t smbdev; struct thread *bus_reserved; int intr_rid; struct resource *intr_res; void *intr_handle; bus_space_tag_t mmio_tag; bus_space_handle_t mmio_handle; int mmio_rid; struct resource *mmio_res; uint8_t head; struct ismt_desc *desc; bus_dma_tag_t desc_dma_tag; bus_dmamap_t desc_dma_map; uint64_t desc_bus_addr; uint8_t *dma_buffer; bus_dma_tag_t dma_buffer_dma_tag; bus_dmamap_t dma_buffer_dma_map; uint64_t dma_buffer_bus_addr; uint8_t using_msi; }; static void ismt_intr(void *arg) { struct ismt_softc *sc = arg; uint32_t val; val = bus_read_4(sc->mmio_res, ISMT_MSTR_MSTS); ISMT_DEBUG(sc->pcidev, "%s MSTS=0x%x\n", __func__, val); val |= (ISMT_MSTS_MIS | ISMT_MSTS_MEIS); bus_write_4(sc->mmio_res, ISMT_MSTR_MSTS, val); wakeup(sc); } static int ismt_callback(device_t dev, int index, void *data) { struct ismt_softc *sc; int acquired, err; sc = device_get_softc(dev); switch (index) { case SMB_REQUEST_BUS: acquired = atomic_cmpset_ptr( (uintptr_t *)&sc->bus_reserved, (uintptr_t)NULL, (uintptr_t)curthread); ISMT_DEBUG(dev, "SMB_REQUEST_BUS acquired=%d\n", acquired); if (acquired) err = 0; else err = EWOULDBLOCK; break; case SMB_RELEASE_BUS: KASSERT(sc->bus_reserved == curthread, ("SMB_RELEASE_BUS called by wrong thread\n")); ISMT_DEBUG(dev, "SMB_RELEASE_BUS\n"); atomic_store_rel_ptr((uintptr_t *)&sc->bus_reserved, (uintptr_t)NULL); err = 0; break; default: err = SMB_EABORT; break; } return (err); } static struct ismt_desc * ismt_alloc_desc(struct ismt_softc *sc) { struct ismt_desc *desc; KASSERT(sc->bus_reserved == curthread, ("curthread %p did not request bus (%p has reserved)\n", curthread, sc->bus_reserved)); desc = &sc->desc[sc->head++]; if (sc->head == ISMT_DESC_ENTRIES) sc->head = 0; memset(desc, 0, sizeof(*desc)); return (desc); } static int ismt_submit(struct ismt_softc *sc, struct ismt_desc *desc, uint8_t slave, uint8_t is_read) { uint32_t err, fmhp, val; 
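	/*
	 * Submission flow, as implemented below: the caller has already
	 * filled in wr_len_cmd/rd_len/control, so add the fairness (and,
	 * when using MSI, interrupt) flags, point the descriptor at the
	 * driver's single DMA data buffer, publish it by writing the new
	 * head index into the FMHP field of MCTRL, and set the start bit.
	 * ismt_intr() then wakes the tsleep() below and the descriptor
	 * status byte is mapped onto SMB_* error codes.
	 */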
desc->control |= ISMT_DESC_FAIR; if (sc->using_msi) desc->control |= ISMT_DESC_INT; desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(slave, is_read); desc->dptr_low = (sc->dma_buffer_bus_addr & 0xFFFFFFFFLL); desc->dptr_high = (sc->dma_buffer_bus_addr >> 32); wmb(); fmhp = sc->head << 16; val = bus_read_4(sc->mmio_res, ISMT_MSTR_MCTRL); val &= ~ISMT_MCTRL_FMHP; val |= fmhp; bus_write_4(sc->mmio_res, ISMT_MSTR_MCTRL, val); /* set the start bit */ val = bus_read_4(sc->mmio_res, ISMT_MSTR_MCTRL); val |= ISMT_MCTRL_SS; bus_write_4(sc->mmio_res, ISMT_MSTR_MCTRL, val); err = tsleep(sc, PWAIT, "ismt_wait", 5 * hz); if (err != 0) { ISMT_DEBUG(sc->pcidev, "%s timeout\n", __func__); return (SMB_ETIMEOUT); } ISMT_DEBUG(sc->pcidev, "%s status=0x%x\n", __func__, desc->status); if (desc->status & ISMT_DESC_SCS) return (SMB_ENOERR); if (desc->status & ISMT_DESC_NAK) return (SMB_ENOACK); if (desc->status & ISMT_DESC_CRC) return (SMB_EBUSERR); if (desc->status & ISMT_DESC_COL) return (SMB_ECOLLI); if (desc->status & ISMT_DESC_LPR) return (SMB_EINVAL); if (desc->status & (ISMT_DESC_DLTO | ISMT_DESC_CLTO)) return (SMB_ETIMEOUT); return (SMB_EBUSERR); } static int ismt_quick(device_t dev, u_char slave, int how) { struct ismt_desc *desc; struct ismt_softc *sc; int is_read; ISMT_DEBUG(dev, "%s\n", __func__); if (how != SMB_QREAD && how != SMB_QWRITE) { return (SMB_ENOTSUPP); } sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); is_read = (how == SMB_QREAD ? 1 : 0); return (ismt_submit(sc, desc, slave, is_read)); } static int ismt_sendb(device_t dev, u_char slave, char byte) { struct ismt_desc *desc; struct ismt_softc *sc; ISMT_DEBUG(dev, "%s\n", __func__); sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); desc->control = ISMT_DESC_CWRL; desc->wr_len_cmd = byte; return (ismt_submit(sc, desc, slave, 0)); } static int ismt_recvb(device_t dev, u_char slave, char *byte) { struct ismt_desc *desc; struct ismt_softc *sc; int err; ISMT_DEBUG(dev, "%s\n", __func__); sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); desc->rd_len = 1; err = ismt_submit(sc, desc, slave, 1); if (err != SMB_ENOERR) return (err); *byte = sc->dma_buffer[0]; return (err); } static int ismt_writeb(device_t dev, u_char slave, char cmd, char byte) { struct ismt_desc *desc; struct ismt_softc *sc; ISMT_DEBUG(dev, "%s\n", __func__); sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); desc->wr_len_cmd = 2; sc->dma_buffer[0] = cmd; sc->dma_buffer[1] = byte; return (ismt_submit(sc, desc, slave, 0)); } static int ismt_writew(device_t dev, u_char slave, char cmd, short word) { struct ismt_desc *desc; struct ismt_softc *sc; ISMT_DEBUG(dev, "%s\n", __func__); sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); desc->wr_len_cmd = 3; sc->dma_buffer[0] = cmd; sc->dma_buffer[1] = word & 0xFF; sc->dma_buffer[2] = word >> 8; return (ismt_submit(sc, desc, slave, 0)); } static int ismt_readb(device_t dev, u_char slave, char cmd, char *byte) { struct ismt_desc *desc; struct ismt_softc *sc; int err; ISMT_DEBUG(dev, "%s\n", __func__); sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); desc->control = ISMT_DESC_CWRL; desc->wr_len_cmd = cmd; desc->rd_len = 1; err = ismt_submit(sc, desc, slave, 1); if (err != SMB_ENOERR) return (err); *byte = sc->dma_buffer[0]; return (err); } static int ismt_readw(device_t dev, u_char slave, char cmd, short *word) { struct ismt_desc *desc; struct ismt_softc *sc; int err; ISMT_DEBUG(dev, "%s\n", __func__); sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); desc->control = ISMT_DESC_CWRL; desc->wr_len_cmd = cmd; 
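	/*
	 * With ISMT_DESC_CWRL set, wr_len_cmd holds the SMBus command byte
	 * itself rather than a write length (compare ismt_writeb(), which
	 * leaves CWRL clear and stages command+data in the DMA buffer).
	 */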
desc->rd_len = 2; err = ismt_submit(sc, desc, slave, 1); if (err != SMB_ENOERR) return (err); *word = sc->dma_buffer[0] | (sc->dma_buffer[1] << 8); return (err); } static int ismt_pcall(device_t dev, u_char slave, char cmd, short sdata, short *rdata) { struct ismt_desc *desc; struct ismt_softc *sc; int err; ISMT_DEBUG(dev, "%s\n", __func__); sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); desc->wr_len_cmd = 3; desc->rd_len = 2; sc->dma_buffer[0] = cmd; sc->dma_buffer[1] = sdata & 0xff; sc->dma_buffer[2] = sdata >> 8; err = ismt_submit(sc, desc, slave, 0); if (err != SMB_ENOERR) return (err); *rdata = sc->dma_buffer[0] | (sc->dma_buffer[1] << 8); return (err); } static int ismt_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf) { struct ismt_desc *desc; struct ismt_softc *sc; ISMT_DEBUG(dev, "%s\n", __func__); if (count == 0 || count > ISMT_MAX_BLOCK_SIZE) return (SMB_EINVAL); sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); desc->control = ISMT_DESC_I2C; desc->wr_len_cmd = count + 1; sc->dma_buffer[0] = cmd; memcpy(&sc->dma_buffer[1], buf, count); return (ismt_submit(sc, desc, slave, 0)); } static int ismt_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { struct ismt_desc *desc; struct ismt_softc *sc; int err; ISMT_DEBUG(dev, "%s\n", __func__); if (*count == 0 || *count > ISMT_MAX_BLOCK_SIZE) return (SMB_EINVAL); sc = device_get_softc(dev); desc = ismt_alloc_desc(sc); desc->control = ISMT_DESC_I2C | ISMT_DESC_CWRL; desc->wr_len_cmd = cmd; desc->rd_len = *count; err = ismt_submit(sc, desc, slave, 0); if (err != SMB_ENOERR) return (err); memcpy(buf, sc->dma_buffer, desc->rxbytes); *count = desc->rxbytes; return (err); } static int ismt_detach(device_t dev) { struct ismt_softc *sc; int error; ISMT_DEBUG(dev, "%s\n", __func__); sc = device_get_softc(dev); error = bus_generic_detach(dev); if (error) return (error); - device_delete_child(dev, sc->smbdev); - if (sc->intr_handle != NULL) { bus_teardown_intr(dev, sc->intr_res, sc->intr_handle); sc->intr_handle = NULL; } if (sc->intr_res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, sc->intr_res); sc->intr_res = NULL; } if (sc->using_msi == 1) pci_release_msi(dev); if (sc->mmio_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mmio_rid, sc->mmio_res); sc->mmio_res = NULL; } bus_dmamap_unload(sc->desc_dma_tag, sc->desc_dma_map); bus_dmamap_unload(sc->dma_buffer_dma_tag, sc->dma_buffer_dma_map); bus_dmamem_free(sc->desc_dma_tag, sc->desc, sc->desc_dma_map); bus_dmamem_free(sc->dma_buffer_dma_tag, sc->dma_buffer, sc->dma_buffer_dma_map); bus_dma_tag_destroy(sc->desc_dma_tag); bus_dma_tag_destroy(sc->dma_buffer_dma_tag); pci_disable_busmaster(dev); return 0; } static void ismt_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) { uint64_t *bus_addr = (uint64_t *)arg; KASSERT(error == 0, ("%s: error=%d\n", __func__, error)); KASSERT(nseg == 1, ("%s: nseg=%d\n", __func__, nseg)); *bus_addr = seg[0].ds_addr; } static int ismt_attach(device_t dev) { struct ismt_softc *sc = device_get_softc(dev); int err, num_vectors, val; sc->pcidev = dev; pci_enable_busmaster(dev); if ((sc->smbdev = device_add_child(dev, "smbus", -1)) == NULL) { device_printf(dev, "no smbus child found\n"); err = ENXIO; goto fail; } sc->mmio_rid = PCIR_BAR(0); sc->mmio_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mmio_rid, RF_ACTIVE); if (sc->mmio_res == NULL) { device_printf(dev, "cannot allocate mmio region\n"); err = ENOMEM; goto fail; } sc->mmio_tag = 
rman_get_bustag(sc->mmio_res); sc->mmio_handle = rman_get_bushandle(sc->mmio_res); /* Attach "smbus" child */ bus_attach_children(dev); bus_dma_tag_create(bus_get_dma_tag(dev), 4, PAGE_SIZE, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, DESC_SIZE, 1, DESC_SIZE, 0, NULL, NULL, &sc->desc_dma_tag); bus_dma_tag_create(bus_get_dma_tag(dev), 4, PAGE_SIZE, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, DMA_BUFFER_SIZE, 1, DMA_BUFFER_SIZE, 0, NULL, NULL, &sc->dma_buffer_dma_tag); bus_dmamap_create(sc->desc_dma_tag, 0, &sc->desc_dma_map); bus_dmamap_create(sc->dma_buffer_dma_tag, 0, &sc->dma_buffer_dma_map); bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->desc, BUS_DMA_WAITOK, &sc->desc_dma_map); bus_dmamem_alloc(sc->dma_buffer_dma_tag, (void **)&sc->dma_buffer, BUS_DMA_WAITOK, &sc->dma_buffer_dma_map); bus_dmamap_load(sc->desc_dma_tag, sc->desc_dma_map, sc->desc, DESC_SIZE, ismt_single_map, &sc->desc_bus_addr, 0); bus_dmamap_load(sc->dma_buffer_dma_tag, sc->dma_buffer_dma_map, sc->dma_buffer, DMA_BUFFER_SIZE, ismt_single_map, &sc->dma_buffer_bus_addr, 0); bus_write_4(sc->mmio_res, ISMT_MSTR_MDBA, (sc->desc_bus_addr & 0xFFFFFFFFLL)); bus_write_4(sc->mmio_res, ISMT_MSTR_MDBA + 4, (sc->desc_bus_addr >> 32)); /* initialize the Master Control Register (MCTRL) */ bus_write_4(sc->mmio_res, ISMT_MSTR_MCTRL, ISMT_MCTRL_MEIE); /* initialize the Master Status Register (MSTS) */ bus_write_4(sc->mmio_res, ISMT_MSTR_MSTS, 0); /* initialize the Master Descriptor Size (MDS) */ val = bus_read_4(sc->mmio_res, ISMT_MSTR_MDS); val &= ~ISMT_MDS_MASK; val |= (ISMT_DESC_ENTRIES - 1); bus_write_4(sc->mmio_res, ISMT_MSTR_MDS, val); sc->using_msi = 1; if (pci_msi_count(dev) == 0) { sc->using_msi = 0; goto intx; } num_vectors = 1; if (pci_alloc_msi(dev, &num_vectors) != 0) { sc->using_msi = 0; goto intx; } sc->intr_rid = 1; sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid, RF_ACTIVE); if (sc->intr_res == NULL) { sc->using_msi = 0; pci_release_msi(dev); } intx: if (sc->using_msi == 0) { sc->intr_rid = 0; sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid, RF_SHAREABLE | RF_ACTIVE); if (sc->intr_res == NULL) { device_printf(dev, "cannot allocate irq\n"); err = ENXIO; goto fail; } } ISMT_DEBUG(dev, "using_msi = %d\n", sc->using_msi); err = bus_setup_intr(dev, sc->intr_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, ismt_intr, sc, &sc->intr_handle); if (err != 0) { device_printf(dev, "cannot setup interrupt\n"); err = ENXIO; goto fail; } return (0); fail: ismt_detach(dev); return (err); } #define ID_INTEL_S1200_SMT0 0x0c598086 #define ID_INTEL_S1200_SMT1 0x0c5a8086 #define ID_INTEL_C2000_SMT 0x1f158086 #define ID_INTEL_C3000_SMT 0x19ac8086 static int ismt_probe(device_t dev) { const char *desc; switch (pci_get_devid(dev)) { case ID_INTEL_S1200_SMT0: desc = "Atom Processor S1200 SMBus 2.0 Controller 0"; break; case ID_INTEL_S1200_SMT1: desc = "Atom Processor S1200 SMBus 2.0 Controller 1"; break; case ID_INTEL_C2000_SMT: desc = "Atom Processor C2000 SMBus 2.0"; break; case ID_INTEL_C3000_SMT: desc = "Atom Processor C3000 SMBus 2.0"; break; default: return (ENXIO); } device_set_desc(dev, desc); return (BUS_PROBE_DEFAULT); } /* Device methods */ static device_method_t ismt_pci_methods[] = { DEVMETHOD(device_probe, ismt_probe), DEVMETHOD(device_attach, ismt_attach), DEVMETHOD(device_detach, ismt_detach), DEVMETHOD(smbus_callback, ismt_callback), DEVMETHOD(smbus_quick, ismt_quick), DEVMETHOD(smbus_sendb, ismt_sendb), DEVMETHOD(smbus_recvb, ismt_recvb), DEVMETHOD(smbus_writeb, ismt_writeb), 
DEVMETHOD(smbus_writew, ismt_writew), DEVMETHOD(smbus_readb, ismt_readb), DEVMETHOD(smbus_readw, ismt_readw), DEVMETHOD(smbus_pcall, ismt_pcall), DEVMETHOD(smbus_bwrite, ismt_bwrite), DEVMETHOD(smbus_bread, ismt_bread), DEVMETHOD_END }; static driver_t ismt_pci_driver = { "ismt", ismt_pci_methods, sizeof(struct ismt_softc) }; DRIVER_MODULE(ismt, pci, ismt_pci_driver, 0, 0); DRIVER_MODULE(smbus, ismt, smbus_driver, 0, 0); MODULE_DEPEND(ismt, pci, 1, 1, 1); MODULE_DEPEND(ismt, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); MODULE_VERSION(ismt, 1); diff --git a/sys/dev/lge/if_lge.c b/sys/dev/lge/if_lge.c index 7542b17e19eb..c5cfafc0bd22 100644 --- a/sys/dev/lge/if_lge.c +++ b/sys/dev/lge/if_lge.c @@ -1,1531 +1,1530 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2000, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include /* * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public * documentation not available, but ask me nicely. * * The Level 1 chip is used on some D-Link, SMC and Addtron NICs. * It's a 64-bit PCI part that supports TCP/IP checksum offload, * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There * are three supported methods for data transfer between host and * NIC: programmed I/O, traditional scatter/gather DMA and Packet * Propulsion Technology (tm) DMA. The latter mechanism is a form * of double buffer DMA where the packet data is copied to a * pre-allocated DMA buffer who's physical address has been loaded * into a table at device initialization time. The rationale is that * the virtual to physical address translation needed for normal * scatter/gather DMA is more expensive than the data copy needed * for double buffering. This may be true in Windows NT and the like, * but it isn't true for us, at least on the x86 arch. 
This driver * uses the scatter/gather I/O method for both TX and RX. * * The LXT1001 only supports TCP/IP checksum offload on receive. * Also, the VLAN tagging is done using a 16-entry table which allows * the chip to perform hardware filtering based on VLAN tags. Sadly, * our vlan support doesn't currently play well with this kind of * hardware support. * * Special thanks to: * - Jeff James at Intel, for arranging to have the LXT1001 manual * released (at long last) * - Beny Chen at D-Link, for actually sending it to me * - Brad Short and Keith Alexis at SMC, for sending me sample * SMC9462SX and SMC9462TX adapters for testing * - Paul Saab at Y!, for not killing me (though it remains to be seen * if in fact he did me much of a favor) */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #define LGE_USEIOSPACE #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. */ static const struct lge_type lge_devs[] = { { LGE_VENDORID, LGE_DEVICEID, "Level 1 Gigabit Ethernet" }, { 0, 0, NULL } }; static int lge_probe(device_t); static int lge_attach(device_t); static int lge_detach(device_t); static int lge_alloc_jumbo_mem(struct lge_softc *); static void lge_free_jumbo_mem(struct lge_softc *); static void *lge_jalloc(struct lge_softc *); static void lge_jfree(struct mbuf *); static int lge_newbuf(struct lge_softc *, struct lge_rx_desc *, struct mbuf *); static int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *); static void lge_rxeof(struct lge_softc *, int); static void lge_rxeoc(struct lge_softc *); static void lge_txeof(struct lge_softc *); static void lge_intr(void *); static void lge_tick(void *); static void lge_start(if_t); static void lge_start_locked(if_t); static int lge_ioctl(if_t, u_long, caddr_t); static void lge_init(void *); static void lge_init_locked(struct lge_softc *); static void lge_stop(struct lge_softc *); static void lge_watchdog(struct lge_softc *); static int lge_shutdown(device_t); static int lge_ifmedia_upd(if_t); static void lge_ifmedia_upd_locked(if_t); static void lge_ifmedia_sts(if_t, struct ifmediareq *); static void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *); static void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int); static int lge_miibus_readreg(device_t, int, int); static int lge_miibus_writereg(device_t, int, int, int); static void lge_miibus_statchg(device_t); static void lge_setmulti(struct lge_softc *); static void lge_reset(struct lge_softc *); static int lge_list_rx_init(struct lge_softc *); static int lge_list_tx_init(struct lge_softc *); #ifdef LGE_USEIOSPACE #define LGE_RES SYS_RES_IOPORT #define LGE_RID LGE_PCI_LOIO #else #define LGE_RES SYS_RES_MEMORY #define LGE_RID LGE_PCI_LOMEM #endif static device_method_t lge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lge_probe), DEVMETHOD(device_attach, lge_attach), DEVMETHOD(device_detach, lge_detach), DEVMETHOD(device_shutdown, lge_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, lge_miibus_readreg), DEVMETHOD(miibus_writereg, lge_miibus_writereg), DEVMETHOD(miibus_statchg, lge_miibus_statchg), DEVMETHOD_END }; static driver_t lge_driver = { "lge", lge_methods, sizeof(struct lge_softc) }; DRIVER_MODULE(lge, pci, lge_driver, 0, 
0); DRIVER_MODULE(miibus, lge, miibus_driver, 0, 0); MODULE_DEPEND(lge, pci, 1, 1, 1); MODULE_DEPEND(lge, ether, 1, 1, 1); MODULE_DEPEND(lge, miibus, 1, 1, 1); #define LGE_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | (x)) #define LGE_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) \ CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x) #define SIO_CLR(x) \ CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x) /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void lge_eeprom_getword(struct lge_softc *sc, int addr, u_int16_t *dest) { int i; u_int32_t val; CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ| LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8)); for (i = 0; i < LGE_TIMEOUT; i++) if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ)) break; if (i == LGE_TIMEOUT) { device_printf(sc->lge_dev, "EEPROM read timed out\n"); return; } val = CSR_READ_4(sc, LGE_EEDATA); if (addr & 1) *dest = (val >> 16) & 0xFFFF; else *dest = val & 0xFFFF; return; } /* * Read a sequence of words from the EEPROM. */ static void lge_read_eeprom(struct lge_softc *sc, caddr_t dest, int off, int cnt, int swap) { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { lge_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } static int lge_miibus_readreg(device_t dev, int phy, int reg) { struct lge_softc *sc; int i; sc = device_get_softc(dev); /* * If we have a non-PCS PHY, pretend that the internal * autoneg stuff at PHY address 0 isn't there so that * the miibus code will find only the GMII PHY. */ if (sc->lge_pcs == 0 && phy == 0) return(0); CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ); for (i = 0; i < LGE_TIMEOUT; i++) if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY)) break; if (i == LGE_TIMEOUT) { device_printf(sc->lge_dev, "PHY read timed out\n"); return(0); } return(CSR_READ_4(sc, LGE_GMIICTL) >> 16); } static int lge_miibus_writereg(device_t dev, int phy, int reg, int data) { struct lge_softc *sc; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, LGE_GMIICTL, (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE); for (i = 0; i < LGE_TIMEOUT; i++) if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY)) break; if (i == LGE_TIMEOUT) { device_printf(sc->lge_dev, "PHY write timed out\n"); return(0); } return(0); } static void lge_miibus_statchg(device_t dev) { struct lge_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->lge_miibus); LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED); switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: case IFM_1000_SX: LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000); break; case IFM_100_TX: LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100); break; case IFM_10_T: LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10); break; default: /* * Choose something, even if it's wrong. Clearing * all the bits will hose autoneg on the internal * PHY. 
*/ LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000); break; } if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX); } else { LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX); } return; } static u_int lge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int count) { uint32_t h, *hashes = arg; h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); return (1); } static void lge_setmulti(struct lge_softc *sc) { if_t ifp; uint32_t hashes[2] = { 0, 0 }; ifp = sc->lge_ifp; LGE_LOCK_ASSERT(sc); /* Make sure multicast hash table is enabled. */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST); if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) { CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, LGE_MAR0, 0); CSR_WRITE_4(sc, LGE_MAR1, 0); /* now program new ones */ if_foreach_llmaddr(ifp, lge_hash_maddr, hashes); CSR_WRITE_4(sc, LGE_MAR0, hashes[0]); CSR_WRITE_4(sc, LGE_MAR1, hashes[1]); return; } static void lge_reset(struct lge_softc *sc) { int i; LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST); for (i = 0; i < LGE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST)) break; } if (i == LGE_TIMEOUT) device_printf(sc->lge_dev, "reset never completed\n"); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a Level 1 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int lge_probe(device_t dev) { const struct lge_type *t; t = lge_devs; while(t->lge_name != NULL) { if ((pci_get_vendor(dev) == t->lge_vid) && (pci_get_device(dev) == t->lge_did)) { device_set_desc(dev, t->lge_name); return(BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int lge_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; struct lge_softc *sc; if_t ifp = NULL; int error = 0, rid; sc = device_get_softc(dev); sc->lge_dev = dev; mtx_init(&sc->lge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->lge_stat_callout, &sc->lge_mtx, 0); /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = LGE_RID; sc->lge_res = bus_alloc_resource_any(dev, LGE_RES, &rid, RF_ACTIVE); if (sc->lge_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->lge_btag = rman_get_bustag(sc->lge_res); sc->lge_bhandle = rman_get_bushandle(sc->lge_res); /* Allocate interrupt */ rid = 0; sc->lge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->lge_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Reset the adapter. */ lge_reset(sc); /* * Get station address from the EEPROM. */ lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0); lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0); lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0); sc->lge_ldata = contigmalloc(sizeof(struct lge_list_data), M_DEVBUF, M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->lge_ldata == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto fail; } /* Try to allocate memory for jumbo buffers. 
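 * lge_alloc_jumbo_mem() carves one contiguous allocation into LGE_JSLOTS
 * 9K slots and tracks them on free/in-use lists; see the pool helpers
 * further down.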
*/ if (lge_alloc_jumbo_mem(sc)) { device_printf(dev, "jumbo buffer allocation failed\n"); error = ENXIO; goto fail; } ifp = sc->lge_ifp = if_alloc(IFT_ETHER); if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setioctlfn(ifp, lge_ioctl); if_setstartfn(ifp, lge_start); if_setinitfn(ifp, lge_init); if_setsendqlen(ifp, LGE_TX_LIST_CNT - 1); if_setcapabilities(ifp, IFCAP_RXCSUM); if_setcapenable(ifp, if_getcapabilities(ifp)); if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH) sc->lge_pcs = 1; else sc->lge_pcs = 0; /* * Do MII setup. */ error = mii_attach(dev, &sc->lge_miibus, ifp, lge_ifmedia_upd, lge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); error = bus_setup_intr(dev, sc->lge_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, lge_intr, sc, &sc->lge_intrhand); if (error) { ether_ifdetach(ifp); device_printf(dev, "couldn't set up irq\n"); goto fail; } return (0); fail: lge_free_jumbo_mem(sc); if (sc->lge_ldata) free(sc->lge_ldata, M_DEVBUF); if (ifp) if_free(ifp); if (sc->lge_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq); if (sc->lge_res) bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res); mtx_destroy(&sc->lge_mtx); return(error); } static int lge_detach(device_t dev) { struct lge_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->lge_ifp; LGE_LOCK(sc); lge_reset(sc); lge_stop(sc); LGE_UNLOCK(sc); callout_drain(&sc->lge_stat_callout); ether_ifdetach(ifp); bus_generic_detach(dev); - device_delete_child(dev, sc->lge_miibus); bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq); bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res); free(sc->lge_ldata, M_DEVBUF); if_free(ifp); lge_free_jumbo_mem(sc); mtx_destroy(&sc->lge_mtx); return(0); } /* * Initialize the transmit descriptors. */ static int lge_list_tx_init(struct lge_softc *sc) { struct lge_list_data *ld; struct lge_ring_data *cd; int i; cd = &sc->lge_cdata; ld = sc->lge_ldata; for (i = 0; i < LGE_TX_LIST_CNT; i++) { ld->lge_tx_list[i].lge_mbuf = NULL; ld->lge_tx_list[i].lge_ctl = 0; } cd->lge_tx_prod = cd->lge_tx_cons = 0; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arralge the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int lge_list_rx_init(struct lge_softc *sc) { struct lge_list_data *ld; struct lge_ring_data *cd; int i; ld = sc->lge_ldata; cd = &sc->lge_cdata; cd->lge_rx_prod = cd->lge_rx_cons = 0; CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0); for (i = 0; i < LGE_RX_LIST_CNT; i++) { if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0) break; if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS) return(ENOBUFS); } /* Clear possible 'rx command queue empty' interrupt. */ CSR_READ_4(sc, LGE_ISR); return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. 
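 * The cluster comes from the jumbo pool via lge_jalloc() and is attached
 * with m_extadd(), with lge_jfree() registered as the free routine that
 * returns the slot to the pool.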
*/ static int lge_newbuf(struct lge_softc *sc, struct lge_rx_desc *c, struct mbuf *m) { struct mbuf *m_new = NULL; char *buf = NULL; if (m == NULL) { MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) { device_printf(sc->lge_dev, "no memory for rx list " "-- packet dropped!\n"); return(ENOBUFS); } /* Allocate the jumbo buffer */ buf = lge_jalloc(sc); if (buf == NULL) { #ifdef LGE_VERBOSE device_printf(sc->lge_dev, "jumbo allocation failed " "-- packet dropped!\n"); #endif m_freem(m_new); return(ENOBUFS); } /* Attach the buffer to the mbuf */ m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN; m_extadd(m_new, buf, LGE_JUMBO_FRAMELEN, lge_jfree, sc, NULL, 0, EXT_NET_DRV); } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN; m_new->m_data = m_new->m_ext.ext_buf; } /* * Adjust alignment so packet payload begins on a * longword boundary. Mandatory for Alpha, useful on * x86 too. */ m_adj(m_new, ETHER_ALIGN); c->lge_mbuf = m_new; c->lge_fragptr_hi = 0; c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t)); c->lge_fraglen = m_new->m_len; c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1); c->lge_sts = 0; /* * Put this buffer in the RX command FIFO. To do this, * we just write the physical address of the descriptor * into the RX descriptor address registers. Note that * there are two registers, one high DWORD and one low * DWORD, which lets us specify a 64-bit address if * desired. We only use a 32-bit address for now. * Writing to the low DWORD register is what actually * causes the command to be issued, so we do that * last. */ CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c)); LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT); return(0); } static int lge_alloc_jumbo_mem(struct lge_softc *sc) { caddr_t ptr; int i; struct lge_jpool_entry *entry; /* Grab a big chunk o' storage. */ sc->lge_cdata.lge_jumbo_buf = contigmalloc(LGE_JMEM, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->lge_cdata.lge_jumbo_buf == NULL) { device_printf(sc->lge_dev, "no memory for jumbo buffers!\n"); return(ENOBUFS); } SLIST_INIT(&sc->lge_jfree_listhead); SLIST_INIT(&sc->lge_jinuse_listhead); /* * Now divide it up into 9K pieces and save the addresses * in an array. */ ptr = sc->lge_cdata.lge_jumbo_buf; for (i = 0; i < LGE_JSLOTS; i++) { sc->lge_cdata.lge_jslots[i] = ptr; ptr += LGE_JLEN; entry = malloc(sizeof(struct lge_jpool_entry), M_DEVBUF, M_NOWAIT); if (entry == NULL) { device_printf(sc->lge_dev, "no memory for jumbo " "buffer queue!\n"); return(ENOBUFS); } entry->slot = i; SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries); } return(0); } static void lge_free_jumbo_mem(struct lge_softc *sc) { struct lge_jpool_entry *entry; if (sc->lge_cdata.lge_jumbo_buf == NULL) return; while ((entry = SLIST_FIRST(&sc->lge_jinuse_listhead))) { device_printf(sc->lge_dev, "asked to free buffer that is in use!\n"); SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries); } while (!SLIST_EMPTY(&sc->lge_jfree_listhead)) { entry = SLIST_FIRST(&sc->lge_jfree_listhead); SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries); free(entry, M_DEVBUF); } free(sc->lge_cdata.lge_jumbo_buf, M_DEVBUF); return; } /* * Allocate a jumbo buffer. 
*/ static void * lge_jalloc(struct lge_softc *sc) { struct lge_jpool_entry *entry; entry = SLIST_FIRST(&sc->lge_jfree_listhead); if (entry == NULL) { #ifdef LGE_VERBOSE device_printf(sc->lge_dev, "no free jumbo buffers\n"); #endif return(NULL); } SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->lge_jinuse_listhead, entry, jpool_entries); return(sc->lge_cdata.lge_jslots[entry->slot]); } /* * Release a jumbo buffer. */ static void lge_jfree(struct mbuf *m) { struct lge_softc *sc; int i; struct lge_jpool_entry *entry; /* Extract the softc struct pointer. */ sc = m->m_ext.ext_arg1; if (sc == NULL) panic("lge_jfree: can't find softc pointer!"); /* calculate the slot this buffer belongs to */ i = ((vm_offset_t)m->m_ext.ext_buf - (vm_offset_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN; if ((i < 0) || (i >= LGE_JSLOTS)) panic("lge_jfree: asked to free buffer that we don't manage!"); entry = SLIST_FIRST(&sc->lge_jinuse_listhead); if (entry == NULL) panic("lge_jfree: buffer not in use!"); entry->slot = i; SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static void lge_rxeof(struct lge_softc *sc, int cnt) { struct mbuf *m; if_t ifp; struct lge_rx_desc *cur_rx; int c, i, total_len = 0; u_int32_t rxsts, rxctl; ifp = sc->lge_ifp; /* Find out how many frames were processed. */ c = cnt; i = sc->lge_cdata.lge_rx_cons; /* Suck them in. */ while(c) { struct mbuf *m0 = NULL; cur_rx = &sc->lge_ldata->lge_rx_list[i]; rxctl = cur_rx->lge_ctl; rxsts = cur_rx->lge_sts; m = cur_rx->lge_mbuf; cur_rx->lge_mbuf = NULL; total_len = LGE_RXBYTES(cur_rx); LGE_INC(i, LGE_RX_LIST_CNT); c--; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxctl & LGE_RXCTL_ERRMASK) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); lge_newbuf(sc, &LGE_RXTAIL(sc), m); continue; } if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) { m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); lge_newbuf(sc, &LGE_RXTAIL(sc), m); if (m0 == NULL) { device_printf(sc->lge_dev, "no receive buffers " "available -- packet dropped!\n"); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); continue; } m = m0; } else { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* Do IP checksum checking. */ if (rxsts & LGE_RXSTS_ISIP) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxsts & LGE_RXSTS_IPCSUMERR)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((rxsts & LGE_RXSTS_ISTCP && !(rxsts & LGE_RXSTS_TCPCSUMERR)) || (rxsts & LGE_RXSTS_ISUDP && !(rxsts & LGE_RXSTS_UDPCSUMERR))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } LGE_UNLOCK(sc); if_input(ifp, m); LGE_LOCK(sc); } sc->lge_cdata.lge_rx_cons = i; return; } static void lge_rxeoc(struct lge_softc *sc) { if_t ifp; ifp = sc->lge_ifp; if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); lge_init_locked(sc); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void lge_txeof(struct lge_softc *sc) { struct lge_tx_desc *cur_tx = NULL; if_t ifp; u_int32_t idx, txdone; ifp = sc->lge_ifp; /* Clear the timeout timer. 
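 * (lge_timer is armed to 5 in lge_start_locked() and counted down once a
 * second from lge_tick(), which calls lge_watchdog() if it expires.)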
*/ sc->lge_timer = 0; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ idx = sc->lge_cdata.lge_tx_cons; txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT); while (idx != sc->lge_cdata.lge_tx_prod && txdone) { cur_tx = &sc->lge_ldata->lge_tx_list[idx]; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (cur_tx->lge_mbuf != NULL) { m_freem(cur_tx->lge_mbuf); cur_tx->lge_mbuf = NULL; } cur_tx->lge_ctl = 0; txdone--; LGE_INC(idx, LGE_TX_LIST_CNT); sc->lge_timer = 0; } sc->lge_cdata.lge_tx_cons = idx; if (cur_tx != NULL) if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); return; } static void lge_tick(void *xsc) { struct lge_softc *sc; struct mii_data *mii; if_t ifp; sc = xsc; ifp = sc->lge_ifp; LGE_LOCK_ASSERT(sc); CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, CSR_READ_4(sc, LGE_STATSVAL)); CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, CSR_READ_4(sc, LGE_STATSVAL)); if (!sc->lge_link) { mii = device_get_softc(sc->lge_miibus); mii_tick(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->lge_link++; if (bootverbose && (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX|| IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)) device_printf(sc->lge_dev, "gigabit link up\n"); if (!if_sendq_empty(ifp)) lge_start_locked(ifp); } } if (sc->lge_timer != 0 && --sc->lge_timer == 0) lge_watchdog(sc); callout_reset(&sc->lge_stat_callout, hz, lge_tick, sc); return; } static void lge_intr(void *arg) { struct lge_softc *sc; if_t ifp; u_int32_t status; sc = arg; ifp = sc->lge_ifp; LGE_LOCK(sc); /* Suppress unwanted interrupts */ if (!(if_getflags(ifp) & IFF_UP)) { lge_stop(sc); LGE_UNLOCK(sc); return; } for (;;) { /* * Reading the ISR register clears all interrupts, and * clears the 'interrupts enabled' bit in the IMR * register. */ status = CSR_READ_4(sc, LGE_ISR); if ((status & LGE_INTRS) == 0) break; if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE))) lge_txeof(sc); if (status & LGE_ISR_RXDMA_DONE) lge_rxeof(sc, LGE_RX_DMACNT(status)); if (status & LGE_ISR_RXCMDFIFO_EMPTY) lge_rxeoc(sc); if (status & LGE_ISR_PHY_INTR) { sc->lge_link = 0; callout_stop(&sc->lge_stat_callout); lge_tick(sc); } } /* Re-enable interrupts. */ CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB); if (!if_sendq_empty(ifp)) lge_start_locked(ifp); LGE_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int lge_encap(struct lge_softc *sc, struct mbuf *m_head, u_int32_t *txidx) { struct lge_frag *f = NULL; struct lge_tx_desc *cur_tx; struct mbuf *m; int frag = 0, tot_len = 0; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; cur_tx = &sc->lge_ldata->lge_tx_list[*txidx]; frag = 0; for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { tot_len += m->m_len; f = &cur_tx->lge_frags[frag]; f->lge_fraglen = m->m_len; f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t)); f->lge_fragptr_hi = 0; frag++; } } if (m != NULL) return(ENOBUFS); cur_tx->lge_mbuf = m_head; cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len; LGE_INC((*txidx), LGE_TX_LIST_CNT); /* Queue for transmit */ CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx)); return(0); } /* * Main transmit routine. 
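/*
 * Illustrative aside (not part of the driver): lge_txeof() and lge_encap()
 * above walk the descriptor rings with LGE_INC(), a wrap-around increment of
 * a producer or consumer index.  A tiny stand-alone sketch of that ring
 * arithmetic; the ring size here is illustrative.
 */
#include <assert.h>

#define RING_CNT 256                /* stand-in for LGE_TX_LIST_CNT */

/* Wrap-around increment in the spirit of the LGE_INC() macro. */
#define RING_INC(x) ((x) = ((x) + 1) % RING_CNT)

int
main(void)
{
    unsigned int prod = RING_CNT - 1, cons = 250, used;

    RING_INC(prod);                 /* wraps from RING_CNT - 1 back to 0 */
    assert(prod == 0);

    /* Descriptors outstanding between consumer and producer. */
    prod = 10;
    used = (prod + RING_CNT - cons) % RING_CNT;
    assert(used == 16);
    return (0);
}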
To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void lge_start(if_t ifp) { struct lge_softc *sc; sc = if_getsoftc(ifp); LGE_LOCK(sc); lge_start_locked(ifp); LGE_UNLOCK(sc); } static void lge_start_locked(if_t ifp) { struct lge_softc *sc; struct mbuf *m_head = NULL; u_int32_t idx; sc = if_getsoftc(ifp); if (!sc->lge_link) return; idx = sc->lge_cdata.lge_tx_prod; if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) return; while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) { if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0) break; m_head = if_dequeue(ifp); if (m_head == NULL) break; if (lge_encap(sc, m_head, &idx)) { if_sendq_prepend(ifp, m_head); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } sc->lge_cdata.lge_tx_prod = idx; /* * Set a timeout in case the chip goes out to lunch. */ sc->lge_timer = 5; return; } static void lge_init(void *xsc) { struct lge_softc *sc = xsc; LGE_LOCK(sc); lge_init_locked(sc); LGE_UNLOCK(sc); } static void lge_init_locked(struct lge_softc *sc) { if_t ifp = sc->lge_ifp; LGE_LOCK_ASSERT(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; /* * Cancel pending I/O and free all RX/TX buffers. */ lge_stop(sc); lge_reset(sc); /* Set MAC address */ CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&if_getlladdr(sc->lge_ifp)[0])); CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&if_getlladdr(sc->lge_ifp)[4])); /* Init circular RX list. */ if (lge_list_rx_init(sc) == ENOBUFS) { device_printf(sc->lge_dev, "initialization failed: no " "memory for rx buffers\n"); lge_stop(sc); return; } /* * Init tx descriptors. */ lge_list_tx_init(sc); /* Set initial value for MODE1 register. */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST| LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD| LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0| LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2); /* If we want promiscuous mode, set the allframes bit. */ if (if_getflags(ifp) & IFF_PROMISC) { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC); } else { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC); } /* * Set the capture broadcast bit to capture broadcast frames. */ if (if_getflags(ifp) & IFF_BROADCAST) { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST); } else { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST); } /* Packet padding workaround? */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD); /* No error frames */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS); /* Receive large frames */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS); /* Workaround: disable RX/TX flow control */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL); CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL); /* Make sure to strip CRC from received frames */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC); /* Turn off magic packet mode */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB); /* Turn off all VLAN stuff */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX| LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT); /* Workarond: FIFO overflow */ CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF); CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT); /* * Load the multicast filter. 
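/*
 * Illustrative aside (not part of the driver): the MODE1 writes in
 * lge_init_locked() above appear to follow a set/clear convention -- feature
 * bits written together with a SETRST control bit are turned on, and the
 * same bits written without it are turned off (see the promiscuous and
 * broadcast cases).  Below is a hypothetical software model of such a
 * register with made-up bit positions; it is an inference from the code, not
 * a statement about the real hardware register layout.
 */
#include <assert.h>
#include <stdint.h>

#define CTL_SETRST   (1u << 31)     /* hypothetical control-bit position */
#define CTL_PROMISC  (1u << 2)      /* hypothetical feature-bit position */

static uint32_t
mode_reg_write(uint32_t cur, uint32_t val)
{
    uint32_t bits = val & ~CTL_SETRST;

    /* With the control bit: set the named bits; without it: clear them. */
    return ((val & CTL_SETRST) ? (cur | bits) : (cur & ~bits));
}

int
main(void)
{
    uint32_t reg = 0;

    reg = mode_reg_write(reg, CTL_SETRST | CTL_PROMISC);   /* enable */
    assert(reg & CTL_PROMISC);
    reg = mode_reg_write(reg, CTL_PROMISC);                 /* disable */
    assert(!(reg & CTL_PROMISC));
    return (0);
}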
*/ lge_setmulti(sc); /* * Enable hardware checksum validation for all received IPv4 * packets, do not reject packets with bad checksums. */ CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM| LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM| LGE_MODE2_RX_ERRCSUM); /* * Enable the delivery of PHY interrupts based on * link/speed/duplex status chalges. */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL); /* Enable receiver and transmitter. */ CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0); CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB); CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0); CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB); /* * Enable interrupts. */ CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0| LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS); lge_ifmedia_upd_locked(ifp); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); callout_reset(&sc->lge_stat_callout, hz, lge_tick, sc); return; } /* * Set media options. */ static int lge_ifmedia_upd(if_t ifp) { struct lge_softc *sc; sc = if_getsoftc(ifp); LGE_LOCK(sc); lge_ifmedia_upd_locked(ifp); LGE_UNLOCK(sc); return(0); } static void lge_ifmedia_upd_locked(if_t ifp) { struct lge_softc *sc; struct mii_data *mii; struct mii_softc *miisc; sc = if_getsoftc(ifp); LGE_LOCK_ASSERT(sc); mii = device_get_softc(sc->lge_miibus); sc->lge_link = 0; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); mii_mediachg(mii); } /* * Report current media status. */ static void lge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct lge_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); LGE_LOCK(sc); mii = device_get_softc(sc->lge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; LGE_UNLOCK(sc); return; } static int lge_ioctl(if_t ifp, u_long command, caddr_t data) { struct lge_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch(command) { case SIOCSIFMTU: LGE_LOCK(sc); if (ifr->ifr_mtu > LGE_JUMBO_MTU) error = EINVAL; else if_setmtu(ifp, ifr->ifr_mtu); LGE_UNLOCK(sc); break; case SIOCSIFFLAGS: LGE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING && if_getflags(ifp) & IFF_PROMISC && !(sc->lge_if_flags & IFF_PROMISC)) { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1| LGE_MODE1_RX_PROMISC); } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING && !(if_getflags(ifp) & IFF_PROMISC) && sc->lge_if_flags & IFF_PROMISC) { CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC); } else { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); lge_init_locked(sc); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) lge_stop(sc); } sc->lge_if_flags = if_getflags(ifp); LGE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: LGE_LOCK(sc); lge_setmulti(sc); LGE_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->lge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return(error); } static void lge_watchdog(struct lge_softc *sc) { if_t ifp; LGE_LOCK_ASSERT(sc); ifp = sc->lge_ifp; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_printf(ifp, "watchdog timeout\n"); lge_stop(sc); lge_reset(sc); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); lge_init_locked(sc); if (!if_sendq_empty(ifp)) lge_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
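/*
 * Illustrative aside (not part of the driver): the transmit watchdog here is
 * a simple countdown -- lge_start_locked() arms sc->lge_timer, the
 * once-per-second lge_tick() callout decrements it, and lge_watchdog() runs
 * when it reaches zero with work still pending.  A stand-alone sketch of
 * that pattern.
 */
#include <assert.h>
#include <stdbool.h>

struct soft_watchdog {
    int  timer;                     /* seconds remaining, 0 = disarmed */
    bool fired;
};

static void
watchdog_arm(struct soft_watchdog *wd, int secs)
{
    wd->timer = secs;
}

/* Called once per second, like the lge_tick() callout. */
static void
watchdog_tick(struct soft_watchdog *wd)
{
    if (wd->timer != 0 && --wd->timer == 0)
        wd->fired = true;           /* the driver would call lge_watchdog() */
}

int
main(void)
{
    struct soft_watchdog wd = { 0, false };
    int i;

    watchdog_arm(&wd, 5);           /* same 5 second budget as lge_start_locked() */
    for (i = 0; i < 4; i++)
        watchdog_tick(&wd);
    assert(!wd.fired);
    watchdog_tick(&wd);             /* fifth second without completion: fire */
    assert(wd.fired);
    return (0);
}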
*/ static void lge_stop(struct lge_softc *sc) { int i; if_t ifp; LGE_LOCK_ASSERT(sc); ifp = sc->lge_ifp; sc->lge_timer = 0; callout_stop(&sc->lge_stat_callout); CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB); /* Disable receiver and transmitter. */ CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB); sc->lge_link = 0; /* * Free data in the RX lists. */ for (i = 0; i < LGE_RX_LIST_CNT; i++) { if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) { m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf); sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL; } } bzero((char *)&sc->lge_ldata->lge_rx_list, sizeof(sc->lge_ldata->lge_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < LGE_TX_LIST_CNT; i++) { if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) { m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf); sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL; } } bzero((char *)&sc->lge_ldata->lge_tx_list, sizeof(sc->lge_ldata->lge_tx_list)); if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int lge_shutdown(device_t dev) { struct lge_softc *sc; sc = device_get_softc(dev); LGE_LOCK(sc); lge_reset(sc); lge_stop(sc); LGE_UNLOCK(sc); return (0); } diff --git a/sys/dev/ow/ow.c b/sys/dev/ow/ow.c index 9cbd05cd88b1..0325e6b324c8 100644 --- a/sys/dev/ow/ow.c +++ b/sys/dev/ow/ow.c @@ -1,732 +1,719 @@ /*- * Copyright (c) 2015 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * lldev - link level device * ndev - network / transport device (this module) * pdev - presentation device (children of this module) */ typedef int ow_enum_fn(device_t, device_t); typedef int ow_found_fn(device_t, romid_t); struct ow_softc { device_t dev; /* Newbus driver back pointer */ struct mtx mtx; /* bus mutex */ device_t owner; /* bus owner, if != NULL */ }; struct ow_devinfo { romid_t romid; }; static int ow_acquire_bus(device_t ndev, device_t pdev, int how); static void ow_release_bus(device_t ndev, device_t pdev); #define OW_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define OW_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define OW_LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define OW_ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define OW_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) static MALLOC_DEFINE(M_OW, "ow", "House keeping data for 1wire bus"); static const struct ow_timing timing_regular_min = { .t_slot = 60, .t_low0 = 60, .t_low1 = 1, .t_release = 0, .t_rec = 1, .t_rdv = 15, /* fixed */ .t_rstl = 480, .t_rsth = 480, .t_pdl = 60, .t_pdh = 15, .t_lowr = 1, }; static const struct ow_timing timing_regular_max = { .t_slot = 120, .t_low0 = 120, .t_low1 = 15, .t_release = 45, .t_rec = 960, /* infinity */ .t_rdv = 15, /* fixed */ .t_rstl = 960, /* infinity */ .t_rsth = 960, /* infinity */ .t_pdl = 240, /* 60us to 240us */ .t_pdh = 60, /* 15us to 60us */ .t_lowr = 15, /* 1us */ }; static struct ow_timing timing_regular = { .t_slot = 60, /* 60 <= t < 120 */ .t_low0 = 60, /* 60 <= t < t_slot < 120 */ .t_low1 = 1, /* 1 <= t < 15 */ .t_release = 45, /* 0 <= t < 45 */ .t_rec = 15, /* 1 <= t < inf */ .t_rdv = 15, /* t == 15 */ .t_rstl = 480, /* 480 <= t < inf */ .t_rsth = 480, /* 480 <= t < inf */ .t_pdl = 60, /* 60 <= t < 240 */ .t_pdh = 60, /* 15 <= t < 60 */ .t_lowr = 1, /* 1 <= t < 15 */ }; /* NB: Untested */ static const struct ow_timing timing_overdrive_min = { .t_slot = 6, .t_low0 = 6, .t_low1 = 1, .t_release = 0, .t_rec = 1, .t_rdv = 2, /* fixed */ .t_rstl = 48, .t_rsth = 48, .t_pdl = 8, .t_pdh = 2, .t_lowr = 1, }; static const struct ow_timing timing_overdrive_max = { .t_slot = 16, .t_low0 = 16, .t_low1 = 2, .t_release = 4, .t_rec = 960, /* infinity */ .t_rdv = 2, /* fixed */ .t_rstl = 80, .t_rsth = 960, /* infinity */ .t_pdl = 24, .t_pdh = 6, .t_lowr = 2, }; static struct ow_timing timing_overdrive = { .t_slot = 11, /* 6 <= t < 16 */ .t_low0 = 6, /* 6 <= t < t_slot < 16 */ .t_low1 = 1, /* 1 <= t < 2 */ .t_release = 4, /* 0 <= t < 4 */ .t_rec = 1, /* 1 <= t < inf */ .t_rdv = 2, /* t == 2 */ .t_rstl = 48, /* 48 <= t < 80 */ .t_rsth = 48, /* 48 <= t < inf */ .t_pdl = 8, /* 8 <= t < 24 */ .t_pdh = 2, /* 2 <= t < 6 */ .t_lowr = 1, /* 1 <= t < 2 */ }; SYSCTL_NODE(_hw, OID_AUTO, ow, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "1-Wire protocol"); SYSCTL_NODE(_hw_ow, OID_AUTO, regular, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Regular mode timings"); SYSCTL_NODE(_hw_ow, OID_AUTO, overdrive, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Overdrive mode timings"); #define _OW_TIMING_SYSCTL(mode, param) \ static int \ sysctl_ow_timing_ ## mode ## _ ## param(SYSCTL_HANDLER_ARGS) \ { \ int val = timing_ ## mode.param; \ int err; \ err = sysctl_handle_int(oidp, &val, 0, req); \ if (err != 0 || req->newptr == NULL) \ return (err); \ if (val < timing_ ## mode ## _min.param) \ return (EINVAL); \ else if (val >= timing_ ## mode ## _max.param) \ return (EINVAL); \ timing_ ## mode.param = val; \ 
return (0); \ } \ SYSCTL_PROC(_hw_ow_ ## mode, OID_AUTO, param, \ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int), \ sysctl_ow_timing_ ## mode ## _ ## param, "I", \ "1-Wire timing parameter in microseconds (-1 resets to default)") #define OW_TIMING_SYSCTL(param) \ _OW_TIMING_SYSCTL(regular, param); \ _OW_TIMING_SYSCTL(overdrive, param) OW_TIMING_SYSCTL(t_slot); OW_TIMING_SYSCTL(t_low0); OW_TIMING_SYSCTL(t_low1); OW_TIMING_SYSCTL(t_release); OW_TIMING_SYSCTL(t_rec); OW_TIMING_SYSCTL(t_rdv); OW_TIMING_SYSCTL(t_rstl); OW_TIMING_SYSCTL(t_rsth); OW_TIMING_SYSCTL(t_pdl); OW_TIMING_SYSCTL(t_pdh); OW_TIMING_SYSCTL(t_lowr); #undef _OW_TIMING_SYSCTL #undef OW_TIMING_SYSCTL static void ow_send_byte(device_t lldev, struct ow_timing *t, uint8_t byte) { int i; for (i = 0; i < 8; i++) if (byte & (1 << i)) OWLL_WRITE_ONE(lldev, t); else OWLL_WRITE_ZERO(lldev, t); } static void ow_read_byte(device_t lldev, struct ow_timing *t, uint8_t *bytep) { int i; uint8_t byte = 0; int bit; for (i = 0; i < 8; i++) { OWLL_READ_DATA(lldev, t, &bit); byte |= bit << i; } *bytep = byte; } static int ow_send_command(device_t ndev, device_t pdev, struct ow_cmd *cmd) { int present, i, bit, tries; device_t lldev; struct ow_timing *t; lldev = device_get_parent(ndev); /* * Retry the reset a couple of times before giving up. */ tries = 4; do { OWLL_RESET_AND_PRESENCE(lldev, &timing_regular, &present); if (present == 1) device_printf(ndev, "Reset said no device on bus?.\n"); } while (present == 1 && tries-- > 0); if (present == 1) { device_printf(ndev, "Reset said the device wasn't there.\n"); return ENOENT; /* No devices acked the RESET */ } if (present == -1) { device_printf(ndev, "Reset discovered bus wired wrong.\n"); return ENOENT; } for (i = 0; i < cmd->rom_len; i++) ow_send_byte(lldev, &timing_regular, cmd->rom_cmd[i]); for (i = 0; i < cmd->rom_read_len; i++) ow_read_byte(lldev, &timing_regular, cmd->rom_read + i); if (cmd->xpt_len) { /* * Per AN937, the reset pulse and ROM level are always * done with the regular timings. Certain ROM commands * put the device into overdrive mode for the remainder * of the data transfer, which is why we have to pass the * timings here. Commands that need to be handled like this * are expected to be flagged by the client. */ t = (cmd->flags & OW_FLAG_OVERDRIVE) ? 
&timing_overdrive : &timing_regular; for (i = 0; i < cmd->xpt_len; i++) ow_send_byte(lldev, t, cmd->xpt_cmd[i]); if (cmd->flags & OW_FLAG_READ_BIT) { memset(cmd->xpt_read, 0, (cmd->xpt_read_len + 7) / 8); for (i = 0; i < cmd->xpt_read_len; i++) { OWLL_READ_DATA(lldev, t, &bit); cmd->xpt_read[i / 8] |= bit << (i % 8); } } else { for (i = 0; i < cmd->xpt_read_len; i++) ow_read_byte(lldev, t, cmd->xpt_read + i); } } return 0; } static int ow_search_rom(device_t lldev, device_t dev) { struct ow_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.rom_cmd[0] = SEARCH_ROM; cmd.rom_len = 1; return ow_send_command(lldev, dev, &cmd); } #if 0 static int ow_alarm_search(device_t lldev, device_t dev) { struct ow_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.rom_cmd[0] = ALARM_SEARCH; cmd.rom_len = 1; return ow_send_command(lldev, dev, &cmd); } #endif static int ow_add_child(device_t dev, romid_t romid) { struct ow_devinfo *di; device_t child; di = malloc(sizeof(*di), M_OW, M_WAITOK); di->romid = romid; child = device_add_child(dev, NULL, DEVICE_UNIT_ANY); if (child == NULL) { free(di, M_OW); return ENOMEM; } device_set_ivars(child, di); return (0); } static void ow_child_deleted(device_t dev, device_t child) { free(device_get_ivars(child), M_OW); } static device_t ow_child_by_romid(device_t dev, romid_t romid) { device_t *children, retval, child; int nkid, i; struct ow_devinfo *di; if (device_get_children(dev, &children, &nkid) != 0) return (NULL); retval = NULL; for (i = 0; i < nkid; i++) { child = children[i]; di = device_get_ivars(child); if (di->romid == romid) { retval = child; break; } } free(children, M_TEMP); return (retval); } /* * CRC generator table -- taken from AN937 DOW CRC LOOKUP FUNCTION Table 2 */ const uint8_t ow_crc_table[] = { 0, 94, 188, 226, 97, 63, 221, 131, 194, 156, 126, 32, 163, 253, 31, 65, 157, 195, 33, 127, 252, 162, 64, 30, 95, 1, 227, 189, 62, 96, 130, 220, 35, 125, 159, 193, 66, 28, 254, 160, 225, 191, 93, 3, 128, 222, 60, 98, 190, 224, 2, 92, 223, 129, 99, 61, 124, 34, 192, 158, 29, 67, 161, 255, 70, 24, 250, 164, 39, 121, 155, 197, 132, 218, 56, 102, 229, 187, 89, 7, 219, 133,103, 57, 186, 228, 6, 88, 25, 71, 165, 251, 120, 38, 196, 154, 101, 59, 217, 135, 4, 90, 184, 230, 167, 249, 27, 69, 198, 152, 122, 36, 248, 166, 68, 26, 153, 199, 37, 123, 58, 100, 134, 216, 91, 5, 231, 185, 140,210, 48, 110, 237, 179, 81, 15, 78, 16, 242, 172, 47, 113,147, 205, 17, 79, 173, 243, 112, 46, 204, 146, 211,141, 111, 49, 178, 236, 14, 80, 175, 241, 19, 77, 206, 144, 114, 44, 109, 51, 209, 143, 12, 82,176, 238, 50, 108, 142, 208, 83, 13, 239, 177, 240, 174, 76, 18, 145, 207, 45, 115, 202, 148, 118, 40, 171, 245, 23, 73, 8, 86, 180, 234, 105, 55, 213, 139, 87, 9, 235, 181, 54, 104, 138, 212, 149, 203, 41, 119, 244, 170, 72, 22, 233, 183, 85, 11, 136, 214, 52, 106, 43, 117, 151, 201, 74, 20, 246, 168, 116, 42, 200, 150, 21, 75, 169, 247, 182, 232, 10, 84, 215, 137, 107, 53 }; /* * Converted from DO_CRC page 131 ANN937 */ static uint8_t ow_crc(device_t ndev, device_t pdev, uint8_t *buffer, size_t len) { uint8_t crc = 0; int i; for (i = 0; i < len; i++) crc = ow_crc_table[crc ^ buffer[i]]; return crc; } static int ow_check_crc(romid_t romid) { return ow_crc(NULL, NULL, (uint8_t *)&romid, sizeof(romid)) == 0; } static int ow_device_found(device_t dev, romid_t romid) { /* XXX Move this up into enumerate? */ /* * All valid ROM IDs have a valid CRC. Check that first. 
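/*
 * Illustrative aside (not part of the driver): the lookup table above is the
 * Dallas/Maxim 1-Wire CRC-8 (polynomial x^8 + x^5 + x^4 + 1).  The bitwise
 * form below produces the same values and shows why ow_check_crc() can
 * validate a full ROM ID by checking that the CRC over all eight bytes folds
 * to zero.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint8_t
ow_crc8(const uint8_t *buf, size_t len)
{
    uint8_t crc = 0;
    size_t i;
    int bit;

    for (i = 0; i < len; i++) {
        crc ^= buf[i];
        for (bit = 0; bit < 8; bit++)
            crc = (crc & 1) ? (crc >> 1) ^ 0x8c : crc >> 1; /* reflected poly */
    }
    return (crc);
}

int
main(void)
{
    uint8_t one = 0x01;
    uint8_t rom[8] = { 0x28, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x00 };

    /* A single 0x01 byte maps to 94 (0x5e), entry 1 of the table above. */
    assert(ow_crc8(&one, 1) == 0x5e);

    /* A well-formed ROM ID carries the CRC of its first seven bytes in the
     * eighth byte, so the CRC over all eight bytes is zero. */
    rom[7] = ow_crc8(rom, 7);
    assert(ow_crc8(rom, 8) == 0);
    return (0);
}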
*/ if (!ow_check_crc(romid)) { device_printf(dev, "Device romid %8D failed CRC.\n", &romid, ":"); return EINVAL; } /* * If we've seen this child before, don't add a new one for it. */ if (ow_child_by_romid(dev, romid) != NULL) return 0; return ow_add_child(dev, romid); } static int ow_enumerate(device_t dev, ow_enum_fn *enumfp, ow_found_fn *foundfp) { device_t lldev = device_get_parent(dev); int first, second, i, dir, prior, last, err, retries; uint64_t probed, last_mask; int sanity = 10; prior = -1; last_mask = 0; retries = 0; last = -2; err = ow_acquire_bus(dev, dev, OWN_DONTWAIT); if (err != 0) return err; while (last != -1) { if (sanity-- < 0) { printf("Reached the sanity limit\n"); return EIO; } again: probed = 0; last = -1; /* * See AN397 section 5.II.C.3 for the algorithm (though a bit * poorly stated). The search command forces each device to * send ROM ID bits one at a time (first the bit, then the * complement) the master (us) sends back a bit. If the * device's bit doesn't match what we send back, that device * stops sending bits back. So each time through we remember * where we made the last decision (always 0). If there's a * conflict there this time (and there will be in the absence * of a hardware failure) we go with 1. This way, we prune the * devices on the bus and wind up with a unique ROM. We know * we're done when we detect no new conflicts. The same * algorithm is used for devices in alarm state as well. * * In addition, experience has shown that sometimes devices * stop responding in the middle of enumeration, so try this * step again a few times when that happens. It is unclear if * this is due to a nosiy electrical environment or some odd * timing issue. */ /* * The enumeration command should be successfully sent, if not, * we have big issues on the bus so punt. Lower layers report * any unusual errors, so we don't need to here. */ err = enumfp(dev, dev); if (err != 0) return (err); for (i = 0; i < 64; i++) { OWLL_READ_DATA(lldev, &timing_regular, &first); OWLL_READ_DATA(lldev, &timing_regular, &second); switch (first | second << 1) { case 0: /* Conflict */ if (i < prior) dir = (last_mask >> i) & 1; else dir = i == prior; if (dir == 0) last = i; break; case 1: /* 1 then 0 -> 1 for all */ dir = 1; break; case 2: /* 0 then 1 -> 0 for all */ dir = 0; break; case 3: /* * No device responded. This is unexpected, but * experience has shown that on some platforms * we miss a timing window, or otherwise have * an issue. Start this step over. Since we've * not updated prior yet, we can just jump to * the top of the loop for a re-do of this step. */ printf("oops, starting over\n"); if (++retries > 5) return (EIO); goto again; default: /* NOTREACHED */ __assert_unreachable(); } if (dir) { OWLL_WRITE_ONE(lldev, &timing_regular); probed |= 1ull << i; } else { OWLL_WRITE_ZERO(lldev, &timing_regular); } } retries = 0; foundfp(dev, probed); last_mask = probed; prior = last; } ow_release_bus(dev, dev); return (0); } static int ow_probe(device_t dev) { device_set_desc(dev, "1 Wire Bus"); return (BUS_PROBE_GENERIC); } static int ow_attach(device_t ndev) { struct ow_softc *sc; /* * Find all the devices on the bus. We don't probe / attach them in the * enumeration phase. We do this because we want to allow the probe / * attach routines of the child drivers to have as full an access to the * bus as possible. 
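/*
 * Illustrative aside (not part of the driver): each pass of the SEARCH ROM
 * loop in ow_enumerate() above reads a ROM bit and its complement and acts on
 * the pair via the switch on (first | second << 1).  The stand-alone helper
 * below reproduces just that classification step.
 */
#include <assert.h>

enum search_bit {
    SEARCH_ZERO,                    /* every remaining device has a 0 here */
    SEARCH_ONE,                     /* every remaining device has a 1 here */
    SEARCH_CONFLICT,                /* devices disagree; the master must choose */
    SEARCH_NONE                     /* no device answered at all */
};

static enum search_bit
classify(int first, int second)
{
    switch (first | second << 1) {
    case 0:
        return (SEARCH_CONFLICT);
    case 1:                         /* bit 1, complement 0 */
        return (SEARCH_ONE);
    case 2:                         /* bit 0, complement 1 */
        return (SEARCH_ZERO);
    default:                        /* bit 1, complement 1 */
        return (SEARCH_NONE);
    }
}

int
main(void)
{
    assert(classify(1, 0) == SEARCH_ONE);
    assert(classify(0, 1) == SEARCH_ZERO);
    assert(classify(0, 0) == SEARCH_CONFLICT);
    assert(classify(1, 1) == SEARCH_NONE);
    return (0);
}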
While we reset things before the next step of the * search (so it would likely be OK to allow access by the clients to * the bus), it is more conservative to find them all, then to do the * attach of the devices. This also allows the child devices to have * more knowledge of the bus. We also ignore errors from the enumeration * because they might happen after we've found a few devices. */ sc = device_get_softc(ndev); sc->dev = ndev; mtx_init(&sc->mtx, device_get_nameunit(sc->dev), "ow", MTX_DEF); ow_enumerate(ndev, ow_search_rom, ow_device_found); bus_attach_children(ndev); return (0); } static int ow_detach(device_t ndev) { - device_t *children, child; - int nkid, i; struct ow_softc *sc; sc = device_get_softc(ndev); /* * detach all the children first. This is blocking until any threads * have stopped, etc. */ bus_generic_detach(ndev); - /* - * We delete all the children, and free up the ivars - */ - if (device_get_children(ndev, &children, &nkid) != 0) - return ENOMEM; - for (i = 0; i < nkid; i++) { - child = children[i]; - device_delete_child(ndev, child); - } - free(children, M_TEMP); - OW_LOCK_DESTROY(sc); return 0; } static int ow_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { struct ow_devinfo *di; di = device_get_ivars(child); sbuf_printf(sb, "romid=%8D", &di->romid, ":"); return (0); } static int ow_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ow_devinfo *di; romid_t **ptr; di = device_get_ivars(child); switch (which) { case OW_IVAR_FAMILY: *result = di->romid & 0xff; break; case OW_IVAR_ROMID: ptr = (romid_t **)result; *ptr = &di->romid; break; default: return EINVAL; } return 0; } static int ow_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return EINVAL; } static int ow_print_child(device_t ndev, device_t pdev) { int retval = 0; struct ow_devinfo *di; di = device_get_ivars(pdev); retval += bus_print_child_header(ndev, pdev); retval += printf(" romid %8D", &di->romid, ":"); retval += bus_print_child_footer(ndev, pdev); return retval; } static void ow_probe_nomatch(device_t ndev, device_t pdev) { struct ow_devinfo *di; di = device_get_ivars(pdev); device_printf(ndev, "romid %8D: no driver\n", &di->romid, ":"); } static int ow_acquire_bus(device_t ndev, device_t pdev, int how) { struct ow_softc *sc; sc = device_get_softc(ndev); OW_ASSERT_UNLOCKED(sc); OW_LOCK(sc); if (sc->owner != NULL) { if (sc->owner == pdev) panic("%s: %s recursively acquiring the bus.\n", device_get_nameunit(ndev), device_get_nameunit(pdev)); if (how == OWN_DONTWAIT) { OW_UNLOCK(sc); return EWOULDBLOCK; } while (sc->owner != NULL) mtx_sleep(sc, &sc->mtx, 0, "owbuswait", 0); } sc->owner = pdev; OW_UNLOCK(sc); return 0; } static void ow_release_bus(device_t ndev, device_t pdev) { struct ow_softc *sc; sc = device_get_softc(ndev); OW_ASSERT_UNLOCKED(sc); OW_LOCK(sc); if (sc->owner == NULL) panic("%s: %s releasing unowned bus.", device_get_nameunit(ndev), device_get_nameunit(pdev)); if (sc->owner != pdev) panic("%s: %s don't own the bus. %s does. 
game over.", device_get_nameunit(ndev), device_get_nameunit(pdev), device_get_nameunit(sc->owner)); sc->owner = NULL; wakeup(sc); OW_UNLOCK(sc); } static device_method_t ow_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ow_probe), DEVMETHOD(device_attach, ow_attach), DEVMETHOD(device_detach, ow_detach), /* Bus interface */ DEVMETHOD(bus_child_deleted, ow_child_deleted), DEVMETHOD(bus_child_pnpinfo, ow_child_pnpinfo), DEVMETHOD(bus_read_ivar, ow_read_ivar), DEVMETHOD(bus_write_ivar, ow_write_ivar), DEVMETHOD(bus_print_child, ow_print_child), DEVMETHOD(bus_probe_nomatch, ow_probe_nomatch), /* One Wire Network/Transport layer interface */ DEVMETHOD(own_send_command, ow_send_command), DEVMETHOD(own_acquire_bus, ow_acquire_bus), DEVMETHOD(own_release_bus, ow_release_bus), DEVMETHOD(own_crc, ow_crc), { 0, 0 } }; static driver_t ow_driver = { "ow", ow_methods, sizeof(struct ow_softc), }; DRIVER_MODULE(ow, owc, ow_driver, 0, 0); MODULE_VERSION(ow, 1); diff --git a/sys/dev/pcf/pcf_isa.c b/sys/dev/pcf/pcf_isa.c index 190554258c3a..f86caed87e6a 100644 --- a/sys/dev/pcf/pcf_isa.c +++ b/sys/dev/pcf/pcf_isa.c @@ -1,209 +1,206 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2004 Joerg Wunsch * * derived from sys/i386/isa/pcf.c which is: * * Copyright (c) 1998 Nicolas Souchu, Marc Bouget * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Hardware driver for a Philips PCF8584 I2C bus controller sitting * on a generic ISA bus. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #define PCF_NAME "pcf" static void pcf_isa_identify(driver_t *, device_t); static int pcf_isa_probe(device_t); static int pcf_isa_attach(device_t); static int pcf_isa_detach(device_t); static device_method_t pcf_isa_methods[] = { /* device interface */ DEVMETHOD(device_identify, pcf_isa_identify), DEVMETHOD(device_probe, pcf_isa_probe), DEVMETHOD(device_attach, pcf_isa_attach), DEVMETHOD(device_detach, pcf_isa_detach), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_repeated_start, pcf_repeated_start), DEVMETHOD(iicbus_start, pcf_start), DEVMETHOD(iicbus_stop, pcf_stop), DEVMETHOD(iicbus_write, pcf_write), DEVMETHOD(iicbus_read, pcf_read), DEVMETHOD(iicbus_reset, pcf_rst_card), { 0, 0 } }; static driver_t pcf_isa_driver = { PCF_NAME, pcf_isa_methods, sizeof(struct pcf_softc), }; static void pcf_isa_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, PCF_NAME, 0); return; } static int pcf_isa_probe(device_t dev) { rman_res_t start, count; u_int rid = 0, port, error; /* skip PnP probes */ if (isa_get_logicalid(dev)) return (ENXIO); /* The port address must be explicitly specified */ bus_get_resource(dev, SYS_RES_IOPORT, rid, &start, &count); if ((error = resource_int_value(PCF_NAME, 0, "port", &port)) != 0) return (error); /* Probe is only successful for the specified base io */ if (port != (u_int)start) return (ENXIO); device_set_desc(dev, "PCF8584 I2C bus controller"); return (0); } static int pcf_isa_attach(device_t dev) { struct pcf_softc *sc; int rv = ENXIO; sc = DEVTOSOFTC(dev); mtx_init(&sc->pcf_lock, device_get_nameunit(dev), "pcf", MTX_DEF); /* IO port is mandatory */ sc->res_ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->rid_ioport, RF_ACTIVE); if (sc->res_ioport == 0) { device_printf(dev, "cannot reserve I/O port range\n"); goto error; } sc->pcf_flags = device_get_flags(dev); if (!(sc->pcf_flags & IIC_POLLED)) { sc->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->rid_irq, RF_ACTIVE); if (sc->res_irq == 0) { device_printf(dev, "can't reserve irq, polled mode.\n"); sc->pcf_flags |= IIC_POLLED; } } /* reset the chip */ pcf_rst_card(dev, IIC_FASTEST, PCF_DEFAULT_ADDR, NULL); if (sc->res_irq) { rv = bus_setup_intr(dev, sc->res_irq, INTR_TYPE_NET /* | INTR_ENTROPY */, NULL, pcf_intr, sc, &sc->intr_cookie); if (rv) { device_printf(dev, "could not setup IRQ\n"); goto error; } } if ((sc->iicbus = device_add_child(dev, "iicbus", -1)) == NULL) device_printf(dev, "could not allocate iicbus instance\n"); /* probe and attach the iicbus */ bus_attach_children(dev); return (0); error: if (sc->res_irq != 0) { bus_release_resource(dev, SYS_RES_IRQ, sc->rid_irq, sc->res_irq); } if (sc->res_ioport != 0) { bus_release_resource(dev, SYS_RES_IOPORT, sc->rid_ioport, sc->res_ioport); } mtx_destroy(&sc->pcf_lock); return (rv); } static int pcf_isa_detach(device_t dev) { struct pcf_softc *sc; int rv; sc = DEVTOSOFTC(dev); if ((rv = bus_generic_detach(dev)) != 0) return (rv); - if ((rv = device_delete_child(dev, sc->iicbus)) != 0) - return (rv); - if (sc->res_irq != 0) { bus_teardown_intr(dev, sc->res_irq, sc->intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, sc->rid_irq, sc->res_irq); } bus_release_resource(dev, SYS_RES_IOPORT, sc->rid_ioport, sc->res_ioport); mtx_destroy(&sc->pcf_lock); return (0); } DRIVER_MODULE(pcf_isa, isa, pcf_isa_driver, 0, 
0); diff --git a/sys/dev/pwm/controller/allwinner/aw_pwm.c b/sys/dev/pwm/controller/allwinner/aw_pwm.c index 0f505957bb1a..117f3ae17e1b 100644 --- a/sys/dev/pwm/controller/allwinner/aw_pwm.c +++ b/sys/dev/pwm/controller/allwinner/aw_pwm.c @@ -1,399 +1,396 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include "pwmbus_if.h" #define AW_PWM_CTRL 0x00 #define AW_PWM_CTRL_PRESCALE_MASK 0xF #define AW_PWM_CTRL_EN (1 << 4) #define AW_PWM_CTRL_ACTIVE_LEVEL_HIGH (1 << 5) #define AW_PWM_CTRL_GATE (1 << 6) #define AW_PWM_CTRL_MODE_MASK 0x80 #define AW_PWM_CTRL_PULSE_MODE (1 << 7) #define AW_PWM_CTRL_CYCLE_MODE (0 << 7) #define AW_PWM_CTRL_PULSE_START (1 << 8) #define AW_PWM_CTRL_CLK_BYPASS (1 << 9) #define AW_PWM_CTRL_PERIOD_BUSY (1 << 28) #define AW_PWM_PERIOD 0x04 #define AW_PWM_PERIOD_TOTAL_MASK 0xFFFF #define AW_PWM_PERIOD_TOTAL_SHIFT 16 #define AW_PWM_PERIOD_ACTIVE_MASK 0xFFFF #define AW_PWM_PERIOD_ACTIVE_SHIFT 0 #define AW_PWM_MAX_FREQ 24000000 #define NS_PER_SEC 1000000000 static struct ofw_compat_data compat_data[] = { { "allwinner,sun5i-a13-pwm", 1 }, { "allwinner,sun8i-h3-pwm", 1 }, { NULL, 0 } }; static struct resource_spec aw_pwm_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; struct aw_pwm_softc { device_t dev; device_t busdev; clk_t clk; struct resource *res; uint64_t clk_freq; unsigned int period; unsigned int duty; uint32_t flags; bool enabled; }; static uint32_t aw_pwm_clk_prescaler[] = { 120, 180, 240, 360, 480, 0, 0, 0, 12000, 24000, 36000, 48000, 72000, 0, 0, 1, }; #define AW_PWM_READ(sc, reg) bus_read_4((sc)->res, (reg)) #define AW_PWM_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) static int aw_pwm_probe(device_t dev); static int aw_pwm_attach(device_t dev); static int aw_pwm_detach(device_t dev); static int aw_pwm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Allwinner PWM"); return (BUS_PROBE_DEFAULT); } static int aw_pwm_attach(device_t dev) { struct aw_pwm_softc *sc; uint64_t clk_freq; uint32_t reg; phandle_t node; int error; sc = device_get_softc(dev); sc->dev = dev; error = 
clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (error != 0) { device_printf(dev, "cannot get clock\n"); goto fail; } error = clk_enable(sc->clk); if (error != 0) { device_printf(dev, "cannot enable clock\n"); goto fail; } error = clk_get_freq(sc->clk, &sc->clk_freq); if (error != 0) { device_printf(dev, "cannot get clock frequency\n"); goto fail; } if (bus_alloc_resources(dev, aw_pwm_spec, &sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } /* Read the configuration left by U-Boot */ reg = AW_PWM_READ(sc, AW_PWM_CTRL); if (reg & (AW_PWM_CTRL_GATE | AW_PWM_CTRL_EN)) sc->enabled = true; reg = AW_PWM_READ(sc, AW_PWM_CTRL); reg &= AW_PWM_CTRL_PRESCALE_MASK; if (reg > nitems(aw_pwm_clk_prescaler)) { device_printf(dev, "Bad prescaler %x, cannot guess current settings\n", reg); goto skipcfg; } clk_freq = sc->clk_freq / aw_pwm_clk_prescaler[reg]; reg = AW_PWM_READ(sc, AW_PWM_PERIOD); sc->period = NS_PER_SEC / (clk_freq / ((reg >> AW_PWM_PERIOD_TOTAL_SHIFT) & AW_PWM_PERIOD_TOTAL_MASK)); sc->duty = NS_PER_SEC / (clk_freq / ((reg >> AW_PWM_PERIOD_ACTIVE_SHIFT) & AW_PWM_PERIOD_ACTIVE_MASK)); skipcfg: /* * Note that we don't check for failure to attach pwmbus -- even without * it we can still service clients who connect via fdt xref data. */ node = ofw_bus_get_node(dev); OF_device_register_xref(OF_xref_from_node(node), dev); sc->busdev = device_add_child(dev, "pwmbus", DEVICE_UNIT_ANY); bus_attach_children(dev); return (0); fail: aw_pwm_detach(dev); return (error); } static int aw_pwm_detach(device_t dev) { struct aw_pwm_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(sc->dev)) != 0) { device_printf(sc->dev, "cannot detach child devices\n"); return (error); } - if (sc->busdev != NULL) - device_delete_child(dev, sc->busdev); - if (sc->res != NULL) bus_release_resources(dev, aw_pwm_spec, &sc->res); return (0); } static phandle_t aw_pwm_get_node(device_t bus, device_t dev) { /* * Share our controller node with our pwmbus child; it instantiates * devices by walking the children contained within our node. */ return ofw_bus_get_node(bus); } static int aw_pwm_channel_count(device_t dev, u_int *nchannel) { *nchannel = 1; return (0); } static int aw_pwm_channel_config(device_t dev, u_int channel, u_int period, u_int duty) { struct aw_pwm_softc *sc; uint64_t period_freq, duty_freq; uint64_t clk_rate, div; uint32_t reg; int prescaler; int i; sc = device_get_softc(dev); period_freq = NS_PER_SEC / period; if (period_freq > AW_PWM_MAX_FREQ) return (EINVAL); /* * FIXME. The hardware is capable of sub-Hz frequencies, that is, * periods longer than a second. But the current code cannot deal * with those properly. */ if (period_freq == 0) return (EINVAL); /* * FIXME. There is a great loss of precision when the period and the * duty are near 1 second. In some cases period_freq and duty_freq can * be equal even if the period and the duty are significantly different. 
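/*
 * Illustrative aside (not part of the driver): the channel-config code in
 * aw_pwm_channel_config() converts the requested period (nanoseconds) into a
 * prescaler choice so the period fits the 16-bit cycle counter, then derives
 * the total and active cycle counts from the divided clock.  A user-space
 * sketch of that arithmetic; the constant names are local stand-ins for the
 * AW_PWM_* macros.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PWM_MAX_FREQ      24000000u     /* stand-in for AW_PWM_MAX_FREQ */
#define NS_PER_SECOND     1000000000u
#define PERIOD_TOTAL_MASK 0xffffu       /* 16-bit total-cycle field */

static const uint32_t prescaler_tab[] = {
    120, 180, 240, 360, 480, 0, 0, 0,
    12000, 24000, 36000, 48000, 72000, 0, 0, 1,
};

/*
 * Pick a prescaler so (clk / period_freq - 1) fits in the cycle counter and
 * return the divided clock rate, or 0 if nothing fits.
 */
static uint32_t
pick_clk_rate(uint32_t period_ns, int *prescaler)
{
    uint32_t period_freq = NS_PER_SECOND / period_ns;
    uint64_t div = PWM_MAX_FREQ / period_freq;
    unsigned int i;

    *prescaler = 15;                    /* table entry 15 divides by 1 */
    if (div - 1 <= PERIOD_TOTAL_MASK)
        return (PWM_MAX_FREQ);          /* no prescaling needed */
    for (i = 0; i < sizeof(prescaler_tab) / sizeof(prescaler_tab[0]); i++) {
        if (prescaler_tab[i] == 0)
            continue;
        div = PWM_MAX_FREQ / prescaler_tab[i] / period_freq;
        if (div - 1 < PERIOD_TOTAL_MASK) {
            *prescaler = (int)i;
            return (PWM_MAX_FREQ / prescaler_tab[i]);
        }
    }
    return (0);
}

int
main(void)
{
    int prescaler;
    unsigned int clk, total, active;

    /* A 20 ms period (50 Hz) with a 1.5 ms duty cycle. */
    clk = pick_clk_rate(20000000u, &prescaler);
    assert(clk == 200000 && prescaler == 0);    /* 24 MHz / 120 fits */
    total = clk / (NS_PER_SECOND / 20000000u) - 1;
    active = clk / (NS_PER_SECOND / 1500000u);
    printf("prescaler %d: total %u cycles, active %u cycles\n",
        prescaler, total, active);
    return (0);
}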
*/ duty_freq = NS_PER_SEC / duty; if (duty_freq < period_freq) { device_printf(sc->dev, "duty < period\n"); return (EINVAL); } /* First test without prescaler */ clk_rate = AW_PWM_MAX_FREQ; prescaler = AW_PWM_CTRL_PRESCALE_MASK; div = AW_PWM_MAX_FREQ / period_freq; if ((div - 1) > AW_PWM_PERIOD_TOTAL_MASK) { /* Test all prescaler */ for (i = 0; i < nitems(aw_pwm_clk_prescaler); i++) { if (aw_pwm_clk_prescaler[i] == 0) continue; div = AW_PWM_MAX_FREQ / aw_pwm_clk_prescaler[i] / period_freq; if ((div - 1) < AW_PWM_PERIOD_TOTAL_MASK ) { prescaler = i; clk_rate = AW_PWM_MAX_FREQ / aw_pwm_clk_prescaler[i]; break; } } if (prescaler == AW_PWM_CTRL_PRESCALE_MASK) return (EINVAL); } reg = AW_PWM_READ(sc, AW_PWM_CTRL); /* Write the prescalar */ reg &= ~AW_PWM_CTRL_PRESCALE_MASK; reg |= prescaler; reg &= ~AW_PWM_CTRL_MODE_MASK; reg |= AW_PWM_CTRL_CYCLE_MODE; reg &= ~AW_PWM_CTRL_PULSE_START; reg &= ~AW_PWM_CTRL_CLK_BYPASS; AW_PWM_WRITE(sc, AW_PWM_CTRL, reg); /* Write the total/active cycles */ reg = ((clk_rate / period_freq - 1) << AW_PWM_PERIOD_TOTAL_SHIFT) | ((clk_rate / duty_freq) << AW_PWM_PERIOD_ACTIVE_SHIFT); AW_PWM_WRITE(sc, AW_PWM_PERIOD, reg); sc->period = period; sc->duty = duty; return (0); } static int aw_pwm_channel_get_config(device_t dev, u_int channel, u_int *period, u_int *duty) { struct aw_pwm_softc *sc; sc = device_get_softc(dev); *period = sc->period; *duty = sc->duty; return (0); } static int aw_pwm_channel_enable(device_t dev, u_int channel, bool enable) { struct aw_pwm_softc *sc; uint32_t reg; sc = device_get_softc(dev); if (enable && sc->enabled) return (0); reg = AW_PWM_READ(sc, AW_PWM_CTRL); if (enable) reg |= AW_PWM_CTRL_GATE | AW_PWM_CTRL_EN; else reg &= ~(AW_PWM_CTRL_GATE | AW_PWM_CTRL_EN); AW_PWM_WRITE(sc, AW_PWM_CTRL, reg); sc->enabled = enable; return (0); } static int aw_pwm_channel_is_enabled(device_t dev, u_int channel, bool *enabled) { struct aw_pwm_softc *sc; sc = device_get_softc(dev); *enabled = sc->enabled; return (0); } static device_method_t aw_pwm_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_pwm_probe), DEVMETHOD(device_attach, aw_pwm_attach), DEVMETHOD(device_detach, aw_pwm_detach), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, aw_pwm_get_node), /* pwmbus interface */ DEVMETHOD(pwmbus_channel_count, aw_pwm_channel_count), DEVMETHOD(pwmbus_channel_config, aw_pwm_channel_config), DEVMETHOD(pwmbus_channel_get_config, aw_pwm_channel_get_config), DEVMETHOD(pwmbus_channel_enable, aw_pwm_channel_enable), DEVMETHOD(pwmbus_channel_is_enabled, aw_pwm_channel_is_enabled), DEVMETHOD_END }; static driver_t aw_pwm_driver = { "pwm", aw_pwm_methods, sizeof(struct aw_pwm_softc), }; DRIVER_MODULE(aw_pwm, simplebus, aw_pwm_driver, 0, 0); MODULE_VERSION(aw_pwm, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/qcom_qup/qcom_spi.c b/sys/dev/qcom_qup/qcom_spi.c index d3f38dee041f..88341b4d2083 100644 --- a/sys/dev/qcom_qup/qcom_spi.c +++ b/sys/dev/qcom_qup/qcom_spi.c @@ -1,906 +1,904 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021, Adrian Chadd * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" #include #include #include #include static struct ofw_compat_data compat_data[] = { { "qcom,spi-qup-v1.1.1", QCOM_SPI_HW_QPI_V1_1 }, { "qcom,spi-qup-v2.1.1", QCOM_SPI_HW_QPI_V2_1 }, { "qcom,spi-qup-v2.2.1", QCOM_SPI_HW_QPI_V2_2 }, { NULL, 0 } }; /* * Flip the CS GPIO line either active or inactive. * * Actually listen to the CS polarity. */ static void qcom_spi_set_chipsel(struct qcom_spi_softc *sc, int cs, bool active) { bool pinactive; bool invert = !! (cs & SPIBUS_CS_HIGH); cs = cs & ~SPIBUS_CS_HIGH; if (sc->cs_pins[cs] == NULL) { device_printf(sc->sc_dev, "%s: cs=%u, active=%u, invert=%u, no gpio?\n", __func__, cs, active, invert); return; } QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_CHIPSELECT, "%s: cs=%u active=%u\n", __func__, cs, active); /* * Default rule here is CS is active low. */ if (active) pinactive = false; else pinactive = true; /* * Invert the CS line if required. */ if (invert) pinactive = !! pinactive; gpio_pin_set_active(sc->cs_pins[cs], pinactive); gpio_pin_is_active(sc->cs_pins[cs], &pinactive); } static void qcom_spi_intr(void *arg) { struct qcom_spi_softc *sc = arg; int ret; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR, "%s: called\n", __func__); QCOM_SPI_LOCK(sc); ret = qcom_spi_hw_interrupt_handle(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to read intr status\n"); goto done; } /* * Handle spurious interrupts outside of an actual * transfer. */ if (sc->transfer.active == false) { device_printf(sc->sc_dev, "ERROR: spurious interrupt\n"); qcom_spi_hw_ack_opmode(sc); goto done; } /* Now, handle interrupts */ if (sc->intr.error) { sc->intr.error = false; device_printf(sc->sc_dev, "ERROR: intr\n"); } if (sc->intr.do_rx) { sc->intr.do_rx = false; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR, "%s: PIO_READ\n", __func__); if (sc->state.transfer_mode == QUP_IO_M_MODE_FIFO) ret = qcom_spi_hw_read_pio_fifo(sc); else ret = qcom_spi_hw_read_pio_block(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_read failed (%u)\n", ret); goto done; } } if (sc->intr.do_tx) { sc->intr.do_tx = false; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR, "%s: PIO_WRITE\n", __func__); /* * For FIFO operations we do not do a write here, we did * it at the beginning of the transfer. * * For BLOCK operations yes, we call the routine. 
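/*
 * Illustrative aside (not part of the driver): qcom_spi_set_chipsel() above
 * derives a GPIO level from the "assert the chip select" request plus the
 * SPIBUS_CS_HIGH flag, with active-low as the default polarity.  The sketch
 * below captures the mapping the surrounding comments describe, with a local
 * stand-in for the flag value.
 */
#include <assert.h>
#include <stdbool.h>

#define CS_HIGH_FLAG 0x1u               /* stand-in for SPIBUS_CS_HIGH */

static bool
cs_pin_level(unsigned int flags, bool assert_cs)
{
    bool level = !assert_cs;            /* active-low default: assert -> low */

    if (flags & CS_HIGH_FLAG)
        level = !level;                 /* invert for active-high devices */
    return (level);
}

int
main(void)
{
    assert(cs_pin_level(0, true) == false);             /* assert, active-low */
    assert(cs_pin_level(0, false) == true);             /* release, active-low */
    assert(cs_pin_level(CS_HIGH_FLAG, true) == true);   /* assert, active-high */
    assert(cs_pin_level(CS_HIGH_FLAG, false) == false); /* release, active-high */
    return (0);
}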
*/ if (sc->state.transfer_mode == QUP_IO_M_MODE_FIFO) ret = qcom_spi_hw_ack_write_pio_fifo(sc); else ret = qcom_spi_hw_write_pio_block(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_write failed (%u)\n", ret); goto done; } } /* * Do this last. We may actually have completed the * transfer in the PIO receive path above and it will * set the done flag here. */ if (sc->intr.done) { sc->intr.done = false; sc->transfer.done = true; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR, "%s: transfer done\n", __func__); wakeup(sc); } done: QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_INTR, "%s: done\n", __func__); QCOM_SPI_UNLOCK(sc); } static int qcom_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Qualcomm SPI Interface"); return (BUS_PROBE_DEFAULT); } /* * Allocate GPIOs if provided in the SPI controller block. * * Some devices will use GPIO lines for chip select. * It's also quite annoying because some devices will want to use * the hardware provided CS gating for say, the first chipselect block, * and then use GPIOs for the later ones. * * So here we just assume for now that SPI index 0 uses the hardware * lines, and >0 use GPIO lines. Revisit this if better hardware * shows up. * * And finally, iterating over the cs-gpios list to allocate GPIOs * doesn't actually tell us what the polarity is. For that we need * to actually iterate over the list of child nodes and check what * their properties are (and look for "spi-cs-high".) */ static void qcom_spi_attach_gpios(struct qcom_spi_softc *sc) { phandle_t node; int idx, err; /* Allocate gpio pins for configured chip selects. */ node = ofw_bus_get_node(sc->sc_dev); for (idx = 0; idx < nitems(sc->cs_pins); idx++) { err = gpio_pin_get_by_ofw_propidx(sc->sc_dev, node, "cs-gpios", idx, &sc->cs_pins[idx]); if (err == 0) { err = gpio_pin_setflags(sc->cs_pins[idx], GPIO_PIN_OUTPUT); if (err != 0) { device_printf(sc->sc_dev, "error configuring gpio for" " cs %u (%d)\n", idx, err); } /* * We can't set this HIGH right now because * we don't know if it needs to be set to * high for inactive or low for inactive * based on the child SPI device flags. */ #if 0 gpio_pin_set_active(sc->cs_pins[idx], 1); gpio_pin_is_active(sc->cs_pins[idx], &tmp); #endif } else { device_printf(sc->sc_dev, "cannot configure gpio for chip select %u\n", idx); sc->cs_pins[idx] = NULL; } } } static void qcom_spi_sysctl_attach(struct qcom_spi_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); } static int qcom_spi_attach(device_t dev) { struct qcom_spi_softc *sc = device_get_softc(dev); int rid, ret, i, val; sc->sc_dev = dev; /* * Hardware version is stored in the ofw_compat_data table. 
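/*
 * Illustrative aside (not part of the driver): qcom_spi_probe() and the
 * attach path match the device against compat_data and use the associated
 * ocd_data value as the hardware revision.  Below is a minimal user-space
 * model of that table lookup; the revision numbers are stand-ins for the
 * QCOM_SPI_HW_* constants and the matching is simplified to a single
 * compatible string.
 */
#include <assert.h>
#include <stddef.h>
#include <string.h>

struct compat_entry {
    const char   *compat;
    unsigned long data;                 /* stands in for ocd_data */
};

static const struct compat_entry compat_tab[] = {
    { "qcom,spi-qup-v1.1.1", 1 },
    { "qcom,spi-qup-v2.1.1", 2 },
    { "qcom,spi-qup-v2.2.1", 3 },
    { NULL, 0 },
};

static unsigned long
compat_lookup(const char *compatible)
{
    const struct compat_entry *e;

    for (e = compat_tab; e->compat != NULL; e++)
        if (strcmp(e->compat, compatible) == 0)
            return (e->data);
    return (0);                         /* no match: probe would return ENXIO */
}

int
main(void)
{
    assert(compat_lookup("qcom,spi-qup-v2.1.1") == 2);
    assert(compat_lookup("acme,unknown-spi") == 0);
    return (0);
}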
*/ sc->hw_version = ofw_bus_search_compatible(dev, compat_data)->ocd_data; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "ERROR: Could not map memory\n"); ret = ENXIO; goto error; } rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->sc_irq_res) { device_printf(dev, "ERROR: Could not map interrupt\n"); ret = ENXIO; goto error; } ret = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, qcom_spi_intr, sc, &sc->sc_irq_h); if (ret != 0) { device_printf(dev, "ERROR: could not configure interrupt " "(%d)\n", ret); goto error; } qcom_spi_attach_gpios(sc); ret = clk_get_by_ofw_name(dev, 0, "core", &sc->clk_core); if (ret != 0) { device_printf(dev, "ERROR: could not get %s clock (%d)\n", "core", ret); goto error; } ret = clk_get_by_ofw_name(dev, 0, "iface", &sc->clk_iface); if (ret != 0) { device_printf(dev, "ERROR: could not get %s clock (%d)\n", "iface", ret); goto error; } /* Bring up initial clocks if they're off */ ret = clk_enable(sc->clk_core); if (ret != 0) { device_printf(dev, "ERROR: couldn't enable core clock (%u)\n", ret); goto error; } ret = clk_enable(sc->clk_iface); if (ret != 0) { device_printf(dev, "ERROR: couldn't enable iface clock (%u)\n", ret); goto error; } /* * Read optional spi-max-frequency */ if (OF_getencprop(ofw_bus_get_node(dev), "spi-max-frequency", &val, sizeof(val)) > 0) sc->config.max_frequency = val; else sc->config.max_frequency = SPI_MAX_RATE; /* * Read optional cs-select */ if (OF_getencprop(ofw_bus_get_node(dev), "cs-select", &val, sizeof(val)) > 0) sc->config.cs_select = val; else sc->config.cs_select = 0; /* * Read optional num-cs */ if (OF_getencprop(ofw_bus_get_node(dev), "num-cs", &val, sizeof(val)) > 0) sc->config.num_cs = val; else sc->config.num_cs = SPI_NUM_CHIPSELECTS; ret = fdt_pinctrl_configure_by_name(dev, "default"); if (ret != 0) { device_printf(dev, "ERROR: could not configure default pinmux\n"); goto error; } ret = qcom_spi_hw_read_controller_transfer_sizes(sc); if (ret != 0) { device_printf(dev, "ERROR: Could not read transfer config\n"); goto error; } device_printf(dev, "BLOCK: input=%u bytes, output=%u bytes\n", sc->config.input_block_size, sc->config.output_block_size); device_printf(dev, "FIFO: input=%u bytes, output=%u bytes\n", sc->config.input_fifo_size, sc->config.output_fifo_size); /* QUP config */ QCOM_SPI_LOCK(sc); ret = qcom_spi_hw_qup_init_locked(sc); if (ret != 0) { device_printf(dev, "ERROR: QUP init failed (%d)\n", ret); QCOM_SPI_UNLOCK(sc); goto error; } /* Initial SPI config */ ret = qcom_spi_hw_spi_init_locked(sc); if (ret != 0) { device_printf(dev, "ERROR: SPI init failed (%d)\n", ret); QCOM_SPI_UNLOCK(sc); goto error; } QCOM_SPI_UNLOCK(sc); sc->spibus = device_add_child(dev, "spibus", -1); /* We're done, so shut down the interface clock for now */ device_printf(dev, "DONE: shutting down interface clock for now\n"); clk_disable(sc->clk_iface); /* Register for debug sysctl */ qcom_spi_sysctl_attach(sc); bus_attach_children(dev); return (0); error: if (sc->sc_irq_h) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_h); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->clk_core) { clk_disable(sc->clk_core); clk_release(sc->clk_core); } if (sc->clk_iface) { clk_disable(sc->clk_iface); 
clk_release(sc->clk_iface); } for (i = 0; i < CS_MAX; i++) { if (sc->cs_pins[i] != NULL) gpio_pin_release(sc->cs_pins[i]); } mtx_destroy(&sc->sc_mtx); return (ret); } /* * Do a PIO transfer. * * Note that right now the TX/RX lens need to match, I'm not doing * dummy reads / dummy writes as required if they're not the same * size. The QUP hardware supports doing multi-phase transactions * where the FIFO isn't engaged for transmit or receive, but it's * not yet being done here. */ static int qcom_spi_transfer_pio_block(struct qcom_spi_softc *sc, int mode, char *tx_buf, int tx_len, char *rx_buf, int rx_len) { int ret = 0; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER, "%s: start\n", __func__); if (rx_len != tx_len) { device_printf(sc->sc_dev, "ERROR: tx/rx len doesn't match (%d/%d)\n", tx_len, rx_len); return (ENXIO); } QCOM_SPI_ASSERT_LOCKED(sc); /* * Make initial choices for transfer configuration. */ ret = qcom_spi_hw_setup_transfer_selection(sc, tx_len); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to setup transfer selection (%d)\n", ret); return (ret); } /* Now set suitable buffer/lengths */ sc->transfer.tx_buf = tx_buf; sc->transfer.tx_len = tx_len; sc->transfer.rx_buf = rx_buf; sc->transfer.rx_len = rx_len; sc->transfer.done = false; sc->transfer.active = false; /* * Loop until the full transfer set is done. * * qcom_spi_hw_setup_current_transfer() will take care of * setting a maximum transfer size for the hardware and choose * a suitable operating mode. */ while (sc->transfer.tx_offset < sc->transfer.tx_len) { /* * Set transfer to false early; this covers * it also finishing a sub-transfer and we're * about the put the block into RESET state before * starting a new transfer. */ sc->transfer.active = false; QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER, "%s: tx=%d of %d bytes, rx=%d of %d bytes\n", __func__, sc->transfer.tx_offset, sc->transfer.tx_len, sc->transfer.rx_offset, sc->transfer.rx_len); /* * Set state to RESET before doing anything. * * Otherwise the second sub-transfer that we queue up * will generate interrupts immediately when we start * configuring it here and it'll start underflowing. */ ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RESET); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: can't transition to RESET (%u)\n", ret); goto done; } /* blank interrupt state; we'll do a RESET below */ bzero(&sc->intr, sizeof(sc->intr)); sc->transfer.done = false; /* * Configure what the transfer configuration for this * sub-transfer will be. */ ret = qcom_spi_hw_setup_current_transfer(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to setup sub transfer (%d)\n", ret); goto done; } /* * For now since we're configuring up PIO, we only setup * the PIO transfer size. */ ret = qcom_spi_hw_setup_pio_transfer_cnt(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_setup_pio_transfer_cnt failed" " (%u)\n", ret); goto done; } #if 0 /* * This is what we'd do to setup the block transfer sizes. */ ret = qcom_spi_hw_setup_block_transfer_cnt(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_setup_block_transfer_cnt failed" " (%u)\n", ret); goto done; } #endif ret = qcom_spi_hw_setup_io_modes(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_setup_io_modes failed" " (%u)\n", ret); goto done; } ret = qcom_spi_hw_setup_spi_io_clock_polarity(sc, !! 
(mode & SPIBUS_MODE_CPOL)); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_setup_spi_io_clock_polarity" " failed (%u)\n", ret); goto done; } ret = qcom_spi_hw_setup_spi_config(sc, sc->state.frequency, !! (mode & SPIBUS_MODE_CPHA)); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_setup_spi_config failed" " (%u)\n", ret); goto done; } ret = qcom_spi_hw_setup_qup_config(sc, !! (tx_len > 0), !! (rx_len > 0)); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_setup_qup_config failed" " (%u)\n", ret); goto done; } ret = qcom_spi_hw_setup_operational_mask(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_setup_operational_mask failed" " (%u)\n", ret); goto done; } /* * Setup is done; reset the controller and start the PIO * write. */ /* * Set state to RUN; we may start getting interrupts that * are valid and we need to handle. */ sc->transfer.active = true; ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RUN); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: can't transition to RUN (%u)\n", ret); goto done; } /* * Set state to PAUSE */ ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_PAUSE); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: can't transition to PAUSE (%u)\n", ret); goto done; } /* * If FIFO mode, write data now. Else, we'll get an * interrupt when it's time to populate more data * in BLOCK mode. */ if (sc->state.transfer_mode == QUP_IO_M_MODE_FIFO) ret = qcom_spi_hw_write_pio_fifo(sc); else ret = qcom_spi_hw_write_pio_block(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: qcom_spi_hw_write failed (%u)\n", ret); goto done; } /* * Set state to RUN */ ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RUN); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: can't transition to RUN (%u)\n", ret); goto done; } /* * Wait for an interrupt notification (which will * continue to drive the state machine for this * sub-transfer) or timeout. */ ret = 0; while (ret == 0 && sc->transfer.done == false) { QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER, "%s: waiting\n", __func__); ret = msleep(sc, &sc->sc_mtx, 0, "qcom_spi", 0); } } done: /* * Complete; put controller into reset. * * Don't worry about return value here; if we errored out above then * we want to communicate that value to the caller. */ (void) qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RESET); QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER, "%s: completed\n", __func__); /* * Blank the transfer state so we don't use an old transfer * state in a subsequent interrupt. */ (void) qcom_spi_hw_complete_transfer(sc); sc->transfer.active = false; return (ret); } static int qcom_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct qcom_spi_softc *sc = device_get_softc(dev); uint32_t cs_val, mode_val, clock_val; uint32_t ret = 0; spibus_get_cs(child, &cs_val); spibus_get_clock(child, &clock_val); spibus_get_mode(child, &mode_val); QCOM_SPI_DPRINTF(sc, QCOM_SPI_DEBUG_TRANSFER, "%s: called; child cs=0x%08x, clock=%u, mode=0x%08x, " "cmd=%u/%u bytes; data=%u/%u bytes\n", __func__, cs_val, clock_val, mode_val, cmd->tx_cmd_sz, cmd->rx_cmd_sz, cmd->tx_data_sz, cmd->rx_data_sz); QCOM_SPI_LOCK(sc); /* * wait until the controller isn't busy */ while (sc->sc_busy == true) mtx_sleep(sc, &sc->sc_mtx, 0, "qcom_spi_wait", 0); /* * it's ours now! */ sc->sc_busy = true; sc->state.cs_high = !! 
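/*
 * The claim/release handshake used by qcom_spi_transfer() here, shown as a
 * stand-alone sketch.  Only the softc's sc_busy flag and sc_mtx are assumed;
 * the helper names are illustrative and do not exist in the driver:
 *
 *	static void
 *	qcom_spi_claim(struct qcom_spi_softc *sc)
 *	{
 *		QCOM_SPI_LOCK(sc);
 *		while (sc->sc_busy == true)
 *			mtx_sleep(sc, &sc->sc_mtx, 0, "qcom_spi_wait", 0);
 *		sc->sc_busy = true;
 *		QCOM_SPI_UNLOCK(sc);
 *	}
 *
 *	static void
 *	qcom_spi_release(struct qcom_spi_softc *sc)
 *	{
 *		QCOM_SPI_LOCK(sc);
 *		sc->sc_busy = false;
 *		wakeup_one(sc);
 *		QCOM_SPI_UNLOCK(sc);
 *	}
 *
 * Because sc_busy, not the mutex, is what serializes transfers, the mutex can
 * safely be dropped around the sleepable clk_set_freq()/clk_enable() calls
 * while the controller remains reserved for the current thread.
 */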
(cs_val & SPIBUS_CS_HIGH); sc->state.frequency = clock_val; /* * We can't set the clock frequency and enable it * with the driver lock held, as the SPI lock is non-sleepable * and the clock framework is sleepable. * * No other transaction is going on right now, so we can * unlock here and do the clock related work. */ QCOM_SPI_UNLOCK(sc); /* * Set the clock frequency */ ret = clk_set_freq(sc->clk_iface, sc->state.frequency, 0); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to set frequency to %u\n", sc->state.frequency); goto done2; } clk_enable(sc->clk_iface); QCOM_SPI_LOCK(sc); /* * Set state to RESET */ ret = qcom_spi_hw_qup_set_state_locked(sc, QUP_STATE_RESET); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: can't transition to RESET (%u)\n", ret); goto done; } /* Assert hardware CS if set, else GPIO */ if (sc->cs_pins[cs_val & ~SPIBUS_CS_HIGH] == NULL) qcom_spi_hw_spi_cs_force(sc, cs_val & SPIBUS_CS_HIGH, true); else qcom_spi_set_chipsel(sc, cs_val & ~SPIBUS_CS_HIGH, true); /* * cmd buffer transfer */ ret = qcom_spi_transfer_pio_block(sc, mode_val, cmd->tx_cmd, cmd->tx_cmd_sz, cmd->rx_cmd, cmd->rx_cmd_sz); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to transfer cmd payload (%u)\n", ret); goto done; } /* * data buffer transfer */ if (cmd->tx_data_sz > 0) { ret = qcom_spi_transfer_pio_block(sc, mode_val, cmd->tx_data, cmd->tx_data_sz, cmd->rx_data, cmd->rx_data_sz); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to transfer data payload (%u)\n", ret); goto done; } } done: /* De-assert GPIO/CS */ if (sc->cs_pins[cs_val & ~SPIBUS_CS_HIGH] == NULL) qcom_spi_hw_spi_cs_force(sc, cs_val & ~SPIBUS_CS_HIGH, false); else qcom_spi_set_chipsel(sc, cs_val & ~SPIBUS_CS_HIGH, false); /* * Similarly to when we enabled the clock, we can't hold it here * across a clk API as that's a sleep lock and we're non-sleepable. * So instead we unlock/relock here, but we still hold the busy flag. */ QCOM_SPI_UNLOCK(sc); clk_disable(sc->clk_iface); QCOM_SPI_LOCK(sc); done2: /* * We're done; so mark the bus as not busy and wakeup * the next caller. 
*/ sc->sc_busy = false; wakeup_one(sc); QCOM_SPI_UNLOCK(sc); return (ret); } static int qcom_spi_detach(device_t dev) { struct qcom_spi_softc *sc = device_get_softc(dev); int i; bus_generic_detach(sc->sc_dev); - if (sc->spibus != NULL) - device_delete_child(dev, sc->spibus); if (sc->sc_irq_h) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_h); if (sc->clk_iface) { clk_disable(sc->clk_iface); clk_release(sc->clk_iface); } if (sc->clk_core) { clk_disable(sc->clk_core); clk_release(sc->clk_core); } for (i = 0; i < CS_MAX; i++) { if (sc->cs_pins[i] != NULL) gpio_pin_release(sc->cs_pins[i]); } if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); mtx_destroy(&sc->sc_mtx); return (0); } static phandle_t qcom_spi_get_node(device_t bus, device_t dev) { return ofw_bus_get_node(bus); } static device_method_t qcom_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qcom_spi_probe), DEVMETHOD(device_attach, qcom_spi_attach), DEVMETHOD(device_detach, qcom_spi_detach), /* TODO: suspend */ /* TODO: resume */ DEVMETHOD(spibus_transfer, qcom_spi_transfer), /* ofw_bus_if */ DEVMETHOD(ofw_bus_get_node, qcom_spi_get_node), DEVMETHOD_END }; static driver_t qcom_spi_driver = { "qcom_spi", qcom_spi_methods, sizeof(struct qcom_spi_softc), }; DRIVER_MODULE(qcom_spi, simplebus, qcom_spi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, qcom_spi, ofw_spibus_driver, 0, 0); MODULE_DEPEND(qcom_spi, ofw_spibus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/smbus/smbus.c b/sys/dev/smbus/smbus.c index 28d1a16e8c96..9a37c482654b 100644 --- a/sys/dev/smbus/smbus.c +++ b/sys/dev/smbus/smbus.c @@ -1,248 +1,247 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998, 2001 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * */ #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #include "bus_if.h" struct smbus_ivar { uint8_t addr; }; /* * Autoconfiguration and support routines for System Management bus */ static int smbus_probe(device_t dev) { device_set_desc(dev, "System Management Bus"); return (0); } static int smbus_attach(device_t dev) { struct smbus_softc *sc = device_get_softc(dev); mtx_init(&sc->lock, device_get_nameunit(dev), "smbus", MTX_DEF); bus_identify_children(dev); bus_enumerate_hinted_children(dev); bus_attach_children(dev); return (0); } static int smbus_detach(device_t dev) { struct smbus_softc *sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error) return (error); - device_delete_children(dev); mtx_destroy(&sc->lock); return (0); } void smbus_generic_intr(device_t dev, u_char devaddr, char low, char high, int err) { } static device_t smbus_add_child(device_t dev, u_int order, const char *name, int unit) { struct smbus_ivar *devi; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct smbus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (NULL); } device_set_ivars(child, devi); return (child); } static void smbus_child_deleted(device_t dev, device_t child) { free(device_get_ivars(child), M_DEVBUF); } static void smbus_hinted_child(device_t bus, const char *dname, int dunit) { struct smbus_ivar *devi; device_t child; int addr; addr = 0; resource_int_value(dname, dunit, "addr", &addr); if (addr > UINT8_MAX) { device_printf(bus, "ignored incorrect slave address hint 0x%x" " for %s%d\n", addr, dname, dunit); return; } child = BUS_ADD_CHILD(bus, SMBUS_ORDER_HINTED, dname, dunit); if (child == NULL) return; devi = device_get_ivars(child); devi->addr = addr; } static int smbus_child_location(device_t parent, device_t child, struct sbuf *sb) { struct smbus_ivar *devi; devi = device_get_ivars(child); if (devi->addr != 0) sbuf_printf(sb, "addr=0x%x", devi->addr); return (0); } static int smbus_print_child(device_t parent, device_t child) { struct smbus_ivar *devi; int retval; devi = device_get_ivars(child); retval = bus_print_child_header(parent, child); if (devi->addr != 0) retval += printf(" at addr 0x%x", devi->addr); retval += bus_print_child_footer(parent, child); return (retval); } static int smbus_read_ivar(device_t parent, device_t child, int which, uintptr_t *result) { struct smbus_ivar *devi; devi = device_get_ivars(child); switch (which) { case SMBUS_IVAR_ADDR: if (devi->addr != 0) *result = devi->addr; else *result = -1; break; default: return (ENOENT); } return (0); } static int smbus_write_ivar(device_t parent, device_t child, int which, uintptr_t value) { struct smbus_ivar *devi; devi = device_get_ivars(child); switch (which) { case SMBUS_IVAR_ADDR: /* Allow to set but no change the slave address. */ if (devi->addr != 0) return (EINVAL); devi->addr = value; break; default: return (ENOENT); } return (0); } static void smbus_probe_nomatch(device_t bus, device_t child) { struct smbus_ivar *devi = device_get_ivars(child); /* * Ignore (self-identified) devices without a slave address set. * For example, smb(4). 
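 *
 * Slave addresses normally arrive here via hints; a hinted child would be
 * wired up in device.hints along these lines (device name and address are
 * purely illustrative):
 *
 *	hint.foo.0.at="smbus0"
 *	hint.foo.0.addr="0x52"
 *
 * smbus_hinted_child() above stores the "addr" value in the child's ivars
 * and ignores hints whose address does not fit in eight bits.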
*/ if (devi->addr != 0) device_printf(bus, " at addr %#x\n", devi->addr); } /* * Device methods */ static device_method_t smbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, smbus_probe), DEVMETHOD(device_attach, smbus_attach), DEVMETHOD(device_detach, smbus_detach), /* bus interface */ DEVMETHOD(bus_add_child, smbus_add_child), DEVMETHOD(bus_child_deleted, smbus_child_deleted), DEVMETHOD(bus_hinted_child, smbus_hinted_child), DEVMETHOD(bus_probe_nomatch, smbus_probe_nomatch), DEVMETHOD(bus_child_location, smbus_child_location), DEVMETHOD(bus_print_child, smbus_print_child), DEVMETHOD(bus_read_ivar, smbus_read_ivar), DEVMETHOD(bus_write_ivar, smbus_write_ivar), DEVMETHOD_END }; driver_t smbus_driver = { "smbus", smbus_methods, sizeof(struct smbus_softc), }; MODULE_VERSION(smbus, SMBUS_MODVER); diff --git a/sys/dev/sound/pci/fm801.c b/sys/dev/sound/pci/fm801.c index 72a476708aa7..3537c7807ded 100644 --- a/sys/dev/sound/pci/fm801.c +++ b/sys/dev/sound/pci/fm801.c @@ -1,763 +1,757 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000 Dmitry Dicky diwil@dataart.com * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS `AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #define PCI_VENDOR_FORTEMEDIA 0x1319 #define PCI_DEVICE_FORTEMEDIA1 0x08011319 /* Audio controller */ #define PCI_DEVICE_FORTEMEDIA2 0x08021319 /* Joystick controller */ #define FM_PCM_VOLUME 0x00 #define FM_FM_VOLUME 0x02 #define FM_I2S_VOLUME 0x04 #define FM_RECORD_SOURCE 0x06 #define FM_PLAY_CTL 0x08 #define FM_PLAY_RATE_MASK 0x0f00 #define FM_PLAY_BUF1_LAST 0x0001 #define FM_PLAY_BUF2_LAST 0x0002 #define FM_PLAY_START 0x0020 #define FM_PLAY_PAUSE 0x0040 #define FM_PLAY_STOPNOW 0x0080 #define FM_PLAY_16BIT 0x4000 #define FM_PLAY_STEREO 0x8000 #define FM_PLAY_DMALEN 0x0a #define FM_PLAY_DMABUF1 0x0c #define FM_PLAY_DMABUF2 0x10 #define FM_REC_CTL 0x14 #define FM_REC_RATE_MASK 0x0f00 #define FM_REC_BUF1_LAST 0x0001 #define FM_REC_BUF2_LAST 0x0002 #define FM_REC_START 0x0020 #define FM_REC_PAUSE 0x0040 #define FM_REC_STOPNOW 0x0080 #define FM_REC_16BIT 0x4000 #define FM_REC_STEREO 0x8000 #define FM_REC_DMALEN 0x16 #define FM_REC_DMABUF1 0x18 #define FM_REC_DMABUF2 0x1c #define FM_CODEC_CTL 0x22 #define FM_VOLUME 0x26 #define FM_VOLUME_MUTE 0x8000 #define FM_CODEC_CMD 0x2a #define FM_CODEC_CMD_READ 0x0080 #define FM_CODEC_CMD_VALID 0x0100 #define FM_CODEC_CMD_BUSY 0x0200 #define FM_CODEC_DATA 0x2c #define FM_IO_CTL 0x52 #define FM_CARD_CTL 0x54 #define FM_INTMASK 0x56 #define FM_INTMASK_PLAY 0x0001 #define FM_INTMASK_REC 0x0002 #define FM_INTMASK_VOL 0x0040 #define FM_INTMASK_MPU 0x0080 #define FM_INTSTATUS 0x5a #define FM_INTSTATUS_PLAY 0x0100 #define FM_INTSTATUS_REC 0x0200 #define FM_INTSTATUS_VOL 0x4000 #define FM_INTSTATUS_MPU 0x8000 #define FM801_DEFAULT_BUFSZ 4096 /* Other values do not work!!! */ /* debug purposes */ #define DPRINT if(0) printf /* static int fm801ch_setup(struct pcm_channel *c); */ static u_int32_t fmts[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps fm801ch_caps = { 5500, 48000, fmts, 0 }; struct fm801_info; struct fm801_chinfo { struct fm801_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; u_int32_t spd, dir, fmt; /* speed, direction, format */ u_int32_t shift; }; struct fm801_info { int type; bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; device_t dev; int num; u_int32_t unit; struct resource *reg, *irq; int regtype, regid, irqid; void *ih; u_int32_t play_flip, play_nextblk, play_start, play_blksize, play_fmt, play_shift, play_size; u_int32_t rec_flip, rec_nextblk, rec_start, rec_blksize, rec_fmt, rec_shift, rec_size; unsigned int bufsz; struct fm801_chinfo pch, rch; device_t radio; }; /* Bus Read / Write routines */ static u_int32_t fm801_rd(struct fm801_info *fm801, int regno, int size) { switch(size) { case 1: return (bus_space_read_1(fm801->st, fm801->sh, regno)); case 2: return (bus_space_read_2(fm801->st, fm801->sh, regno)); case 4: return (bus_space_read_4(fm801->st, fm801->sh, regno)); default: return 0xffffffff; } } static void fm801_wr(struct fm801_info *fm801, int regno, u_int32_t data, int size) { switch(size) { case 1: bus_space_write_1(fm801->st, fm801->sh, regno, data); break; case 2: bus_space_write_2(fm801->st, fm801->sh, regno, data); break; case 4: bus_space_write_4(fm801->st, fm801->sh, regno, data); break; } } /* -------------------------------------------------------------------- */ /* * ac97 codec routines */ #define TIMO 50 static int fm801_rdcd(kobj_t obj, void *devinfo, int regno) { struct 
fm801_info *fm801 = (struct fm801_info *)devinfo; int i; for (i = 0; i < TIMO && fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_BUSY; i++) { DELAY(10000); DPRINT("fm801 rdcd: 1 - DELAY\n"); } if (i >= TIMO) { printf("fm801 rdcd: codec busy\n"); return 0; } fm801_wr(fm801,FM_CODEC_CMD, regno|FM_CODEC_CMD_READ,2); for (i = 0; i < TIMO && !(fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_VALID); i++) { DELAY(10000); DPRINT("fm801 rdcd: 2 - DELAY\n"); } if (i >= TIMO) { printf("fm801 rdcd: write codec invalid\n"); return 0; } return fm801_rd(fm801,FM_CODEC_DATA,2); } static int fm801_wrcd(kobj_t obj, void *devinfo, int regno, u_int32_t data) { struct fm801_info *fm801 = (struct fm801_info *)devinfo; int i; DPRINT("fm801_wrcd reg 0x%x val 0x%x\n",regno, data); /* if(regno == AC97_REG_RECSEL) return; */ /* Poll until codec is ready */ for (i = 0; i < TIMO && fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_BUSY; i++) { DELAY(10000); DPRINT("fm801 rdcd: 1 - DELAY\n"); } if (i >= TIMO) { printf("fm801 wrcd: read codec busy\n"); return -1; } fm801_wr(fm801,FM_CODEC_DATA,data, 2); fm801_wr(fm801,FM_CODEC_CMD, regno,2); /* wait until codec is ready */ for (i = 0; i < TIMO && fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_BUSY; i++) { DELAY(10000); DPRINT("fm801 wrcd: 2 - DELAY\n"); } if (i >= TIMO) { printf("fm801 wrcd: read codec busy\n"); return -1; } DPRINT("fm801 wrcd release reg 0x%x val 0x%x\n",regno, data); return 0; } static kobj_method_t fm801_ac97_methods[] = { KOBJMETHOD(ac97_read, fm801_rdcd), KOBJMETHOD(ac97_write, fm801_wrcd), DEVMETHOD_END }; AC97_DECLARE(fm801_ac97); /* -------------------------------------------------------------------- */ /* * The interrupt handler */ static void fm801_intr(void *p) { struct fm801_info *fm801 = (struct fm801_info *)p; u_int32_t intsrc = fm801_rd(fm801, FM_INTSTATUS, 2); DPRINT("\nfm801_intr intsrc 0x%x ", intsrc); if(intsrc & FM_INTSTATUS_PLAY) { fm801->play_flip++; if(fm801->play_flip & 1) { fm801_wr(fm801, FM_PLAY_DMABUF1, fm801->play_start,4); } else fm801_wr(fm801, FM_PLAY_DMABUF2, fm801->play_nextblk,4); chn_intr(fm801->pch.channel); } if(intsrc & FM_INTSTATUS_REC) { fm801->rec_flip++; if(fm801->rec_flip & 1) { fm801_wr(fm801, FM_REC_DMABUF1, fm801->rec_start,4); } else fm801_wr(fm801, FM_REC_DMABUF2, fm801->rec_nextblk,4); chn_intr(fm801->rch.channel); } if ( intsrc & FM_INTSTATUS_MPU ) { /* This is a TODOish thing... */ fm801_wr(fm801, FM_INTSTATUS, intsrc & FM_INTSTATUS_MPU,2); } if ( intsrc & FM_INTSTATUS_VOL ) { /* This is a TODOish thing... */ fm801_wr(fm801, FM_INTSTATUS, intsrc & FM_INTSTATUS_VOL,2); } DPRINT("fm801_intr clear\n\n"); fm801_wr(fm801, FM_INTSTATUS, intsrc & (FM_INTSTATUS_PLAY | FM_INTSTATUS_REC), 2); } /* -------------------------------------------------------------------- */ /* channel interface */ static void * fm801ch_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct fm801_info *fm801 = (struct fm801_info *)devinfo; struct fm801_chinfo *ch = (dir == PCMDIR_PLAY)? &fm801->pch : &fm801->rch; DPRINT("fm801ch_init, direction = %d\n", dir); ch->parent = fm801; ch->channel = c; ch->buffer = b; ch->dir = dir; if (sndbuf_alloc(ch->buffer, fm801->parent_dmat, 0, fm801->bufsz) != 0) return NULL; return (void *)ch; } static int fm801ch_setformat(kobj_t obj, void *data, u_int32_t format) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; DPRINT("fm801ch_setformat 0x%x : %s, %s, %s, %s\n", format, (AFMT_CHANNEL(format) > 1)?"stereo":"mono", (format & AFMT_16BIT) ? 
"16bit":"8bit", (format & AFMT_SIGNED)? "signed":"unsigned", (format & AFMT_BIGENDIAN)?"bigendiah":"littleendian" ); if(ch->dir == PCMDIR_PLAY) { fm801->play_fmt = (AFMT_CHANNEL(format) > 1)? FM_PLAY_STEREO : 0; fm801->play_fmt |= (format & AFMT_16BIT) ? FM_PLAY_16BIT : 0; return 0; } if(ch->dir == PCMDIR_REC ) { fm801->rec_fmt = (AFMT_CHANNEL(format) > 1)? FM_REC_STEREO:0; fm801->rec_fmt |= (format & AFMT_16BIT) ? FM_PLAY_16BIT : 0; return 0; } return 0; } struct { u_int32_t limit; u_int32_t rate; } fm801_rates[11] = { { 6600, 5500 }, { 8750, 8000 }, { 10250, 9600 }, { 13200, 11025 }, { 17500, 16000 }, { 20500, 19200 }, { 26500, 22050 }, { 35000, 32000 }, { 41000, 38400 }, { 46000, 44100 }, { 48000, 48000 }, /* anything above -> 48000 */ }; static u_int32_t fm801ch_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; int i; for (i = 0; i < 10 && fm801_rates[i].limit <= speed; i++) ; if(ch->dir == PCMDIR_PLAY) { fm801->pch.spd = fm801_rates[i].rate; fm801->play_shift = (i<<8); fm801->play_shift &= FM_PLAY_RATE_MASK; } if(ch->dir == PCMDIR_REC ) { fm801->rch.spd = fm801_rates[i].rate; fm801->rec_shift = (i<<8); fm801->rec_shift &= FM_REC_RATE_MASK; } ch->spd = fm801_rates[i].rate; return fm801_rates[i].rate; } static u_int32_t fm801ch_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; /* * Don't mind for play_flip, set the blocksize to the * desired values in any case - otherwise sound playback * breaks here. */ if(ch->dir == PCMDIR_PLAY) fm801->play_blksize = blocksize; if(ch->dir == PCMDIR_REC) fm801->rec_blksize = blocksize; DPRINT("fm801ch_setblocksize %d (dir %d)\n",blocksize, ch->dir); return blocksize; } static int fm801ch_trigger(kobj_t obj, void *data, int go) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; u_int32_t baseaddr = sndbuf_getbufaddr(ch->buffer); u_int32_t k1; DPRINT("fm801ch_trigger go %d , ", go); if (!PCMTRIG_COMMON(go)) return 0; if (ch->dir == PCMDIR_PLAY) { if (go == PCMTRIG_START) { fm801->play_start = baseaddr; fm801->play_nextblk = fm801->play_start + fm801->play_blksize; fm801->play_flip = 0; fm801_wr(fm801, FM_PLAY_DMALEN, fm801->play_blksize - 1, 2); fm801_wr(fm801, FM_PLAY_DMABUF1,fm801->play_start,4); fm801_wr(fm801, FM_PLAY_DMABUF2,fm801->play_nextblk,4); fm801_wr(fm801, FM_PLAY_CTL, FM_PLAY_START | FM_PLAY_STOPNOW | fm801->play_fmt | fm801->play_shift, 2 ); } else { fm801->play_flip = 0; k1 = fm801_rd(fm801, FM_PLAY_CTL,2); fm801_wr(fm801, FM_PLAY_CTL, (k1 & ~(FM_PLAY_STOPNOW | FM_PLAY_START)) | FM_PLAY_BUF1_LAST | FM_PLAY_BUF2_LAST, 2 ); } } else if(ch->dir == PCMDIR_REC) { if (go == PCMTRIG_START) { fm801->rec_start = baseaddr; fm801->rec_nextblk = fm801->rec_start + fm801->rec_blksize; fm801->rec_flip = 0; fm801_wr(fm801, FM_REC_DMALEN, fm801->rec_blksize - 1, 2); fm801_wr(fm801, FM_REC_DMABUF1,fm801->rec_start,4); fm801_wr(fm801, FM_REC_DMABUF2,fm801->rec_nextblk,4); fm801_wr(fm801, FM_REC_CTL, FM_REC_START | FM_REC_STOPNOW | fm801->rec_fmt | fm801->rec_shift, 2 ); } else { fm801->rec_flip = 0; k1 = fm801_rd(fm801, FM_REC_CTL,2); fm801_wr(fm801, FM_REC_CTL, (k1 & ~(FM_REC_STOPNOW | FM_REC_START)) | FM_REC_BUF1_LAST | FM_REC_BUF2_LAST, 2); } } return 0; } /* Almost ALSA copy */ static u_int32_t fm801ch_getptr(kobj_t obj, void *data) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; u_int32_t result = 0; if (ch->dir == PCMDIR_PLAY) { result = fm801_rd(fm801, 
(fm801->play_flip&1) ? FM_PLAY_DMABUF2:FM_PLAY_DMABUF1, 4) - fm801->play_start; } if (ch->dir == PCMDIR_REC) { result = fm801_rd(fm801, (fm801->rec_flip&1) ? FM_REC_DMABUF2:FM_REC_DMABUF1, 4) - fm801->rec_start; } return result; } static struct pcmchan_caps * fm801ch_getcaps(kobj_t obj, void *data) { return &fm801ch_caps; } static kobj_method_t fm801ch_methods[] = { KOBJMETHOD(channel_init, fm801ch_init), KOBJMETHOD(channel_setformat, fm801ch_setformat), KOBJMETHOD(channel_setspeed, fm801ch_setspeed), KOBJMETHOD(channel_setblocksize, fm801ch_setblocksize), KOBJMETHOD(channel_trigger, fm801ch_trigger), KOBJMETHOD(channel_getptr, fm801ch_getptr), KOBJMETHOD(channel_getcaps, fm801ch_getcaps), DEVMETHOD_END }; CHANNEL_DECLARE(fm801ch); /* -------------------------------------------------------------------- */ /* * Init routine is taken from an original NetBSD driver */ static int fm801_init(struct fm801_info *fm801) { u_int32_t k1; /* reset codec */ fm801_wr(fm801, FM_CODEC_CTL, 0x0020,2); DELAY(100000); fm801_wr(fm801, FM_CODEC_CTL, 0x0000,2); DELAY(100000); fm801_wr(fm801, FM_PCM_VOLUME, 0x0808,2); fm801_wr(fm801, FM_FM_VOLUME, 0x0808,2); fm801_wr(fm801, FM_I2S_VOLUME, 0x0808,2); fm801_wr(fm801, 0x40,0x107f,2); /* enable legacy audio */ fm801_wr((void *)fm801, FM_RECORD_SOURCE, 0x0000,2); /* Unmask playback, record and mpu interrupts, mask the rest */ k1 = fm801_rd((void *)fm801, FM_INTMASK,2); fm801_wr(fm801, FM_INTMASK, (k1 & ~(FM_INTMASK_PLAY | FM_INTMASK_REC | FM_INTMASK_MPU)) | FM_INTMASK_VOL,2); fm801_wr(fm801, FM_INTSTATUS, FM_INTSTATUS_PLAY | FM_INTSTATUS_REC | FM_INTSTATUS_MPU | FM_INTSTATUS_VOL,2); DPRINT("FM801 init Ok\n"); return 0; } static int fm801_pci_attach(device_t dev) { struct ac97_info *codec = NULL; struct fm801_info *fm801; int i; int mapped = 0; char status[SND_STATUSLEN]; fm801 = malloc(sizeof(*fm801), M_DEVBUF, M_WAITOK | M_ZERO); fm801->type = pci_get_devid(dev); pci_enable_busmaster(dev); for (i = 0; (mapped == 0) && (i < PCI_MAXMAPS_0); i++) { fm801->regid = PCIR_BAR(i); fm801->regtype = SYS_RES_MEMORY; fm801->reg = bus_alloc_resource_any(dev, fm801->regtype, &fm801->regid, RF_ACTIVE); if(!fm801->reg) { fm801->regtype = SYS_RES_IOPORT; fm801->reg = bus_alloc_resource_any(dev, fm801->regtype, &fm801->regid, RF_ACTIVE); } if(fm801->reg) { fm801->st = rman_get_bustag(fm801->reg); fm801->sh = rman_get_bushandle(fm801->reg); mapped++; } } if (mapped == 0) { device_printf(dev, "unable to map register space\n"); goto oops; } fm801->bufsz = pcm_getbuffersize(dev, 4096, FM801_DEFAULT_BUFSZ, 65536); fm801_init(fm801); codec = AC97_CREATE(dev, fm801, fm801_ac97); if (codec == NULL) goto oops; if (mixer_init(dev, ac97_getmixerclass(), codec) == -1) goto oops; fm801->irqid = 0; fm801->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &fm801->irqid, RF_ACTIVE | RF_SHAREABLE); if (!fm801->irq || snd_setup_intr(dev, fm801->irq, 0, fm801_intr, fm801, &fm801->ih)) { device_printf(dev, "unable to map interrupt\n"); goto oops; } if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/fm801->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL, &fm801->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto oops; } snprintf(status, SND_STATUSLEN, "%s 0x%jx irq %jd on %s", (fm801->regtype == SYS_RES_IOPORT)? 
"port" : "mem", rman_get_start(fm801->reg), rman_get_start(fm801->irq), device_get_nameunit(device_get_parent(dev))); #define FM801_MAXPLAYCH 1 pcm_init(dev, fm801); pcm_addchan(dev, PCMDIR_PLAY, &fm801ch_class, fm801); pcm_addchan(dev, PCMDIR_REC, &fm801ch_class, fm801); if (pcm_register(dev, status)) goto oops; fm801->radio = device_add_child(dev, "radio", DEVICE_UNIT_ANY); bus_attach_children(dev); return 0; oops: if (codec) ac97_destroy(codec); if (fm801->reg) bus_release_resource(dev, fm801->regtype, fm801->regid, fm801->reg); if (fm801->ih) bus_teardown_intr(dev, fm801->irq, fm801->ih); if (fm801->irq) bus_release_resource(dev, SYS_RES_IRQ, fm801->irqid, fm801->irq); if (fm801->parent_dmat) bus_dma_tag_destroy(fm801->parent_dmat); free(fm801, M_DEVBUF); return ENXIO; } static int fm801_pci_detach(device_t dev) { int r; struct fm801_info *fm801; DPRINT("Forte Media FM801 detach\n"); fm801 = pcm_getdevinfo(dev); r = bus_generic_detach(dev); if (r) return r; - if (fm801->radio != NULL) { - r = device_delete_child(dev, fm801->radio); - if (r) - return r; - fm801->radio = NULL; - } r = pcm_unregister(dev); if (r) return r; bus_release_resource(dev, fm801->regtype, fm801->regid, fm801->reg); bus_teardown_intr(dev, fm801->irq, fm801->ih); bus_release_resource(dev, SYS_RES_IRQ, fm801->irqid, fm801->irq); bus_dma_tag_destroy(fm801->parent_dmat); free(fm801, M_DEVBUF); return 0; } static int fm801_pci_probe( device_t dev ) { int id; if ((id = pci_get_devid(dev)) == PCI_DEVICE_FORTEMEDIA1 ) { device_set_desc(dev, "Forte Media FM801 Audio Controller"); return BUS_PROBE_DEFAULT; } /* if ((id = pci_get_devid(dev)) == PCI_DEVICE_FORTEMEDIA2 ) { device_set_desc(dev, "Forte Media FM801 Joystick (Not Supported)"); return ENXIO; } */ return ENXIO; } static struct resource * fm801_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct fm801_info *fm801; fm801 = pcm_getdevinfo(bus); if (type == SYS_RES_IOPORT && *rid == PCIR_BAR(0)) return (fm801->reg); return (NULL); } static int fm801_release_resource(device_t bus, device_t child, struct resource *r) { return (0); } static device_method_t fm801_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fm801_pci_probe), DEVMETHOD(device_attach, fm801_pci_attach), DEVMETHOD(device_detach, fm801_pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_alloc_resource, fm801_alloc_resource), DEVMETHOD(bus_release_resource, fm801_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD_END }; static driver_t fm801_driver = { "pcm", fm801_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_fm801, pci, fm801_driver, 0, 0); MODULE_DEPEND(snd_fm801, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_fm801, 1); diff --git a/sys/dev/spibus/controller/allwinner/aw_spi.c b/sys/dev/spibus/controller/allwinner/aw_spi.c index e17152b054d7..34461ab2ba9c 100644 --- a/sys/dev/spibus/controller/allwinner/aw_spi.c +++ b/sys/dev/spibus/controller/allwinner/aw_spi.c @@ -1,611 +1,609 @@ /*- * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" #define AW_SPI_GCR 0x04 /* Global Control Register */ #define AW_SPI_GCR_EN (1 << 0) /* ENable */ #define AW_SPI_GCR_MODE_MASTER (1 << 1) /* 1 = Master, 0 = Slave */ #define AW_SPI_GCR_TP_EN (1 << 7) /* 1 = Stop transmit when FIFO is full */ #define AW_SPI_GCR_SRST (1 << 31) /* Soft Reset */ #define AW_SPI_TCR 0x08 /* Transfer Control register */ #define AW_SPI_TCR_XCH (1 << 31) /* Initiate transfer */ #define AW_SPI_TCR_SDDM (1 << 14) /* Sending Delay Data Mode */ #define AW_SPI_TCR_SDM (1 << 13) /* Master Sample Data Mode */ #define AW_SPI_TCR_FBS (1 << 12) /* First Transmit Bit Select (1 == LSB) */ #define AW_SPI_TCR_SDC (1 << 11) /* Master Sample Data Control */ #define AW_SPI_TCR_RPSM (1 << 10) /* Rapid Mode Select */ #define AW_SPI_TCR_DDB (1 << 9) /* Dummy Burst Type */ #define AW_SPI_TCR_SSSEL_MASK 0x30 /* Chip select */ #define AW_SPI_TCR_SSSEL_SHIFT 4 #define AW_SPI_TCR_SS_LEVEL (1 << 7) /* 1 == CS High */ #define AW_SPI_TCR_SS_OWNER (1 << 6) /* 1 == Software controlled */ #define AW_SPI_TCR_SPOL (1 << 2) /* 1 == Active low */ #define AW_SPI_TCR_CPOL (1 << 1) /* 1 == Active low */ #define AW_SPI_TCR_CPHA (1 << 0) /* 1 == Phase 1 */ #define AW_SPI_IER 0x10 /* Interrupt Control Register */ #define AW_SPI_IER_SS (1 << 13) /* Chip select went from valid to invalid */ #define AW_SPI_IER_TC (1 << 12) /* Transfer complete */ #define AW_SPI_IER_TF_UDR (1 << 11) /* TXFIFO underrun */ #define AW_SPI_IER_TF_OVF (1 << 10) /* TXFIFO overrun */ #define AW_SPI_IER_RF_UDR (1 << 9) /* RXFIFO underrun */ #define AW_SPI_IER_RF_OVF (1 << 8) /* RXFIFO overrun */ #define AW_SPI_IER_TF_FULL (1 << 6) /* TXFIFO Full */ #define AW_SPI_IER_TF_EMP (1 << 5) /* TXFIFO Empty */ #define AW_SPI_IER_TF_ERQ (1 << 4) /* TXFIFO Empty Request */ #define AW_SPI_IER_RF_FULL (1 << 2) /* RXFIFO Full */ #define AW_SPI_IER_RF_EMP (1 << 1) /* RXFIFO Empty */ #define AW_SPI_IER_RF_RDY (1 << 0) /* RXFIFO Ready Request */ #define AW_SPI_ISR 0x14 /* Interrupt Status Register */ #define AW_SPI_FCR 0x18 /* FIFO Control Register */ #define AW_SPI_FCR_TX_RST (1 << 31) /* Reset TX FIFO */ #define AW_SPI_FCR_TX_TRIG_MASK 0xFF0000 /* TX FIFO Trigger level */ #define AW_SPI_FCR_TX_TRIG_SHIFT 16 #define AW_SPI_FCR_RX_RST (1 << 15) /* Reset RX FIFO */ #define AW_SPI_FCR_RX_TRIG_MASK 0xFF /* RX FIFO Trigger level */ #define 
AW_SPI_FCR_RX_TRIG_SHIFT 0 #define AW_SPI_FSR 0x1C /* FIFO Status Register */ #define AW_SPI_FSR_TB_WR (1 << 31) #define AW_SPI_FSR_TB_CNT_MASK 0x70000000 #define AW_SPI_FSR_TB_CNT_SHIFT 28 #define AW_SPI_FSR_TF_CNT_MASK 0xFF0000 #define AW_SPI_FSR_TF_CNT_SHIFT 16 #define AW_SPI_FSR_RB_WR (1 << 15) #define AW_SPI_FSR_RB_CNT_MASK 0x7000 #define AW_SPI_FSR_RB_CNT_SHIFT 12 #define AW_SPI_FSR_RF_CNT_MASK 0xFF #define AW_SPI_FSR_RF_CNT_SHIFT 0 #define AW_SPI_WCR 0x20 /* Wait Clock Counter Register */ #define AW_SPI_CCR 0x24 /* Clock Rate Control Register */ #define AW_SPI_CCR_DRS (1 << 12) /* Clock divider select */ #define AW_SPI_CCR_CDR1_MASK 0xF00 #define AW_SPI_CCR_CDR1_SHIFT 8 #define AW_SPI_CCR_CDR2_MASK 0xFF #define AW_SPI_CCR_CDR2_SHIFT 0 #define AW_SPI_MBC 0x30 /* Burst Counter Register */ #define AW_SPI_MTC 0x34 /* Transmit Counter Register */ #define AW_SPI_BCC 0x38 /* Burst Control Register */ #define AW_SPI_MDMA_CTL 0x88 /* Normal DMA Control Register */ #define AW_SPI_TXD 0x200 /* TX Data Register */ #define AW_SPI_RDX 0x300 /* RX Data Register */ #define AW_SPI_MAX_CS 4 #define AW_SPI_FIFO_SIZE 64 static struct ofw_compat_data compat_data[] = { { "allwinner,sun8i-h3-spi", 1 }, { NULL, 0 } }; static struct resource_spec aw_spi_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; struct aw_spi_softc { device_t dev; device_t spibus; struct resource *res[2]; struct mtx mtx; clk_t clk_ahb; clk_t clk_mod; uint64_t mod_freq; hwreset_t rst_ahb; void * intrhand; int transfer; uint8_t *rxbuf; uint32_t rxcnt; uint8_t *txbuf; uint32_t txcnt; uint32_t txlen; uint32_t rxlen; }; #define AW_SPI_LOCK(sc) mtx_lock(&(sc)->mtx) #define AW_SPI_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define AW_SPI_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define AW_SPI_READ_1(sc, reg) bus_read_1((sc)->res[0], (reg)) #define AW_SPI_WRITE_1(sc, reg, val) bus_write_1((sc)->res[0], (reg), (val)) #define AW_SPI_READ_4(sc, reg) bus_read_4((sc)->res[0], (reg)) #define AW_SPI_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static int aw_spi_probe(device_t dev); static int aw_spi_attach(device_t dev); static int aw_spi_detach(device_t dev); static int aw_spi_intr(void *arg); static int aw_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Allwinner SPI"); return (BUS_PROBE_DEFAULT); } static int aw_spi_attach(device_t dev) { struct aw_spi_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); if (bus_alloc_resources(dev, aw_spi_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, aw_spi_intr, NULL, sc, &sc->intrhand)) { bus_release_resources(dev, aw_spi_spec, sc->res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } /* De-assert reset */ if (hwreset_get_by_ofw_idx(dev, 0, 0, &sc->rst_ahb) == 0) { error = hwreset_deassert(sc->rst_ahb); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); goto fail; } } /* Activate the module clock. 
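 * ("ahb" is the bus-interface clock, "mod" the clock that the CCR divider
 * later scales to generate SCLK; aw_spi_transfer() reprograms the mod clock
 * to twice the requested SPI rate because, as noted there, the minimum
 * divider is 2.)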
*/ error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->clk_ahb); if (error != 0) { device_printf(dev, "cannot get ahb clock\n"); goto fail; } error = clk_get_by_ofw_name(dev, 0, "mod", &sc->clk_mod); if (error != 0) { device_printf(dev, "cannot get mod clock\n"); goto fail; } error = clk_enable(sc->clk_ahb); if (error != 0) { device_printf(dev, "cannot enable ahb clock\n"); goto fail; } error = clk_enable(sc->clk_mod); if (error != 0) { device_printf(dev, "cannot enable mod clock\n"); goto fail; } sc->spibus = device_add_child(dev, "spibus", DEVICE_UNIT_ANY); bus_attach_children(dev); return (0); fail: aw_spi_detach(dev); return (error); } static int aw_spi_detach(device_t dev) { struct aw_spi_softc *sc; sc = device_get_softc(dev); bus_generic_detach(sc->dev); - if (sc->spibus != NULL) - device_delete_child(dev, sc->spibus); if (sc->clk_mod != NULL) clk_release(sc->clk_mod); if (sc->clk_ahb) clk_release(sc->clk_ahb); if (sc->rst_ahb) hwreset_assert(sc->rst_ahb); if (sc->intrhand != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand); bus_release_resources(dev, aw_spi_spec, sc->res); mtx_destroy(&sc->mtx); return (0); } static phandle_t aw_spi_get_node(device_t bus, device_t dev) { return ofw_bus_get_node(bus); } static void aw_spi_setup_mode(struct aw_spi_softc *sc, uint32_t mode) { uint32_t reg; /* We only support master mode */ reg = AW_SPI_READ_4(sc, AW_SPI_GCR); reg |= AW_SPI_GCR_MODE_MASTER; AW_SPI_WRITE_4(sc, AW_SPI_GCR, reg); /* Setup the modes */ reg = AW_SPI_READ_4(sc, AW_SPI_TCR); if (mode & SPIBUS_MODE_CPHA) reg |= AW_SPI_TCR_CPHA; if (mode & SPIBUS_MODE_CPOL) reg |= AW_SPI_TCR_CPOL; AW_SPI_WRITE_4(sc, AW_SPI_TCR, reg); } static void aw_spi_setup_cs(struct aw_spi_softc *sc, uint32_t cs, bool low) { uint32_t reg; /* Setup CS */ reg = AW_SPI_READ_4(sc, AW_SPI_TCR); reg &= ~(AW_SPI_TCR_SSSEL_MASK); reg |= cs << AW_SPI_TCR_SSSEL_SHIFT; reg |= AW_SPI_TCR_SS_OWNER; if (low) reg &= ~(AW_SPI_TCR_SS_LEVEL); else reg |= AW_SPI_TCR_SS_LEVEL; AW_SPI_WRITE_4(sc, AW_SPI_TCR, reg); } static uint64_t aw_spi_clock_test_cdr1(struct aw_spi_softc *sc, uint64_t clock, uint32_t *ccr) { uint64_t cur, best = 0; int i, max, best_div; max = AW_SPI_CCR_CDR1_MASK >> AW_SPI_CCR_CDR1_SHIFT; for (i = 0; i < max; i++) { cur = sc->mod_freq / (1 << i); if ((clock - cur) < (clock - best)) { best = cur; best_div = i; } } *ccr = (best_div << AW_SPI_CCR_CDR1_SHIFT); return (best); } static uint64_t aw_spi_clock_test_cdr2(struct aw_spi_softc *sc, uint64_t clock, uint32_t *ccr) { uint64_t cur, best = 0; int i, max, best_div; max = ((AW_SPI_CCR_CDR2_MASK) >> AW_SPI_CCR_CDR2_SHIFT); for (i = 0; i < max; i++) { cur = sc->mod_freq / (2 * i + 1); if ((clock - cur) < (clock - best)) { best = cur; best_div = i; } } *ccr = AW_SPI_CCR_DRS | (best_div << AW_SPI_CCR_CDR2_SHIFT); return (best); } static void aw_spi_setup_clock(struct aw_spi_softc *sc, uint64_t clock) { uint64_t best_ccr1, best_ccr2; uint32_t ccr, ccr1, ccr2; best_ccr1 = aw_spi_clock_test_cdr1(sc, clock, &ccr1); best_ccr2 = aw_spi_clock_test_cdr2(sc, clock, &ccr2); if (best_ccr1 == clock) { ccr = ccr1; } else if (best_ccr2 == clock) { ccr = ccr2; } else { if ((clock - best_ccr1) < (clock - best_ccr2)) ccr = ccr1; else ccr = ccr2; } AW_SPI_WRITE_4(sc, AW_SPI_CCR, ccr); } static inline void aw_spi_fill_txfifo(struct aw_spi_softc *sc) { uint32_t reg, txcnt; int i; if (sc->txcnt == sc->txlen) return; reg = AW_SPI_READ_4(sc, AW_SPI_FSR); reg &= AW_SPI_FSR_TF_CNT_MASK; txcnt = reg >> AW_SPI_FSR_TF_CNT_SHIFT; for (i = 0; i < (AW_SPI_FIFO_SIZE - txcnt); i++) { 
AW_SPI_WRITE_1(sc, AW_SPI_TXD, sc->txbuf[sc->txcnt++]); if (sc->txcnt == sc->txlen) break; } return; } static inline void aw_spi_read_rxfifo(struct aw_spi_softc *sc) { uint32_t reg; uint8_t val; int i; if (sc->rxcnt == sc->rxlen) return; reg = AW_SPI_READ_4(sc, AW_SPI_FSR); reg = (reg & AW_SPI_FSR_RF_CNT_MASK) >> AW_SPI_FSR_RF_CNT_SHIFT; for (i = 0; i < reg; i++) { val = AW_SPI_READ_1(sc, AW_SPI_RDX); if (sc->rxcnt < sc->rxlen) sc->rxbuf[sc->rxcnt++] = val; } } static int aw_spi_intr(void *arg) { struct aw_spi_softc *sc; uint32_t intr; sc = (struct aw_spi_softc *)arg; intr = AW_SPI_READ_4(sc, AW_SPI_ISR); if (intr & AW_SPI_IER_RF_RDY) aw_spi_read_rxfifo(sc); if (intr & AW_SPI_IER_TF_ERQ) { aw_spi_fill_txfifo(sc); /* * If we don't have anything else to write * disable TXFifo interrupts */ if (sc->txcnt == sc->txlen) AW_SPI_WRITE_4(sc, AW_SPI_IER, AW_SPI_IER_TC | AW_SPI_IER_RF_RDY); } if (intr & AW_SPI_IER_TC) { /* read the rest of the data from the fifo */ aw_spi_read_rxfifo(sc); /* Disable the interrupts */ AW_SPI_WRITE_4(sc, AW_SPI_IER, 0); sc->transfer = 0; wakeup(sc); } /* Clear Interrupts */ AW_SPI_WRITE_4(sc, AW_SPI_ISR, intr); return (intr != 0 ? FILTER_HANDLED : FILTER_STRAY); } static int aw_spi_xfer(struct aw_spi_softc *sc, void *rxbuf, void *txbuf, uint32_t txlen, uint32_t rxlen) { uint32_t reg; int error = 0, timeout; sc->rxbuf = rxbuf; sc->rxcnt = 0; sc->txbuf = txbuf; sc->txcnt = 0; sc->txlen = txlen; sc->rxlen = rxlen; /* Reset the FIFOs */ AW_SPI_WRITE_4(sc, AW_SPI_FCR, AW_SPI_FCR_TX_RST | AW_SPI_FCR_RX_RST); for (timeout = 1000; timeout > 0; timeout--) { reg = AW_SPI_READ_4(sc, AW_SPI_FCR); if (reg == 0) break; } if (timeout == 0) { device_printf(sc->dev, "Cannot reset the FIFOs\n"); return (EIO); } /* * Set the TX FIFO threshold to 3/4-th the size and * the RX FIFO one to 1/4-th. 
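 * With AW_SPI_FIFO_SIZE at 64 bytes this works out to a TX trigger level of
 * 48 bytes and an RX trigger level of 16 bytes; the TF_ERQ and RF_RDY
 * interrupts handled in aw_spi_intr() then keep the FIFOs topped up and
 * drained around those levels.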
*/ AW_SPI_WRITE_4(sc, AW_SPI_FCR, ((3 * AW_SPI_FIFO_SIZE / 4) << AW_SPI_FCR_TX_TRIG_SHIFT) | ((AW_SPI_FIFO_SIZE / 4) << AW_SPI_FCR_RX_TRIG_SHIFT)); /* Write the counters */ AW_SPI_WRITE_4(sc, AW_SPI_MBC, txlen); AW_SPI_WRITE_4(sc, AW_SPI_MTC, txlen); AW_SPI_WRITE_4(sc, AW_SPI_BCC, txlen); /* First fill */ aw_spi_fill_txfifo(sc); /* Start transmit */ reg = AW_SPI_READ_4(sc, AW_SPI_TCR); reg |= AW_SPI_TCR_XCH; AW_SPI_WRITE_4(sc, AW_SPI_TCR, reg); /* * Enable interrupts for : * Transmit complete * TX Fifo is below its trigger threshold * RX Fifo is above its trigger threshold */ AW_SPI_WRITE_4(sc, AW_SPI_IER, AW_SPI_IER_TC | AW_SPI_IER_TF_ERQ | AW_SPI_IER_RF_RDY); sc->transfer = 1; while (error == 0 && sc->transfer != 0) error = msleep(sc, &sc->mtx, 0, "aw_spi", 10 * hz); return (0); } static int aw_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct aw_spi_softc *sc; uint32_t cs, mode, clock, reg; int err = 0; sc = device_get_softc(dev); spibus_get_cs(child, &cs); spibus_get_clock(child, &clock); spibus_get_mode(child, &mode); /* The minimum divider is 2 so set the clock at twice the needed speed */ clk_set_freq(sc->clk_mod, 2 * clock, CLK_SET_ROUND_DOWN); clk_get_freq(sc->clk_mod, &sc->mod_freq); if (cs >= AW_SPI_MAX_CS) { device_printf(dev, "Invalid cs %d\n", cs); return (EINVAL); } mtx_lock(&sc->mtx); /* Enable and reset the module */ reg = AW_SPI_READ_4(sc, AW_SPI_GCR); reg |= AW_SPI_GCR_EN | AW_SPI_GCR_SRST; AW_SPI_WRITE_4(sc, AW_SPI_GCR, reg); /* Setup clock, CS and mode */ aw_spi_setup_clock(sc, clock); aw_spi_setup_mode(sc, mode); if (cs & SPIBUS_CS_HIGH) aw_spi_setup_cs(sc, cs, false); else aw_spi_setup_cs(sc, cs, true); /* xfer */ err = 0; if (cmd->tx_cmd_sz > 0) err = aw_spi_xfer(sc, cmd->rx_cmd, cmd->tx_cmd, cmd->tx_cmd_sz, cmd->rx_cmd_sz); if (cmd->tx_data_sz > 0 && err == 0) err = aw_spi_xfer(sc, cmd->rx_data, cmd->tx_data, cmd->tx_data_sz, cmd->rx_data_sz); if (cs & SPIBUS_CS_HIGH) aw_spi_setup_cs(sc, cs, true); else aw_spi_setup_cs(sc, cs, false); /* Disable the module */ reg = AW_SPI_READ_4(sc, AW_SPI_GCR); reg &= ~AW_SPI_GCR_EN; AW_SPI_WRITE_4(sc, AW_SPI_GCR, reg); mtx_unlock(&sc->mtx); return (err); } static device_method_t aw_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_spi_probe), DEVMETHOD(device_attach, aw_spi_attach), DEVMETHOD(device_detach, aw_spi_detach), /* spibus_if */ DEVMETHOD(spibus_transfer, aw_spi_transfer), /* ofw_bus_if */ DEVMETHOD(ofw_bus_get_node, aw_spi_get_node), DEVMETHOD_END }; static driver_t aw_spi_driver = { "aw_spi", aw_spi_methods, sizeof(struct aw_spi_softc), }; DRIVER_MODULE(aw_spi, simplebus, aw_spi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, aw_spi, ofw_spibus_driver, 0, 0); MODULE_DEPEND(aw_spi, ofw_spibus, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/dev/spibus/controller/rockchip/rk_spi.c b/sys/dev/spibus/controller/rockchip/rk_spi.c index 2c8093c6dc82..db650763f6e1 100644 --- a/sys/dev/spibus/controller/rockchip/rk_spi.c +++ b/sys/dev/spibus/controller/rockchip/rk_spi.c @@ -1,477 +1,475 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" #define RK_SPI_CTRLR0 0x0000 #define CTRLR0_OPM_MASTER (0 << 20) #define CTRLR0_XFM_TR (0 << 18) #define CTRLR0_FRF_MOTO (0 << 16) #define CTRLR0_BHT_8BIT (1 << 13) #define CTRLR0_EM_BIG (1 << 11) #define CTRLR0_SSD_ONE (1 << 10) #define CTRLR0_SCPOL (1 << 7) #define CTRLR0_SCPH (1 << 6) #define CTRLR0_DFS_8BIT (1 << 0) #define RK_SPI_CTRLR1 0x0004 #define RK_SPI_ENR 0x0008 #define RK_SPI_SER 0x000c #define RK_SPI_BAUDR 0x0010 #define RK_SPI_TXFTLR 0x0014 #define RK_SPI_RXFTLR 0x0018 #define RK_SPI_TXFLR 0x001c #define RK_SPI_RXFLR 0x0020 #define RK_SPI_SR 0x0024 #define SR_BUSY (1 << 0) #define RK_SPI_IPR 0x0028 #define RK_SPI_IMR 0x002c #define IMR_RFFIM (1 << 4) #define IMR_TFEIM (1 << 0) #define RK_SPI_ISR 0x0030 #define ISR_RFFIS (1 << 4) #define ISR_TFEIS (1 << 0) #define RK_SPI_RISR 0x0034 #define RK_SPI_ICR 0x0038 #define RK_SPI_DMACR 0x003c #define RK_SPI_DMATDLR 0x0040 #define RK_SPI_DMARDLR 0x0044 #define RK_SPI_TXDR 0x0400 #define RK_SPI_RXDR 0x0800 #define CS_MAX 1 static struct ofw_compat_data compat_data[] = { { "rockchip,rk3328-spi", 1 }, { "rockchip,rk3399-spi", 1 }, { "rockchip,rk3568-spi", 1 }, { NULL, 0 } }; static struct resource_spec rk_spi_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; struct rk_spi_softc { device_t dev; device_t spibus; struct resource *res[2]; struct mtx mtx; clk_t clk_apb; clk_t clk_spi; void * intrhand; int transfer; uint32_t fifo_size; uint64_t max_freq; uint32_t intreg; uint8_t *rxbuf; uint32_t rxidx; uint8_t *txbuf; uint32_t txidx; uint32_t txlen; uint32_t rxlen; }; #define RK_SPI_LOCK(sc) mtx_lock(&(sc)->mtx) #define RK_SPI_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define RK_SPI_READ_4(sc, reg) bus_read_4((sc)->res[0], (reg)) #define RK_SPI_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static int rk_spi_probe(device_t dev); static int rk_spi_attach(device_t dev); static int rk_spi_detach(device_t dev); static void rk_spi_intr(void *arg); static void rk_spi_enable_chip(struct rk_spi_softc *sc, int enable) { RK_SPI_WRITE_4(sc, RK_SPI_ENR, enable ? 
1 : 0); } static int rk_spi_set_cs(struct rk_spi_softc *sc, uint32_t cs, bool active) { uint32_t reg; if (cs & SPIBUS_CS_HIGH) { device_printf(sc->dev, "SPIBUS_CS_HIGH is not supported\n"); return (EINVAL); } if (cs > CS_MAX) return (EINVAL); reg = RK_SPI_READ_4(sc, RK_SPI_SER); if (active) reg |= (1 << cs); else reg &= ~(1 << cs); RK_SPI_WRITE_4(sc, RK_SPI_SER, reg); return (0); } static void rk_spi_hw_setup(struct rk_spi_softc *sc, uint32_t mode, uint32_t freq) { uint32_t cr0; uint32_t div; cr0 = CTRLR0_OPM_MASTER | CTRLR0_XFM_TR | CTRLR0_FRF_MOTO | CTRLR0_BHT_8BIT | CTRLR0_EM_BIG | CTRLR0_SSD_ONE | CTRLR0_DFS_8BIT; if (mode & SPIBUS_MODE_CPHA) cr0 |= CTRLR0_SCPH; if (mode & SPIBUS_MODE_CPOL) cr0 |= CTRLR0_SCPOL; /* minimum divider is 2 */ if (sc->max_freq < freq*2) { clk_set_freq(sc->clk_spi, 2 * freq, CLK_SET_ROUND_DOWN); clk_get_freq(sc->clk_spi, &sc->max_freq); } div = ((sc->max_freq + freq - 1) / freq); div = (div + 1) & 0xfffe; RK_SPI_WRITE_4(sc, RK_SPI_BAUDR, div); RK_SPI_WRITE_4(sc, RK_SPI_CTRLR0, cr0); } static uint32_t rk_spi_fifo_size(struct rk_spi_softc *sc) { uint32_t txftlr, reg; for (txftlr = 2; txftlr < 32; txftlr++) { RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, txftlr); reg = RK_SPI_READ_4(sc, RK_SPI_TXFTLR); if (reg != txftlr) break; } RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, 0); if (txftlr == 31) return 0; return txftlr; } static void rk_spi_empty_rxfifo(struct rk_spi_softc *sc) { uint32_t rxlevel; rxlevel = RK_SPI_READ_4(sc, RK_SPI_RXFLR); while (sc->rxidx < sc->rxlen && (rxlevel-- > 0)) { sc->rxbuf[sc->rxidx++] = (uint8_t)RK_SPI_READ_4(sc, RK_SPI_RXDR); } } static void rk_spi_fill_txfifo(struct rk_spi_softc *sc) { uint32_t txlevel; txlevel = RK_SPI_READ_4(sc, RK_SPI_TXFLR); while (sc->txidx < sc->txlen && txlevel < sc->fifo_size) { RK_SPI_WRITE_4(sc, RK_SPI_TXDR, sc->txbuf[sc->txidx++]); txlevel++; } if (sc->txidx != sc->txlen) sc->intreg |= (IMR_TFEIM | IMR_RFFIM); } static int rk_spi_xfer_buf(struct rk_spi_softc *sc, void *rxbuf, void *txbuf, uint32_t len) { int err; if (len == 0) return (0); sc->rxbuf = rxbuf; sc->rxlen = len; sc->rxidx = 0; sc->txbuf = txbuf; sc->txlen = len; sc->txidx = 0; sc->intreg = 0; rk_spi_fill_txfifo(sc); RK_SPI_WRITE_4(sc, RK_SPI_IMR, sc->intreg); err = 0; while (err == 0 && sc->intreg != 0) err = msleep(sc, &sc->mtx, 0, "rk_spi", 10 * hz); while (err == 0 && sc->rxidx != sc->txidx) { /* read residual data from RX fifo */ rk_spi_empty_rxfifo(sc); } if (sc->rxidx != sc->rxlen || sc->txidx != sc->txlen) err = EIO; return (err); } static int rk_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Rockchip SPI"); return (BUS_PROBE_DEFAULT); } static int rk_spi_attach(device_t dev) { struct rk_spi_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); if (bus_alloc_resources(dev, rk_spi_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, rk_spi_intr, sc, &sc->intrhand)) { bus_release_resources(dev, rk_spi_spec, sc->res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } /* Activate the module clock. 
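 * (spiclk, obtained below, is what rk_spi_hw_setup() later divides down.
 * The driver keeps the BAUDR divisor even: for example, with spiclk at
 * 100 MHz and a requested 24 MHz, (100000000 + 24000000 - 1) / 24000000 = 5,
 * which is rounded up to 6 and yields an SCLK of roughly 16.7 MHz.  If
 * spiclk is less than twice the requested rate it is first reprogrammed to
 * 2 * freq, since the minimum divider is 2.)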
*/ error = clk_get_by_ofw_name(dev, 0, "apb_pclk", &sc->clk_apb); if (error != 0) { device_printf(dev, "cannot get apb_pclk clock\n"); goto fail; } error = clk_get_by_ofw_name(dev, 0, "spiclk", &sc->clk_spi); if (error != 0) { device_printf(dev, "cannot get spiclk clock\n"); goto fail; } error = clk_enable(sc->clk_apb); if (error != 0) { device_printf(dev, "cannot enable ahb clock\n"); goto fail; } error = clk_enable(sc->clk_spi); if (error != 0) { device_printf(dev, "cannot enable spiclk clock\n"); goto fail; } clk_get_freq(sc->clk_spi, &sc->max_freq); sc->fifo_size = rk_spi_fifo_size(sc); if (sc->fifo_size == 0) { device_printf(dev, "failed to get fifo size\n"); goto fail; } sc->spibus = device_add_child(dev, "spibus", DEVICE_UNIT_ANY); RK_SPI_WRITE_4(sc, RK_SPI_IMR, 0); RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, sc->fifo_size/2 - 1); RK_SPI_WRITE_4(sc, RK_SPI_RXFTLR, sc->fifo_size/2 - 1); bus_attach_children(dev); return (0); fail: rk_spi_detach(dev); return (error); } static int rk_spi_detach(device_t dev) { struct rk_spi_softc *sc; sc = device_get_softc(dev); bus_generic_detach(sc->dev); - if (sc->spibus != NULL) - device_delete_child(dev, sc->spibus); if (sc->clk_spi != NULL) clk_release(sc->clk_spi); if (sc->clk_apb) clk_release(sc->clk_apb); if (sc->intrhand != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand); bus_release_resources(dev, rk_spi_spec, sc->res); mtx_destroy(&sc->mtx); return (0); } static void rk_spi_intr(void *arg) { struct rk_spi_softc *sc; uint32_t intreg, isr; sc = arg; RK_SPI_LOCK(sc); intreg = RK_SPI_READ_4(sc, RK_SPI_IMR); isr = RK_SPI_READ_4(sc, RK_SPI_ISR); RK_SPI_WRITE_4(sc, RK_SPI_ICR, isr); if (isr & ISR_RFFIS) rk_spi_empty_rxfifo(sc); if (isr & ISR_TFEIS) rk_spi_fill_txfifo(sc); /* no bytes left, disable interrupt */ if (sc->txidx == sc->txlen) { sc->intreg = 0; wakeup(sc); } if (sc->intreg != intreg) { (void)RK_SPI_WRITE_4(sc, RK_SPI_IMR, sc->intreg); (void)RK_SPI_READ_4(sc, RK_SPI_IMR); } RK_SPI_UNLOCK(sc); } static phandle_t rk_spi_get_node(device_t bus, device_t dev) { return ofw_bus_get_node(bus); } static int rk_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct rk_spi_softc *sc; uint32_t cs, mode, clock; int err = 0; sc = device_get_softc(dev); spibus_get_cs(child, &cs); spibus_get_clock(child, &clock); spibus_get_mode(child, &mode); RK_SPI_LOCK(sc); rk_spi_hw_setup(sc, mode, clock); rk_spi_enable_chip(sc, 1); err = rk_spi_set_cs(sc, cs, true); if (err != 0) { rk_spi_enable_chip(sc, 0); RK_SPI_UNLOCK(sc); return (err); } /* Transfer command then data bytes. 
*/ err = 0; if (cmd->tx_cmd_sz > 0) err = rk_spi_xfer_buf(sc, cmd->rx_cmd, cmd->tx_cmd, cmd->tx_cmd_sz); if (cmd->tx_data_sz > 0 && err == 0) err = rk_spi_xfer_buf(sc, cmd->rx_data, cmd->tx_data, cmd->tx_data_sz); rk_spi_set_cs(sc, cs, false); rk_spi_enable_chip(sc, 0); RK_SPI_UNLOCK(sc); return (err); } static device_method_t rk_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_spi_probe), DEVMETHOD(device_attach, rk_spi_attach), DEVMETHOD(device_detach, rk_spi_detach), /* spibus_if */ DEVMETHOD(spibus_transfer, rk_spi_transfer), /* ofw_bus_if */ DEVMETHOD(ofw_bus_get_node, rk_spi_get_node), DEVMETHOD_END }; static driver_t rk_spi_driver = { "spi", rk_spi_methods, sizeof(struct rk_spi_softc), }; DRIVER_MODULE(rk_spi, simplebus, rk_spi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, rk_spi, ofw_spibus_driver, 0, 0); MODULE_DEPEND(rk_spi, ofw_spibus, 1, 1, 1); OFWBUS_PNP_INFO(compat_data); diff --git a/sys/dev/viapm/viapm.c b/sys/dev/viapm/viapm.c index b19ad478a3ff..36c33422d5b3 100644 --- a/sys/dev/viapm/viapm.c +++ b/sys/dev/viapm/viapm.c @@ -1,1017 +1,1011 @@ /*- * Copyright (c) 2001 Alcove - Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ISA #include #include #endif #include #include #include #include #include "iicbb_if.h" #include "smbus_if.h" #define VIAPM_DEBUG(x) if (viapm_debug) (x) #ifdef DEBUG static int viapm_debug = 1; #else static int viapm_debug = 0; #endif #define VIA_586B_PMU_ID 0x30401106 #define VIA_596A_PMU_ID 0x30501106 #define VIA_596B_PMU_ID 0x30511106 #define VIA_686A_PMU_ID 0x30571106 #define VIA_8233_PMU_ID 0x30741106 #define VIA_8233A_PMU_ID 0x31471106 #define VIA_8235_PMU_ID 0x31771106 #define VIA_8237_PMU_ID 0x32271106 #define VIA_CX700_PMU_ID 0x83241106 #define VIAPM_INB(port) \ ((u_char)bus_read_1(viapm->iores, port)) #define VIAPM_OUTB(port,val) \ (bus_write_1(viapm->iores, port, (u_char)(val))) #define VIAPM_TYP_UNKNOWN 0 #define VIAPM_TYP_586B_3040E 1 #define VIAPM_TYP_586B_3040F 2 #define VIAPM_TYP_596B 3 #define VIAPM_TYP_686A 4 #define VIAPM_TYP_8233 5 #define VIAPM_LOCK(sc) mtx_lock(&(sc)->lock) #define VIAPM_UNLOCK(sc) mtx_unlock(&(sc)->lock) #define VIAPM_LOCK_ASSERT(sc) mtx_assert(&(sc)->lock, MA_OWNED) struct viapm_softc { int type; u_int32_t base; int iorid; int irqrid; struct resource *iores; struct resource *irqres; void *irqih; device_t iicbb; device_t smbus; struct mtx lock; }; /* * VT82C586B definitions */ #define VIAPM_586B_REVID 0x08 #define VIAPM_586B_3040E_BASE 0x20 #define VIAPM_586B_3040E_ACTIV 0x4 /* 16 bits */ #define VIAPM_586B_3040F_BASE 0x48 #define VIAPM_586B_3040F_ACTIV 0x41 /* 8 bits */ #define VIAPM_586B_OEM_REV_E 0x00 #define VIAPM_586B_OEM_REV_F 0x01 #define VIAPM_586B_PROD_REV_A 0x10 #define VIAPM_586B_BA_MASK 0x0000ff00 #define GPIO_DIR 0x40 #define GPIO_VAL 0x42 #define EXTSMI_VAL 0x44 #define VIAPM_SCL 0x02 /* GPIO1_VAL */ #define VIAPM_SDA 0x04 /* GPIO2_VAL */ /* * VIAPRO common definitions */ #define VIAPM_PRO_BA_MASK 0x0000fff0 #define VIAPM_PRO_SMBCTRL 0xd2 #define VIAPM_PRO_REVID 0xd6 /* * VT82C686A definitions */ #define VIAPM_PRO_BASE 0x90 #define SMBHST 0x0 #define SMBHSL 0x1 #define SMBHCTRL 0x2 #define SMBHCMD 0x3 #define SMBHADDR 0x4 #define SMBHDATA0 0x5 #define SMBHDATA1 0x6 #define SMBHBLOCK 0x7 #define SMBSST 0x1 #define SMBSCTRL 0x8 #define SMBSSDWCMD 0x9 #define SMBSEVENT 0xa #define SMBSDATA 0xc #define SMBHST_RESERVED 0xef /* reserved bits */ #define SMBHST_FAILED 0x10 /* failed bus transaction */ #define SMBHST_COLLID 0x08 /* bus collision */ #define SMBHST_ERROR 0x04 /* device error */ #define SMBHST_INTR 0x02 /* command completed */ #define SMBHST_BUSY 0x01 /* host busy */ #define SMBHCTRL_START 0x40 /* start command */ #define SMBHCTRL_PROTO 0x1c /* command protocol mask */ #define SMBHCTRL_QUICK 0x00 #define SMBHCTRL_SENDRECV 0x04 #define SMBHCTRL_BYTE 0x08 #define SMBHCTRL_WORD 0x0c #define SMBHCTRL_BLOCK 0x14 #define SMBHCTRL_KILL 0x02 /* stop the current transaction */ #define SMBHCTRL_ENABLE 0x01 /* enable interrupts */ #define SMBSCTRL_ENABLE 0x01 /* enable slave */ /* * VIA8233 definitions */ #define VIAPM_8233_BASE 0xD0 static int viapm_586b_probe(device_t dev) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); u_int32_t l; u_int16_t s; u_int8_t c; switch (pci_get_devid(dev)) { case VIA_586B_PMU_ID: bzero(viapm, sizeof(struct viapm_softc)); l = pci_read_config(dev, VIAPM_586B_REVID, 1); switch (l) { case VIAPM_586B_OEM_REV_E: viapm->type = VIAPM_TYP_586B_3040E; viapm->iorid = VIAPM_586B_3040E_BASE; /* Activate IO block access */ s = pci_read_config(dev, 
VIAPM_586B_3040E_ACTIV, 2); pci_write_config(dev, VIAPM_586B_3040E_ACTIV, s | 0x1, 2); break; case VIAPM_586B_OEM_REV_F: case VIAPM_586B_PROD_REV_A: default: viapm->type = VIAPM_TYP_586B_3040F; viapm->iorid = VIAPM_586B_3040F_BASE; /* Activate IO block access */ c = pci_read_config(dev, VIAPM_586B_3040F_ACTIV, 1); pci_write_config(dev, VIAPM_586B_3040F_ACTIV, c | 0x80, 1); break; } viapm->base = pci_read_config(dev, viapm->iorid, 4) & VIAPM_586B_BA_MASK; /* * We have to set the I/O resources by hand because it is * described outside the viapmope of the traditional maps */ if (bus_set_resource(dev, SYS_RES_IOPORT, viapm->iorid, viapm->base, 256)) { device_printf(dev, "could not set bus resource\n"); return ENXIO; } device_set_desc(dev, "VIA VT82C586B Power Management Unit"); return (BUS_PROBE_DEFAULT); default: break; } return ENXIO; } static int viapm_pro_probe(device_t dev) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); #ifdef VIAPM_BASE_ADDR u_int32_t l; #endif u_int32_t base_cfgreg; char *desc; switch (pci_get_devid(dev)) { case VIA_596A_PMU_ID: desc = "VIA VT82C596A Power Management Unit"; viapm->type = VIAPM_TYP_596B; base_cfgreg = VIAPM_PRO_BASE; goto viapro; case VIA_596B_PMU_ID: desc = "VIA VT82C596B Power Management Unit"; viapm->type = VIAPM_TYP_596B; base_cfgreg = VIAPM_PRO_BASE; goto viapro; case VIA_686A_PMU_ID: desc = "VIA VT82C686A Power Management Unit"; viapm->type = VIAPM_TYP_686A; base_cfgreg = VIAPM_PRO_BASE; goto viapro; case VIA_8233_PMU_ID: case VIA_8233A_PMU_ID: desc = "VIA VT8233 Power Management Unit"; viapm->type = VIAPM_TYP_UNKNOWN; base_cfgreg = VIAPM_8233_BASE; goto viapro; case VIA_8235_PMU_ID: desc = "VIA VT8235 Power Management Unit"; viapm->type = VIAPM_TYP_UNKNOWN; base_cfgreg = VIAPM_8233_BASE; goto viapro; case VIA_8237_PMU_ID: desc = "VIA VT8237 Power Management Unit"; viapm->type = VIAPM_TYP_UNKNOWN; base_cfgreg = VIAPM_8233_BASE; goto viapro; case VIA_CX700_PMU_ID: desc = "VIA CX700 Power Management Unit"; viapm->type = VIAPM_TYP_UNKNOWN; base_cfgreg = VIAPM_8233_BASE; goto viapro; viapro: #ifdef VIAPM_BASE_ADDR /* force VIAPM I/O base address */ /* enable the SMBus controller function */ l = pci_read_config(dev, VIAPM_PRO_SMBCTRL, 1); pci_write_config(dev, VIAPM_PRO_SMBCTRL, l | 1, 1); /* write the base address */ pci_write_config(dev, base_cfgreg, VIAPM_BASE_ADDR & VIAPM_PRO_BA_MASK, 4); #endif viapm->base = pci_read_config(dev, base_cfgreg, 4) & VIAPM_PRO_BA_MASK; /* * We have to set the I/O resources by hand because it is * described outside the viapmope of the traditional maps */ viapm->iorid = base_cfgreg; if (bus_set_resource(dev, SYS_RES_IOPORT, viapm->iorid, viapm->base, 16)) { device_printf(dev, "could not set bus resource 0x%x\n", viapm->base); return ENXIO; } if (bootverbose) { device_printf(dev, "SMBus I/O base at 0x%x\n", viapm->base); } device_set_desc(dev, desc); return (BUS_PROBE_DEFAULT); default: break; } return ENXIO; } static int viapm_pro_attach(device_t dev) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); u_int32_t l; mtx_init(&viapm->lock, device_get_nameunit(dev), "viapm", MTX_DEF); if (!(viapm->iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &viapm->iorid, RF_ACTIVE))) { device_printf(dev, "could not allocate bus space\n"); goto error; } #ifdef notyet /* force irq 9 */ l = pci_read_config(dev, VIAPM_PRO_SMBCTRL, 1); pci_write_config(dev, VIAPM_PRO_SMBCTRL, l | 0x80, 1); viapm->irqrid = 0; if (!(viapm->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &viapm->irqrid, 9, 
9, 1, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "could not allocate irq\n"); goto error; } if (bus_setup_intr(dev, viapm->irqres, INTR_TYPE_MISC | INTR_MPSAFE, (driver_intr_t *) viasmb_intr, viapm, &viapm->irqih)) { device_printf(dev, "could not setup irq\n"); goto error; } #endif if (bootverbose) { l = pci_read_config(dev, VIAPM_PRO_REVID, 1); device_printf(dev, "SMBus revision code 0x%x\n", l); } viapm->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY); /* probe and attach the smbus */ bus_attach_children(dev); /* disable slave function */ VIAPM_OUTB(SMBSCTRL, VIAPM_INB(SMBSCTRL) & ~SMBSCTRL_ENABLE); /* enable the SMBus controller function */ l = pci_read_config(dev, VIAPM_PRO_SMBCTRL, 1); pci_write_config(dev, VIAPM_PRO_SMBCTRL, l | 1, 1); #ifdef notyet /* enable interrupts */ VIAPM_OUTB(SMBHCTRL, VIAPM_INB(SMBHCTRL) | SMBHCTRL_ENABLE); #endif #ifdef DEV_ISA /* If this device is a PCI-ISA bridge, then attach an ISA bus. */ if ((pci_get_class(dev) == PCIC_BRIDGE) && (pci_get_subclass(dev) == PCIS_BRIDGE_ISA)) isab_attach(dev); #endif return 0; error: if (viapm->iores) bus_release_resource(dev, SYS_RES_IOPORT, viapm->iorid, viapm->iores); #ifdef notyet if (viapm->irqres) bus_release_resource(dev, SYS_RES_IRQ, viapm->irqrid, viapm->irqres); #endif mtx_destroy(&viapm->lock); return ENXIO; } static int viapm_586b_attach(device_t dev) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); mtx_init(&viapm->lock, device_get_nameunit(dev), "viapm", MTX_DEF); if (!(viapm->iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &viapm->iorid, RF_ACTIVE | RF_SHAREABLE))) { device_printf(dev, "could not allocate bus resource\n"); goto error; } VIAPM_OUTB(GPIO_DIR, VIAPM_INB(GPIO_DIR) | VIAPM_SCL | VIAPM_SDA); /* add generic bit-banging code */ if (!(viapm->iicbb = device_add_child(dev, "iicbb", -1))) goto error; bus_attach_children(dev); return 0; error: if (viapm->iores) bus_release_resource(dev, SYS_RES_IOPORT, viapm->iorid, viapm->iores); mtx_destroy(&viapm->lock); return ENXIO; } static int viapm_586b_detach(device_t dev) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); bus_generic_detach(dev); - if (viapm->iicbb) { - device_delete_child(dev, viapm->iicbb); - } if (viapm->iores) bus_release_resource(dev, SYS_RES_IOPORT, viapm->iorid, viapm->iores); mtx_destroy(&viapm->lock); return 0; } static int viapm_pro_detach(device_t dev) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); bus_generic_detach(dev); - if (viapm->smbus) { - device_delete_child(dev, viapm->smbus); - } bus_release_resource(dev, SYS_RES_IOPORT, viapm->iorid, viapm->iores); #ifdef notyet bus_release_resource(dev, SYS_RES_IRQ, viapm->irqrid, viapm->irqres); #endif mtx_destroy(&viapm->lock); return 0; } static int viabb_callback(device_t dev, int index, caddr_t data) { return 0; } static void viabb_setscl(device_t dev, int ctrl) { struct viapm_softc *viapm = device_get_softc(dev); u_char val; VIAPM_LOCK(viapm); val = VIAPM_INB(GPIO_VAL); if (ctrl) val |= VIAPM_SCL; else val &= ~VIAPM_SCL; VIAPM_OUTB(GPIO_VAL, val); VIAPM_UNLOCK(viapm); return; } static void viabb_setsda(device_t dev, int data) { struct viapm_softc *viapm = device_get_softc(dev); u_char val; VIAPM_LOCK(viapm); val = VIAPM_INB(GPIO_VAL); if (data) val |= VIAPM_SDA; else val &= ~VIAPM_SDA; VIAPM_OUTB(GPIO_VAL, val); VIAPM_UNLOCK(viapm); return; } static int viabb_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) { /* reset bus */ viabb_setsda(dev, 1); viabb_setscl(dev, 1); 
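	/*
	 * With both GPIO-driven lines released high the bus is back in its
	 * idle state.  Returning IIC_ENOADDR appears to be the convention for
	 * bit-bang resets that do not program a slave address of their own.
	 */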
return (IIC_ENOADDR); } static int viabb_getscl(device_t dev) { struct viapm_softc *viapm = device_get_softc(dev); u_char val; VIAPM_LOCK(viapm); val = VIAPM_INB(EXTSMI_VAL); VIAPM_UNLOCK(viapm); return ((val & VIAPM_SCL) != 0); } static int viabb_getsda(device_t dev) { struct viapm_softc *viapm = device_get_softc(dev); u_char val; VIAPM_LOCK(viapm); val = VIAPM_INB(EXTSMI_VAL); VIAPM_UNLOCK(viapm); return ((val & VIAPM_SDA) != 0); } static int viapm_abort(struct viapm_softc *viapm) { VIAPM_OUTB(SMBHCTRL, SMBHCTRL_KILL); DELAY(10); return (0); } static int viapm_clear(struct viapm_softc *viapm) { VIAPM_OUTB(SMBHST, SMBHST_FAILED | SMBHST_COLLID | SMBHST_ERROR | SMBHST_INTR); DELAY(10); return (0); } static int viapm_busy(struct viapm_softc *viapm) { u_char sts; sts = VIAPM_INB(SMBHST); VIAPM_DEBUG(printf("viapm: idle? STS=0x%x\n", sts)); return (sts & SMBHST_BUSY); } /* * Poll the SMBus controller */ static int viapm_wait(struct viapm_softc *viapm) { int count = 10000; u_char sts = 0; int error; VIAPM_LOCK_ASSERT(viapm); /* wait for command to complete and SMBus controller is idle */ while(count--) { DELAY(10); sts = VIAPM_INB(SMBHST); /* check if the controller is processing a command */ if (!(sts & SMBHST_BUSY) && (sts & SMBHST_INTR)) break; } VIAPM_DEBUG(printf("viapm: SMBHST=0x%x\n", sts)); error = SMB_ENOERR; if (!count) error |= SMB_ETIMEOUT; if (sts & SMBHST_FAILED) error |= SMB_EABORT; if (sts & SMBHST_COLLID) error |= SMB_ENOACK; if (sts & SMBHST_ERROR) error |= SMB_EBUSERR; if (error != SMB_ENOERR) viapm_abort(viapm); viapm_clear(viapm); return (error); } static int viasmb_callback(device_t dev, int index, void *data) { int error = 0; switch (index) { case SMB_REQUEST_BUS: case SMB_RELEASE_BUS: /* ok, bus allocation accepted */ break; default: error = EINVAL; } return (error); } static int viasmb_quick(device_t dev, u_char slave, int how) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); int error; VIAPM_LOCK(viapm); viapm_clear(viapm); if (viapm_busy(viapm)) { VIAPM_UNLOCK(viapm); return (SMB_EBUSY); } switch (how) { case SMB_QWRITE: VIAPM_DEBUG(printf("viapm: QWRITE to 0x%x", slave)); VIAPM_OUTB(SMBHADDR, slave & ~LSB); break; case SMB_QREAD: VIAPM_DEBUG(printf("viapm: QREAD to 0x%x", slave)); VIAPM_OUTB(SMBHADDR, slave | LSB); break; default: panic("%s: unknown QUICK command (%x)!", __func__, how); } VIAPM_OUTB(SMBHCTRL, SMBHCTRL_START | SMBHCTRL_QUICK); error = viapm_wait(viapm); VIAPM_UNLOCK(viapm); return (error); } static int viasmb_sendb(device_t dev, u_char slave, char byte) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); int error; VIAPM_LOCK(viapm); viapm_clear(viapm); if (viapm_busy(viapm)) { VIAPM_UNLOCK(viapm); return (SMB_EBUSY); } VIAPM_OUTB(SMBHADDR, slave & ~ LSB); VIAPM_OUTB(SMBHCMD, byte); VIAPM_OUTB(SMBHCTRL, SMBHCTRL_START | SMBHCTRL_SENDRECV); error = viapm_wait(viapm); VIAPM_DEBUG(printf("viapm: SENDB to 0x%x, byte=0x%x, error=0x%x\n", slave, byte, error)); VIAPM_UNLOCK(viapm); return (error); } static int viasmb_recvb(device_t dev, u_char slave, char *byte) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); int error; VIAPM_LOCK(viapm); viapm_clear(viapm); if (viapm_busy(viapm)) { VIAPM_UNLOCK(viapm); return (SMB_EBUSY); } VIAPM_OUTB(SMBHADDR, slave | LSB); VIAPM_OUTB(SMBHCTRL, SMBHCTRL_START | SMBHCTRL_SENDRECV); if ((error = viapm_wait(viapm)) == SMB_ENOERR) *byte = VIAPM_INB(SMBHDATA0); VIAPM_DEBUG(printf("viapm: RECVB from 0x%x, byte=0x%x, error=0x%x\n", slave, *byte, error)); 
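	/*
	 * Note the addressing convention used throughout this file: a read
	 * sets the address LSB (slave | LSB) while a write clears it
	 * (slave & ~LSB), matching SMBus/I2C practice.
	 */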
VIAPM_UNLOCK(viapm); return (error); } static int viasmb_writeb(device_t dev, u_char slave, char cmd, char byte) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); int error; VIAPM_LOCK(viapm); viapm_clear(viapm); if (viapm_busy(viapm)) { VIAPM_UNLOCK(viapm); return (SMB_EBUSY); } VIAPM_OUTB(SMBHADDR, slave & ~ LSB); VIAPM_OUTB(SMBHCMD, cmd); VIAPM_OUTB(SMBHDATA0, byte); VIAPM_OUTB(SMBHCTRL, SMBHCTRL_START | SMBHCTRL_BYTE); error = viapm_wait(viapm); VIAPM_DEBUG(printf("viapm: WRITEB to 0x%x, cmd=0x%x, byte=0x%x, error=0x%x\n", slave, cmd, byte, error)); VIAPM_UNLOCK(viapm); return (error); } static int viasmb_readb(device_t dev, u_char slave, char cmd, char *byte) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); int error; VIAPM_LOCK(viapm); viapm_clear(viapm); if (viapm_busy(viapm)) { VIAPM_UNLOCK(viapm); return (SMB_EBUSY); } VIAPM_OUTB(SMBHADDR, slave | LSB); VIAPM_OUTB(SMBHCMD, cmd); VIAPM_OUTB(SMBHCTRL, SMBHCTRL_START | SMBHCTRL_BYTE); if ((error = viapm_wait(viapm)) == SMB_ENOERR) *byte = VIAPM_INB(SMBHDATA0); VIAPM_DEBUG(printf("viapm: READB from 0x%x, cmd=0x%x, byte=0x%x, error=0x%x\n", slave, cmd, *byte, error)); VIAPM_UNLOCK(viapm); return (error); } static int viasmb_writew(device_t dev, u_char slave, char cmd, short word) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); int error; VIAPM_LOCK(viapm); viapm_clear(viapm); if (viapm_busy(viapm)) { VIAPM_UNLOCK(viapm); return (SMB_EBUSY); } VIAPM_OUTB(SMBHADDR, slave & ~ LSB); VIAPM_OUTB(SMBHCMD, cmd); VIAPM_OUTB(SMBHDATA0, word & 0x00ff); VIAPM_OUTB(SMBHDATA1, (word & 0xff00) >> 8); VIAPM_OUTB(SMBHCTRL, SMBHCTRL_START | SMBHCTRL_WORD); error = viapm_wait(viapm); VIAPM_DEBUG(printf("viapm: WRITEW to 0x%x, cmd=0x%x, word=0x%x, error=0x%x\n", slave, cmd, word, error)); VIAPM_UNLOCK(viapm); return (error); } static int viasmb_readw(device_t dev, u_char slave, char cmd, short *word) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); int error; u_char high, low; VIAPM_LOCK(viapm); viapm_clear(viapm); if (viapm_busy(viapm)) { VIAPM_UNLOCK(viapm); return (SMB_EBUSY); } VIAPM_OUTB(SMBHADDR, slave | LSB); VIAPM_OUTB(SMBHCMD, cmd); VIAPM_OUTB(SMBHCTRL, SMBHCTRL_START | SMBHCTRL_WORD); if ((error = viapm_wait(viapm)) == SMB_ENOERR) { low = VIAPM_INB(SMBHDATA0); high = VIAPM_INB(SMBHDATA1); *word = ((high & 0xff) << 8) | (low & 0xff); } VIAPM_DEBUG(printf("viapm: READW from 0x%x, cmd=0x%x, word=0x%x, error=0x%x\n", slave, cmd, *word, error)); VIAPM_UNLOCK(viapm); return (error); } static int viasmb_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf) { struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev); u_char i; int error; if (count < 1 || count > 32) return (SMB_EINVAL); VIAPM_LOCK(viapm); viapm_clear(viapm); if (viapm_busy(viapm)) { VIAPM_UNLOCK(viapm); return (SMB_EBUSY); } VIAPM_OUTB(SMBHADDR, slave & ~LSB); VIAPM_OUTB(SMBHCMD, cmd); VIAPM_OUTB(SMBHDATA0, count); i = VIAPM_INB(SMBHCTRL); /* fill the 32-byte internal buffer */ for (i = 0; i < count; i++) { VIAPM_OUTB(SMBHBLOCK, buf[i]); DELAY(2); } VIAPM_OUTB(SMBHCMD, cmd); VIAPM_OUTB(SMBHCTRL, SMBHCTRL_START | SMBHCTRL_BLOCK); error = viapm_wait(viapm); VIAPM_DEBUG(printf("viapm: WRITEBLK to 0x%x, count=0x%x, cmd=0x%x, error=0x%x", slave, count, cmd, error)); VIAPM_UNLOCK(viapm); return (error); } static int viasmb_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { struct viapm_softc *viapm = (struct viapm_softc 
*)device_get_softc(dev); u_char data, len, i; int error; if (*count < 1 || *count > 32) return (SMB_EINVAL); VIAPM_LOCK(viapm); viapm_clear(viapm); if (viapm_busy(viapm)) { VIAPM_UNLOCK(viapm); return (SMB_EBUSY); } VIAPM_OUTB(SMBHADDR, slave | LSB); VIAPM_OUTB(SMBHCMD, cmd); VIAPM_OUTB(SMBHCTRL, SMBHCTRL_START | SMBHCTRL_BLOCK); if ((error = viapm_wait(viapm)) != SMB_ENOERR) goto error; len = VIAPM_INB(SMBHDATA0); i = VIAPM_INB(SMBHCTRL); /* reset counter */ /* read the 32-byte internal buffer */ for (i = 0; i < len; i++) { data = VIAPM_INB(SMBHBLOCK); if (i < *count) buf[i] = data; DELAY(2); } *count = len; error: VIAPM_DEBUG(printf("viapm: READBLK to 0x%x, count=0x%x, cmd=0x%x, error=0x%x", slave, *count, cmd, error)); VIAPM_UNLOCK(viapm); return (error); } static device_method_t viapm_methods[] = { /* device interface */ DEVMETHOD(device_probe, viapm_586b_probe), DEVMETHOD(device_attach, viapm_586b_attach), DEVMETHOD(device_detach, viapm_586b_detach), /* iicbb interface */ DEVMETHOD(iicbb_callback, viabb_callback), DEVMETHOD(iicbb_setscl, viabb_setscl), DEVMETHOD(iicbb_setsda, viabb_setsda), DEVMETHOD(iicbb_getscl, viabb_getscl), DEVMETHOD(iicbb_getsda, viabb_getsda), DEVMETHOD(iicbb_reset, viabb_reset), /* Bus interface */ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static driver_t viapm_driver = { "viapm", viapm_methods, sizeof(struct viapm_softc), }; static device_method_t viapropm_methods[] = { /* device interface */ DEVMETHOD(device_probe, viapm_pro_probe), DEVMETHOD(device_attach, viapm_pro_attach), DEVMETHOD(device_detach, viapm_pro_detach), /* smbus interface */ DEVMETHOD(smbus_callback, viasmb_callback), DEVMETHOD(smbus_quick, viasmb_quick), DEVMETHOD(smbus_sendb, viasmb_sendb), DEVMETHOD(smbus_recvb, viasmb_recvb), DEVMETHOD(smbus_writeb, viasmb_writeb), DEVMETHOD(smbus_readb, viasmb_readb), DEVMETHOD(smbus_writew, viasmb_writew), DEVMETHOD(smbus_readw, viasmb_readw), DEVMETHOD(smbus_bwrite, viasmb_bwrite), DEVMETHOD(smbus_bread, viasmb_bread), /* Bus interface */ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static driver_t viapropm_driver = { "viapropm", viapropm_methods, sizeof(struct viapm_softc), }; DRIVER_MODULE(viapm, pci, viapm_driver, 0, 0); DRIVER_MODULE(viapropm, pci, viapropm_driver, 0, 0); DRIVER_MODULE(iicbb, viapm, iicbb_driver, 0, 0); DRIVER_MODULE(smbus, viapropm, smbus_driver, 0, 0); MODULE_DEPEND(viapm, pci, 1, 1, 1); MODULE_DEPEND(viapropm, pci, 1, 1, 1); MODULE_DEPEND(viapm, iicbb, IICBB_MINVER, IICBB_PREFVER, IICBB_MAXVER); MODULE_DEPEND(viapropm, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); MODULE_VERSION(viapm, 1); #ifdef DEV_ISA DRIVER_MODULE(isa, viapm, isa_driver, 0, 0); DRIVER_MODULE(isa, viapropm, isa_driver, 0, 0); MODULE_DEPEND(viapm, isa, 1, 1, 1); MODULE_DEPEND(viapropm, isa, 1, 1, 1); #endif diff --git a/sys/dev/vnic/thunder_mdio.c b/sys/dev/vnic/thunder_mdio.c index 
4545fe6658f5..a98e6aff05fd 100644 --- a/sys/dev/vnic/thunder_mdio.c +++ b/sys/dev/vnic/thunder_mdio.c @@ -1,505 +1,504 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_mdio_var.h" #include "lmac_if.h" #include "miibus_if.h" #define REG_BASE_RID 0 #define SMI_CMD 0x00 #define SMI_CMD_PHY_REG_ADR_SHIFT (0) #define SMI_CMD_PHY_REG_ADR_MASK (0x1FUL << SMI_CMD_PHY_REG_ADR_SHIFT) #define SMI_CMD_PHY_ADR_SHIFT (8) #define SMI_CMD_PHY_ADR_MASK (0x1FUL << SMI_CMD_PHY_ADR_SHIFT) #define SMI_CMD_PHY_OP_MASK (0x3UL << 16) #define SMI_CMD_PHY_OP_C22_READ (0x1UL << 16) #define SMI_CMD_PHY_OP_C22_WRITE (0x0UL << 16) #define SMI_CMD_PHY_OP_C45_READ (0x3UL << 16) #define SMI_CMD_PHY_OP_C45_WRITE (0x1UL << 16) #define SMI_CMD_PHY_OP_C45_ADDR (0x0UL << 16) #define SMI_WR_DAT 0x08 #define SMI_WR_DAT_PENDING (1UL << 17) #define SMI_WR_DAT_VAL (1UL << 16) #define SMI_WR_DAT_DAT_MASK (0xFFFFUL << 0) #define SMI_RD_DAT 0x10 #define SMI_RD_DAT_PENDING (1UL << 17) #define SMI_RD_DAT_VAL (1UL << 16) #define SMI_RD_DAT_DAT_MASK (0xFFFFUL << 0) #define SMI_CLK 0x18 #define SMI_CLK_PREAMBLE (1UL << 12) #define SMI_CLK_MODE (1UL << 24) #define SMI_EN 0x20 #define SMI_EN_EN (1UL << 0) /* Enable interface */ #define SMI_DRV_CTL 0x28 static int thunder_mdio_detach(device_t); static int thunder_mdio_read(device_t, int, int); static int thunder_mdio_write(device_t, int, int, int); static int thunder_ifmedia_change_stub(if_t); static void thunder_ifmedia_status_stub(if_t, struct ifmediareq *); static int thunder_mdio_media_status(device_t, int, int *, int *, int *); static int thunder_mdio_media_change(device_t, int, int, int, int); static int thunder_mdio_phy_connect(device_t, int, int); static int thunder_mdio_phy_disconnect(device_t, int, int); static device_method_t thunder_mdio_methods[] = { /* Device interface */ DEVMETHOD(device_detach, thunder_mdio_detach), /* LMAC interface */ DEVMETHOD(lmac_media_status, thunder_mdio_media_status), DEVMETHOD(lmac_media_change, thunder_mdio_media_change), DEVMETHOD(lmac_phy_connect, 
thunder_mdio_phy_connect), DEVMETHOD(lmac_phy_disconnect, thunder_mdio_phy_disconnect), /* MII interface */ DEVMETHOD(miibus_readreg, thunder_mdio_read), DEVMETHOD(miibus_writereg, thunder_mdio_write), /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(thunder_mdio, thunder_mdio_driver, thunder_mdio_methods, sizeof(struct thunder_mdio_softc)); DRIVER_MODULE(miibus, thunder_mdio, miibus_driver, 0, 0); MODULE_VERSION(thunder_mdio, 1); MODULE_DEPEND(thunder_mdio, ether, 1, 1, 1); MODULE_DEPEND(thunder_mdio, miibus, 1, 1, 1); MODULE_DEPEND(thunder_mdio, mrmlbus, 1, 1, 1); MALLOC_DEFINE(M_THUNDER_MDIO, "ThunderX MDIO", "Cavium ThunderX MDIO dynamic memory"); #define MDIO_LOCK_INIT(sc, name) \ mtx_init(&(sc)->mtx, name, NULL, MTX_DEF) #define MDIO_LOCK_DESTROY(sc) \ mtx_destroy(&(sc)->mtx) #define MDIO_LOCK(sc) mtx_lock(&(sc)->mtx) #define MDIO_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define MDIO_LOCK_ASSERT(sc) \ mtx_assert(&(sc)->mtx, MA_OWNED) #define mdio_reg_read(sc, reg) \ bus_read_8((sc)->reg_base, (reg)) #define mdio_reg_write(sc, reg, val) \ bus_write_8((sc)->reg_base, (reg), (val)) int thunder_mdio_attach(device_t dev) { struct thunder_mdio_softc *sc; int rid; sc = device_get_softc(dev); sc->dev = dev; /* Allocate memory resources */ rid = REG_BASE_RID; sc->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->reg_base == NULL) { device_printf(dev, "Could not allocate memory\n"); return (ENXIO); } TAILQ_INIT(&sc->phy_desc_head); MDIO_LOCK_INIT(sc, "ThunderX MDIO lock"); /* Enable SMI/MDIO interface */ mdio_reg_write(sc, SMI_EN, SMI_EN_EN); return (0); } static int thunder_mdio_detach(device_t dev) { struct thunder_mdio_softc *sc; sc = device_get_softc(dev); if (sc->reg_base != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, REG_BASE_RID, sc->reg_base); } return (0); } static __inline void thunder_mdio_set_mode(struct thunder_mdio_softc *sc, enum thunder_mdio_mode mode) { uint64_t smi_clk; if (sc->mode == mode) return; /* Set mode, IEEE CLAUSE 22 or IEEE CAUSE 45 */ smi_clk = mdio_reg_read(sc, SMI_CLK); if (mode == MODE_IEEE_C22) smi_clk &= ~SMI_CLK_MODE; else smi_clk |= SMI_CLK_MODE; /* Enable sending 32 bit preable on SMI transactions */ smi_clk |= SMI_CLK_PREAMBLE; /* Saved settings */ mdio_reg_write(sc, SMI_CLK, smi_clk); sc->mode = mode; } static int thunder_mdio_c45_addr(struct thunder_mdio_softc *sc, int phy, int reg) { uint64_t smi_cmd, smi_wr_dat; ssize_t timeout; thunder_mdio_set_mode(sc, MODE_IEEE_C45); /* Prepare data for transmission */ mdio_reg_write(sc, SMI_WR_DAT, reg & SMI_WR_DAT_DAT_MASK); /* * Assemble command */ smi_cmd = 0; /* Set opcode */ smi_cmd |= SMI_CMD_PHY_OP_C45_WRITE; /* Set PHY address */ smi_cmd |= ((phy << SMI_CMD_PHY_ADR_SHIFT) & SMI_CMD_PHY_ADR_MASK); /* Set PHY register offset */ smi_cmd |= ((reg << SMI_CMD_PHY_REG_ADR_SHIFT) & SMI_CMD_PHY_REG_ADR_MASK); mdio_reg_write(sc, SMI_CMD, smi_cmd); for (timeout = 1000; timeout > 0; timeout--) { smi_wr_dat = mdio_reg_read(sc, SMI_WR_DAT); if (smi_wr_dat & SMI_WR_DAT_PENDING) DELAY(1000); else break; } if (timeout <= 0) return (EIO); else { /* Return 0 on success */ return (0); } } static int thunder_mdio_read(device_t dev, int phy, int reg) { struct thunder_mdio_softc *sc; uint64_t smi_cmd, smi_rd_dat; ssize_t timeout; int err; sc = device_get_softc(dev); /* XXX Always C22 - for <= 1Gbps only */ thunder_mdio_set_mode(sc, MODE_IEEE_C22); /* * Assemble command */ smi_cmd = 0; /* Set opcode */ if (sc->mode == MODE_IEEE_C22) smi_cmd |= SMI_CMD_PHY_OP_C22_READ; else { smi_cmd |= 
SMI_CMD_PHY_OP_C45_READ; err = thunder_mdio_c45_addr(sc, phy, reg); if (err != 0) return (err); reg = (reg >> 16) & 0x1F; } /* Set PHY address */ smi_cmd |= ((phy << SMI_CMD_PHY_ADR_SHIFT) & SMI_CMD_PHY_ADR_MASK); /* Set PHY register offset */ smi_cmd |= ((reg << SMI_CMD_PHY_REG_ADR_SHIFT) & SMI_CMD_PHY_REG_ADR_MASK); mdio_reg_write(sc, SMI_CMD, smi_cmd); for (timeout = 1000; timeout > 0; timeout--) { smi_rd_dat = mdio_reg_read(sc, SMI_RD_DAT); if (smi_rd_dat & SMI_RD_DAT_PENDING) DELAY(1000); else break; } if (smi_rd_dat & SMI_RD_DAT_VAL) return (smi_rd_dat & SMI_RD_DAT_DAT_MASK); else { /* Return 0 on error */ return (0); } } static int thunder_mdio_write(device_t dev, int phy, int reg, int data) { struct thunder_mdio_softc *sc; uint64_t smi_cmd, smi_wr_dat; ssize_t timeout; sc = device_get_softc(dev); /* XXX Always C22 - for <= 1Gbps only */ thunder_mdio_set_mode(sc, MODE_IEEE_C22); /* Prepare data for transmission */ mdio_reg_write(sc, SMI_WR_DAT, data & SMI_WR_DAT_DAT_MASK); /* * Assemble command */ smi_cmd = 0; /* Set opcode */ if (sc->mode == MODE_IEEE_C22) smi_cmd |= SMI_CMD_PHY_OP_C22_WRITE; else smi_cmd |= SMI_CMD_PHY_OP_C45_WRITE; /* Set PHY address */ smi_cmd |= ((phy << SMI_CMD_PHY_ADR_SHIFT) & SMI_CMD_PHY_ADR_MASK); /* Set PHY register offset */ smi_cmd |= ((reg << SMI_CMD_PHY_REG_ADR_SHIFT) & SMI_CMD_PHY_REG_ADR_MASK); mdio_reg_write(sc, SMI_CMD, smi_cmd); for (timeout = 1000; timeout > 0; timeout--) { smi_wr_dat = mdio_reg_read(sc, SMI_WR_DAT); if (smi_wr_dat & SMI_WR_DAT_PENDING) DELAY(1000); else break; } if (timeout <= 0) return (EIO); else { /* Return 0 on success */ return (0); } } static int thunder_ifmedia_change_stub(if_t ifp __unused) { /* Will never be called by if_media */ return (0); } static void thunder_ifmedia_status_stub(if_t ifp __unused, struct ifmediareq *ifmr __unused) { /* Will never be called by if_media */ } static __inline struct phy_desc * get_phy_desc(struct thunder_mdio_softc *sc, int lmacid) { struct phy_desc *pd = NULL; MDIO_LOCK_ASSERT(sc); TAILQ_FOREACH(pd, &sc->phy_desc_head, phy_desc_list) { if (pd->lmacid == lmacid) break; } return (pd); } static int thunder_mdio_media_status(device_t dev, int lmacid, int *link, int *duplex, int *speed) { struct thunder_mdio_softc *sc; struct mii_data *mii_sc; struct phy_desc *pd; sc = device_get_softc(dev); MDIO_LOCK(sc); pd = get_phy_desc(sc, lmacid); if (pd == NULL) { /* Panic when invariants are enabled, fail otherwise. 
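		 * (KASSERT() is compiled out on kernels built without
		 * INVARIANTS, so the ENXIO return below is the only visible
		 * failure in that case.)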
*/ KASSERT(0, ("%s: no PHY descriptor for LMAC%d", __func__, lmacid)); MDIO_UNLOCK(sc); return (ENXIO); } mii_sc = device_get_softc(pd->miibus); mii_tick(mii_sc); if ((mii_sc->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { /* Link is up */ *link = 1; } else *link = 0; switch (IFM_SUBTYPE(mii_sc->mii_media_active)) { case IFM_10_T: *speed = 10; break; case IFM_100_TX: *speed = 100; break; case IFM_1000_T: *speed = 1000; break; default: /* IFM_NONE */ *speed = 0; } if ((IFM_OPTIONS(mii_sc->mii_media_active) & IFM_FDX) != 0) *duplex = 1; else *duplex = 0; MDIO_UNLOCK(sc); return (0); } static int thunder_mdio_media_change(device_t dev, int lmacid, int link, int duplex, int speed) { return (EIO); } static int thunder_mdio_phy_connect(device_t dev, int lmacid, int phy) { struct thunder_mdio_softc *sc; struct phy_desc *pd; int err; sc = device_get_softc(dev); MDIO_LOCK(sc); pd = get_phy_desc(sc, lmacid); MDIO_UNLOCK(sc); if (pd == NULL) { pd = malloc(sizeof(*pd), M_THUNDER_MDIO, (M_NOWAIT | M_ZERO)); if (pd == NULL) return (ENOMEM); pd->ifp = if_alloc(IFT_ETHER); pd->lmacid = lmacid; } err = mii_attach(dev, &pd->miibus, pd->ifp, thunder_ifmedia_change_stub, thunder_ifmedia_status_stub, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); if (err != 0) { device_printf(dev, "Could not attach PHY%d\n", phy); if_free(pd->ifp); free(pd, M_THUNDER_MDIO); return (ENXIO); } MDIO_LOCK(sc); TAILQ_INSERT_TAIL(&sc->phy_desc_head, pd, phy_desc_list); MDIO_UNLOCK(sc); return (0); } static int thunder_mdio_phy_disconnect(device_t dev, int lmacid, int phy) { struct thunder_mdio_softc *sc; struct phy_desc *pd; sc = device_get_softc(dev); MDIO_LOCK(sc); pd = get_phy_desc(sc, lmacid); if (pd == NULL) { MDIO_UNLOCK(sc); return (EINVAL); } /* Remove this PHY descriptor from the list */ TAILQ_REMOVE(&sc->phy_desc_head, pd, phy_desc_list); /* Detach miibus */ bus_generic_detach(dev); - device_delete_child(dev, pd->miibus); /* Free fake ifnet */ if_free(pd->ifp); /* Free memory under phy descriptor */ free(pd, M_THUNDER_MDIO); MDIO_UNLOCK(sc); return (0); } diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 1d04ea496f08..0df0b883a32d 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -1,6347 +1,6352 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_bus.h" #include "opt_ddb.h" #include "opt_iommu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INTRNG #include #endif #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); SYSCTL_ROOT_NODE(OID_AUTO, dev, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); static bool disable_failed_devs = false; SYSCTL_BOOL(_hw_bus, OID_AUTO, disable_failed_devices, CTLFLAG_RWTUN, &disable_failed_devs, 0, "Do not retry attaching devices that return an error from DEVICE_ATTACH the first time"); /* * Used to attach drivers to devclasses. */ typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ int pass; int flags; #define DL_DEFERRED_PROBE 1 /* Probe deferred on this */ TAILQ_ENTRY(driverlink) passlink; }; /* * Forward declarations */ typedef TAILQ_HEAD(devclass_list, devclass) devclass_list_t; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; typedef TAILQ_HEAD(device_list, _device) device_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ int flags; #define DC_HAS_CHILDREN 1 struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; struct device_prop_elm { const char *name; void *val; void *dtr_ctx; device_prop_dtr_t dtr; LIST_ENTRY(device_prop_elm) link; }; static void device_destroy_props(device_t dev); /** * @brief Implementation of _device. * * The structure is named "_device" instead of "device" to avoid type confusion * caused by other subsystems defining a (struct device). */ struct _device { /* * A device is a kernel object. The first field must be the * current ops table for the object. */ KOBJ_FIELDS; /* * Device hierarchy. */ TAILQ_ENTRY(_device) link; /**< list of devices in parent */ TAILQ_ENTRY(_device) devlink; /**< global device list membership */ device_t parent; /**< parent of this device */ device_list_t children; /**< list of child devices */ /* * Details of this device. */ driver_t *driver; /**< current driver */ devclass_t devclass; /**< current device class */ int unit; /**< current unit number */ char* nameunit; /**< name+unit e.g. 
foodev0 */ char* desc; /**< driver specific description */ u_int busy; /**< count of calls to device_busy() */ device_state_t state; /**< current device state */ uint32_t devflags; /**< api level flags for device_get_flags() */ u_int flags; /**< internal device flags */ u_int order; /**< order from device_add_child_ordered() */ void *ivars; /**< instance variables */ void *softc; /**< current driver's variables */ LIST_HEAD(, device_prop_elm) props; struct sysctl_ctx_list sysctl_ctx; /**< state for sysctl variables */ struct sysctl_oid *sysctl_tree; /**< state for sysctl variables */ }; static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures"); static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc"); EVENTHANDLER_LIST_DEFINE(device_attach); EVENTHANDLER_LIST_DEFINE(device_detach); EVENTHANDLER_LIST_DEFINE(device_nomatch); EVENTHANDLER_LIST_DEFINE(dev_lookup); static void devctl2_init(void); static bool device_frozen; #define DRIVERNAME(d) ((d)? d->name : "no driver") #define DEVCLANAME(d) ((d)? d->name : "no devclass") #ifdef BUS_DEBUG static int bus_debug = 1; SYSCTL_INT(_debug, OID_AUTO, bus_debug, CTLFLAG_RWTUN, &bus_debug, 0, "Bus debug level"); #define PDEBUG(a) if (bus_debug) {printf("%s:%d: ", __func__, __LINE__), printf a; printf("\n");} #define DEVICENAME(d) ((d)? device_get_name(d): "no device") /** * Produce the indenting, indent*2 spaces plus a '.' ahead of that to * prevent syslog from deleting initial spaces */ #define indentprintf(p) do { int iJ; printf("."); for (iJ=0; iJparent ? dc->parent->name : ""; break; default: return (EINVAL); } return (SYSCTL_OUT_STR(req, value)); } static void devclass_sysctl_init(devclass_t dc) { if (dc->sysctl_tree != NULL) return; sysctl_ctx_init(&dc->sysctl_ctx); dc->sysctl_tree = SYSCTL_ADD_NODE(&dc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, dc->name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); SYSCTL_ADD_PROC(&dc->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dc, DEVCLASS_SYSCTL_PARENT, devclass_sysctl_handler, "A", "parent class"); } enum { DEVICE_SYSCTL_DESC, DEVICE_SYSCTL_DRIVER, DEVICE_SYSCTL_LOCATION, DEVICE_SYSCTL_PNPINFO, DEVICE_SYSCTL_PARENT, DEVICE_SYSCTL_IOMMU, }; static int device_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct sbuf sb; device_t dev = (device_t)arg1; device_t iommu; int error; uint16_t rid; const char *c; sbuf_new_for_sysctl(&sb, NULL, 1024, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); bus_topo_lock(); switch (arg2) { case DEVICE_SYSCTL_DESC: sbuf_cat(&sb, dev->desc ? dev->desc : ""); break; case DEVICE_SYSCTL_DRIVER: sbuf_cat(&sb, dev->driver ? dev->driver->name : ""); break; case DEVICE_SYSCTL_LOCATION: bus_child_location(dev, &sb); break; case DEVICE_SYSCTL_PNPINFO: bus_child_pnpinfo(dev, &sb); break; case DEVICE_SYSCTL_PARENT: sbuf_cat(&sb, dev->parent ? 
dev->parent->nameunit : ""); break; case DEVICE_SYSCTL_IOMMU: iommu = NULL; error = device_get_prop(dev, DEV_PROP_NAME_IOMMU, (void **)&iommu); c = ""; if (error == 0 && iommu != NULL) { sbuf_printf(&sb, "unit=%s", device_get_nameunit(iommu)); c = " "; } rid = 0; #ifdef IOMMU iommu_get_requester(dev, &rid); #endif if (rid != 0) sbuf_printf(&sb, "%srid=%#x", c, rid); break; default: error = EINVAL; goto out; } error = sbuf_finish(&sb); out: bus_topo_unlock(); sbuf_delete(&sb); return (error); } static void device_sysctl_init(device_t dev) { devclass_t dc = dev->devclass; int domain; if (dev->sysctl_tree != NULL) return; devclass_sysctl_init(dc); sysctl_ctx_init(&dev->sysctl_ctx); dev->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&dev->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, dev->nameunit + strlen(dc->name), CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "", "device_index"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%desc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_DESC, device_sysctl_handler, "A", "device description"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%driver", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_DRIVER, device_sysctl_handler, "A", "device driver name"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%location", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_LOCATION, device_sysctl_handler, "A", "device location relative to parent"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%pnpinfo", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_PNPINFO, device_sysctl_handler, "A", "device identification"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_PARENT, device_sysctl_handler, "A", "parent device"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%iommu", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_IOMMU, device_sysctl_handler, "A", "iommu unit handling the device requests"); if (bus_get_domain(dev, &domain) == 0) SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, domain, "NUMA domain"); } static void device_sysctl_update(device_t dev) { devclass_t dc = dev->devclass; if (dev->sysctl_tree == NULL) return; sysctl_rename_oid(dev->sysctl_tree, dev->nameunit + strlen(dc->name)); } static void device_sysctl_fini(device_t dev) { if (dev->sysctl_tree == NULL) return; sysctl_ctx_free(&dev->sysctl_ctx); dev->sysctl_tree = NULL; } static struct device_list bus_data_devices; static int bus_data_generation = 1; static kobj_method_t null_methods[] = { KOBJMETHOD_END }; DEFINE_CLASS(null, null_methods, 0); void bus_topo_assert(void) { GIANT_REQUIRED; } struct mtx * bus_topo_mtx(void) { return (&Giant); } void bus_topo_lock(void) { mtx_lock(bus_topo_mtx()); } void bus_topo_unlock(void) { mtx_unlock(bus_topo_mtx()); } /* * Bus pass implementation */ static driver_list_t passes = TAILQ_HEAD_INITIALIZER(passes); static int bus_current_pass = BUS_PASS_ROOT; /** * @internal * @brief Register the pass level of a new driver attachment * * Register a new driver attachment's pass level. If no driver * attachment with the same pass level has been added, then @p new * will be added to the global passes list. 
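 * The list is kept sorted by ascending pass number with one entry per
 * distinct level, which lets bus_set_pass() walk it in order and trigger a
 * single BUS_NEW_PASS() rescan per newly reached level.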
* * @param new the new driver attachment */ static void driver_register_pass(struct driverlink *new) { struct driverlink *dl; /* We only consider pass numbers during boot. */ if (bus_current_pass == BUS_PASS_DEFAULT) return; /* * Walk the passes list. If we already know about this pass * then there is nothing to do. If we don't, then insert this * driver link into the list. */ TAILQ_FOREACH(dl, &passes, passlink) { if (dl->pass < new->pass) continue; if (dl->pass == new->pass) return; TAILQ_INSERT_BEFORE(dl, new, passlink); return; } TAILQ_INSERT_TAIL(&passes, new, passlink); } /** * @brief Retrieve the current bus pass * * Retrieves the current bus pass level. Call the BUS_NEW_PASS() * method on the root bus to kick off a new device tree scan for each * new pass level that has at least one driver. */ int bus_get_pass(void) { return (bus_current_pass); } /** * @brief Raise the current bus pass * * Raise the current bus pass level to @p pass. Call the BUS_NEW_PASS() * method on the root bus to kick off a new device tree scan for each * new pass level that has at least one driver. */ static void bus_set_pass(int pass) { struct driverlink *dl; if (bus_current_pass > pass) panic("Attempt to lower bus pass level"); TAILQ_FOREACH(dl, &passes, passlink) { /* Skip pass values below the current pass level. */ if (dl->pass <= bus_current_pass) continue; /* * Bail once we hit a driver with a pass level that is * too high. */ if (dl->pass > pass) break; /* * Raise the pass level to the next level and rescan * the tree. */ bus_current_pass = dl->pass; BUS_NEW_PASS(root_bus); } /* * If there isn't a driver registered for the requested pass, * then bus_current_pass might still be less than 'pass'. Set * it to 'pass' in that case. */ if (bus_current_pass < pass) bus_current_pass = pass; KASSERT(bus_current_pass == pass, ("Failed to update bus pass level")); } /* * Devclass implementation */ static devclass_list_t devclasses = TAILQ_HEAD_INITIALIZER(devclasses); /** * @internal * @brief Find or create a device class * * If a device class with the name @p classname exists, return it, * otherwise if @p create is non-zero create and return a new device * class. * * If @p parentname is non-NULL, the parent of the devclass is set to * the devclass of that name. * * @param classname the devclass name to find or create * @param parentname the parent devclass name or @c NULL * @param create non-zero to create a devclass */ static devclass_t devclass_find_internal(const char *classname, const char *parentname, int create) { devclass_t dc; PDEBUG(("looking for %s", classname)); if (!classname) return (NULL); TAILQ_FOREACH(dc, &devclasses, link) { if (!strcmp(dc->name, classname)) break; } if (create && !dc) { PDEBUG(("creating %s", classname)); dc = malloc(sizeof(struct devclass) + strlen(classname) + 1, M_BUS, M_NOWAIT | M_ZERO); if (!dc) return (NULL); dc->parent = NULL; dc->name = (char*) (dc + 1); strcpy(dc->name, classname); TAILQ_INIT(&dc->drivers); TAILQ_INSERT_TAIL(&devclasses, dc, link); bus_data_generation_update(); } /* * If a parent class is specified, then set that as our parent so * that this devclass will support drivers for the parent class as * well. If the parent class has the same name don't do this though * as it creates a cycle that can trigger an infinite loop in * device_probe_child() if a device exists for which there is no * suitable driver. 
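	 * (Concretely, the strcmp() below refuses to make a devclass its own
	 * parent when a driver's base class shares the driver's name.)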
*/ if (parentname && dc && !dc->parent && strcmp(classname, parentname) != 0) { dc->parent = devclass_find_internal(parentname, NULL, TRUE); dc->parent->flags |= DC_HAS_CHILDREN; } return (dc); } /** * @brief Create a device class * * If a device class with the name @p classname exists, return it, * otherwise create and return a new device class. * * @param classname the devclass name to find or create */ devclass_t devclass_create(const char *classname) { return (devclass_find_internal(classname, NULL, TRUE)); } /** * @brief Find a device class * * If a device class with the name @p classname exists, return it, * otherwise return @c NULL. * * @param classname the devclass name to find */ devclass_t devclass_find(const char *classname) { return (devclass_find_internal(classname, NULL, FALSE)); } /** * @brief Register that a device driver has been added to a devclass * * Register that a device driver has been added to a devclass. This * is called by devclass_add_driver to accomplish the recursive * notification of all the children classes of dc, as well as dc. * Each layer will have BUS_DRIVER_ADDED() called for all instances of * the devclass. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param dc the devclass to edit * @param driver the driver that was just added */ static void devclass_driver_added(devclass_t dc, driver_t *driver) { devclass_t parent; int i; /* * Call BUS_DRIVER_ADDED for any existing buses in this class. */ for (i = 0; i < dc->maxunit; i++) if (dc->devices[i] && device_is_attached(dc->devices[i])) BUS_DRIVER_ADDED(dc->devices[i], driver); /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(dc->flags & DC_HAS_CHILDREN)) return; parent = dc; TAILQ_FOREACH(dc, &devclasses, link) { if (dc->parent == parent) devclass_driver_added(dc, driver); } } static void device_handle_nomatch(device_t dev) { BUS_PROBE_NOMATCH(dev->parent, dev); EVENTHANDLER_DIRECT_INVOKE(device_nomatch, dev); dev->flags |= DF_DONENOMATCH; } /** * @brief Add a device driver to a device class * * Add a device driver to a devclass. This is normally called * automatically by DRIVER_MODULE(). The BUS_DRIVER_ADDED() method of * all devices in the devclass will be called to allow them to attempt * to re-probe any unmatched children. * * @param dc the devclass to edit * @param driver the driver to register */ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp) { driverlink_t dl; devclass_t child_dc; const char *parentname; PDEBUG(("%s", DRIVERNAME(driver))); /* Don't allow invalid pass values. */ if (pass <= BUS_PASS_ROOT) return (EINVAL); dl = malloc(sizeof *dl, M_BUS, M_NOWAIT|M_ZERO); if (!dl) return (ENOMEM); /* * Compile the driver's methods. Also increase the reference count * so that the class doesn't get freed when the last instance * goes. This means we can safely use static methods and avoids a * double-free in devclass_delete_driver. */ kobj_class_compile((kobj_class_t) driver); /* * If the driver has any base classes, make the * devclass inherit from the devclass of the driver's * first base class. 
This will allow the system to * search for drivers in both devclasses for children * of a device using this driver. */ if (driver->baseclasses) parentname = driver->baseclasses[0]->name; else parentname = NULL; child_dc = devclass_find_internal(driver->name, parentname, TRUE); if (dcp != NULL) *dcp = child_dc; dl->driver = driver; TAILQ_INSERT_TAIL(&dc->drivers, dl, link); driver->refs++; /* XXX: kobj_mtx */ dl->pass = pass; driver_register_pass(dl); if (device_frozen) { dl->flags |= DL_DEFERRED_PROBE; } else { devclass_driver_added(dc, driver); } bus_data_generation_update(); return (0); } /** * @brief Register that a device driver has been deleted from a devclass * * Register that a device driver has been removed from a devclass. * This is called by devclass_delete_driver to accomplish the * recursive notification of all the children classes of busclass, as * well as busclass. Each layer will attempt to detach the driver * from any devices that are children of the bus's devclass. The function * will return an error if a device fails to detach. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param busclass the devclass of the parent bus * @param dc the devclass of the driver being deleted * @param driver the driver being deleted */ static int devclass_driver_deleted(devclass_t busclass, devclass_t dc, driver_t *driver) { devclass_t parent; device_t dev; int error, i; /* * Disassociate from any devices. We iterate through all the * devices in the devclass of the driver and detach any which are * using the driver and which have a parent in the devclass which * we are deleting from. * * Note that since a driver can be in multiple devclasses, we * should not detach devices which are not children of devices in * the affected devclass. * * If we're frozen, we don't generate NOMATCH events. Mark to * generate later. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_detach(dev)) != 0) return (error); if (device_frozen) { dev->flags &= ~DF_DONENOMATCH; dev->flags |= DF_NEEDNOMATCH; } else { device_handle_nomatch(dev); } } } } /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(busclass->flags & DC_HAS_CHILDREN)) return (0); parent = busclass; TAILQ_FOREACH(busclass, &devclasses, link) { if (busclass->parent == parent) { error = devclass_driver_deleted(busclass, dc, driver); if (error) return (error); } } return (0); } /** * @brief Delete a device driver from a device class * * Delete a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_delete_driver() will first attempt to detach from each * device. If one of the detach calls fails, the driver will not be * deleted. 
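 * This is the inverse of devclass_add_driver() and is typically reached from
 * the driver module's unload path, so a device that refuses to detach keeps
 * its driver module loaded.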
* * @param dc the devclass to edit * @param driver the driver to unregister */ int devclass_delete_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } error = devclass_driver_deleted(busclass, dc, driver); if (error != 0) return (error); TAILQ_REMOVE(&busclass->drivers, dl, link); free(dl, M_BUS); /* XXX: kobj_mtx */ driver->refs--; if (driver->refs == 0) kobj_class_free((kobj_class_t) driver); bus_data_generation_update(); return (0); } /** * @brief Quiesces a set of device drivers from a device class * * Quiesce a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_quiesece_driver() will first attempt to quiesce each * device. * * @param dc the devclass to edit * @param driver the driver to unregister */ static int devclass_quiesce_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; device_t dev; int i; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } /* * Quiesce all devices. We iterate through all the devices in * the devclass of the driver and quiesce any which are using * the driver and which have a parent in the devclass which we * are quiescing. * * Note that since a driver can be in multiple devclasses, we * should not quiesce devices which are not children of * devices in the affected devclass. 
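	 * (Quiescing is advisory: DEVICE_QUIESCE() asks each matching device
	 * whether the driver may go away, and any error is propagated to the
	 * caller, which can then refuse to unload the driver.)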
*/ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_quiesce(dev)) != 0) return (error); } } } return (0); } /** * @internal */ static driverlink_t devclass_find_driver_internal(devclass_t dc, const char *classname) { driverlink_t dl; PDEBUG(("%s in devclass %s", classname, DEVCLANAME(dc))); TAILQ_FOREACH(dl, &dc->drivers, link) { if (!strcmp(dl->driver->name, classname)) return (dl); } PDEBUG(("not found")); return (NULL); } /** * @brief Return the name of the devclass */ const char * devclass_get_name(devclass_t dc) { return (dc->name); } /** * @brief Find a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t devclass_get_device(devclass_t dc, int unit) { if (dc == NULL || unit < 0 || unit >= dc->maxunit) return (NULL); return (dc->devices[unit]); } /** * @brief Find the softc field of a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the softc field of the device with the given * unit number or @c NULL if there is no such * device */ void * devclass_get_softc(devclass_t dc, int unit) { device_t dev; dev = devclass_get_device(dc, unit); if (!dev) return (NULL); return (device_get_softc(dev)); } /** * @brief Get a list of devices in the devclass * * An array containing a list of all the devices in the given devclass * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP), even if @p *devcountp is 0. * * @param dc the devclass to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_devices(devclass_t dc, device_t **devlistp, int *devcountp) { int count, i; device_t *list; count = devclass_get_count(dc); list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { list[count] = dc->devices[i]; count++; } } *devlistp = list; *devcountp = count; return (0); } /** * @brief Get a list of drivers in the devclass * * An array containing a list of pointers to all the drivers in the * given devclass is allocated and returned in @p *listp. The number * of drivers in the array is returned in @p *countp. The caller should * free the array using @c free(p, M_TEMP). 
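 * An illustrative caller (hypothetical, error handling trimmed) might look
 * like this:
 *
 *	driver_t **drivers;
 *	int ndrivers, i;
 *
 *	if (devclass_get_drivers(dc, &drivers, &ndrivers) == 0) {
 *		for (i = 0; i < ndrivers; i++)
 *			printf("%s\n", drivers[i]->name);
 *		free(drivers, M_TEMP);
 *	}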
* * @param dc the devclass to examine * @param listp gives location for array pointer return value * @param countp gives location for number of array elements * return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp) { driverlink_t dl; driver_t **list; int count; count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) count++; list = malloc(count * sizeof(driver_t *), M_TEMP, M_NOWAIT); if (list == NULL) return (ENOMEM); count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) { list[count] = dl->driver; count++; } *listp = list; *countp = count; return (0); } /** * @brief Get the number of devices in a devclass * * @param dc the devclass to examine */ int devclass_get_count(devclass_t dc) { int count, i; count = 0; for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) count++; return (count); } /** * @brief Get the maximum unit number used in a devclass * * Note that this is one greater than the highest currently-allocated unit. If * @p dc is NULL, @c -1 is returned to indicate that not even the devclass has * been allocated yet. * * @param dc the devclass to examine */ int devclass_get_maxunit(devclass_t dc) { if (dc == NULL) return (-1); return (dc->maxunit); } /** * @brief Find a free unit number in a devclass * * This function searches for the first unused unit number greater * than or equal to @p unit. Note: This can return INT_MAX which * may be rejected elsewhere. * * @param dc the devclass to examine * @param unit the first unit number to check */ int devclass_find_free_unit(devclass_t dc, int unit) { if (dc == NULL) return (unit); while (unit < dc->maxunit && dc->devices[unit] != NULL) unit++; return (unit); } /** * @brief Set the parent of a devclass * * The parent class is normally initialised automatically by * DRIVER_MODULE(). * * @param dc the devclass to edit * @param pdc the new parent devclass */ void devclass_set_parent(devclass_t dc, devclass_t pdc) { dc->parent = pdc; } /** * @brief Get the parent of a devclass * * @param dc the devclass to examine */ devclass_t devclass_get_parent(devclass_t dc) { return (dc->parent); } struct sysctl_ctx_list * devclass_get_sysctl_ctx(devclass_t dc) { return (&dc->sysctl_ctx); } struct sysctl_oid * devclass_get_sysctl_tree(devclass_t dc) { return (dc->sysctl_tree); } /** * @internal * @brief Allocate a unit number * * On entry, @p *unitp is the desired unit number (or @c DEVICE_UNIT_ANY if any * will do). The allocated unit number is returned in @p *unitp. * * @param dc the devclass to allocate from * @param unitp points at the location for the allocated unit * number * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure * @retval EINVAL unit is negative or we've run out of units */ static int devclass_alloc_unit(devclass_t dc, device_t dev, int *unitp) { const char *s; int unit = *unitp; PDEBUG(("unit %d in devclass %s", unit, DEVCLANAME(dc))); /* Ask the parent bus if it wants to wire this device.
*/ if (unit == DEVICE_UNIT_ANY) BUS_HINT_DEVICE_UNIT(device_get_parent(dev), dev, dc->name, &unit); /* Unit numbers are either DEVICE_UNIT_ANY or in [0,INT_MAX) */ if ((unit < 0 && unit != DEVICE_UNIT_ANY) || unit == INT_MAX) return (EINVAL); /* If we were given a wired unit number, check for existing device */ if (unit != DEVICE_UNIT_ANY) { if (unit < dc->maxunit && dc->devices[unit] != NULL) { if (bootverbose) printf("%s: %s%d already exists; skipping it\n", dc->name, dc->name, *unitp); return (EEXIST); } } else { /* Unwired device, find the next available slot for it */ unit = 0; for (unit = 0; unit < INT_MAX; unit++) { /* If this device slot is already in use, skip it. */ if (unit < dc->maxunit && dc->devices[unit] != NULL) continue; /* If there is an "at" hint for a unit then skip it. */ if (resource_string_value(dc->name, unit, "at", &s) == 0) continue; break; } } /* * Unit numbers must be in the range [0, INT_MAX), so exclude INT_MAX as * too large. We constrain maxunit below to be <= INT_MAX. This means we * can treat unit and maxunit as normal integers with normal math * everywhere and we only have to flag INT_MAX as invalid. */ if (unit == INT_MAX) return (EINVAL); /* * We've selected a unit beyond the length of the table, so let's extend * the table to make room for all units up to and including this one. */ if (unit >= dc->maxunit) { int newsize; newsize = unit + 1; dc->devices = reallocf(dc->devices, newsize * sizeof(*dc->devices), M_BUS, M_WAITOK); memset(dc->devices + dc->maxunit, 0, sizeof(device_t) * (newsize - dc->maxunit)); dc->maxunit = newsize; } PDEBUG(("now: unit %d in devclass %s", unit, DEVCLANAME(dc))); *unitp = unit; return (0); } /** * @internal * @brief Add a device to a devclass * * A unit number is allocated for the device (using the device's * preferred unit number if any) and the device is registered in the * devclass. This allows the device to be looked up by its unit * number, e.g. by decoding a dev_t minor number. * * @param dc the devclass to add to * @param dev the device to add * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure * @retval EINVAL Unit number invalid or too many units */ static int devclass_add_device(devclass_t dc, device_t dev) { int buflen, error; PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); buflen = snprintf(NULL, 0, "%s%d$", dc->name, INT_MAX); if (buflen < 0) return (ENOMEM); dev->nameunit = malloc(buflen, M_BUS, M_NOWAIT|M_ZERO); if (!dev->nameunit) return (ENOMEM); if ((error = devclass_alloc_unit(dc, dev, &dev->unit)) != 0) { free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (error); } dc->devices[dev->unit] = dev; dev->devclass = dc; snprintf(dev->nameunit, buflen, "%s%d", dc->name, dev->unit); return (0); } /** * @internal * @brief Delete a device from a devclass * * The device is removed from the devclass's device list and its unit * number is freed. 
* @param dc the devclass to delete from * @param dev the device to delete * * @retval 0 success */ static int devclass_delete_device(devclass_t dc, device_t dev) { if (!dc || !dev) return (0); PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); if (dev->devclass != dc || dc->devices[dev->unit] != dev) panic("devclass_delete_device: inconsistent device class"); dc->devices[dev->unit] = NULL; if (dev->flags & DF_WILDCARD) dev->unit = DEVICE_UNIT_ANY; dev->devclass = NULL; free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (0); } /** * @internal * @brief Make a new device and add it as a child of @p parent * * @param parent the parent of the new device * @param name the devclass name of the new device or @c NULL * to leave the devclass unspecified * @param unit the unit number of the new device or @c DEVICE_UNIT_ANY * to leave the unit number unspecified * * @returns the new device */ static device_t make_device(device_t parent, const char *name, int unit) { device_t dev; devclass_t dc; PDEBUG(("%s at %s as unit %d", name, DEVICENAME(parent), unit)); if (name) { dc = devclass_find_internal(name, NULL, TRUE); if (!dc) { printf("make_device: can't find device class %s\n", name); return (NULL); } } else { dc = NULL; } dev = malloc(sizeof(*dev), M_BUS, M_NOWAIT|M_ZERO); if (!dev) return (NULL); dev->parent = parent; TAILQ_INIT(&dev->children); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; dev->devclass = NULL; dev->unit = unit; dev->nameunit = NULL; dev->desc = NULL; dev->busy = 0; dev->devflags = 0; dev->flags = DF_ENABLED; dev->order = 0; if (unit == DEVICE_UNIT_ANY) dev->flags |= DF_WILDCARD; if (name) { dev->flags |= DF_FIXEDCLASS; if (devclass_add_device(dc, dev)) { kobj_delete((kobj_t) dev, M_BUS); return (NULL); } } if (parent != NULL && device_has_quiet_children(parent)) dev->flags |= DF_QUIET | DF_QUIET_CHILDREN; dev->ivars = NULL; dev->softc = NULL; LIST_INIT(&dev->props); dev->state = DS_NOTPRESENT; TAILQ_INSERT_TAIL(&bus_data_devices, dev, devlink); bus_data_generation_update(); return (dev); } /** * @internal * @brief Print a description of a device. */ static int device_print_child(device_t dev, device_t child) { int retval = 0; if (device_is_alive(child)) retval += BUS_PRINT_CHILD(dev, child); else retval += device_printf(child, " not found\n"); return (retval); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with order zero. * * @param dev the device which will be the parent of the * new child device * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c DEVICE_UNIT_ANY if not * specified * * @returns the new device */ device_t device_add_child(device_t dev, const char *name, int unit) { return (device_add_child_ordered(dev, 0, name, unit)); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with the same order.
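 *
 * As a hedged illustration, a bus driver's identify method might use
 * device_add_child() to add a single fixed child (the "foo" devclass
 * name below is hypothetical):
 *
 *	static void
 *	foo_identify(driver_t *driver, device_t parent)
 *	{
 *		if (device_find_child(parent, "foo", DEVICE_UNIT_ANY) == NULL)
 *			device_add_child(parent, "foo", DEVICE_UNIT_ANY);
 *	}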
* * @param dev the device which will be the parent of the * new child device * @param order a value which is used to partially sort the * children of @p dev - devices created using * lower values of @p order appear first in @p * dev's list of children * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c DEVICE_UNIT_ANY if not * specified * * @returns the new device */ device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit) { device_t child; device_t place; PDEBUG(("%s at %s with order %u as unit %d", name, DEVICENAME(dev), order, unit)); KASSERT(name != NULL || unit == DEVICE_UNIT_ANY, ("child device with wildcard name and specific unit number")); child = make_device(dev, name, unit); if (child == NULL) return (child); child->order = order; TAILQ_FOREACH(place, &dev->children, link) { if (place->order > order) break; } if (place) { /* * The device 'place' is the first device whose order is * greater than the new child. */ TAILQ_INSERT_BEFORE(place, child, link); } else { /* * The new child's order is greater or equal to the order of * any existing device. Add the child to the tail of the list. */ TAILQ_INSERT_TAIL(&dev->children, child, link); } bus_data_generation_update(); return (child); } /** * @brief Delete a device * * This function deletes a device along with all of its children. If * the device currently has a driver attached to it, the device is * detached first using device_detach(). * * @param dev the parent device * @param child the device to delete * * @retval 0 success * @retval non-zero a unit error code describing the error */ int device_delete_child(device_t dev, device_t child) { int error; device_t grandchild; PDEBUG(("%s from %s", DEVICENAME(child), DEVICENAME(dev))); /* * Detach child. Ideally this cleans up any grandchild * devices. */ if ((error = device_detach(child)) != 0) return (error); /* Delete any grandchildren left after detach. */ while ((grandchild = TAILQ_FIRST(&child->children)) != NULL) { error = device_delete_child(child, grandchild); if (error) return (error); } device_destroy_props(dev); if (child->devclass) devclass_delete_device(child->devclass, child); if (child->parent) BUS_CHILD_DELETED(dev, child); TAILQ_REMOVE(&dev->children, child, link); TAILQ_REMOVE(&bus_data_devices, child, devlink); kobj_delete((kobj_t) child, M_BUS); bus_data_generation_update(); return (0); } /** * @brief Delete all children devices of the given device, if any. * * This function deletes all children devices of the given device, if * any, using the device_delete_child() function for each device it * finds. If a child device cannot be deleted, this function will * return an error code. * * @param dev the parent device * * @retval 0 success * @retval non-zero a device would not detach */ int device_delete_children(device_t dev) { device_t child; int error; PDEBUG(("Deleting all children of %s", DEVICENAME(dev))); error = 0; while ((child = TAILQ_FIRST(&dev->children)) != NULL) { error = device_delete_child(dev, child); if (error) { PDEBUG(("Failed deleting %s", DEVICENAME(child))); break; } } return (error); } /** * @brief Find a device given a unit number * * This is similar to devclass_get_devices() but only searches for * devices which have @p dev as a parent. * * @param dev the parent device to search * @param unit the unit number to search for. 
If the unit is * @c DEVICE_UNIT_ANY, return the first child of @p dev * which has name @p classname (that is, the one with the * lowest unit.) * * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t device_find_child(device_t dev, const char *classname, int unit) { devclass_t dc; device_t child; dc = devclass_find(classname); if (!dc) return (NULL); if (unit != DEVICE_UNIT_ANY) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } else { for (unit = 0; unit < devclass_get_maxunit(dc); unit++) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } } return (NULL); } /** * @internal */ static driverlink_t first_matching_driver(devclass_t dc, device_t dev) { if (dev->devclass) return (devclass_find_driver_internal(dc, dev->devclass->name)); return (TAILQ_FIRST(&dc->drivers)); } /** * @internal */ static driverlink_t next_matching_driver(devclass_t dc, device_t dev, driverlink_t last) { if (dev->devclass) { driverlink_t dl; for (dl = TAILQ_NEXT(last, link); dl; dl = TAILQ_NEXT(dl, link)) if (!strcmp(dev->devclass->name, dl->driver->name)) return (dl); return (NULL); } return (TAILQ_NEXT(last, link)); } /** * @internal */ int device_probe_child(device_t dev, device_t child) { devclass_t dc; driverlink_t best = NULL; driverlink_t dl; int result, pri = 0; /* We should preserve the devclass (or lack of) set by the bus. */ int hasclass = (child->devclass != NULL); bus_topo_assert(); dc = dev->devclass; if (!dc) panic("device_probe_child: parent device has no devclass"); /* * If the state is already probed, then return. */ if (child->state == DS_ALIVE) return (0); for (; dc; dc = dc->parent) { for (dl = first_matching_driver(dc, child); dl; dl = next_matching_driver(dc, child, dl)) { /* If this driver's pass is too high, then ignore it. */ if (dl->pass > bus_current_pass) continue; PDEBUG(("Trying %s", DRIVERNAME(dl->driver))); result = device_set_driver(child, dl->driver); if (result == ENOMEM) return (result); else if (result != 0) continue; if (!hasclass) { if (device_set_devclass(child, dl->driver->name) != 0) { char const * devname = device_get_name(child); if (devname == NULL) devname = "(unknown)"; printf("driver bug: Unable to set " "devclass (class: %s " "devname: %s)\n", dl->driver->name, devname); (void)device_set_driver(child, NULL); continue; } } /* Fetch any flags for the device before probing. */ resource_int_value(dl->driver->name, child->unit, "flags", &child->devflags); result = DEVICE_PROBE(child); /* * If probe returns 0, this is the driver that wins this * device. */ if (result == 0) { best = dl; pri = 0; goto exact_match; /* C doesn't have break 2 */ } /* Reset flags and devclass before the next probe. */ child->devflags = 0; if (!hasclass) (void)device_set_devclass(child, NULL); /* * Reset DF_QUIET in case this driver doesn't * end up as the best driver. */ device_verbose(child); /* * Probes that return BUS_PROBE_NOWILDCARD or lower * only match on devices whose driver was explicitly * specified. */ if (result <= BUS_PROBE_NOWILDCARD && !(child->flags & DF_FIXEDCLASS)) { result = ENXIO; } /* * The driver returned an error so it * certainly doesn't match. */ if (result > 0) { (void)device_set_driver(child, NULL); continue; } /* * A priority lower than SUCCESS, remember the * best matching driver. Initialise the value * of pri for the first match. 
*/ if (best == NULL || result > pri) { best = dl; pri = result; continue; } } } if (best == NULL) return (ENXIO); /* * If we found a driver, change state and initialise the devclass. * Set the winning driver, devclass, and flags. */ result = device_set_driver(child, best->driver); if (result != 0) return (result); if (!child->devclass) { result = device_set_devclass(child, best->driver->name); if (result != 0) { (void)device_set_driver(child, NULL); return (result); } } resource_int_value(best->driver->name, child->unit, "flags", &child->devflags); /* * A bit bogus. Call the probe method again to make sure that we have * the right description for the device. */ result = DEVICE_PROBE(child); if (result > 0) { if (!hasclass) (void)device_set_devclass(child, NULL); (void)device_set_driver(child, NULL); return (result); } exact_match: child->state = DS_ALIVE; bus_data_generation_update(); return (0); } /** * @brief Return the parent of a device */ device_t device_get_parent(device_t dev) { return (dev->parent); } /** * @brief Get a list of children of a device * * An array containing a list of all the children of the given device * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP). * * @param dev the device to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int device_get_children(device_t dev, device_t **devlistp, int *devcountp) { int count; device_t child; device_t *list; count = 0; TAILQ_FOREACH(child, &dev->children, link) { count++; } if (count == 0) { *devlistp = NULL; *devcountp = 0; return (0); } list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; TAILQ_FOREACH(child, &dev->children, link) { list[count] = child; count++; } *devlistp = list; *devcountp = count; return (0); } /** * @brief Return the current driver for the device or @c NULL if there * is no driver currently attached */ driver_t * device_get_driver(device_t dev) { return (dev->driver); } /** * @brief Return the current devclass for the device or @c NULL if * there is none. */ devclass_t device_get_devclass(device_t dev) { return (dev->devclass); } /** * @brief Return the name of the device's devclass or @c NULL if there * is none. */ const char * device_get_name(device_t dev) { if (dev != NULL && dev->devclass) return (devclass_get_name(dev->devclass)); return (NULL); } /** * @brief Return a string containing the device's devclass name * followed by an ascii representation of the device's unit number * (e.g. @c "foo2"). */ const char * device_get_nameunit(device_t dev) { return (dev->nameunit); } /** * @brief Return the device's unit number. 
*/ int device_get_unit(device_t dev) { return (dev->unit); } /** * @brief Return the device's description string */ const char * device_get_desc(device_t dev) { return (dev->desc); } /** * @brief Return the device's flags */ uint32_t device_get_flags(device_t dev) { return (dev->devflags); } struct sysctl_ctx_list * device_get_sysctl_ctx(device_t dev) { return (&dev->sysctl_ctx); } struct sysctl_oid * device_get_sysctl_tree(device_t dev) { return (dev->sysctl_tree); } /** * @brief Print the name of the device followed by a colon and a space * * @returns the number of characters printed */ int device_print_prettyname(device_t dev) { const char *name = device_get_name(dev); if (name == NULL) return (printf("unknown: ")); return (printf("%s%d: ", name, device_get_unit(dev))); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling vprintf() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_printf(device_t dev, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); sbuf_set_drain(&sb, sbuf_printf_drain, &retval); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); sbuf_delete(&sb); return (retval); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling log() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_log(device_t dev, int pri, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); log(pri, "%.*s", (int) sbuf_len(&sb), sbuf_data(&sb)); retval = sbuf_len(&sb); sbuf_delete(&sb); return (retval); } /** * @internal */ static void device_set_desc_internal(device_t dev, const char *desc, bool allocated) { if (dev->desc && (dev->flags & DF_DESCMALLOCED)) { free(dev->desc, M_BUS); dev->flags &= ~DF_DESCMALLOCED; dev->desc = NULL; } if (allocated && desc) dev->flags |= DF_DESCMALLOCED; dev->desc = __DECONST(char *, desc); bus_data_generation_update(); } /** * @brief Set the device's description * * The value of @c desc should be a string constant that will not * change (at least until the description is changed in a subsequent * call to device_set_desc() or device_set_desc_copy()). */ void device_set_desc(device_t dev, const char *desc) { device_set_desc_internal(dev, desc, false); } /** * @brief Set the device's description * * A printf-like version of device_set_desc(). */ void device_set_descf(device_t dev, const char *fmt, ...) { va_list ap; char *buf = NULL; va_start(ap, fmt); vasprintf(&buf, M_BUS, fmt, ap); va_end(ap); device_set_desc_internal(dev, buf, true); } /** * @brief Set the device's description * * The string pointed to by @c desc is copied. Use this function if * the device description is generated, (e.g. with sprintf()). 
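 *
 * A short sketch contrasting the three setters (illustrative; the
 * "Acme" strings and the @c rev and @c namebuf identifiers are made up):
 *
 *	device_set_desc(dev, "Acme widget");
 *	device_set_descf(dev, "Acme widget rev %d", rev);
 *	device_set_desc_copy(dev, namebuf);
 *
 * The first form keeps a pointer to the string constant, while the
 * latter two store a copy that is owned and later freed by the device.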
*/ void device_set_desc_copy(device_t dev, const char *desc) { char *buf; buf = strdup_flags(desc, M_BUS, M_NOWAIT); device_set_desc_internal(dev, buf, true); } /** * @brief Set the device's flags */ void device_set_flags(device_t dev, uint32_t flags) { dev->devflags = flags; } /** * @brief Return the device's softc field * * The softc is allocated and zeroed when a driver is attached, based * on the size field of the driver. */ void * device_get_softc(device_t dev) { return (dev->softc); } /** * @brief Set the device's softc field * * Most drivers do not need to use this since the softc is allocated * automatically when the driver is attached. */ void device_set_softc(device_t dev, void *softc) { if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) free(dev->softc, M_BUS_SC); dev->softc = softc; if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Free claimed softc * * Most drivers do not need to use this since the softc is freed * automatically when the driver is detached. */ void device_free_softc(void *softc) { free(softc, M_BUS_SC); } /** * @brief Claim softc * * This function can be used to let the driver free the automatically * allocated softc using "device_free_softc()". This function is * useful when the driver is refcounting the softc and the softc * cannot be freed when the "device_detach" method is called. */ void device_claim_softc(device_t dev) { if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Get the device's ivars field * * The ivars field is used by the parent device to store per-device * state (e.g. the physical location of the device or a list of * resources). */ void * device_get_ivars(device_t dev) { KASSERT(dev != NULL, ("device_get_ivars(NULL, ...)")); return (dev->ivars); } /** * @brief Set the device's ivars field */ void device_set_ivars(device_t dev, void * ivars) { KASSERT(dev != NULL, ("device_set_ivars(NULL, ...)")); dev->ivars = ivars; } /** * @brief Return the device's state */ device_state_t device_get_state(device_t dev) { return (dev->state); } /** * @brief Set the DF_ENABLED flag for the device */ void device_enable(device_t dev) { dev->flags |= DF_ENABLED; } /** * @brief Clear the DF_ENABLED flag for the device */ void device_disable(device_t dev) { dev->flags &= ~DF_ENABLED; } /** * @brief Increment the busy counter for the device */ void device_busy(device_t dev) { /* * Mark the device as busy, recursively up the tree if this busy count * goes 0->1. */ if (refcount_acquire(&dev->busy) == 0 && dev->parent != NULL) device_busy(dev->parent); } /** * @brief Decrement the busy counter for the device */ void device_unbusy(device_t dev) { /* * Mark the device as unbusy, recursively if this is the last busy count. */ if (refcount_release(&dev->busy) && dev->parent != NULL) device_unbusy(dev->parent); } /** * @brief Set the DF_QUIET flag for the device */ void device_quiet(device_t dev) { dev->flags |= DF_QUIET; } /** * @brief Set the DF_QUIET_CHILDREN flag for the device */ void device_quiet_children(device_t dev) { dev->flags |= DF_QUIET_CHILDREN; } /** * @brief Clear the DF_QUIET flag for the device */ void device_verbose(device_t dev) { dev->flags &= ~DF_QUIET; } ssize_t device_get_property(device_t dev, const char *prop, void *val, size_t sz, device_property_type_t type) { device_t bus = device_get_parent(dev); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_HANDLE: /* Size checks done in implementation.
*/ break; case DEVICE_PROP_UINT32: if (sz % 4 != 0) return (-1); break; case DEVICE_PROP_UINT64: if (sz % 8 != 0) return (-1); break; default: return (-1); } return (BUS_GET_PROPERTY(bus, dev, prop, val, sz, type)); } bool device_has_property(device_t dev, const char *prop) { return (device_get_property(dev, prop, NULL, 0, DEVICE_PROP_ANY) >= 0); } /** * @brief Return non-zero if the DF_QUIET_CHILDREN flag is set on the device */ int device_has_quiet_children(device_t dev) { return ((dev->flags & DF_QUIET_CHILDREN) != 0); } /** * @brief Return non-zero if the DF_QUIET flag is set on the device */ int device_is_quiet(device_t dev) { return ((dev->flags & DF_QUIET) != 0); } /** * @brief Return non-zero if the DF_ENABLED flag is set on the device */ int device_is_enabled(device_t dev) { return ((dev->flags & DF_ENABLED) != 0); } /** * @brief Return non-zero if the device was successfully probed */ int device_is_alive(device_t dev) { return (dev->state >= DS_ALIVE); } /** * @brief Return non-zero if the device currently has a driver * attached to it */ int device_is_attached(device_t dev) { return (dev->state >= DS_ATTACHED); } /** * @brief Return non-zero if the device is currently suspended. */ int device_is_suspended(device_t dev) { return ((dev->flags & DF_SUSPENDED) != 0); } /** * @brief Set the devclass of a device * @see devclass_add_device(). */ int device_set_devclass(device_t dev, const char *classname) { devclass_t dc; int error; if (!classname) { if (dev->devclass) devclass_delete_device(dev->devclass, dev); return (0); } if (dev->devclass) { printf("device_set_devclass: device class already set\n"); return (EINVAL); } dc = devclass_find_internal(classname, NULL, TRUE); if (!dc) return (ENOMEM); error = devclass_add_device(dc, dev); bus_data_generation_update(); return (error); } /** * @brief Set the devclass of a device and mark the devclass fixed. * @see device_set_devclass() */ int device_set_devclass_fixed(device_t dev, const char *classname) { int error; if (classname == NULL) return (EINVAL); error = device_set_devclass(dev, classname); if (error) return (error); dev->flags |= DF_FIXEDCLASS; return (0); } /** * @brief Query the device to determine if it's of a fixed devclass * @see device_set_devclass_fixed() */ bool device_is_devclass_fixed(device_t dev) { return ((dev->flags & DF_FIXEDCLASS) != 0); } /** * @brief Set the driver of a device * * @retval 0 success * @retval EBUSY the device already has a driver attached * @retval ENOMEM a memory allocation failure occurred */ int device_set_driver(device_t dev, driver_t *driver) { int domain; struct domainset *policy; if (dev->state >= DS_ATTACHED) return (EBUSY); if (dev->driver == driver) return (0); if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) { free(dev->softc, M_BUS_SC); dev->softc = NULL; } device_set_desc(dev, NULL); kobj_delete((kobj_t) dev, NULL); dev->driver = driver; if (driver) { kobj_init((kobj_t) dev, (kobj_class_t) driver); if (!(dev->flags & DF_EXTERNALSOFTC) && driver->size > 0) { if (bus_get_domain(dev, &domain) == 0) policy = DOMAINSET_PREF(domain); else policy = DOMAINSET_RR(); dev->softc = malloc_domainset(driver->size, M_BUS_SC, policy, M_NOWAIT | M_ZERO); if (!dev->softc) { kobj_delete((kobj_t) dev, NULL); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; return (ENOMEM); } } } else { kobj_init((kobj_t) dev, &null_class); } bus_data_generation_update(); return (0); } /** * @brief Probe a device, and return its status. * * This function is the core of the device autoconfiguration * system.
Its purpose is to select a suitable driver for a device and * then call that driver to initialise the hardware appropriately. The * driver is selected by calling the DEVICE_PROBE() method of a set of * candidate drivers and then choosing the driver which returned the * best value. This driver is then attached to the device using * device_attach(). * * The set of suitable drivers is taken from the list of drivers in * the parent device's devclass. If the device was originally created * with a specific class name (see device_add_child()), only drivers * with that name are probed, otherwise all drivers in the devclass * are probed. If no drivers return successful probe values in the * parent devclass, the search continues in the parent of that * devclass (see devclass_get_parent()) if any. * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code * @retval -1 Device already attached */ int device_probe(device_t dev) { int error; bus_topo_assert(); if (dev->state >= DS_ALIVE) return (-1); if (!(dev->flags & DF_ENABLED)) { if (bootverbose && device_get_name(dev) != NULL) { device_print_prettyname(dev); printf("not probed (disabled)\n"); } return (-1); } if ((error = device_probe_child(dev->parent, dev)) != 0) { if (bus_current_pass == BUS_PASS_DEFAULT && !(dev->flags & DF_DONENOMATCH)) { device_handle_nomatch(dev); } return (error); } return (0); } /** * @brief Probe a device and attach a driver if possible * * calls device_probe() and attaches if that was successful. */ int device_probe_and_attach(device_t dev) { int error; bus_topo_assert(); error = device_probe(dev); if (error == -1) return (0); else if (error != 0) return (error); return (device_attach(dev)); } /** * @brief Attach a device driver to a device * * This function is a wrapper around the DEVICE_ATTACH() driver * method. In addition to calling DEVICE_ATTACH(), it initialises the * device's sysctl tree, optionally prints a description of the device * and queues a notification event for user-based device management * services. * * Normally this function is only called internally from * device_probe_and_attach(). * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_attach(device_t dev) { uint64_t attachtime; uint16_t attachentropy; int error; if (resource_disabled(dev->driver->name, dev->unit)) { /* * Mostly detach the device, but leave it attached to * the devclass to reserve the name and unit. */ device_disable(dev); (void)device_set_driver(dev, NULL); dev->state = DS_NOTPRESENT; if (bootverbose) device_printf(dev, "disabled via hints entry\n"); return (ENXIO); } KASSERT(IS_DEFAULT_VNET(TD_TO_VNET(curthread)), ("device_attach: curthread is not in default vnet")); CURVNET_SET_QUIET(TD_TO_VNET(curthread)); device_sysctl_init(dev); if (!device_is_quiet(dev)) device_print_child(dev->parent, dev); attachtime = get_cyclecount(); dev->state = DS_ATTACHING; if ((error = DEVICE_ATTACH(dev)) != 0) { printf("device_attach: %s%d attach returned %d\n", dev->driver->name, dev->unit, error); BUS_CHILD_DETACHED(dev->parent, dev); if (disable_failed_devs) { /* * When the user has asked to disable failed devices, we * directly disable the device, but leave it in the * attaching state. It will not try to probe/attach the * device further. 
This leaves the device numbering * intact for other similar devices in the system. It * can be removed from this state with devctl. */ device_disable(dev); } else { /* * Otherwise, when attach fails, tear down the state * around that so we can retry when, for example, new * drivers are loaded. */ if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); KASSERT(dev->busy == 0, ("attach failed but busy")); dev->state = DS_NOTPRESENT; } CURVNET_RESTORE(); return (error); } CURVNET_RESTORE(); dev->flags |= DF_ATTACHED_ONCE; /* * We only need the low bits of this time, but ranges from tens to thousands * have been seen, so keep 2 bytes' worth. */ attachentropy = (uint16_t)(get_cyclecount() - attachtime); random_harvest_direct(&attachentropy, sizeof(attachentropy), RANDOM_ATTACH); device_sysctl_update(dev); dev->state = DS_ATTACHED; dev->flags &= ~DF_DONENOMATCH; EVENTHANDLER_DIRECT_INVOKE(device_attach, dev); return (0); } /** * @brief Detach a driver from a device * * This function is a wrapper around the DEVICE_DETACH() driver * method. If the call to DEVICE_DETACH() succeeds, it calls * BUS_CHILD_DETACHED() for the parent of @p dev, queues a * notification event for user-based device management services and * cleans up the device's sysctl tree. * * @param dev the device to un-initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_detach(device_t dev) { int error; bus_topo_assert(); PDEBUG(("%s", DEVICENAME(dev))); if (dev->busy > 0) return (EBUSY); if (dev->state == DS_ATTACHING) { device_printf(dev, "device in attaching state! Deferring detach.\n"); return (EBUSY); } if (dev->state != DS_ATTACHED) return (0); EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_BEGIN); if ((error = DEVICE_DETACH(dev)) != 0) { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_FAILED); return (error); } else { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_COMPLETE); } if (!device_is_quiet(dev)) device_printf(dev, "detached\n"); if (dev->parent) BUS_CHILD_DETACHED(dev->parent, dev); if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); device_verbose(dev); dev->state = DS_NOTPRESENT; (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); return (0); } /** * @brief Tells a driver to quiesce itself. * * This function is a wrapper around the DEVICE_QUIESCE() driver * method; the value returned by DEVICE_QUIESCE() is returned to the * caller. * * @param dev the device to quiesce * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_quiesce(device_t dev) { PDEBUG(("%s", DEVICENAME(dev))); if (dev->busy > 0) return (EBUSY); if (dev->state != DS_ATTACHED) return (0); return (DEVICE_QUIESCE(dev)); } /** * @brief Notify a device of system shutdown * * This function calls the DEVICE_SHUTDOWN() driver method if the * device currently has an attached driver. * * @returns the value returned by DEVICE_SHUTDOWN() */ int device_shutdown(device_t dev) { if (dev->state < DS_ATTACHED) return (0); return (DEVICE_SHUTDOWN(dev)); } /** * @brief Set the unit number of a device * * This function can be used to override the unit number used for a * device (e.g. to wire a device to a pre-configured unit number).
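 *
 * For instance (an illustrative sketch; @c wired_unit is hypothetical),
 * a bus that wants a child on a specific unit could call:
 *
 *	error = device_set_unit(child, wired_unit);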
*/ int device_set_unit(device_t dev, int unit) { devclass_t dc; int err; if (unit == dev->unit) return (0); dc = device_get_devclass(dev); if (unit < dc->maxunit && dc->devices[unit]) return (EBUSY); err = devclass_delete_device(dc, dev); if (err) return (err); dev->unit = unit; err = devclass_add_device(dc, dev); if (err) return (err); bus_data_generation_update(); return (0); } /*======================================*/ /* * Some useful method implementations to make life easier for bus drivers. */ /** * @brief Initialize a resource mapping request * * This is the internal implementation of the public API * resource_init_map_request. Callers may be using a different layout * of struct resource_map_request than the kernel, so callers pass in * the size of the structure they are using to identify the structure * layout. */ void resource_init_map_request_impl(struct resource_map_request *args, size_t sz) { bzero(args, sz); args->size = sz; args->memattr = VM_MEMATTR_DEVICE; } /** * @brief Validate a resource mapping request * * Translate a device driver's mapping request (@p in) to a struct * resource_map_request using the current structure layout (@p out). * In addition, validate the offset and length from the mapping * request against the bounds of the resource @p r. If the offset or * length are invalid, fail with EINVAL. If the offset and length are * valid, the absolute starting address of the requested mapping is * returned in @p startp and the length of the requested mapping is * returned in @p lengthp. */ int resource_validate_map_request(struct resource *r, struct resource_map_request *in, struct resource_map_request *out, rman_res_t *startp, rman_res_t *lengthp) { rman_res_t end, length, start; /* * This assumes that any callers of this function are compiled * into the kernel and use the same version of the structure * as this file. */ MPASS(out->size == sizeof(struct resource_map_request)); if (in != NULL) bcopy(in, out, imin(in->size, out->size)); start = rman_get_start(r) + out->offset; if (out->length == 0) length = rman_get_size(r); else length = out->length; end = start + length - 1; if (start > rman_get_end(r) || start < rman_get_start(r)) return (EINVAL); if (end > rman_get_end(r) || end < start) return (EINVAL); *lengthp = length; *startp = start; return (0); } /** * @brief Initialise a resource list. * * @param rl the resource list to initialise */ void resource_list_init(struct resource_list *rl) { STAILQ_INIT(rl); } /** * @brief Reclaim memory used by a resource list. * * This function frees the memory for all resource entries on the list * (if any). * * @param rl the resource list to free */ void resource_list_free(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) panic("resource_list_free: resource entry is busy"); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } /** * @brief Add a resource entry. * * This function adds a resource entry using the given @p type, @p * start, @p end and @p count values. A rid value is chosen by * searching sequentially for the first unused rid starting at zero. * * @param rl the resource list to edit * @param type the resource entry type (e.g. 
SYS_RES_MEMORY) * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count) { int rid; rid = 0; while (resource_list_find(rl, type, rid) != NULL) rid++; resource_list_add(rl, type, rid, start, end, count); return (rid); } /** * @brief Add or modify a resource entry. * * If an existing entry exists with the same type and rid, it will be * modified using the given values of @p start, @p end and @p * count. If no entry exists, a new one will be created using the * given values. The resource list entry that matches is then returned. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) { rle = malloc(sizeof(struct resource_list_entry), M_BUS, M_NOWAIT); if (!rle) panic("resource_list_add: can't record entry"); STAILQ_INSERT_TAIL(rl, rle, link); rle->type = type; rle->rid = rid; rle->res = NULL; rle->flags = 0; } if (rle->res) panic("resource_list_add: resource entry is busy"); rle->start = start; rle->end = end; rle->count = count; return (rle); } /** * @brief Determine if a resource entry is busy. * * Returns true if a resource entry is busy meaning that it has an * associated resource that is not an unallocated "reserved" resource. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is busy, zero otherwise. */ int resource_list_busy(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL || rle->res == NULL) return (0); if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) { KASSERT(!(rman_get_flags(rle->res) & RF_ACTIVE), ("reserved resource is active")); return (0); } return (1); } /** * @brief Determine if a resource entry is reserved. * * Returns true if a resource entry is reserved meaning that it has an * associated "reserved" resource. The resource can either be * allocated or unallocated. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is reserved, zero otherwise. */ int resource_list_reserved(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle != NULL && rle->flags & RLE_RESERVED) return (1); return (0); } /** * @brief Find a resource entry by type and rid. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns the resource entry pointer or NULL if there is no such * entry. */ struct resource_list_entry * resource_list_find(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type == type && rle->rid == rid) return (rle); } return (NULL); } /** * @brief Delete a resource entry. 
* * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier */ void resource_list_delete(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle = resource_list_find(rl, type, rid); if (rle) { if (rle->res != NULL) panic("resource_list_delete: resource has not been released"); STAILQ_REMOVE(rl, rle, resource_list_entry, link); free(rle, M_BUS); } } /** * @brief Allocate a reserved resource * * This can be used by buses to force the allocation of resources * that are always active in the system even if they are not allocated * by a driver (e.g. PCI BARs). This function is usually called when * adding a new child to the bus. The resource is allocated from the * parent bus when it is reserved. The resource list entry is marked * with RLE_RESERVED to note that it is a reserved resource. * * Subsequent attempts to allocate the resource with * resource_list_alloc() will succeed the first time and will set * RLE_ALLOCATED to note that it has been allocated. When a reserved * resource that has been allocated is released with * resource_list_release() the resource RLE_ALLOCATED is cleared, but * the actual resource remains allocated. The resource can be released to * the parent bus by calling resource_list_unreserve(). * * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device for which the resource is being reserved * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); struct resource *r; if (passthrough) panic( "resource_list_reserve() should only be called for direct children"); if (flags & RF_ACTIVE) panic( "resource_list_reserve() should only reserve inactive resources"); r = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); if (r != NULL) { rle = resource_list_find(rl, type, *rid); rle->flags |= RLE_RESERVED; } return (r); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE() * * Implement BUS_ALLOC_RESOURCE() by looking up a resource from the list * and passing the allocation up to the parent of @p bus. This assumes * that the first entry of @c device_get_ivars(child) is a struct * resource_list. This also handles 'passthrough' allocations where a * child is a remote descendant of bus by passing the allocation up to * the parent of bus. * * Typically, a bus driver would store a list of child resources * somewhere in the child device's ivars (see device_get_ivars()) and * its implementation of BUS_ALLOC_RESOURCE() would find that list and * then call resource_list_alloc() to perform the allocation. 
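 *
 * A minimal sketch of such a method (illustrative only; the "foo" name is
 * hypothetical and the child's ivars are assumed to begin with a
 * struct resource_list):
 *
 *	static struct resource *
 *	foo_alloc_resource(device_t bus, device_t child, int type, int *rid,
 *	    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
 *	{
 *		struct resource_list *rl = device_get_ivars(child);
 *
 *		return (resource_list_alloc(rl, bus, child, type, rid,
 *		    start, end, count, flags));
 *	}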
* * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device which is requesting an allocation * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); if (passthrough) { return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags)); } rle = resource_list_find(rl, type, *rid); if (!rle) return (NULL); /* no resource of that type/rid */ if (rle->res) { if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) return (NULL); if ((flags & RF_ACTIVE) && bus_activate_resource(child, type, *rid, rle->res) != 0) return (NULL); rle->flags |= RLE_ALLOCATED; return (rle->res); } device_printf(bus, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (isdefault) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); } rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); /* * Record the new range. */ if (rle->res) { rle->start = rman_get_start(rle->res); rle->end = rman_get_end(rle->res); rle->count = count; } return (rle->res); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE() * * Implement BUS_RELEASE_RESOURCE() using a resource list. Normally * used with resource_list_alloc(). * * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device which is requesting a release * @param res the resource to release * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_release(struct resource_list *rl, device_t bus, device_t child, struct resource *res) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int error; if (passthrough) { return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child, res)); } rle = resource_list_find(rl, rman_get_type(res), rman_get_rid(res)); if (!rle) panic("resource_list_release: can't find resource"); if (!rle->res) panic("resource_list_release: resource entry is not busy"); if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) { if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, res); if (error) return (error); } rle->flags &= ~RLE_ALLOCATED; return (0); } return (EINVAL); } error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, res); if (error) return (error); rle->res = NULL; return (0); } /** * @brief Release all active resources of a given type * * Release all active resources of a specified type. 
This is intended * to be used to clean up resources leaked by a driver after detach or * a failed attach. * * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose active resources are being released * @param type the type of resources to release * * @retval 0 success * @retval EBUSY at least one resource was active */ int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type) { struct resource_list_entry *rle; int error, retval; retval = 0; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->res == NULL) continue; if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) continue; retval = EBUSY; error = resource_list_release(rl, bus, child, rle->res); if (error != 0) device_printf(bus, "Failed to release active resource: %d\n", error); } return (retval); } /** * @brief Fully release a reserved resource * * Fully releases a resource reserved via resource_list_reserve(). * * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose reserved resource is being released * @param type the type of resource to release * @param rid the resource identifier * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child, int type, int rid) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); if (passthrough) panic( "resource_list_unreserve() should only be called for direct children"); rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_unreserve: can't find resource"); if (!(rle->flags & RLE_RESERVED)) return (EINVAL); if (rle->flags & RLE_ALLOCATED) return (EBUSY); rle->flags &= ~RLE_RESERVED; return (resource_list_release(rl, bus, child, rle->res)); } /** * @brief Print a description of resources in a resource list * * Print all resources of a specified type, for use in BUS_PRINT_CHILD(). * The name is printed if at least one resource of the given type is available. * The format is used to print resource start and end. * * @param rl the resource list to print * @param name the name of @p type, e.g. @c "memory" * @param type the type of resource entry to print * @param format printf(9) format string to print resource * start and end values * * @returns the number of characters printed */ int resource_list_print_type(struct resource_list *rl, const char *name, int type, const char *format) { struct resource_list_entry *rle; int printed, retval; printed = 0; retval = 0; /* Yes, this is kinda cheating */ STAILQ_FOREACH(rle, rl, link) { if (rle->type == type) { if (printed == 0) retval += printf(" %s ", name); else retval += printf(","); printed++; retval += printf(format, rle->start); if (rle->count > 1) { retval += printf("-"); retval += printf(format, rle->start + rle->count - 1); } } } return (retval); } /** * @brief Releases all the resources in a list. * * @param rl The resource list to purge.
* * @returns nothing */ void resource_list_purge(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) bus_release_resource(rman_get_device(rle->res), rle->type, rle->rid, rle->res); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } device_t bus_generic_add_child(device_t dev, u_int order, const char *name, int unit) { return (device_add_child_ordered(dev, order, name, unit)); } /** * @brief Helper function for implementing DEVICE_PROBE() * * This function can be used to help implement the DEVICE_PROBE() for * a bus (i.e. a device which has other devices attached to it). It * calls the DEVICE_IDENTIFY() method of each driver in the device's * devclass. */ int bus_generic_probe(device_t dev) { bus_identify_children(dev); return (0); } /** * @brief Ask drivers to add child devices of the given device. * * This function allows drivers for child devices of a bus to identify * child devices and add them as children of the given device. NB: * The driver for @param dev must implement the BUS_ADD_CHILD method. * * @param dev the parent device */ void bus_identify_children(device_t dev) { devclass_t dc = dev->devclass; driverlink_t dl; TAILQ_FOREACH(dl, &dc->drivers, link) { /* * If this driver's pass is too high, then ignore it. * For most drivers in the default pass, this will * never be true. For early-pass drivers they will * only call the identify routines of eligible drivers * when this routine is called. Drivers for later * passes should have their identify routines called * on early-pass buses during BUS_NEW_PASS(). */ if (dl->pass > bus_current_pass) continue; DEVICE_IDENTIFY(dl->driver, dev); } } /** * @brief Helper function for implementing DEVICE_ATTACH() * * This function can be used to help implement the DEVICE_ATTACH() for * a bus. It calls device_probe_and_attach() for each of the device's * children. */ int bus_generic_attach(device_t dev) { bus_attach_children(dev); return (0); } /** * @brief Probe and attach all children of the given device * * This function attempts to attach a device driver to each unattached * child of the given device using device_probe_and_attach(). If an * individual child fails to attach this function continues attaching * other children. * * @param dev the parent device */ void bus_attach_children(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { device_probe_and_attach(child); } } /** * @brief Helper function for delaying attaching children * * Many buses can't run transactions on the bus which children need to probe and * attach until after interrupts and/or timers are running. This function * delays their attach until interrupts and timers are enabled. */ void bus_delayed_attach_children(device_t dev) { /* Probe and attach the bus children when interrupts are available */ config_intrhook_oneshot((ich_func_t)bus_attach_children, dev); } /** * @brief Helper function for implementing DEVICE_DETACH() * * This function can be used to help implement the DEVICE_DETACH() for - * a bus. It calls device_detach() for each of the device's - * children. + * a bus. It detaches and deletes all children. If an individual + * child fails to detach, this function stops and returns an error. 
+ * + * @param dev the parent device + * + * @retval 0 success + * @retval non-zero a device would not detach */ int bus_generic_detach(device_t dev) { int error; error = bus_detach_children(dev); if (error != 0) return (error); - return (0); + return (device_delete_children(dev)); } /** * @brief Detach drivers from all children of a device * * This function attempts to detach a device driver from each attached * child of the given device using device_detach(). If an individual * child fails to detach this function stops and returns an error. * NB: Children that were successfully detached are not re-attached if * an error occurs. * * @param dev the parent device * * @retval 0 success * @retval non-zero a device would not detach */ int bus_detach_children(device_t dev) { device_t child; int error; /* * Detach children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((error = device_detach(child)) != 0) return (error); } return (0); } /** * @brief Helper function for implementing DEVICE_SHUTDOWN() * * This function can be used to help implement the DEVICE_SHUTDOWN() * for a bus. It calls device_shutdown() for each of the device's * children. */ int bus_generic_shutdown(device_t dev) { device_t child; /* * Shut down children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { device_shutdown(child); } return (0); } /** * @brief Default function for suspending a child device. * * This function is to be used by a bus's DEVICE_SUSPEND_CHILD(). */ int bus_generic_suspend_child(device_t dev, device_t child) { int error; error = DEVICE_SUSPEND(child); if (error == 0) { child->flags |= DF_SUSPENDED; } else { printf("DEVICE_SUSPEND(%s) failed: %d\n", device_get_nameunit(child), error); } return (error); } /** * @brief Default function for resuming a child device. * * This function is to be used by a bus's DEVICE_RESUME_CHILD(). */ int bus_generic_resume_child(device_t dev, device_t child) { DEVICE_RESUME(child); child->flags &= ~DF_SUSPENDED; return (0); } /** * @brief Helper function for implementing DEVICE_SUSPEND() * * This function can be used to help implement the DEVICE_SUSPEND() * for a bus. It calls DEVICE_SUSPEND() for each of the device's * children. If any call to DEVICE_SUSPEND() fails, the suspend * operation is aborted and any devices which were suspended are * resumed immediately by calling their DEVICE_RESUME() methods. */ int bus_generic_suspend(device_t dev) { int error; device_t child; /* * Suspend children in the reverse order. * For most buses all children are equal, so the order does not matter. * Other buses, such as acpi, carefully order their child devices to * express implicit dependencies between them. For such buses it is * safer to bring down devices in the reverse order. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { error = BUS_SUSPEND_CHILD(dev, child); if (error != 0) { child = TAILQ_NEXT(child, link); if (child != NULL) { TAILQ_FOREACH_FROM(child, &dev->children, link) BUS_RESUME_CHILD(dev, child); } return (error); } } return (0); } /** * @brief Helper function for implementing DEVICE_RESUME() * * This function can be used to help implement the DEVICE_RESUME() for * a bus. It calls DEVICE_RESUME() on each of the device's children. 
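 *
 * These helpers are typically wired directly into a bus driver's method
 * table, e.g. (illustrative sketch; the "foo" names are hypothetical):
 *
 *	static device_method_t foo_methods[] = {
 *		DEVMETHOD(device_suspend,	bus_generic_suspend),
 *		DEVMETHOD(device_resume,	bus_generic_resume),
 *		DEVMETHOD_END
 *	};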
*/ int bus_generic_resume(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESUME_CHILD(dev, child); /* if resume fails, there's nothing we can usefully do... */ } return (0); } /** * @brief Helper function for implementing BUS_RESET_POST * * A bus can use this function to implement common operations of * re-attaching or resuming the children after the bus itself was * reset, and after restoring bus-unique state of children. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_post(device_t dev, int flags) { device_t child; int error, error1; error = 0; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESET_POST(dev, child); error1 = (flags & DEVF_RESET_DETACH) != 0 ? device_probe_and_attach(child) : BUS_RESUME_CHILD(dev, child); if (error == 0 && error1 != 0) error = error1; } return (error); } static void bus_helper_reset_prepare_rollback(device_t dev, device_t child, int flags) { child = TAILQ_NEXT(child, link); if (child == NULL) return; TAILQ_FOREACH_FROM(child, &dev->children, link) { BUS_RESET_POST(dev, child); if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } } /** * @brief Helper function for implementing BUS_RESET_PREPARE * * A bus can use this function to implement common operations of * detaching or suspending the children before the bus itself is * reset, and then save bus-unique state of children that must * persist around reset. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_prepare(device_t dev, int flags) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ? device_detach(child) : 0; } else { error = BUS_SUSPEND_CHILD(dev, child); } if (error == 0) { error = BUS_RESET_PREPARE(dev, child); if (error != 0) { if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } } if (error != 0) { bus_helper_reset_prepare_rollback(dev, child, flags); return (error); } } return (0); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the first part of the ascii representation of * @p child, including its name, unit and description (if any - see * device_set_desc()). * * @returns the number of characters printed */ int bus_print_child_header(device_t dev, device_t child) { int retval = 0; if (device_get_desc(child)) { retval += device_printf(child, "<%s>", device_get_desc(child)); } else { retval += printf("%s", device_get_nameunit(child)); } return (retval); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the last part of the ascii representation of * @p child, which consists of the string @c " on " followed by the * name and unit of the @p dev. * * @returns the number of characters printed */ int bus_print_child_footer(device_t dev, device_t child) { return (printf(" on %s\n", device_get_nameunit(dev))); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints out the VM domain for the given device. * * @returns the number of characters printed */ int bus_print_child_domain(device_t dev, device_t child) { int domain; /* No domain?
Don't print anything */ if (BUS_GET_DOMAIN(dev, child, &domain) != 0) return (0); return (printf(" numa-domain %d", domain)); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function simply calls bus_print_child_header() followed by * bus_print_child_footer(). * * @returns the number of characters printed */ int bus_generic_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } /** * @brief Stub function for implementing BUS_READ_IVAR(). * * @returns ENOENT */ int bus_generic_read_ivar(device_t dev, device_t child, int index, uintptr_t * result) { return (ENOENT); } /** * @brief Stub function for implementing BUS_WRITE_IVAR(). * * @returns ENOENT */ int bus_generic_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * @brief Helper function for implementing BUS_GET_PROPERTY(). * * This simply calls the BUS_GET_PROPERTY() method of the parent of @p dev, * until a non-default implementation is found. */ ssize_t bus_generic_get_property(device_t dev, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { if (device_get_parent(dev) != NULL) return (BUS_GET_PROPERTY(device_get_parent(dev), child, propname, propvalue, size, type)); return (-1); } /** * @brief Stub function for implementing BUS_GET_RESOURCE_LIST(). * * @returns NULL */ struct resource_list * bus_generic_get_resource_list(device_t dev, device_t child) { return (NULL); } /** * @brief Helper function for implementing BUS_DRIVER_ADDED(). * * This implementation of BUS_DRIVER_ADDED() simply calls the driver's * DEVICE_IDENTIFY() method to allow it to add new children to the bus * and then calls device_probe_and_attach() for each unattached child. */ void bus_generic_driver_added(device_t dev, driver_t *driver) { device_t child; DEVICE_IDENTIFY(driver, dev); TAILQ_FOREACH(child, &dev->children, link) { if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } } /** * @brief Helper function for implementing BUS_NEW_PASS(). * * This implementation of BUS_NEW_PASS() first calls the identify * routines for any drivers that probe at the current pass. Then it * walks the list of devices for this bus. If a device is already * attached, then it calls BUS_NEW_PASS() on that device. If the * device is not already attached, it attempts to attach a driver to * it. */ void bus_generic_new_pass(device_t dev) { driverlink_t dl; devclass_t dc; device_t child; dc = dev->devclass; TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->pass == bus_current_pass) DEVICE_IDENTIFY(dl->driver, dev); } TAILQ_FOREACH(child, &dev->children, link) { if (child->state >= DS_ATTACHED) BUS_NEW_PASS(child); else if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } } /** * @brief Helper function for implementing BUS_SETUP_INTR(). * * This simple implementation of BUS_SETUP_INTR() simply calls the * BUS_SETUP_INTR() method of the parent of @p dev. */ int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SETUP_INTR(dev->parent, child, irq, flags, filter, intr, arg, cookiep)); return (EINVAL); } /** * @brief Helper function for implementing BUS_TEARDOWN_INTR().
* * This simple implementation of BUS_TEARDOWN_INTR() simply calls the * BUS_TEARDOWN_INTR() method of the parent of @p dev. */ int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_TEARDOWN_INTR(dev->parent, child, irq, cookie)); return (EINVAL); } /** * @brief Helper function for implementing BUS_SUSPEND_INTR(). * * This simple implementation of BUS_SUSPEND_INTR() simply calls the * BUS_SUSPEND_INTR() method of the parent of @p dev. */ int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SUSPEND_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_RESUME_INTR(). * * This simple implementation of BUS_RESUME_INTR() simply calls the * BUS_RESUME_INTR() method of the parent of @p dev. */ int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RESUME_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This simple implementation of BUS_ADJUST_RESOURCE() simply calls the * BUS_ADJUST_RESOURCE() method of the parent of @p dev. */ int bus_generic_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ADJUST_RESOURCE(dev->parent, child, r, start, end)); return (EINVAL); } /* * @brief Helper function for implementing BUS_TRANSLATE_RESOURCE(). * * This simple implementation of BUS_TRANSLATE_RESOURCE() simply calls the * BUS_TRANSLATE_RESOURCE() method of the parent of @p dev. If there is no * parent, no translation happens. */ int bus_generic_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart) { if (dev->parent) return (BUS_TRANSLATE_RESOURCE(dev->parent, type, start, newstart)); *newstart = start; return (0); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This simple implementation of BUS_ALLOC_RESOURCE() simply calls the * BUS_ALLOC_RESOURCE() method of the parent of @p dev. */ struct resource * bus_generic_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ALLOC_RESOURCE(dev->parent, child, type, rid, start, end, count, flags)); return (NULL); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This simple implementation of BUS_RELEASE_RESOURCE() simply calls the * BUS_RELEASE_RESOURCE() method of the parent of @p dev. */ int bus_generic_release_resource(device_t dev, device_t child, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RELEASE_RESOURCE(dev->parent, child, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This simple implementation of BUS_ACTIVATE_RESOURCE() simply calls the * BUS_ACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_activate_resource(device_t dev, device_t child, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. 
*/ if (dev->parent) return (BUS_ACTIVATE_RESOURCE(dev->parent, child, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This simple implementation of BUS_DEACTIVATE_RESOURCE() simply calls the * BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_deactivate_resource(device_t dev, device_t child, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_MAP_RESOURCE(). * * This simple implementation of BUS_MAP_RESOURCE() simply calls the * BUS_MAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *args, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_MAP_RESOURCE(dev->parent, child, r, args, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_UNMAP_RESOURCE(). * * This simple implementation of BUS_UNMAP_RESOURCE() simply calls the * BUS_UNMAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_UNMAP_RESOURCE(dev->parent, child, r, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_BIND_INTR(). * * This simple implementation of BUS_BIND_INTR() simply calls the * BUS_BIND_INTR() method of the parent of @p dev. */ int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_BIND_INTR(dev->parent, child, irq, cpu)); return (EINVAL); } /** * @brief Helper function for implementing BUS_CONFIG_INTR(). * * This simple implementation of BUS_CONFIG_INTR() simply calls the * BUS_CONFIG_INTR() method of the parent of @p dev. */ int bus_generic_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_CONFIG_INTR(dev->parent, irq, trig, pol)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DESCRIBE_INTR(). * * This simple implementation of BUS_DESCRIBE_INTR() simply calls the * BUS_DESCRIBE_INTR() method of the parent of @p dev. */ int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DESCRIBE_INTR(dev->parent, child, irq, cookie, descr)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_CPUS(). * * This simple implementation of BUS_GET_CPUS() simply calls the * BUS_GET_CPUS() method of the parent of @p dev. */ int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_CPUS(dev->parent, child, op, setsize, cpuset)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_DMA_TAG(). * * This simple implementation of BUS_GET_DMA_TAG() simply calls the * BUS_GET_DMA_TAG() method of the parent of @p dev. 
*/ bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_DMA_TAG(dev->parent, child)); return (NULL); } /** * @brief Helper function for implementing BUS_GET_BUS_TAG(). * * This simple implementation of BUS_GET_BUS_TAG() simply calls the * BUS_GET_BUS_TAG() method of the parent of @p dev. */ bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_BUS_TAG(dev->parent, child)); return ((bus_space_tag_t)0); } /** * @brief Helper function for implementing BUS_GET_RESOURCE(). * * This implementation of BUS_GET_RESOURCE() uses the * resource_list_find() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * search. */ int bus_generic_rl_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct resource_list * rl = NULL; struct resource_list_entry * rle = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); rle = resource_list_find(rl, type, rid); if (!rle) return (ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } /** * @brief Helper function for implementing BUS_SET_RESOURCE(). * * This implementation of BUS_SET_RESOURCE() uses the * resource_list_add() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ int bus_generic_rl_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); resource_list_add(rl, type, rid, start, (start + count - 1), count); return (0); } /** * @brief Helper function for implementing BUS_DELETE_RESOURCE(). * * This implementation of BUS_DELETE_RESOURCE() uses the * resource_list_delete() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ void bus_generic_rl_delete_resource(device_t dev, device_t child, int type, int rid) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return; resource_list_delete(rl, type, rid); return; } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() uses the * resource_list_release() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ int bus_generic_rl_release_resource(device_t dev, device_t child, struct resource *r) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, r)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); return (resource_list_release(rl, dev, child, r)); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() uses the * resource_list_alloc() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. 
*/ struct resource * bus_generic_rl_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (NULL); return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() allocates a * resource from a resource manager. It uses BUS_GET_RMAN() * to obtain the resource manager. */ struct resource * bus_generic_rman_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *r; struct rman *rm; rm = BUS_GET_RMAN(dev, type, flags); if (rm == NULL) return (NULL); r = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (r == NULL) return (NULL); rman_set_rid(r, *rid); rman_set_type(r, type); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, r) != 0) { rman_release_resource(r); return (NULL); } } return (r); } /** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This implementation of BUS_ADJUST_RESOURCE() adjusts resources only * if they were allocated from the resource manager returned by * BUS_GET_RMAN(). */ int bus_generic_rman_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; rm = BUS_GET_RMAN(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (ENXIO); if (!rman_is_region_manager(r, rm)) return (EINVAL); return (rman_adjust_resource(r, start, end)); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() releases resources * allocated by bus_generic_rman_alloc_resource. */ int bus_generic_rman_release_resource(device_t dev, device_t child, struct resource *r) { #ifdef INVARIANTS struct rman *rm; #endif int error; #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, rman_get_type(r), rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, r); if (error != 0) return (error); } return (rman_release_resource(r)); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This implementation of BUS_ACTIVATE_RESOURCE() activates resources * allocated by bus_generic_rman_alloc_resource. 
*/ int bus_generic_rman_activate_resource(device_t dev, device_t child, struct resource *r) { struct resource_map map; #ifdef INVARIANTS struct rman *rm; #endif int error, type; type = rman_get_type(r); #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif error = rman_activate_resource(r); if (error != 0) return (error); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: if ((rman_get_flags(r) & RF_UNMAPPED) == 0) { error = BUS_MAP_RESOURCE(dev, child, r, NULL, &map); if (error != 0) break; rman_set_mapping(r, &map); } break; #ifdef INTRNG case SYS_RES_IRQ: error = intr_activate_irq(child, r); break; #endif } if (error != 0) rman_deactivate_resource(r); return (error); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This implementation of BUS_DEACTIVATE_RESOURCE() deactivates * resources allocated by bus_generic_rman_alloc_resource. */ int bus_generic_rman_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct resource_map map; #ifdef INVARIANTS struct rman *rm; #endif int error, type; type = rman_get_type(r); #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif error = rman_deactivate_resource(r); if (error != 0) return (error); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: if ((rman_get_flags(r) & RF_UNMAPPED) == 0) { rman_get_mapping(r, &map); BUS_UNMAP_RESOURCE(dev, child, r, &map); } break; #ifdef INTRNG case SYS_RES_IRQ: intr_deactivate_irq(child, r); break; #endif } return (0); } /** * @brief Helper function for implementing BUS_CHILD_PRESENT(). * * This simple implementation of BUS_CHILD_PRESENT() simply calls the * BUS_CHILD_PRESENT() method of the parent of @p dev. */ int bus_generic_child_present(device_t dev, device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(dev), dev)); } /** * @brief Helper function for implementing BUS_GET_DOMAIN(). * * This simple implementation of BUS_GET_DOMAIN() calls the * BUS_GET_DOMAIN() method of the parent of @p dev. If @p dev * does not have a parent, the function fails with ENOENT. */ int bus_generic_get_domain(device_t dev, device_t child, int *domain) { if (dev->parent) return (BUS_GET_DOMAIN(dev->parent, dev, domain)); return (ENOENT); } /** * @brief Helper function to implement normal BUS_GET_DEVICE_PATH() * * This function knows how to (a) pass the request up the tree if there's * a parent and (b) Knows how to supply a FreeBSD locator. * * @param bus bus in the walk up the tree * @param child leaf node to print information about * @param locator BUS_LOCATOR_xxx string for locator * @param sb Buffer to print information into */ int bus_generic_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { int rv = 0; device_t parent; /* * We don't recurse on ACPI since either we know the handle for the * device or we don't. And if we're in the generic routine, we don't * have a ACPI override. All other locators build up a path by having * their parents create a path and then adding the path element for this * node. That's why we recurse with parent, bus rather than the typical * parent, child: each spot in the tree is independent of what our child * will do with this path. 
*/ parent = device_get_parent(bus); if (parent != NULL && strcmp(locator, BUS_LOCATOR_ACPI) != 0) { rv = BUS_GET_DEVICE_PATH(parent, bus, locator, sb); } if (strcmp(locator, BUS_LOCATOR_FREEBSD) == 0) { if (rv == 0) { sbuf_printf(sb, "/%s", device_get_nameunit(child)); } return (rv); } /* * Don't know what to do. So assume we do nothing. Not sure that's * the right thing, but keeps us from having a big list here. */ return (0); } /** * @brief Helper function for implementing BUS_RESCAN(). * * This null implementation of BUS_RESCAN() always fails to indicate * the bus does not support rescanning. */ int bus_null_rescan(device_t dev) { return (ENODEV); } /* * Some convenience functions to make it easier for drivers to use the * resource-management functions. All these really do is hide the * indirection through the parent's method table, making for slightly * less-wordy code. In the future, it might make sense for this code * to maintain some sort of a list of resources allocated by each device. */ int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) res[i] = NULL; for (i = 0; rs[i].type != -1; i++) { res[i] = bus_alloc_resource_any(dev, rs[i].type, &rs[i].rid, rs[i].flags); if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) { bus_release_resources(dev, rs, res); return (ENXIO); } } return (0); } void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) if (res[i] != NULL) { bus_release_resource( dev, rs[i].type, rs[i].rid, res[i]); res[i] = NULL; } } /** * @brief Wrapper function for BUS_ALLOC_RESOURCE(). * * This function simply calls the BUS_ALLOC_RESOURCE() method of the * parent of @p dev. */ struct resource * bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (dev->parent == NULL) return (NULL); res = BUS_ALLOC_RESOURCE(dev->parent, dev, type, rid, start, end, count, flags); return (res); } /** * @brief Wrapper function for BUS_ADJUST_RESOURCE(). * * This function simply calls the BUS_ADJUST_RESOURCE() method of the * parent of @p dev. */ int bus_adjust_resource(device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { if (dev->parent == NULL) return (EINVAL); return (BUS_ADJUST_RESOURCE(dev->parent, dev, r, start, end)); } int bus_adjust_resource_old(device_t dev, int type __unused, struct resource *r, rman_res_t start, rman_res_t end) { return (bus_adjust_resource(dev, r, start, end)); } /** * @brief Wrapper function for BUS_TRANSLATE_RESOURCE(). * * This function simply calls the BUS_TRANSLATE_RESOURCE() method of the * parent of @p dev. */ int bus_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart) { if (dev->parent == NULL) return (EINVAL); return (BUS_TRANSLATE_RESOURCE(dev->parent, type, start, newstart)); } /** * @brief Wrapper function for BUS_ACTIVATE_RESOURCE(). * * This function simply calls the BUS_ACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_activate_resource(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, r)); } int bus_activate_resource_old(device_t dev, int type, int rid, struct resource *r) { return (bus_activate_resource(dev, r)); } /** * @brief Wrapper function for BUS_DEACTIVATE_RESOURCE(). 
* * This function simply calls the BUS_DEACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_deactivate_resource(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, r)); } int bus_deactivate_resource_old(device_t dev, int type, int rid, struct resource *r) { return (bus_deactivate_resource(dev, r)); } /** * @brief Wrapper function for BUS_MAP_RESOURCE(). * * This function simply calls the BUS_MAP_RESOURCE() method of the * parent of @p dev. */ int bus_map_resource(device_t dev, struct resource *r, struct resource_map_request *args, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_MAP_RESOURCE(dev->parent, dev, r, args, map)); } int bus_map_resource_old(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { return (bus_map_resource(dev, r, args, map)); } /** * @brief Wrapper function for BUS_UNMAP_RESOURCE(). * * This function simply calls the BUS_UNMAP_RESOURCE() method of the * parent of @p dev. */ int bus_unmap_resource(device_t dev, struct resource *r, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_UNMAP_RESOURCE(dev->parent, dev, r, map)); } int bus_unmap_resource_old(device_t dev, int type, struct resource *r, struct resource_map *map) { return (bus_unmap_resource(dev, r, map)); } /** * @brief Wrapper function for BUS_RELEASE_RESOURCE(). * * This function simply calls the BUS_RELEASE_RESOURCE() method of the * parent of @p dev. */ int bus_release_resource(device_t dev, struct resource *r) { int rv; if (dev->parent == NULL) return (EINVAL); rv = BUS_RELEASE_RESOURCE(dev->parent, dev, r); return (rv); } int bus_release_resource_old(device_t dev, int type, int rid, struct resource *r) { return (bus_release_resource(dev, r)); } /** * @brief Wrapper function for BUS_SETUP_INTR(). * * This function simply calls the BUS_SETUP_INTR() method of the * parent of @p dev. */ int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { int error; if (dev->parent == NULL) return (EINVAL); error = BUS_SETUP_INTR(dev->parent, dev, r, flags, filter, handler, arg, cookiep); if (error != 0) return (error); if (handler != NULL && !(flags & INTR_MPSAFE)) device_printf(dev, "[GIANT-LOCKED]\n"); return (0); } /** * @brief Wrapper function for BUS_TEARDOWN_INTR(). * * This function simply calls the BUS_TEARDOWN_INTR() method of the * parent of @p dev. */ int bus_teardown_intr(device_t dev, struct resource *r, void *cookie) { if (dev->parent == NULL) return (EINVAL); return (BUS_TEARDOWN_INTR(dev->parent, dev, r, cookie)); } /** * @brief Wrapper function for BUS_SUSPEND_INTR(). * * This function simply calls the BUS_SUSPEND_INTR() method of the * parent of @p dev. */ int bus_suspend_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_SUSPEND_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_RESUME_INTR(). * * This function simply calls the BUS_RESUME_INTR() method of the * parent of @p dev. */ int bus_resume_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_RESUME_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_BIND_INTR(). * * This function simply calls the BUS_BIND_INTR() method of the * parent of @p dev. 
*/ int bus_bind_intr(device_t dev, struct resource *r, int cpu) { if (dev->parent == NULL) return (EINVAL); return (BUS_BIND_INTR(dev->parent, dev, r, cpu)); } /** * @brief Wrapper function for BUS_DESCRIBE_INTR(). * * This function first formats the requested description into a * temporary buffer and then calls the BUS_DESCRIBE_INTR() method of * the parent of @p dev. */ int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) { va_list ap; char descr[MAXCOMLEN + 1]; if (dev->parent == NULL) return (EINVAL); va_start(ap, fmt); vsnprintf(descr, sizeof(descr), fmt, ap); va_end(ap); return (BUS_DESCRIBE_INTR(dev->parent, dev, irq, cookie, descr)); } /** * @brief Wrapper function for BUS_SET_RESOURCE(). * * This function simply calls the BUS_SET_RESOURCE() method of the * parent of @p dev. */ int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count) { return (BUS_SET_RESOURCE(device_get_parent(dev), dev, type, rid, start, count)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev. */ int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp) { return (BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, startp, countp)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the start value. */ rman_res_t bus_get_resource_start(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (start); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the count value. */ rman_res_t bus_get_resource_count(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (count); } /** * @brief Wrapper function for BUS_DELETE_RESOURCE(). * * This function simply calls the BUS_DELETE_RESOURCE() method of the * parent of @p dev. */ void bus_delete_resource(device_t dev, int type, int rid) { BUS_DELETE_RESOURCE(device_get_parent(dev), dev, type, rid); } /** * @brief Wrapper function for BUS_CHILD_PRESENT(). * * This function simply calls the BUS_CHILD_PRESENT() method of the * parent of @p dev. */ int bus_child_present(device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(child), child)); } /** * @brief Wrapper function for BUS_CHILD_PNPINFO(). * * This function simply calls the BUS_CHILD_PNPINFO() method of the parent of @p * dev. */ int bus_child_pnpinfo(device_t child, struct sbuf *sb) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (0); return (BUS_CHILD_PNPINFO(parent, child, sb)); } /** * @brief Generic implementation that does nothing for bus_child_pnpinfo * * This function has the right signature and returns 0 since the sbuf is passed * to us to append to. */ int bus_generic_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { return (0); } /** * @brief Wrapper function for BUS_CHILD_LOCATION(). * * This function simply calls the BUS_CHILD_LOCATION() method of the parent of * @p dev. 
*/ int bus_child_location(device_t child, struct sbuf *sb) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (0); return (BUS_CHILD_LOCATION(parent, child, sb)); } /** * @brief Generic implementation that does nothing for bus_child_location * * This function has the right signature and returns 0 since the sbuf is passed * to us to append to. */ int bus_generic_child_location(device_t dev, device_t child, struct sbuf *sb) { return (0); } /** * @brief Wrapper function for BUS_GET_CPUS(). * * This function simply calls the BUS_GET_CPUS() method of the * parent of @p dev. */ int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (EINVAL); return (BUS_GET_CPUS(parent, dev, op, setsize, cpuset)); } /** * @brief Wrapper function for BUS_GET_DMA_TAG(). * * This function simply calls the BUS_GET_DMA_TAG() method of the * parent of @p dev. */ bus_dma_tag_t bus_get_dma_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (NULL); return (BUS_GET_DMA_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_BUS_TAG(). * * This function simply calls the BUS_GET_BUS_TAG() method of the * parent of @p dev. */ bus_space_tag_t bus_get_bus_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return ((bus_space_tag_t)0); return (BUS_GET_BUS_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_DOMAIN(). * * This function simply calls the BUS_GET_DOMAIN() method of the * parent of @p dev. */ int bus_get_domain(device_t dev, int *domain) { return (BUS_GET_DOMAIN(device_get_parent(dev), dev, domain)); } /* Resume all devices and then notify userland that we're up again. */ static int root_resume(device_t dev) { int error; error = bus_generic_resume(dev); if (error == 0) { devctl_notify("kernel", "power", "resume", NULL); } return (error); } static int root_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += printf("\n"); return (retval); } static int root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* * If an interrupt mapping gets to here something bad has happened. */ panic("root_setup_intr"); } /* * If we get here, assume that the device is permanent and really is * present in the system. Removable bus drivers are expected to intercept * this call long before it gets here. We return -1 so that drivers that * really care can check vs -1 or some ERRNO returned higher in the food * chain. */ static int root_child_present(device_t dev, device_t child) { return (-1); } static int root_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { switch (op) { case INTR_CPUS: /* Default to returning the set of all CPUs. 
*/ if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = all_cpus; return (0); default: return (EINVAL); } } static kobj_method_t root_methods[] = { /* Device interface */ KOBJMETHOD(device_shutdown, bus_generic_shutdown), KOBJMETHOD(device_suspend, bus_generic_suspend), KOBJMETHOD(device_resume, root_resume), /* Bus interface */ KOBJMETHOD(bus_print_child, root_print_child), KOBJMETHOD(bus_read_ivar, bus_generic_read_ivar), KOBJMETHOD(bus_write_ivar, bus_generic_write_ivar), KOBJMETHOD(bus_setup_intr, root_setup_intr), KOBJMETHOD(bus_child_present, root_child_present), KOBJMETHOD(bus_get_cpus, root_get_cpus), KOBJMETHOD_END }; static driver_t root_driver = { "root", root_methods, 1, /* no softc */ }; device_t root_bus; devclass_t root_devclass; static int root_bus_module_handler(module_t mod, int what, void* arg) { switch (what) { case MOD_LOAD: TAILQ_INIT(&bus_data_devices); kobj_class_compile((kobj_class_t) &root_driver); root_bus = make_device(NULL, "root", 0); root_bus->desc = "System root bus"; kobj_init((kobj_t) root_bus, (kobj_class_t) &root_driver); root_bus->driver = &root_driver; root_bus->state = DS_ATTACHED; root_devclass = devclass_find_internal("root", NULL, FALSE); devctl2_init(); return (0); case MOD_SHUTDOWN: device_shutdown(root_bus); return (0); default: return (EOPNOTSUPP); } return (0); } static moduledata_t root_bus_mod = { "rootbus", root_bus_module_handler, NULL }; DECLARE_MODULE(rootbus, root_bus_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); /** * @brief Automatically configure devices * * This function begins the autoconfiguration process by calling * device_probe_and_attach() for each child of the @c root0 device. */ void root_bus_configure(void) { PDEBUG((".")); /* Eventually this will be split up, but this is sufficient for now. */ bus_set_pass(BUS_PASS_DEFAULT); } /** * @brief Module handler for registering device drivers * * This module handler is used to automatically register device * drivers when modules are loaded. If @p what is MOD_LOAD, it calls * devclass_add_driver() for the driver described by the * driver_module_data structure pointed to by @p arg. */ int driver_module_handler(module_t mod, int what, void *arg) { struct driver_module_data *dmd; devclass_t bus_devclass; kobj_class_t driver; int error, pass; dmd = (struct driver_module_data *)arg; bus_devclass = devclass_find_internal(dmd->dmd_busname, NULL, TRUE); error = 0; switch (what) { case MOD_LOAD: if (dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); pass = dmd->dmd_pass; driver = dmd->dmd_driver; PDEBUG(("Loading module: driver %s on bus %s (pass %d)", DRIVERNAME(driver), dmd->dmd_busname, pass)); error = devclass_add_driver(bus_devclass, driver, pass, dmd->dmd_devclass); break; case MOD_UNLOAD: PDEBUG(("Unloading module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_delete_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; case MOD_QUIESCE: PDEBUG(("Quiesce module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_quiesce_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; default: error = EOPNOTSUPP; break; } return (error); } /** * @brief Enumerate all hinted devices for this bus. * * Walks through the hints for this bus and calls the bus_hinted_child * routine for each one it finds.
It searches first for the specific * bus that's being probed for hinted children (eg isa0), and then for * generic children (eg isa). * * @param dev bus device to enumerate */ void bus_enumerate_hinted_children(device_t bus) { int i; const char *dname, *busname; int dunit; /* * enumerate all devices on the specific bus */ busname = device_get_nameunit(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); /* * and all the generic ones. */ busname = device_get_name(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); } #ifdef BUS_DEBUG /* the _short versions avoid iteration by not calling anything that prints * more than oneliners. I love oneliners. */ static void print_device_short(device_t dev, int indent) { if (!dev) return; indentprintf(("device %d: <%s> %sparent,%schildren,%s%s%s%s%s,%sivars,%ssoftc,busy=%d\n", dev->unit, dev->desc, (dev->parent? "":"no "), (TAILQ_EMPTY(&dev->children)? "no ":""), (dev->flags&DF_ENABLED? "enabled,":"disabled,"), (dev->flags&DF_FIXEDCLASS? "fixed,":""), (dev->flags&DF_WILDCARD? "wildcard,":""), (dev->flags&DF_DESCMALLOCED? "descmalloced,":""), (dev->flags&DF_SUSPENDED? "suspended,":""), (dev->ivars? "":"no "), (dev->softc? "":"no "), dev->busy)); } static void print_device(device_t dev, int indent) { if (!dev) return; print_device_short(dev, indent); indentprintf(("Parent:\n")); print_device_short(dev->parent, indent+1); indentprintf(("Driver:\n")); print_driver_short(dev->driver, indent+1); indentprintf(("Devclass:\n")); print_devclass_short(dev->devclass, indent+1); } void print_device_tree_short(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device_short(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree_short(child, indent+1); } } void print_device_tree(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree(child, indent+1); } } static void print_driver_short(driver_t *driver, int indent) { if (!driver) return; indentprintf(("driver %s: softc size = %zd\n", driver->name, driver->size)); } static void print_driver(driver_t *driver, int indent) { if (!driver) return; print_driver_short(driver, indent); } static void print_driver_list(driver_list_t drivers, int indent) { driverlink_t driver; TAILQ_FOREACH(driver, &drivers, link) { print_driver(driver->driver, indent); } } static void print_devclass_short(devclass_t dc, int indent) { if ( !dc ) return; indentprintf(("devclass %s: max units = %d\n", dc->name, dc->maxunit)); } static void print_devclass(devclass_t dc, int indent) { int i; if ( !dc ) return; print_devclass_short(dc, indent); indentprintf(("Drivers:\n")); print_driver_list(dc->drivers, indent+1); indentprintf(("Devices:\n")); for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) print_device(dc->devices[i], indent+1); } void print_devclass_list_short(void) { devclass_t dc; printf("Short listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass_short(dc, 0); } } void print_devclass_list(void) { devclass_t dc; printf("Full listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass(dc, 0); } } #endif /* * User-space access to the device tree. 
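 * (Userland consumers such as libdevinfo read these nodes to reconstruct
 * the device tree.)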
* * We implement a small set of nodes: * * hw.bus Single integer read method to obtain the * current generation count. * hw.bus.devices Reads the entire device tree in flat space. * hw.bus.rman Resource manager interface * * We might like to add the ability to scan devclasses and/or drivers to * determine what else is currently loaded/available. */ static int sysctl_bus_info(SYSCTL_HANDLER_ARGS) { struct u_businfo ubus; ubus.ub_version = BUS_USER_VERSION; ubus.ub_generation = bus_data_generation; return (SYSCTL_OUT(req, &ubus, sizeof(ubus))); } SYSCTL_PROC(_hw_bus, OID_AUTO, info, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_bus_info, "S,u_businfo", "bus-related data"); static int sysctl_devices(SYSCTL_HANDLER_ARGS) { struct sbuf sb; int *name = (int *)arg1; u_int namelen = arg2; int index; device_t dev; struct u_device *udev; int error; if (namelen != 2) return (EINVAL); if (bus_data_generation_check(name[0])) return (EINVAL); index = name[1]; /* * Scan the list of devices, looking for the requested index. */ TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (index-- == 0) break; } if (dev == NULL) return (ENOENT); /* * Populate the return item, careful not to overflow the buffer. */ udev = malloc(sizeof(*udev), M_BUS, M_WAITOK | M_ZERO); udev->dv_handle = (uintptr_t)dev; udev->dv_parent = (uintptr_t)dev->parent; udev->dv_devflags = dev->devflags; udev->dv_flags = dev->flags; udev->dv_state = dev->state; sbuf_new(&sb, udev->dv_fields, sizeof(udev->dv_fields), SBUF_FIXEDLEN); if (dev->nameunit != NULL) sbuf_cat(&sb, dev->nameunit); sbuf_putc(&sb, '\0'); if (dev->desc != NULL) sbuf_cat(&sb, dev->desc); sbuf_putc(&sb, '\0'); if (dev->driver != NULL) sbuf_cat(&sb, dev->driver->name); sbuf_putc(&sb, '\0'); bus_child_pnpinfo(dev, &sb); sbuf_putc(&sb, '\0'); bus_child_location(dev, &sb); sbuf_putc(&sb, '\0'); error = sbuf_finish(&sb); if (error == 0) error = SYSCTL_OUT(req, udev, sizeof(*udev)); sbuf_delete(&sb); free(udev, M_BUS); return (error); } SYSCTL_NODE(_hw_bus, OID_AUTO, devices, CTLFLAG_RD | CTLFLAG_NEEDGIANT, sysctl_devices, "system device tree"); int bus_data_generation_check(int generation) { if (generation != bus_data_generation) return (1); /* XXX generate optimised lists here? */ return (0); } void bus_data_generation_update(void) { atomic_add_int(&bus_data_generation, 1); } int bus_free_resource(device_t dev, int type, struct resource *r) { if (r == NULL) return (0); return (bus_release_resource(dev, type, rman_get_rid(r), r)); } device_t device_lookup_by_name(const char *name) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (dev->nameunit != NULL && strcmp(dev->nameunit, name) == 0) return (dev); } return (NULL); } /* * /dev/devctl2 implementation. The existing /dev/devctl device has * implicit semantics on open, so it could not be reused for this. * Another option would be to call this /dev/bus? */ static int find_device(struct devreq *req, device_t *devp) { device_t dev; /* * First, ensure that the name is nul terminated. */ if (memchr(req->dr_name, '\0', sizeof(req->dr_name)) == NULL) return (EINVAL); /* * Second, try to find an attached device whose name matches * 'name'. */ dev = device_lookup_by_name(req->dr_name); if (dev != NULL) { *devp = dev; return (0); } /* Finally, give device enumerators a chance. 
*/ dev = NULL; EVENTHANDLER_DIRECT_INVOKE(dev_lookup, req->dr_name, &dev); if (dev == NULL) return (ENOENT); *devp = dev; return (0); } static bool driver_exists(device_t bus, const char *driver) { devclass_t dc; for (dc = bus->devclass; dc != NULL; dc = dc->parent) { if (devclass_find_driver_internal(dc, driver) != NULL) return (true); } return (false); } static void device_gen_nomatch(device_t dev) { device_t child; if (dev->flags & DF_NEEDNOMATCH && dev->state == DS_NOTPRESENT) { device_handle_nomatch(dev); } dev->flags &= ~DF_NEEDNOMATCH; TAILQ_FOREACH(child, &dev->children, link) { device_gen_nomatch(child); } } static void device_do_deferred_actions(void) { devclass_t dc; driverlink_t dl; /* * Walk through the devclasses to find all the drivers we've tagged as * deferred during the freeze and call the driver added routines. They * have already been added to the lists in the background, so the driver * added routines that trigger a probe will have all the right bidders * for the probe auction. */ TAILQ_FOREACH(dc, &devclasses, link) { TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->flags & DL_DEFERRED_PROBE) { devclass_driver_added(dc, dl->driver); dl->flags &= ~DL_DEFERRED_PROBE; } } } /* * We also defer no-match events during a freeze. Walk the tree and * generate all the pent-up events that are still relevant. */ device_gen_nomatch(root_bus); bus_data_generation_update(); } static int device_get_path(device_t dev, const char *locator, struct sbuf *sb) { device_t parent; int error; KASSERT(sb != NULL, ("sb is NULL")); parent = device_get_parent(dev); if (parent == NULL) { error = sbuf_putc(sb, '/'); } else { error = BUS_GET_DEVICE_PATH(parent, dev, locator, sb); if (error == 0) { error = sbuf_error(sb); if (error == 0 && sbuf_len(sb) <= 1) error = EIO; } } sbuf_finish(sb); return (error); } static int devctl2_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct devreq *req; device_t dev; int error, old; /* Locate the device to control. */ bus_topo_lock(); req = (struct devreq *)data; switch (cmd) { case DEV_ATTACH: case DEV_DETACH: case DEV_ENABLE: case DEV_DISABLE: case DEV_SUSPEND: case DEV_RESUME: case DEV_SET_DRIVER: case DEV_CLEAR_DRIVER: case DEV_RESCAN: case DEV_DELETE: case DEV_RESET: error = priv_check(td, PRIV_DRIVER); if (error == 0) error = find_device(req, &dev); break; case DEV_FREEZE: case DEV_THAW: error = priv_check(td, PRIV_DRIVER); break; case DEV_GET_PATH: error = find_device(req, &dev); break; default: error = ENOTTY; break; } if (error) { bus_topo_unlock(); return (error); } /* Perform the requested operation. */ switch (cmd) { case DEV_ATTACH: if (device_is_attached(dev)) error = EBUSY; else if (!device_is_enabled(dev)) error = ENXIO; else error = device_probe_and_attach(dev); break; case DEV_DETACH: if (!device_is_attached(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } error = device_detach(dev); break; case DEV_ENABLE: if (device_is_enabled(dev)) { error = EBUSY; break; } /* * If the device has been probed but not attached (e.g. * when it has been disabled by a loader hint), just * attach the device rather than doing a full probe. */ device_enable(dev); if (dev->devclass != NULL) { /* * If the device was disabled via a hint, clear * the hint. */ if (resource_disabled(dev->devclass->name, dev->unit)) resource_unset_value(dev->devclass->name, dev->unit, "disabled"); /* Allow any drivers to rebid. 
*/ if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); } error = device_probe_and_attach(dev); break; case DEV_DISABLE: if (!device_is_enabled(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } /* * Force DF_FIXEDCLASS on around detach to preserve * the existing name. */ old = dev->flags; dev->flags |= DF_FIXEDCLASS; error = device_detach(dev); if (!(old & DF_FIXEDCLASS)) dev->flags &= ~DF_FIXEDCLASS; if (error == 0) device_disable(dev); break; case DEV_SUSPEND: if (device_is_suspended(dev)) { error = EBUSY; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_SUSPEND_CHILD(device_get_parent(dev), dev); break; case DEV_RESUME: if (!device_is_suspended(dev)) { error = EINVAL; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_RESUME_CHILD(device_get_parent(dev), dev); break; case DEV_SET_DRIVER: { devclass_t dc; char driver[128]; error = copyinstr(req->dr_data, driver, sizeof(driver), NULL); if (error) break; if (driver[0] == '\0') { error = EINVAL; break; } if (dev->devclass != NULL && strcmp(driver, dev->devclass->name) == 0) /* XXX: Could possibly force DF_FIXEDCLASS on? */ break; /* * Scan drivers for this device's bus looking for at * least one matching driver. */ if (dev->parent == NULL) { error = EINVAL; break; } if (!driver_exists(dev->parent, driver)) { error = ENOENT; break; } dc = devclass_create(driver); if (dc == NULL) { error = ENOMEM; break; } /* Detach device if necessary. */ if (device_is_attached(dev)) { if (req->dr_flags & DEVF_SET_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } /* Clear any previously-fixed device class and unit. */ if (dev->flags & DF_FIXEDCLASS) devclass_delete_device(dev->devclass, dev); dev->flags |= DF_WILDCARD; dev->unit = DEVICE_UNIT_ANY; /* Force the new device class. 
*/ error = devclass_add_device(dc, dev); if (error) break; dev->flags |= DF_FIXEDCLASS; error = device_probe_and_attach(dev); break; } case DEV_CLEAR_DRIVER: if (!(dev->flags & DF_FIXEDCLASS)) { error = 0; break; } if (device_is_attached(dev)) { if (req->dr_flags & DEVF_CLEAR_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } dev->flags &= ~DF_FIXEDCLASS; dev->flags |= DF_WILDCARD; devclass_delete_device(dev->devclass, dev); error = device_probe_and_attach(dev); break; case DEV_RESCAN: if (!device_is_attached(dev)) { error = ENXIO; break; } error = BUS_RESCAN(dev); break; case DEV_DELETE: { device_t parent; parent = device_get_parent(dev); if (parent == NULL) { error = EINVAL; break; } if (!(req->dr_flags & DEVF_FORCE_DELETE)) { if (bus_child_present(dev) != 0) { error = EBUSY; break; } } error = device_delete_child(parent, dev); break; } case DEV_FREEZE: if (device_frozen) error = EBUSY; else device_frozen = true; break; case DEV_THAW: if (!device_frozen) error = EBUSY; else { device_do_deferred_actions(); device_frozen = false; } break; case DEV_RESET: if ((req->dr_flags & ~(DEVF_RESET_DETACH)) != 0) { error = EINVAL; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_RESET_CHILD(device_get_parent(dev), dev, req->dr_flags); break; case DEV_GET_PATH: { struct sbuf *sb; char locator[64]; ssize_t len; error = copyinstr(req->dr_buffer.buffer, locator, sizeof(locator), NULL); if (error != 0) break; sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND | SBUF_INCLUDENUL /* | SBUF_WAITOK */); error = device_get_path(dev, locator, sb); if (error == 0) { len = sbuf_len(sb); if (req->dr_buffer.length < len) { error = ENAMETOOLONG; } else { error = copyout(sbuf_data(sb), req->dr_buffer.buffer, len); } req->dr_buffer.length = len; } sbuf_delete(sb); break; } } bus_topo_unlock(); return (error); } static struct cdevsw devctl2_cdevsw = { .d_version = D_VERSION, .d_ioctl = devctl2_ioctl, .d_name = "devctl2", }; static void devctl2_init(void) { make_dev_credf(MAKEDEV_ETERNAL, &devctl2_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0644, "devctl2"); } /* * For maintaining device 'at' location info to avoid recomputing it */ struct device_location_node { const char *dln_locator; const char *dln_path; TAILQ_ENTRY(device_location_node) dln_link; }; typedef TAILQ_HEAD(device_location_list, device_location_node) device_location_list_t; struct device_location_cache { device_location_list_t dlc_list; }; /* * Location cache for wired devices. 
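 *
 * A minimal usage sketch; the "at" hint string below is a hypothetical
 * example of the "<locator>:<path>" form this cache resolves:
 *
 *	cache = dev_wired_cache_init();
 *	if (dev_wired_cache_match(cache, dev, "FreeBSD:/pci0/em0"))
 *		... the hint is wired to this device ...
 *	dev_wired_cache_fini(cache);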
*/ device_location_cache_t * dev_wired_cache_init(void) { device_location_cache_t *dcp; dcp = malloc(sizeof(*dcp), M_BUS, M_WAITOK | M_ZERO); TAILQ_INIT(&dcp->dlc_list); return (dcp); } void dev_wired_cache_fini(device_location_cache_t *dcp) { struct device_location_node *dln, *tdln; TAILQ_FOREACH_SAFE(dln, &dcp->dlc_list, dln_link, tdln) { free(dln, M_BUS); } free(dcp, M_BUS); } static struct device_location_node * dev_wired_cache_lookup(device_location_cache_t *dcp, const char *locator) { struct device_location_node *dln; TAILQ_FOREACH(dln, &dcp->dlc_list, dln_link) { if (strcmp(locator, dln->dln_locator) == 0) return (dln); } return (NULL); } static struct device_location_node * dev_wired_cache_add(device_location_cache_t *dcp, const char *locator, const char *path) { struct device_location_node *dln; size_t loclen, pathlen; loclen = strlen(locator) + 1; pathlen = strlen(path) + 1; dln = malloc(sizeof(*dln) + loclen + pathlen, M_BUS, M_WAITOK | M_ZERO); dln->dln_locator = (char *)(dln + 1); memcpy(__DECONST(char *, dln->dln_locator), locator, loclen); dln->dln_path = dln->dln_locator + loclen; memcpy(__DECONST(char *, dln->dln_path), path, pathlen); TAILQ_INSERT_HEAD(&dcp->dlc_list, dln, dln_link); return (dln); } bool dev_wired_cache_match(device_location_cache_t *dcp, device_t dev, const char *at) { struct sbuf *sb; const char *cp; char locator[32]; int error, len; struct device_location_node *res; cp = strchr(at, ':'); if (cp == NULL) return (false); len = cp - at; if (len > sizeof(locator) - 1) /* Skip too long locator */ return (false); memcpy(locator, at, len); locator[len] = '\0'; cp++; error = 0; /* maybe cache this inside device_t and look that up, but not yet */ res = dev_wired_cache_lookup(dcp, locator); if (res == NULL) { sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND | SBUF_INCLUDENUL | SBUF_NOWAIT); if (sb != NULL) { error = device_get_path(dev, locator, sb); if (error == 0) { res = dev_wired_cache_add(dcp, locator, sbuf_data(sb)); } sbuf_delete(sb); } } if (error != 0 || res == NULL || res->dln_path == NULL) return (false); return (strcmp(res->dln_path, cp) == 0); } static struct device_prop_elm * device_prop_find(device_t dev, const char *name) { struct device_prop_elm *e; bus_topo_assert(); LIST_FOREACH(e, &dev->props, link) { if (strcmp(name, e->name) == 0) return (e); } return (NULL); } int device_set_prop(device_t dev, const char *name, void *val, device_prop_dtr_t dtr, void *dtr_ctx) { struct device_prop_elm *e, *e1; bus_topo_assert(); e = device_prop_find(dev, name); if (e != NULL) goto found; e1 = malloc(sizeof(*e), M_BUS, M_WAITOK); e = device_prop_find(dev, name); if (e != NULL) { free(e1, M_BUS); goto found; } e1->name = name; e1->val = val; e1->dtr = dtr; e1->dtr_ctx = dtr_ctx; LIST_INSERT_HEAD(&dev->props, e1, link); return (0); found: LIST_REMOVE(e, link); if (e->dtr != NULL) e->dtr(dev, name, e->val, e->dtr_ctx); e->val = val; e->dtr = dtr; e->dtr_ctx = dtr_ctx; LIST_INSERT_HEAD(&dev->props, e, link); return (EEXIST); } int device_get_prop(device_t dev, const char *name, void **valp) { struct device_prop_elm *e; bus_topo_assert(); e = device_prop_find(dev, name); if (e == NULL) return (ENOENT); *valp = e->val; return (0); } int device_clear_prop(device_t dev, const char *name) { struct device_prop_elm *e; bus_topo_assert(); e = device_prop_find(dev, name); if (e == NULL) return (ENOENT); LIST_REMOVE(e, link); if (e->dtr != NULL) e->dtr(dev, e->name, e->val, e->dtr_ctx); free(e, M_BUS); return (0); } static void device_destroy_props(device_t dev) { struct 
static void
device_destroy_props(device_t dev)
{
	struct device_prop_elm *e;

	bus_topo_assert();

	while ((e = LIST_FIRST(&dev->props)) != NULL) {
		LIST_REMOVE_HEAD(&dev->props, link);
		if (e->dtr != NULL)
			e->dtr(dev, e->name, e->val, e->dtr_ctx);
		free(e, M_BUS);
	}
}

void
device_clear_prop_alldev(const char *name)
{
	device_t dev;

	TAILQ_FOREACH(dev, &bus_data_devices, devlink) {
		device_clear_prop(dev, name);
	}
}

/*
 * APIs to manage deprecation and obsolescence.
 */
static int obsolete_panic = 0;
SYSCTL_INT(_debug, OID_AUTO, obsolete_panic, CTLFLAG_RWTUN, &obsolete_panic, 0,
    "Panic when obsolete features are used (0 = never, 1 = if obsolete, "
    "2 = if deprecated)");

static void
gone_panic(int major, int running, const char *msg)
{
	switch (obsolete_panic) {
	case 0:
		return;
	case 1:
		if (running < major)
			return;
		/* FALLTHROUGH */
	default:
		panic("%s", msg);
	}
}

void
_gone_in(int major, const char *msg)
{
	gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg);
	if (P_OSREL_MAJOR(__FreeBSD_version) >= major)
		printf("Obsolete code will be removed soon: %s\n", msg);
	else
		printf("Deprecated code (to be removed in FreeBSD %d): %s\n",
		    major, msg);
}

void
_gone_in_dev(device_t dev, int major, const char *msg)
{
	gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg);
	if (P_OSREL_MAJOR(__FreeBSD_version) >= major)
		device_printf(dev,
		    "Obsolete code will be removed soon: %s\n", msg);
	else
		device_printf(dev,
		    "Deprecated code (to be removed in FreeBSD %d): %s\n",
		    major, msg);
}

#ifdef DDB
DB_SHOW_COMMAND(device, db_show_device)
{
	device_t dev;

	if (!have_addr)
		return;

	dev = (device_t)addr;

	db_printf("name: %s\n", device_get_nameunit(dev));
	db_printf(" driver: %s\n", DRIVERNAME(dev->driver));
	db_printf(" class: %s\n", DEVCLANAME(dev->devclass));
	db_printf(" addr: %p\n", dev);
	db_printf(" parent: %p\n", dev->parent);
	db_printf(" softc: %p\n", dev->softc);
	db_printf(" ivars: %p\n", dev->ivars);
}

DB_SHOW_ALL_COMMAND(devices, db_show_all_devices)
{
	device_t dev;

	TAILQ_FOREACH(dev, &bus_data_devices, devlink) {
		db_show_device((db_expr_t)dev, true, count, modif);
	}
}
#endif
diff --git a/sys/sys/param.h b/sys/sys/param.h
index f675dd024ee0..d2aad1ff98a1 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -1,381 +1,381 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_PARAM_H_
#define _SYS_PARAM_H_

#include

#define BSD	199506		/* System version (year & month). */
#define BSD4_3	1
#define BSD4_4	1

/*
 * __FreeBSD_version numbers are documented in the Porter's Handbook.
 * If you bump the version for any reason, you should update the documentation
 * there.
 * Currently this lives here in the doc/ repository:
 *
 *	documentation/content/en/books/porters-handbook/versions/_index.adoc
 *
 * Encoding:	<major><two digit minor>Rxx
 *		'R' is in the range 0 to 4 if this is a release branch or
 *		X.0-CURRENT before releng/X.0 is created, otherwise 'R' is
 *		in the range 5 to 9.
 * Short hand: MMmmXXX
 *
 * __FreeBSD_version is bumped every time there's a change in the base system
 * that's noteworthy. A noteworthy change is any change which changes the
 * kernel's KBI in -CURRENT, one that changes some detail about the system that
 * external software (or the ports system) would want to know about, one that
 * adds a system call, one that adds or deletes a shipped library, a security
 * fix, or similar change not specifically noted here. Bumps should be limited
 * to one per day / a couple per week except for security fixes.
 *
 * The approved way to obtain this from a shell script is:
 *	awk '/^\#define[[:space:]]*__FreeBSD_version/ {print $3}'
 * Other methods to parse this file may work, but are not guaranteed against
 * future changes. The above script works back to FreeBSD 3.x when this macro
 * was introduced. This number is propagated to other places needing it that
 * cannot include sys/param.h and should only be updated here.
 */
#undef __FreeBSD_version
-#define __FreeBSD_version 1500029
+#define __FreeBSD_version 1500030
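/*
 * Illustrative only, not part of this change: how downstream code typically
 * consumes the number bumped above, paired with a plain __FreeBSD__ guard as
 * the __FreeBSD_kernel__ note below recommends.  Tying a feature to 1500030
 * is a made-up example, not something this particular bump guarantees.
 *
 *	#if defined(__FreeBSD__) && __FreeBSD_version >= 1500030
 *	(build against the newer base-system interface)
 *	#else
 *	(fall back to the older interface)
 *	#endif
 */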
/*
 * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
 * which by definition is always true on FreeBSD. This macro is also defined
 * on other systems that use the kernel of FreeBSD, such as GNU/kFreeBSD.
 *
 * It is tempting to use this macro in userland code when we want to enable
 * kernel-specific routines, and in fact it's fine to do this in code that
 * is part of FreeBSD itself. However, be aware that as presence of this
 * macro is still not widespread (e.g. older FreeBSD versions, 3rd party
 * compilers, etc), it is STRONGLY DISCOURAGED to check for this macro in
 * external applications without also checking for __FreeBSD__ as an
 * alternative.
 */
#undef __FreeBSD_kernel__
#define __FreeBSD_kernel__

#if defined(_KERNEL) || defined(_WANT_P_OSREL)
#define P_OSREL_SIGWAIT			700000
#define P_OSREL_SIGSEGV			700004
#define P_OSREL_MAP_ANON		800104
#define P_OSREL_MAP_FSTRICT		1100036
#define P_OSREL_SHUTDOWN_ENOTCONN	1100077
#define P_OSREL_MAP_GUARD		1200035
#define P_OSREL_WRFSBASE		1200041
#define P_OSREL_CK_CYLGRP		1200046
#define P_OSREL_VMTOTAL64		1200054
#define P_OSREL_CK_SUPERBLOCK		1300000
#define P_OSREL_CK_INODE		1300005
#define P_OSREL_POWERPC_NEW_AUX_ARGS	1300070
#define P_OSREL_TIDPID			1400079
#define P_OSREL_ARM64_SPSR		1400084

#define P_OSREL_MAJOR(x)		((x) / 100000)
#endif

#ifndef LOCORE
#include
#endif

/*
 * Machine-independent constants (some used in following include files).
 * Redefined constants are from POSIX 1003.1 limits file.
 *
 * MAXCOMLEN should be >= sizeof(ac_comm) (see )
 */
#include

#define MAXCOMLEN	19		/* max command name remembered */
#define MAXINTERP	PATH_MAX	/* max interpreter file name length */
#define MAXLOGNAME	33		/* max login name length (incl. NUL) */
#define MAXUPRC		CHILD_MAX	/* max simultaneous processes */
#define NCARGS		ARG_MAX		/* max bytes for an exec function */
#define NGROUPS		(NGROUPS_MAX+1)	/* max number groups */
#define NOFILE		OPEN_MAX	/* max open files per process */
#define NOGROUP		65535		/* marker for empty group set member */
#define MAXHOSTNAMELEN	256		/* max hostname size */
#define SPECNAMELEN	255		/* max length of devicename */

/* More types and definitions used throughout the kernel. */
#ifdef _KERNEL
#include
#include
#ifndef LOCORE
#include
#include
#endif

#ifndef FALSE
#define FALSE	0
#endif
#ifndef TRUE
#define TRUE	1
#endif
#endif

#if !defined(_KERNEL) && !defined(_STANDALONE) && !defined(LOCORE)
/* Signals. */
#include
#endif

/* Machine type dependent parameters. */
#include
#ifndef _KERNEL
#include
#include
#endif

#ifndef DEV_BSHIFT
#define DEV_BSHIFT	9		/* log2(DEV_BSIZE) */
#endif
#define DEV_BSIZE (1<>PAGE_SHIFT)
#endif

/*
 * btodb() is messy and perhaps slow because `bytes' may be an off_t. We
 * want to shift an unsigned type to avoid sign extension and we don't
 * want to widen `bytes' unnecessarily. Assume that the result fits in
 * a daddr_t.
 */
#ifndef btodb
#define btodb(bytes)			/* calculates (bytes / DEV_BSIZE) */ \
	(sizeof (bytes) > sizeof(long) \
	 ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \
	 : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT))
#endif

#ifndef dbtob
#define dbtob(db)			/* calculates (db * DEV_BSIZE) */ \
	((off_t)(db) << DEV_BSHIFT)
#endif

#define PRIMASK		0x0ff
#define PCATCH		0x100	/* OR'd with pri for tsleep to check signals */
#define PDROP		0x200	/* OR'd with pri to stop re-entry of interlock mutex */
#define PNOLOCK		0x400	/* OR'd with pri to allow sleeping w/o a lock */
#define PRILASTFLAG	0x400	/* Last flag defined above */

#define NZERO	0		/* default "nice" */

#define NBBY	8		/* number of bits in a byte */
#define NBPW	sizeof(int)	/* number of bytes per word (integer) */

#define CMASK	022		/* default file mask: S_IWGRP|S_IWOTH */

#define NODEV	(dev_t)(-1)	/* non-existent device */

/*
 * File system parameters and macros.
 *
 * MAXBSIZE - Filesystems are made out of blocks of at most MAXBSIZE bytes
 *	per block. MAXBSIZE may be made larger without effecting
 *	any existing filesystems as long as it does not exceed MAXPHYS,
 *	and may be made smaller at the risk of not being able to use
 *	filesystems which require a block size exceeding MAXBSIZE.
 *
 * MAXBCACHEBUF - Maximum size of a buffer in the buffer cache. This must
 *	be >= MAXBSIZE and can be set differently for different
 *	architectures by defining it in .
 *	Making this larger allows NFS to do larger reads/writes.
 *
 * BKVASIZE - Nominal buffer space per buffer, in bytes. BKVASIZE is the
 *	minimum KVM memory reservation the kernel is willing to make.
 *	Filesystems can of course request smaller chunks. Actual
 *	backing memory uses a chunk size of a page (PAGE_SIZE).
 *	The default value here can be overridden on a per-architecture
 *	basis by defining it in .
 *
 *	If you make BKVASIZE too small you risk seriously fragmenting
 *	the buffer KVM map which may slow things down a bit. If you
 *	make it too big the kernel will not be able to optimally use
 *	the KVM memory reserved for the buffer cache and will wind
 *	up with too-few buffers.
 *
 *	The default is 16384, roughly 2x the block size used by a
 *	normal UFS filesystem.
 */
#define MAXBSIZE	65536		/* must be power of 2 */
#ifndef MAXBCACHEBUF
#define MAXBCACHEBUF	MAXBSIZE	/* must be a power of 2 >= MAXBSIZE */
#endif
#ifndef BKVASIZE
#define BKVASIZE	16384		/* must be power of 2 */
#endif
#define BKVAMASK	(BKVASIZE-1)

/*
 * MAXPATHLEN defines the longest permissible path length after expanding
 * symbolic links. It is used to allocate a temporary buffer from the buffer
 * pool in which to do the name expansion, hence should be a power of two,
 * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the
 * maximum number of symbolic links that may be expanded in a path name.
 * It should be set high enough to allow all legitimate uses, but halt
 * infinite loops reasonably quickly.
 */
#define MAXPATHLEN	PATH_MAX
#define MAXSYMLINKS	32

/* Bit map related macros. */
#define setbit(a,i)	(((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY))
#define clrbit(a,i)	(((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY)))
#define isset(a,i) \
	(((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY)))
#define isclr(a,i) \
	((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0)

/* Macros for counting and rounding. */
#ifndef howmany
#define howmany(x, y)	(((x)+((y)-1))/(y))
#endif
#define nitems(x)	(sizeof((x)) / sizeof((x)[0]))
#define rounddown(x, y)	(((x)/(y))*(y))
#define rounddown2(x, y) __align_down(x, y)		/* if y is power of two */
#define roundup(x, y)	((((x)+((y)-1))/(y))*(y))	/* to any y */
#define roundup2(x, y)	__align_up(x, y)		/* if y is powers of two */
#define powerof2(x)	((((x)-1)&(x))==0)

/* Macros for min/max. */
#define MIN(a,b)	(((a)<(b))?(a):(b))
#define MAX(a,b)	(((a)>(b))?(a):(b))
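/*
 * Illustrative only, not part of this change: worked examples of the bitmap,
 * counting, rounding, and min/max helpers defined above.  The values shown
 * are chosen for the example; "map" is a hypothetical 64-bit bitmap.
 *
 *	unsigned char map[howmany(64, NBBY)];	(8 bytes for 64 bits)
 *
 *	memset(map, 0, sizeof(map));
 *	setbit(map, 9);		(sets bit 1 of map[1])
 *	if (isset(map, 9) && !isclr(map, 9))
 *		clrbit(map, 9);
 *
 *	roundup(10, 3) == 12		(any multiple)
 *	roundup2(10, 8) == 16		(power-of-two multiple)
 *	rounddown2(10, 8) == 8
 *	powerof2(64) != 0
 *	nitems(map) == 8		(array element count)
 *	MIN(3, 7) == 3, MAX(3, 7) == 7
 */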
#ifdef _KERNEL
/*
 * Basic byte order function prototypes for non-inline functions.
 */
#ifndef LOCORE
#ifndef _BYTEORDER_PROTOTYPED
#define _BYTEORDER_PROTOTYPED
__BEGIN_DECLS
__uint32_t	htonl(__uint32_t);
__uint16_t	htons(__uint16_t);
__uint32_t	ntohl(__uint32_t);
__uint16_t	ntohs(__uint16_t);
__END_DECLS
#endif
#endif

#ifndef _BYTEORDER_FUNC_DEFINED
#define _BYTEORDER_FUNC_DEFINED
#define htonl(x)	__htonl(x)
#define htons(x)	__htons(x)
#define ntohl(x)	__ntohl(x)
#define ntohs(x)	__ntohs(x)
#endif /* !_BYTEORDER_FUNC_DEFINED */
#endif /* _KERNEL */

/*
 * Scale factor for scaled integers used to count %cpu time and load avgs.
 *
 * The number of CPU `tick's that map to a unique `%age' can be expressed
 * by the formula (1 / (2 ^ (FSHIFT - 11))). Since the intermediate
 * calculation is done with 64-bit precision, the maximum load average that can
 * be calculated is approximately 2^32 / FSCALE.
 *
 * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age',
 * FSHIFT must be at least 11. This gives a maximum load avg of 2 million.
 */
#define FSHIFT	11		/* bits to right of fixed binary point */
#define FSCALE	(1<<FSHIFT)

#define dbtoc(db)			/* calculates devblks to pages */ \
	((db + (ctodb(1) - 1)) >> (PAGE_SHIFT - DEV_BSHIFT))

#define ctodb(db)			/* calculates pages to devblks */ \
	((db) << (PAGE_SHIFT - DEV_BSHIFT))

/*
 * Old spelling of __containerof().
 */
#define member2struct(s, m, x)						\
	((struct s *)(void *)((char *)(x) - offsetof(struct s, m)))

/*
 * Access a variable length array that has been declared as a fixed
 * length array.
 */
#define __PAST_END(array, offset) (((__typeof__(*(array)) *)(array))[offset])

#endif /* _SYS_PARAM_H_ */
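/*
 * Illustrative only, not part of this change: how the FSCALE fixed-point
 * format defined above is consumed.  With FSCALE == 1 << 11 == 2048, a load
 * average of 1.50 is stored as 1.50 * FSCALE = 3072, and the maximum
 * representable average is roughly 2^32 / 2048 = 2097152, the "2 million"
 * mentioned in the comment.  The sample value below is made up; converting
 * it back for display:
 *
 *	uint32_t fixpt = 3072;		(a kernel-provided ldavg sample)
 *
 *	printf("%u.%02u\n", fixpt / FSCALE,
 *	    (fixpt % FSCALE) * 100 / FSCALE);	(prints "1.50")
 */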