diff --git a/sys/arm/nvidia/tegra_sdhci.c b/sys/arm/nvidia/tegra_sdhci.c index 3bd9a9bb4632..fa6810b655c6 100644 --- a/sys/arm/nvidia/tegra_sdhci.c +++ b/sys/arm/nvidia/tegra_sdhci.c @@ -1,472 +1,476 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * SDHCI driver glue for NVIDIA Tegra family * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "sdhci_if.h" #include "opt_mmccam.h" /* Tegra SDHOST controller vendor register definitions */ #define SDMMC_VENDOR_CLOCK_CNTRL 0x100 #define VENDOR_CLOCK_CNTRL_CLK_SHIFT 8 #define VENDOR_CLOCK_CNTRL_CLK_MASK 0xFF #define SDMMC_VENDOR_SYS_SW_CNTRL 0x104 #define SDMMC_VENDOR_CAP_OVERRIDES 0x10C #define SDMMC_VENDOR_BOOT_CNTRL 0x110 #define SDMMC_VENDOR_BOOT_ACK_TIMEOUT 0x114 #define SDMMC_VENDOR_BOOT_DAT_TIMEOUT 0x118 #define SDMMC_VENDOR_DEBOUNCE_COUNT 0x11C #define SDMMC_VENDOR_MISC_CNTRL 0x120 #define VENDOR_MISC_CTRL_ENABLE_SDR104 0x8 #define VENDOR_MISC_CTRL_ENABLE_SDR50 0x10 #define VENDOR_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 #define VENDOR_MISC_CTRL_ENABLE_DDR50 0x200 #define SDMMC_MAX_CURRENT_OVERRIDE 0x124 #define SDMMC_MAX_CURRENT_OVERRIDE_HI 0x128 #define SDMMC_VENDOR_CLK_GATE_HYSTERESIS_COUNT 0x1D0 #define SDMMC_VENDOR_PHWRESET_VAL0 0x1D4 #define SDMMC_VENDOR_PHWRESET_VAL1 0x1D8 #define SDMMC_VENDOR_PHWRESET_VAL2 0x1DC #define SDMMC_SDMEMCOMPPADCTRL_0 0x1E0 #define SDMMC_AUTO_CAL_CONFIG 0x1E4 #define SDMMC_AUTO_CAL_INTERVAL 0x1E8 #define SDMMC_AUTO_CAL_STATUS 0x1EC #define SDMMC_SDMMC_MCCIF_FIFOCTRL 0x1F4 #define SDMMC_TIMEOUT_WCOAL_SDMMC 0x1F8 /* Compatible devices. 
*/ static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-sdhci", 1}, {"nvidia,tegra210-sdhci", 1}, {NULL, 0}, }; struct tegra_sdhci_softc { device_t dev; struct resource * mem_res; struct resource * irq_res; void * intr_cookie; u_int quirks; /* Chip specific quirks */ u_int caps; /* If we override SDHCI_CAPABILITIES */ uint32_t max_clk; /* Max possible freq */ clk_t clk; hwreset_t reset; gpio_pin_t gpio_power; struct sdhci_fdt_gpio *gpio; int force_card_present; struct sdhci_slot slot; }; static inline uint32_t RD4(struct tegra_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static uint8_t tegra_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); return (bus_read_1(sc->mem_res, off)); } static uint16_t tegra_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); return (bus_read_2(sc->mem_res, off)); } static uint32_t tegra_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct tegra_sdhci_softc *sc; uint32_t val32; sc = device_get_softc(dev); val32 = bus_read_4(sc->mem_res, off); /* Force the card-present state if necessary. */ if (off == SDHCI_PRESENT_STATE && sc->force_card_present) val32 |= SDHCI_CARD_PRESENT; return (val32); } static void tegra_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res, off, data, count); } static void tegra_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_1(sc->mem_res, off, val); } static void tegra_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_2(sc->mem_res, off, val); } static void tegra_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->mem_res, off, val); } static void tegra_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res, off, data, count); } static void tegra_sdhci_intr(void *arg) { struct tegra_sdhci_softc *sc = arg; sdhci_generic_intr(&sc->slot); RD4(sc, SDHCI_INT_STATUS); } static int tegra_sdhci_get_ro(device_t brdev, device_t reqdev) { struct tegra_sdhci_softc *sc = device_get_softc(brdev); return (sdhci_fdt_gpio_get_readonly(sc->gpio)); } static bool tegra_sdhci_get_card_present(device_t dev, struct sdhci_slot *slot) { struct tegra_sdhci_softc *sc = device_get_softc(dev); return (sdhci_fdt_gpio_get_present(sc->gpio)); } static int tegra_sdhci_probe(device_t dev) { struct tegra_sdhci_softc *sc; phandle_t node; pcell_t cid; const struct ofw_compat_data *cd; sc = device_get_softc(dev); if (!ofw_bus_status_okay(dev)) return (ENXIO); cd = ofw_bus_search_compatible(dev, compat_data); if (cd->ocd_data == 0) return (ENXIO); node = ofw_bus_get_node(dev); device_set_desc(dev, "Tegra SDHCI controller"); /* Allow dts to patch quirks, slots, and max-frequency. 
*/ if ((OF_getencprop(node, "quirks", &cid, sizeof(cid))) > 0) sc->quirks = cid; if ((OF_getencprop(node, "max-frequency", &cid, sizeof(cid))) > 0) sc->max_clk = cid; return (BUS_PROBE_DEFAULT); } static int tegra_sdhci_attach(device_t dev) { struct tegra_sdhci_softc *sc; int rid, rv; uint64_t freq; phandle_t node, prop; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "cannot allocate memory window\n"); rv = ENXIO; goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "cannot allocate interrupt\n"); rv = ENXIO; goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, 0, "sdhci", &sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot get 'sdhci' reset\n"); goto fail; } rv = hwreset_assert(sc->reset); if (rv != 0) { device_printf(dev, "Cannot reset 'sdhci' reset\n"); goto fail; } gpio_pin_get_by_ofw_property(sc->dev, node, "power-gpios", &sc->gpio_power); if (OF_hasprop(node, "assigned-clocks")) { rv = clk_set_assigned(sc->dev, node); if (rv != 0) { device_printf(dev, "Cannot set assigned clocks\n"); goto fail; } } rv = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get clock\n"); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock\n"); goto fail; } rv = clk_set_freq(sc->clk, 48000000, CLK_SET_ROUND_DOWN); if (rv != 0) { device_printf(dev, "Cannot set clock\n"); } rv = clk_get_freq(sc->clk, &freq); if (rv != 0) { device_printf(dev, "Cannot get clock frequency\n"); goto fail; } DELAY(4000); rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(dev, "Cannot unreset 'sdhci' reset\n"); goto fail; } if (bootverbose) device_printf(dev, " Base MMC clock: %jd\n", (uintmax_t)freq); /* Fill slot information. */ sc->max_clk = (int)freq; sc->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_MISSING_CAPS; /* Limit real slot capabilities. */ sc->caps = RD4(sc, SDHCI_CAPABILITIES); if (OF_getencprop(node, "bus-width", &prop, sizeof(prop)) > 0) { sc->caps &= ~(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); switch (prop) { case 8: sc->caps |= MMC_CAP_8_BIT_DATA; /* FALLTHROUGH */ case 4: sc->caps |= MMC_CAP_4_BIT_DATA; break; case 1: break; default: device_printf(dev, "Bad bus-width value %u\n", prop); break; } } if (OF_hasprop(node, "non-removable")) sc->force_card_present = 1; /* * Clear clock field, so SDHCI driver uses supplied frequency. 
	 * in sc->slot.max_clk
	 */
	sc->caps &= ~SDHCI_CLOCK_V3_BASE_MASK;

	sc->slot.quirks = sc->quirks;
	sc->slot.max_clk = sc->max_clk;
	sc->slot.caps = sc->caps;

	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, tegra_sdhci_intr, sc, &sc->intr_cookie)) {
		device_printf(dev, "cannot setup interrupt handler\n");
		rv = ENXIO;
		goto fail;
	}

	rv = sdhci_init_slot(dev, &sc->slot, 0);
	if (rv != 0) {
		goto fail;
	}

	sc->gpio = sdhci_fdt_gpio_setup(sc->dev, &sc->slot);

	bus_identify_children(dev);
	bus_attach_children(dev);

	sdhci_start_slot(&sc->slot);

	return (0);

fail:
	if (sc->gpio != NULL)
		sdhci_fdt_gpio_teardown(sc->gpio);
	if (sc->intr_cookie != NULL)
		bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie);
	if (sc->gpio_power != NULL)
		gpio_pin_release(sc->gpio_power);
	if (sc->clk != NULL)
		clk_release(sc->clk);
	if (sc->reset != NULL)
		hwreset_release(sc->reset);
	if (sc->irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);

	return (rv);
}

static int
tegra_sdhci_detach(device_t dev)
{
	struct tegra_sdhci_softc *sc = device_get_softc(dev);
	struct sdhci_slot *slot = &sc->slot;
+	int error;
+
+	error = bus_detach_children(dev);
+	if (error != 0)
+		return (error);

-	bus_detach_children(dev);
	sdhci_fdt_gpio_teardown(sc->gpio);
	clk_release(sc->clk);
	bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie);
	bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res),
	    sc->irq_res);
	sdhci_cleanup_slot(slot);
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(sc->mem_res), sc->mem_res);

	return (0);
}

static device_method_t tegra_sdhci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, tegra_sdhci_probe),
	DEVMETHOD(device_attach, tegra_sdhci_attach),
	DEVMETHOD(device_detach, tegra_sdhci_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar),
	DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios),
	DEVMETHOD(mmcbr_request, sdhci_generic_request),
	DEVMETHOD(mmcbr_get_ro, tegra_sdhci_get_ro),
	DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host),
	DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host),

	/* SDHCI registers accessors */
	DEVMETHOD(sdhci_read_1, tegra_sdhci_read_1),
	DEVMETHOD(sdhci_read_2, tegra_sdhci_read_2),
	DEVMETHOD(sdhci_read_4, tegra_sdhci_read_4),
	DEVMETHOD(sdhci_read_multi_4, tegra_sdhci_read_multi_4),
	DEVMETHOD(sdhci_write_1, tegra_sdhci_write_1),
	DEVMETHOD(sdhci_write_2, tegra_sdhci_write_2),
	DEVMETHOD(sdhci_write_4, tegra_sdhci_write_4),
	DEVMETHOD(sdhci_write_multi_4, tegra_sdhci_write_multi_4),
	DEVMETHOD(sdhci_get_card_present, tegra_sdhci_get_card_present),

	DEVMETHOD_END
};

static DEFINE_CLASS_0(sdhci, tegra_sdhci_driver, tegra_sdhci_methods,
    sizeof(struct tegra_sdhci_softc));
DRIVER_MODULE(sdhci_tegra, simplebus, tegra_sdhci_driver, NULL, NULL);
SDHCI_DEPEND(sdhci_tegra);
#ifndef MMCCAM
MMC_DECLARE_BRIDGE(sdhci);
#endif
diff --git a/sys/arm/ti/cpsw/if_cpsw.c b/sys/arm/ti/cpsw/if_cpsw.c
index 9fc3f414477b..ea8f9c18490e 100644
--- a/sys/arm/ti/cpsw/if_cpsw.c
+++ b/sys/arm/ti/cpsw/if_cpsw.c
@@ -1,3009 +1,3012 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Damjan Marion
 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * TI Common Platform Ethernet Switch (CPSW) Driver * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. * * This controller is documented in the AM335x Technical Reference * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. * * It is basically a single Ethernet port (port 0) wired internally to * a 3-port store-and-forward switch connected to two independent * "sliver" controllers (port 1 and port 2). You can operate the * controller in a variety of different ways by suitably configuring * the slivers and the Address Lookup Engine (ALE) that routes packets * between the ports. * * This code was developed and tested on a BeagleBone with * an AM335x SoC. */ #include #include "opt_cpsw.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "syscon_if.h" #include #include #include #include #include #include #ifdef CPSW_ETHERSWITCH #include #include "etherswitch_if.h" #endif #include "if_cpswreg.h" #include "if_cpswvar.h" #include "miibus_if.h" /* Device probe/attach/detach. */ static int cpsw_probe(device_t); static int cpsw_attach(device_t); static int cpsw_detach(device_t); static int cpswp_probe(device_t); static int cpswp_attach(device_t); static int cpswp_detach(device_t); static phandle_t cpsw_get_node(device_t, device_t); /* Device Init/shutdown. */ static int cpsw_shutdown(device_t); static void cpswp_init(void *); static void cpswp_init_locked(void *); static void cpswp_stop_locked(struct cpswp_softc *); /* Device Suspend/Resume. */ static int cpsw_suspend(device_t); static int cpsw_resume(device_t); /* Ioctl. */ static int cpswp_ioctl(if_t, u_long command, caddr_t data); static int cpswp_miibus_readreg(device_t, int phy, int reg); static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); static void cpswp_miibus_statchg(device_t); /* Send/Receive packets. */ static void cpsw_intr_rx(void *arg); static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); static void cpsw_rx_enqueue(struct cpsw_softc *); static void cpswp_start(if_t); static void cpsw_intr_tx(void *); static void cpswp_tx_enqueue(struct cpswp_softc *); static int cpsw_tx_dequeue(struct cpsw_softc *); /* Misc interrupts and watchdog. 
*/ static void cpsw_intr_rx_thresh(void *); static void cpsw_intr_misc(void *); static void cpswp_tick(void *); static void cpswp_ifmedia_sts(if_t, struct ifmediareq *); static int cpswp_ifmedia_upd(if_t); static void cpsw_tx_watchdog(void *); /* ALE support */ static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); static void cpsw_ale_dump_table(struct cpsw_softc *); static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, int); static int cpswp_ale_update_addresses(struct cpswp_softc *, int); /* Statistics and sysctls. */ static void cpsw_add_sysctls(struct cpsw_softc *); static void cpsw_stats_collect(struct cpsw_softc *); static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); #ifdef CPSW_ETHERSWITCH static etherswitch_info_t *cpsw_getinfo(device_t); static int cpsw_getport(device_t, etherswitch_port_t *); static int cpsw_setport(device_t, etherswitch_port_t *); static int cpsw_getconf(device_t, etherswitch_conf_t *); static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *); static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *); static int cpsw_readreg(device_t, int); static int cpsw_writereg(device_t, int, int); static int cpsw_readphy(device_t, int, int); static int cpsw_writephy(device_t, int, int, int); #endif /* * Arbitrary limit on number of segments in an mbuf to be transmitted. * Packets with more segments than this will be defragmented before * they are queued. */ #define CPSW_TXFRAGS 16 /* Shared resources. */ static device_method_t cpsw_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpsw_probe), DEVMETHOD(device_attach, cpsw_attach), DEVMETHOD(device_detach, cpsw_detach), DEVMETHOD(device_shutdown, cpsw_shutdown), DEVMETHOD(device_suspend, cpsw_suspend), DEVMETHOD(device_resume, cpsw_resume), /* Bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, cpsw_get_node), #ifdef CPSW_ETHERSWITCH /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, cpsw_getinfo), DEVMETHOD(etherswitch_readreg, cpsw_readreg), DEVMETHOD(etherswitch_writereg, cpsw_writereg), DEVMETHOD(etherswitch_readphyreg, cpsw_readphy), DEVMETHOD(etherswitch_writephyreg, cpsw_writephy), DEVMETHOD(etherswitch_getport, cpsw_getport), DEVMETHOD(etherswitch_setport, cpsw_setport), DEVMETHOD(etherswitch_getvgroup, cpsw_getvgroup), DEVMETHOD(etherswitch_setvgroup, cpsw_setvgroup), DEVMETHOD(etherswitch_getconf, cpsw_getconf), #endif DEVMETHOD_END }; static driver_t cpsw_driver = { "cpswss", cpsw_methods, sizeof(struct cpsw_softc), }; DRIVER_MODULE(cpswss, simplebus, cpsw_driver, 0, 0); /* Port/Slave resources. 
*/ static device_method_t cpswp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpswp_probe), DEVMETHOD(device_attach, cpswp_attach), DEVMETHOD(device_detach, cpswp_detach), /* MII interface */ DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), DEVMETHOD_END }; static driver_t cpswp_driver = { "cpsw", cpswp_methods, sizeof(struct cpswp_softc), }; #ifdef CPSW_ETHERSWITCH DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, 0, 0); MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1); #endif DRIVER_MODULE(cpsw, cpswss, cpswp_driver, 0, 0); DRIVER_MODULE(miibus, cpsw, miibus_driver, 0, 0); MODULE_DEPEND(cpsw, ether, 1, 1, 1); MODULE_DEPEND(cpsw, miibus, 1, 1, 1); #ifdef CPSW_ETHERSWITCH static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS]; #endif static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; static struct resource_spec irq_res_spec[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct { void (*cb)(void *); } cpsw_intr_cb[] = { { cpsw_intr_rx_thresh }, { cpsw_intr_rx }, { cpsw_intr_tx }, { cpsw_intr_misc }, }; /* Number of entries here must match size of stats * array in struct cpswp_softc. */ static struct cpsw_stat { int reg; char *oid; } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { {0x00, "GoodRxFrames"}, {0x04, "BroadcastRxFrames"}, {0x08, "MulticastRxFrames"}, {0x0C, "PauseRxFrames"}, {0x10, "RxCrcErrors"}, {0x14, "RxAlignErrors"}, {0x18, "OversizeRxFrames"}, {0x1c, "RxJabbers"}, {0x20, "ShortRxFrames"}, {0x24, "RxFragments"}, {0x30, "RxOctets"}, {0x34, "GoodTxFrames"}, {0x38, "BroadcastTxFrames"}, {0x3c, "MulticastTxFrames"}, {0x40, "PauseTxFrames"}, {0x44, "DeferredTxFrames"}, {0x48, "CollisionsTxFrames"}, {0x4c, "SingleCollisionTxFrames"}, {0x50, "MultipleCollisionTxFrames"}, {0x54, "ExcessiveCollisions"}, {0x58, "LateCollisions"}, {0x5c, "TxUnderrun"}, {0x60, "CarrierSenseErrors"}, {0x64, "TxOctets"}, {0x68, "RxTx64OctetFrames"}, {0x6c, "RxTx65to127OctetFrames"}, {0x70, "RxTx128to255OctetFrames"}, {0x74, "RxTx256to511OctetFrames"}, {0x78, "RxTx512to1024OctetFrames"}, {0x7c, "RxTx1024upOctetFrames"}, {0x80, "NetOctets"}, {0x84, "RxStartOfFrameOverruns"}, {0x88, "RxMiddleOfFrameOverruns"}, {0x8c, "RxDmaOverruns"} }; /* * Basic debug support. */ static void cpsw_debugf_head(const char *funcname) { int t = (int)(time_second % (24 * 60 * 60)); printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); } static void cpsw_debugf(const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } #define CPSW_DEBUGF(_sc, a) do { \ if ((_sc)->debug) { \ cpsw_debugf_head(__func__); \ cpsw_debugf a; \ } \ } while (0) /* * Locking macros */ #define CPSW_TX_LOCK(sc) do { \ mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->tx.lock); \ } while (0) #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) #define CPSW_RX_LOCK(sc) do { \ mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->rx.lock); \ } while (0) #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) #define CPSW_PORT_LOCK(_sc) do { \ mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ mtx_lock(&(_sc)->lock); \ } while (0) #define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) #define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) /* * Read/Write macros */ #define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) #define cpsw_write_4(_sc, _reg, _val) \ bus_write_4((_sc)->mem_res, (_reg), (_val)) #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) #define cpsw_cpdma_bd_paddr(sc, slot) \ BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) #define cpsw_cpdma_read_bd(sc, slot, val) \ bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd(sc, slot, val) \ bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) #define cpsw_cpdma_write_bd_flags(sc, slot, val) \ bus_write_2(sc->mem_res, slot->bd_offset + 14, val) #define cpsw_cpdma_read_bd_flags(sc, slot) \ bus_read_2(sc->mem_res, slot->bd_offset + 14) #define cpsw_write_hdp_slot(sc, queue, slot) \ cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) #define cpsw_read_cp(sc, queue) \ cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) #define cpsw_write_cp(sc, queue, val) \ cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) #define cpsw_write_cp_slot(sc, queue, slot) \ cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot)) #if 0 /* XXX temporary function versions for debugging. */ static void cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t reg = queue->hdp_offset; uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); cpsw_write_4(sc, reg, v); } static void cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); cpsw_write_cp(sc, queue, v); } #endif /* * Expanded dump routines for verbose debugging. 
*/ static void cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", "Port0"}; struct cpsw_cpdma_bd bd; const char *sep; int i; cpsw_cpdma_read_bd(sc, slot, &bd); printf("BD Addr : 0x%08x Next : 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next); printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); printf(" Flags: "); sep = ""; for (i = 0; i < 16; ++i) { if (bd.flags & (1 << (15 - i))) { printf("%s%s", sep, flags[i]); sep = ","; } } printf("\n"); if (slot->mbuf) { printf(" Ether: %14D\n", (char *)(slot->mbuf->m_data), " "); printf(" Packet: %16D\n", (char *)(slot->mbuf->m_data) + 14, " "); } } #define CPSW_DUMP_SLOT(cs, slot) do { \ IF_DEBUG(sc) { \ cpsw_dump_slot(sc, slot); \ } \ } while (0) static void cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) { struct cpsw_slot *slot; int i = 0; int others = 0; STAILQ_FOREACH(slot, q, next) { if (i > CPSW_TXFRAGS) ++others; else cpsw_dump_slot(sc, slot); ++i; } if (others) printf(" ... and %d more.\n", others); printf("\n"); } #define CPSW_DUMP_QUEUE(sc, q) do { \ IF_DEBUG(sc) { \ cpsw_dump_queue(sc, q); \ } \ } while (0) static void cpsw_init_slots(struct cpsw_softc *sc) { struct cpsw_slot *slot; int i; STAILQ_INIT(&sc->avail); /* Put the slot descriptors onto the global avail list. */ for (i = 0; i < nitems(sc->_slots); i++) { slot = &sc->_slots[i]; slot->bd_offset = cpsw_cpdma_bd_offset(i); STAILQ_INSERT_TAIL(&sc->avail, slot, next); } } static int cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) { const int max_slots = nitems(sc->_slots); struct cpsw_slot *slot; int i; if (requested < 0) requested = max_slots; for (i = 0; i < requested; ++i) { slot = STAILQ_FIRST(&sc->avail); if (slot == NULL) return (0); if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { device_printf(sc->dev, "failed to create dmamap\n"); return (ENOMEM); } STAILQ_REMOVE_HEAD(&sc->avail, next); STAILQ_INSERT_TAIL(&queue->avail, slot, next); ++queue->avail_queue_len; ++queue->queue_slots; } return (0); } static void cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { int error __diagused; if (slot->dmamap) { if (slot->mbuf) bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); KASSERT(error == 0, ("Mapping still active")); slot->dmamap = NULL; } if (slot->mbuf) { m_freem(slot->mbuf); slot->mbuf = NULL; } } static void cpsw_reset(struct cpsw_softc *sc) { int i; callout_stop(&sc->watchdog.callout); /* Reset RMII/RGMII wrapper. */ cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) ; /* Disable TX and RX interrupts for all cores. */ for (i = 0; i < 3; ++i) { cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); } /* Reset CPSW subsystem. */ cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) ; /* Reset Sliver port 1 and 2 */ for (i = 0; i < 2; i++) { /* Reset */ cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) ; } /* Reset DMA controller. 
	 */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}

static void
cpsw_init(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	uint32_t reg;

	/* Disable the interrupt pacing. */
	reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);

	/* Enable ALE */
	reg = CPSW_ALE_CTL_ENABLE;
	if (sc->dualemac)
		reg |= CPSW_ALE_CTL_VLAN_AWARE;
	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);

	/* Set Host Port Mapping. */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: set host port to forwarding(3). */
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0),
	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Turn off flow control. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Make the IP header 4-byte aligned */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX and TX on Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET,
	    CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0));
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);

	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Initialize active queues. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len);
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS);

	/* Activate network interface. */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_init(&sc->watchdog.callout, 0);
	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}

/*
 *
 * Device Probe, Attach, Detach.
 *
 */
static int
cpsw_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
		return (ENXIO);

	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
	return (BUS_PROBE_DEFAULT);
}

static int
cpsw_intr_attach(struct cpsw_softc *sc)
{
	int i;

	for (i = 0; i < CPSW_INTR_COUNT; i++) {
		if (bus_setup_intr(sc->dev, sc->irq_res[i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) {
			return (-1);
		}
	}

	return (0);
}

static void
cpsw_intr_detach(struct cpsw_softc *sc)
{
	int i;

	for (i = 0; i < CPSW_INTR_COUNT; i++) {
		if (sc->ih_cookie[i]) {
			bus_teardown_intr(sc->dev, sc->irq_res[i],
			    sc->ih_cookie[i]);
		}
	}
}

static int
cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
{
	char *name;
	int len, phy, vlan;
	pcell_t phy_id[3], vlan_id;
	phandle_t child;
	unsigned long mdio_child_addr;

	/* Find any slave with phy-handle/phy_id */
	phy = -1;
	vlan = -1;
	for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
		if (OF_getprop_alloc(child, "name", (void **)&name) < 0)
			continue;
		if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
			OF_prop_free(name);
			continue;
		}
		OF_prop_free(name);

		if (mdio_child_addr != slave_mdio_addr[port] &&
		    mdio_child_addr != (slave_mdio_addr[port] & 0xFFF))
			continue;

		if (fdt_get_phyaddr(child, NULL, &phy, NULL) != 0) {
			/* Users with old DTB will have phy_id instead */
			phy = -1;
			len = OF_getproplen(child, "phy_id");
			if (len / sizeof(pcell_t) == 2) {
				/* Get phy address from fdt */
				if (OF_getencprop(child, "phy_id", phy_id,
				    len) > 0)
					phy = phy_id[1];
			}
		}

		len = OF_getproplen(child, "dual_emac_res_vlan");
		if (len / sizeof(pcell_t) == 1) {
			/* Get VLAN id from fdt */
			if (OF_getencprop(child, "dual_emac_res_vlan",
			    &vlan_id, len) > 0) {
				vlan = vlan_id;
			}
		}

		break;
	}

	if (phy == -1)
		return (ENXIO);
	sc->port[port].phy = phy;
	sc->port[port].vlan = vlan;

	return (0);
}

static int
cpsw_attach(device_t dev)
{
	int error, i;
	struct cpsw_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);
	getbinuptime(&sc->attach_uptime);

	if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
	    sizeof(sc->active_slave)) <= 0) {
		sc->active_slave = 0;
	}
	if (sc->active_slave > 1)
		sc->active_slave = 1;

	if (OF_hasprop(sc->node, "dual_emac"))
		sc->dualemac = 1;

	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		if (cpsw_get_fdt_data(sc, i) != 0) {
			device_printf(dev,
			    "failed to get PHY address from FDT\n");
			return (ENXIO);
		}
	}

	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IRQ resources */
	error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
	if (error) {
		device_printf(dev, "could not allocate IRQ resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->mem_rid, RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(sc->dev, "failed to allocate memory resource\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
	    reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for mbufs.
*/ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->mbuf_dtag); /* dmatag */ if (error) { device_printf(dev, "bus_dma_tag_create failed\n"); cpsw_detach(dev); return (error); } /* Allocate a NULL buffer for padding. */ sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO); cpsw_init_slots(sc); /* Allocate slots to TX and RX queues. */ STAILQ_INIT(&sc->rx.avail); STAILQ_INIT(&sc->rx.active); STAILQ_INIT(&sc->tx.avail); STAILQ_INIT(&sc->tx.active); // For now: 128 slots to TX, rest to RX. // XXX TODO: start with 32/64 and grow dynamically based on demand. if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) { device_printf(dev, "failed to allocate dmamaps\n"); cpsw_detach(dev); return (ENOMEM); } device_printf(dev, "Initial queue size TX=%d RX=%d\n", sc->tx.queue_slots, sc->rx.queue_slots); sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); if (cpsw_intr_attach(sc) == -1) { device_printf(dev, "failed to setup interrupts\n"); cpsw_detach(dev); return (ENXIO); } #ifdef CPSW_ETHERSWITCH for (i = 0; i < CPSW_VLANS; i++) cpsw_vgroups[i].vid = -1; #endif /* Reset the controller. */ cpsw_reset(sc); cpsw_init(sc); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; sc->port[i].dev = device_add_child(dev, "cpsw", i); if (sc->port[i].dev == NULL) { cpsw_detach(dev); return (ENXIO); } } bus_identify_children(dev); bus_attach_children(dev); return (0); } static int cpsw_detach(device_t dev) { struct cpsw_softc *sc; int error, i; error = bus_generic_detach(dev); if (error != 0) return (error); sc = device_get_softc(dev); if (device_is_attached(dev)) { callout_stop(&sc->watchdog.callout); callout_drain(&sc->watchdog.callout); } /* Stop and release all interrupts */ cpsw_intr_detach(sc); /* Free dmamaps and mbufs */ for (i = 0; i < nitems(sc->_slots); ++i) cpsw_free_slot(sc, &sc->_slots[i]); /* Free null padding buffer. */ if (sc->nullpad) free(sc->nullpad, M_DEVBUF); /* Free DMA tag */ if (sc->mbuf_dtag) { error = bus_dma_tag_destroy(sc->mbuf_dtag); KASSERT(error == 0, ("Unable to destroy DMA tag")); } /* Free IO memory handler */ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); bus_release_resources(dev, irq_res_spec, sc->irq_res); /* Destroy mutexes */ mtx_destroy(&sc->rx.lock); mtx_destroy(&sc->tx.lock); return (0); } static phandle_t cpsw_get_node(device_t bus, device_t dev) { /* Share controller node with port device. 
	 */
	return (ofw_bus_get_node(bus));
}

static int
cpswp_probe(device_t dev)
{

	if (device_get_unit(dev) > 1) {
		device_printf(dev, "Only two ports are supported.\n");
		return (ENXIO);
	}
	device_set_desc(dev, "Ethernet Switch Port");
	return (BUS_PROBE_DEFAULT);
}

static int
cpswp_attach(device_t dev)
{
	int error;
	if_t ifp;
	struct cpswp_softc *sc;
	uint32_t reg;
	uint8_t mac_addr[ETHER_ADDR_LEN];
	phandle_t opp_table;
	struct syscon *syscon;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->pdev = device_get_parent(dev);
	sc->swsc = device_get_softc(sc->pdev);
	sc->unit = device_get_unit(dev);
	sc->phy = sc->swsc->port[sc->unit].phy;
	sc->vlan = sc->swsc->port[sc->unit].vlan;
	if (sc->swsc->dualemac && sc->vlan == -1)
		sc->vlan = sc->unit + 1;

	if (sc->unit == 0) {
		sc->physel = MDIOUSERPHYSEL0;
		sc->phyaccess = MDIOUSERACCESS0;
	} else {
		sc->physel = MDIOUSERPHYSEL1;
		sc->phyaccess = MDIOUSERACCESS1;
	}

	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
	    MTX_DEF);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if_initname(ifp, device_get_name(sc->dev), sc->unit);
	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_setinitfn(ifp, cpswp_init);
	if_setstartfn(ifp, cpswp_start);
	if_setioctlfn(ifp, cpswp_ioctl);

	if_setsendqlen(ifp, sc->swsc->tx.queue_slots);
	if_setsendqready(ifp);

	/* FIXME: For now; Go and kidnap syscon from opp-table */
	/* ti,cpsw actually has an optional syscon reference but only for am33xx?? */
	opp_table = OF_finddevice("/opp-table");
	if (opp_table == -1) {
		device_printf(dev, "Can't find /opp-table\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (!OF_hasprop(opp_table, "syscon")) {
		device_printf(dev,
		    "/opp-table doesn't have required syscon property\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &syscon) != 0) {
		device_printf(dev, "Failed to get syscon\n");
		cpswp_detach(dev);
		return (ENXIO);
	}

	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_HI + sc->unit * 8);
	mac_addr[0] = reg & 0xFF;
	mac_addr[1] = (reg >> 8) & 0xFF;
	mac_addr[2] = (reg >> 16) & 0xFF;
	mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_LO + sc->unit * 8);
	mac_addr[4] = reg & 0xFF;
	mac_addr[5] = (reg >> 8) & 0xFF;

	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
	    cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpswp_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc->swsc, sc->physel,
	    MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));

	ether_ifattach(sc->ifp, mac_addr);
	callout_init(&sc->mii_callout, 0);

	return (0);
}

static int
cpswp_detach(device_t dev)
{
	struct cpswp_softc *sc;
+	int error;
+
+	error = bus_generic_detach(dev);
+	if (error != 0)
+		return (error);

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc->swsc, (""));
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_PORT_LOCK(sc);
		cpswp_stop_locked(sc);
		CPSW_PORT_UNLOCK(sc);
		callout_drain(&sc->mii_callout);
	}

-	bus_generic_detach(dev);
-
	if_free(sc->ifp);
	mtx_destroy(&sc->lock);

	return (0);
}

/*
 *
 * Init/Shutdown.
* */ static int cpsw_ports_down(struct cpsw_softc *sc) { struct cpswp_softc *psc; if_t ifp1, ifp2; if (!sc->dualemac) return (1); psc = device_get_softc(sc->port[0].dev); ifp1 = psc->ifp; psc = device_get_softc(sc->port[1].dev); ifp2 = psc->ifp; if ((if_getflags(ifp1) & IFF_UP) == 0 && (if_getflags(ifp2) & IFF_UP) == 0) return (1); return (0); } static void cpswp_init(void *arg) { struct cpswp_softc *sc = arg; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); cpswp_init_locked(arg); CPSW_PORT_UNLOCK(sc); } static void cpswp_init_locked(void *arg) { #ifdef CPSW_ETHERSWITCH int i; #endif struct cpswp_softc *sc = arg; if_t ifp; uint32_t reg; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK_ASSERT(sc); ifp = sc->ifp; if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) return; getbinuptime(&sc->init_uptime); if (!sc->swsc->rx.running && !sc->swsc->tx.running) { /* Reset the controller. */ cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } /* Set Slave Mapping. */ cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 0x33221100); cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); /* Enable MAC RX/TX modules. */ /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg |= CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); /* Initialize ALE: set port to forwarding, initialize addrs */ cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); cpswp_ale_update_addresses(sc, 1); if (sc->swsc->dualemac) { /* Set Port VID. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), sc->vlan & 0xfff); cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ #ifdef CPSW_ETHERSWITCH for (i = 0; i < CPSW_VLANS; i++) { if (cpsw_vgroups[i].vid != -1) continue; cpsw_vgroups[i].vid = sc->vlan; break; } #endif } mii_mediachg(sc->mii); callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } static int cpsw_shutdown(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static void cpsw_rx_teardown(struct cpsw_softc *sc) { int i = 0; CPSW_RX_LOCK(sc); CPSW_DEBUGF(sc, ("starting RX teardown")); sc->rx.teardown = 1; cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); CPSW_RX_UNLOCK(sc); while (sc->rx.running) { if (++i > 10) { device_printf(sc->dev, "Unable to cleanly shutdown receiver\n"); return; } DELAY(200); } if (!sc->rx.running) CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i)); } static void cpsw_tx_teardown(struct cpsw_softc *sc) { int i = 0; CPSW_TX_LOCK(sc); CPSW_DEBUGF(sc, ("starting TX teardown")); /* Start the TX queue teardown if queue is not empty. 
*/ if (STAILQ_FIRST(&sc->tx.active) != NULL) cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); else sc->tx.teardown = 1; cpsw_tx_dequeue(sc); while (sc->tx.running && ++i < 10) { DELAY(200); cpsw_tx_dequeue(sc); } if (sc->tx.running) { device_printf(sc->dev, "Unable to cleanly shutdown transmitter\n"); } CPSW_DEBUGF(sc, ("finished TX teardown (%d retries, %d idle buffers)", i, sc->tx.active_queue_len)); CPSW_TX_UNLOCK(sc); } static void cpswp_stop_locked(struct cpswp_softc *sc) { if_t ifp; uint32_t reg; ifp = sc->ifp; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK_ASSERT(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; /* Disable interface */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); /* Stop ticker */ callout_stop(&sc->mii_callout); /* Tear down the RX/TX queues. */ if (cpsw_ports_down(sc->swsc)) { cpsw_rx_teardown(sc->swsc); cpsw_tx_teardown(sc->swsc); } /* Stop MAC RX/TX modules. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg &= ~CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); if (cpsw_ports_down(sc->swsc)) { /* Capture stats before we reset controller. */ cpsw_stats_collect(sc->swsc); cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } } /* * Suspend/Resume. */ static int cpsw_suspend(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static int cpsw_resume(device_t dev) { struct cpsw_softc *sc; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("UNIMPLEMENTED")); return (0); } /* * * IOCTL * */ static void cpsw_set_promisc(struct cpswp_softc *sc, int set) { uint32_t reg; /* * Enabling promiscuous mode requires ALE_BYPASS to be enabled. * That disables the ALE forwarding logic and causes every * packet to be sent only to the host port. In bypass mode, * the ALE processes host port transmit packets the same as in * normal mode. 
*/ reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); reg &= ~CPSW_ALE_CTL_BYPASS; if (set) reg |= CPSW_ALE_CTL_BYPASS; cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); } static void cpsw_set_allmulti(struct cpswp_softc *sc, int set) { if (set) { printf("All-multicast mode unimplemented\n"); } } static int cpswp_ioctl(if_t ifp, u_long command, caddr_t data) { struct cpswp_softc *sc; struct ifreq *ifr; int error; uint32_t changed; error = 0; sc = if_getsoftc(ifp); ifr = (struct ifreq *)data; switch (command) { case SIOCSIFCAP: changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap; if (changed & IFCAP_HWCSUM) { if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) if_setcapenablebit(ifp, IFCAP_HWCSUM, 0); else if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); } error = 0; break; case SIOCSIFFLAGS: CPSW_PORT_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { changed = if_getflags(ifp) ^ sc->if_flags; CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed)); if (changed & IFF_PROMISC) cpsw_set_promisc(sc, if_getflags(ifp) & IFF_PROMISC); if (changed & IFF_ALLMULTI) cpsw_set_allmulti(sc, if_getflags(ifp) & IFF_ALLMULTI); } else { CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: starting up")); cpswp_init_locked(sc); } } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down")); cpswp_stop_locked(sc); } sc->if_flags = if_getflags(ifp); CPSW_PORT_UNLOCK(sc); break; case SIOCADDMULTI: cpswp_ale_update_addresses(sc, 0); break; case SIOCDELMULTI: /* Ugh. DELMULTI doesn't provide the specific address being removed, so the best we can do is remove everything and rebuild it all. */ cpswp_ale_update_addresses(sc, 1); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } /* * * MIIBUS * */ static int cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg) { uint32_t r, retries = CPSW_MIIBUS_RETRIES; while (--retries) { r = cpsw_read_4(sc, reg); if ((r & MDIO_PHYACCESS_GO) == 0) return (1); DELAY(CPSW_MIIBUS_DELAY); } return (0); } static int cpswp_miibus_readreg(device_t dev, int phy, int reg) { struct cpswp_softc *sc; uint32_t cmd, r; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to read\n"); return (0); } /* Set GO, reg, phy */ cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during read\n"); return (0); } r = cpsw_read_4(sc->swsc, sc->phyaccess); if ((r & MDIO_PHYACCESS_ACK) == 0) { device_printf(dev, "Failed to read from PHY.\n"); r = 0; } return (r & 0xFFFF); } static int cpswp_miibus_writereg(device_t dev, int phy, int reg, int value) { struct cpswp_softc *sc; uint32_t cmd; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to write\n"); return (0); } /* Set GO, WRITE, reg, phy, and value */ cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE | (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF); cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during write\n"); return (0); } return (0); } static void cpswp_miibus_statchg(device_t dev) { struct cpswp_softc *sc; uint32_t mac_control, reg; sc = device_get_softc(dev); CPSW_DEBUGF(sc->swsc, ("")); 
	reg = CPSW_SL_MACCONTROL(sc->unit);
	mac_control = cpsw_read_4(sc->swsc, reg);
	mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
	    CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);

	switch (IFM_SUBTYPE(sc->mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		mac_control |= CPSW_SL_MACTL_GIG;
		break;

	case IFM_100_TX:
		mac_control |= CPSW_SL_MACTL_IFCTL_A;
		break;
	}

	if (sc->mii->mii_media_active & IFM_FDX)
		mac_control |= CPSW_SL_MACTL_FULLDUPLEX;

	cpsw_write_4(sc->swsc, reg, mac_control);
}

/*
 *
 * Transmit/Receive Packets.
 *
 */
static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc;
	if_t ifp;
	struct mbuf *received, *next;

	sc = (struct cpsw_softc *)arg;
	CPSW_RX_LOCK(sc);
	if (sc->rx.teardown) {
		sc->rx.running = 0;
		sc->rx.teardown = 0;
		cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
	}
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		ifp = received->m_pkthdr.rcvif;
		if_input(ifp, received);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		received = next;
	}
}

static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
	int nsegs, port, removed;
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *last, *slot;
	struct cpswp_softc *psc;
	struct mbuf *m, *m0, *mb_head, *mb_tail;
	uint16_t m0_flags;

	nsegs = 0;
	m0 = NULL;
	last = NULL;
	mb_head = NULL;
	mb_tail = NULL;
	removed = 0;

	/* Pull completed packets off hardware RX queue. */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);

		/*
		 * Stop on packets still in use by hardware, but do not stop
		 * on packets with the teardown complete flag, they will be
		 * discarded later.
		 */
		if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) ==
		    CPDMA_BD_OWNER)
			break;

		last = slot;
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		m = slot->mbuf;
		slot->mbuf = NULL;

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(sc, ("RX teardown is complete"));
			m_freem(m);
			sc->rx.running = 0;
			sc->rx.teardown = 0;
			break;
		}

		port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
		KASSERT(port >= 0 && port <= 1,
		    ("packet received with invalid port: %d", port));
		psc = device_get_softc(sc->port[port].dev);

		/* Set up mbuf */
		m->m_data += bd.bufoff;
		m->m_len = bd.buflen;
		if (bd.flags & CPDMA_BD_SOP) {
			m->m_pkthdr.len = bd.pktlen;
			m->m_pkthdr.rcvif = psc->ifp;
			m->m_flags |= M_PKTHDR;
			m0_flags = bd.flags;
			m0 = m;
		}
		nsegs++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		if (bd.flags & CPDMA_BD_EOP && m0 != NULL) {
			if (m0_flags & CPDMA_BD_PASS_CRC)
				m_adj(m0, -ETHER_CRC_LEN);
			m0_flags = 0;
			m0 = NULL;
			if (nsegs > sc->rx.longest_chain)
				sc->rx.longest_chain = nsegs;
			nsegs = 0;
		}

		if ((if_getcapenable(psc->ifp) & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags &
			    (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) ==
			    CPDMA_BD_SOP) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (STAILQ_FIRST(&sc->rx.active) != NULL &&
		    (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
			cpsw_write_hdp_slot(sc, &sc->rx,
			    STAILQ_FIRST(&sc->rx.active));
			sc->rx.queue_restart++;
		}

		/* Add mbuf to packet list to be returned.
		 */
		if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) {
			mb_tail->m_nextpkt = m;
		} else if (mb_tail != NULL) {
			mb_tail->m_next = m;
		} else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) {
			if (bootverbose)
				printf(
				    "%s: %s: discarding fragment packet w/o header\n",
				    __func__, if_name(psc->ifp));
			m_freem(m);
			continue;
		} else {
			mb_head = m;
		}
		mb_tail = m;
	}

	if (removed != 0) {
		cpsw_write_cp_slot(sc, &sc->rx, last);
		sc->rx.queue_removes += removed;
		sc->rx.avail_queue_len += removed;
		sc->rx.active_queue_len -= removed;
		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
		CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue",
		    removed));
	}

	return (mb_head);
}

static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware. */
	first_new_slot = NULL;
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (first_new_slot == NULL)
			first_new_slot = slot;
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				device_printf(sc->dev,
				    "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1,
		    ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			device_printf(sc->dev,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_PREREAD);

		/* Create and submit new rx descriptor. */
		if ((next = STAILQ_NEXT(slot, next)) != NULL)
			bd.next = cpsw_cpdma_bd_paddr(sc, next);
		else
			bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		STAILQ_INSERT_TAIL(&sc->rx.active, slot, next);
	}

	if (added == 0 || first_new_slot == NULL)
		return;

	CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue.
*/ cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); } sc->rx.queue_adds += added; sc->rx.avail_queue_len -= added; sc->rx.active_queue_len += added; cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added); if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) sc->rx.max_active_queue_len = sc->rx.active_queue_len; } static void cpswp_start(if_t ifp) { struct cpswp_softc *sc; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || sc->swsc->tx.running == 0) { return; } CPSW_TX_LOCK(sc->swsc); cpswp_tx_enqueue(sc); cpsw_tx_dequeue(sc->swsc); CPSW_TX_UNLOCK(sc->swsc); } static void cpsw_intr_tx(void *arg) { struct cpsw_softc *sc; sc = (struct cpsw_softc *)arg; CPSW_TX_LOCK(sc); if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) cpsw_write_cp(sc, &sc->tx, 0xfffffffc); cpsw_tx_dequeue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); CPSW_TX_UNLOCK(sc); } static void cpswp_tx_enqueue(struct cpswp_softc *sc) { bus_dma_segment_t segs[CPSW_TXFRAGS]; struct cpsw_cpdma_bd bd; struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot; struct mbuf *m0; int error, nsegs, seg, added = 0, padlen; /* Pull pending packets from IF queue and prep them for DMA. */ last = NULL; first_new_slot = NULL; last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { m0 = if_dequeue(sc->ifp); if (m0 == NULL) break; slot->mbuf = m0; padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len; if (padlen < 0) padlen = 0; else if (padlen > 0) m_append(slot->mbuf, padlen, sc->swsc->nullpad); /* Create mapping in DMA memory */ error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); /* If the packet is too fragmented, try to simplify. */ if (error == EFBIG || (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) { bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); m0 = m_defrag(slot->mbuf, M_NOWAIT); if (m0 == NULL) { device_printf(sc->dev, "Can't defragment packet; dropping\n"); m_freem(slot->mbuf); } else { CPSW_DEBUGF(sc->swsc, ("Requeueing defragmented packet")); if_sendq_prepend(sc->ifp, m0); } slot->mbuf = NULL; continue; } if (error != 0) { device_printf(sc->dev, "%s: Can't setup DMA (error=%d), dropping packet\n", __func__, error); bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; } bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREWRITE); CPSW_DEBUGF(sc->swsc, ("Queueing TX packet: %d segments + %d pad bytes", nsegs, padlen)); if (first_new_slot == NULL) first_new_slot = slot; /* Link from the previous descriptor. */ if (last != NULL) cpsw_cpdma_write_bd_next(sc->swsc, last, slot); slot->ifp = sc->ifp; /* If there is only one segment, the for() loop * gets skipped and the single buffer gets set up * as both SOP and EOP. */ if (nsegs > 1) { next = STAILQ_NEXT(slot, next); bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); } else bd.next = 0; /* Start by setting up the first buffer. 
*/ bd.bufptr = segs[0].ds_addr; bd.bufoff = 0; bd.buflen = segs[0].ds_len; bd.pktlen = m_length(slot->mbuf, NULL); bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER; if (sc->swsc->dualemac) { bd.flags |= CPDMA_BD_TO_PORT; bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK); } for (seg = 1; seg < nsegs; ++seg) { /* Save the previous buffer (which isn't EOP) */ cpsw_cpdma_write_bd(sc->swsc, slot, &bd); STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); slot = STAILQ_FIRST(&sc->swsc->tx.avail); /* Setup next buffer (which isn't SOP) */ if (nsegs > seg + 1) { next = STAILQ_NEXT(slot, next); bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); } else bd.next = 0; bd.bufptr = segs[seg].ds_addr; bd.bufoff = 0; bd.buflen = segs[seg].ds_len; bd.pktlen = 0; bd.flags = CPDMA_BD_OWNER; } /* Save the final buffer. */ bd.flags |= CPDMA_BD_EOP; cpsw_cpdma_write_bd(sc->swsc, slot, &bd); STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); last = slot; added += nsegs; if (nsegs > sc->swsc->tx.longest_chain) sc->swsc->tx.longest_chain = nsegs; BPF_MTAP(sc->ifp, m0); } if (first_new_slot == NULL) return; /* Attach the list of new buffers to the hardware TX queue. */ if (last_old_slot != NULL && (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & CPDMA_BD_EOQ) == 0) { /* Add buffers to end of current queue. */ cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, first_new_slot); } else { /* Start a fresh queue. */ cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); } sc->swsc->tx.queue_adds += added; sc->swsc->tx.avail_queue_len -= added; sc->swsc->tx.active_queue_len += added; if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; } CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added)); } static int cpsw_tx_dequeue(struct cpsw_softc *sc) { struct cpsw_slot *slot, *last_removed_slot = NULL; struct cpsw_cpdma_bd bd; uint32_t flags, removed = 0; /* Pull completed buffers off the hardware TX queue. */ slot = STAILQ_FIRST(&sc->tx.active); while (slot != NULL) { flags = cpsw_cpdma_read_bd_flags(sc, slot); /* TearDown complete is only marked on the SOP for the packet. */ if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) { sc->tx.teardown = 1; } if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) == (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0) break; /* Hardware is still using this packet. */ bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; if (slot->ifp) { if (sc->tx.teardown == 0) if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); else if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1); } /* Dequeue any additional buffers used by this packet. */ while (slot != NULL && slot->mbuf == NULL) { STAILQ_REMOVE_HEAD(&sc->tx.active, next); STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); ++removed; last_removed_slot = slot; slot = STAILQ_FIRST(&sc->tx.active); } cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); /* Restart the TX queue if necessary. 
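 *
 * This handles the append/EOQ race: the DMA engine may have fetched a
 * descriptor while its "next" field was still 0, stopped there, and
 * flagged EOQ on completion even though software has since chained
 * more descriptors behind it.  The recovery below re-arms the channel
 * by hand; in sketch form:
 *
 *	if (bd.flags has EOP and EOQ set, OWNER clear, bd.next != 0,
 *	    and descriptors are still pending)
 *		write the next slot's address to TX_HDP;  // restart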
*/ cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); if (slot != NULL && bd.next != 0 && (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) == (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { cpsw_write_hdp_slot(sc, &sc->tx, slot); sc->tx.queue_restart++; break; } } if (removed != 0) { sc->tx.queue_removes += removed; sc->tx.active_queue_len -= removed; sc->tx.avail_queue_len += removed; if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed)); } if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) { CPSW_DEBUGF(sc, ("TX teardown is complete")); sc->tx.teardown = 0; sc->tx.running = 0; } return (removed); } /* * * Miscellaneous interrupts. * */ static void cpsw_intr_rx_thresh(void *arg) { struct cpsw_softc *sc; if_t ifp; struct mbuf *received, *next; sc = (struct cpsw_softc *)arg; CPSW_RX_LOCK(sc); received = cpsw_rx_dequeue(sc); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); CPSW_RX_UNLOCK(sc); while (received != NULL) { next = received->m_nextpkt; received->m_nextpkt = NULL; ifp = received->m_pkthdr.rcvif; if_input(ifp, received); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); received = next; } } static void cpsw_intr_misc_host_error(struct cpsw_softc *sc) { uint32_t intstat; uint32_t dmastat; int txerr, rxerr, txchan, rxchan; printf("\n\n"); device_printf(sc->dev, "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); printf("\n\n"); intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); txerr = (dmastat >> 20) & 15; txchan = (dmastat >> 16) & 7; rxerr = (dmastat >> 12) & 15; rxchan = (dmastat >> 8) & 7; switch (txerr) { case 0: break; case 1: printf("SOP error on TX channel %d\n", txchan); break; case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); break; case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); break; case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); break; case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); break; case 6: printf("Packet length error on TX channel %d\n", txchan); break; default: printf("Unknown error on TX channel %d\n", txchan); break; } if (txerr != 0) { printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); printf("CPSW_CPDMA_TX%d_CP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); cpsw_dump_queue(sc, &sc->tx.active); } switch (rxerr) { case 0: break; case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); break; case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); break; case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); break; case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); break; default: printf("Unknown RX error on RX channel %d\n", rxchan); break; } if (rxerr != 0) { printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); printf("CPSW_CPDMA_RX%d_CP=0x%x\n", rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); cpsw_dump_queue(sc, &sc->rx.active); } printf("\nALE Table\n"); cpsw_ale_dump_table(sc); // XXX do something useful here?? panic("CPSW HOST ERROR INTERRUPT"); // Suppress this interrupt in the future. 
cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); // The watchdog will probably reset the controller // in a little while. It will probably fail again. } static void cpsw_intr_misc(void *arg) { struct cpsw_softc *sc = arg; uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); if (stat & CPSW_WR_C_MISC_EVNT_PEND) CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); if (stat & CPSW_WR_C_MISC_STAT_PEND) cpsw_stats_collect(sc); if (stat & CPSW_WR_C_MISC_HOST_PEND) cpsw_intr_misc_host_error(sc); if (stat & CPSW_WR_C_MISC_MDIOLINK) { cpsw_write_4(sc, MDIOLINKINTMASKED, cpsw_read_4(sc, MDIOLINKINTMASKED)); } if (stat & CPSW_WR_C_MISC_MDIOUSER) { CPSW_DEBUGF(sc, ("MDIO operation completed interrupt unimplemented")); } cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); } /* * * Periodic Checks and Watchdog. * */ static void cpswp_tick(void *msc) { struct cpswp_softc *sc = msc; /* Check for media type change */ mii_tick(sc->mii); if (sc->media_status != sc->mii->mii_media.ifm_media) { printf("%s: media type changed (ifm_media=%x)\n", __func__, sc->mii->mii_media.ifm_media); cpswp_ifmedia_upd(sc->ifp); } /* Schedule another timeout one second from now */ callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); } static void cpswp_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct cpswp_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; CPSW_PORT_UNLOCK(sc); } static int cpswp_ifmedia_upd(if_t ifp) { struct cpswp_softc *sc; sc = if_getsoftc(ifp); CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); mii_mediachg(sc->mii); sc->media_status = sc->mii->mii_media.ifm_media; CPSW_PORT_UNLOCK(sc); return (0); } static void cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) { struct cpswp_softc *psc; int i; cpsw_debugf_head("CPSW watchdog"); device_printf(sc->dev, "watchdog timeout\n"); printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0))); printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0))); cpsw_dump_queue(sc, &sc->tx.active); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } } static void cpsw_tx_watchdog(void *msc) { struct cpsw_softc *sc; sc = msc; CPSW_TX_LOCK(sc); if (sc->tx.active_queue_len == 0 || !sc->tx.running) { sc->watchdog.timer = 0; /* Nothing to do. */ } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ } else if (cpsw_tx_dequeue(sc) > 0) { sc->watchdog.timer = 0; /* We just did something. */ } else { /* There was something to do but it didn't get done. */ ++sc->watchdog.timer; if (sc->watchdog.timer > 5) { sc->watchdog.timer = 0; ++sc->watchdog.resets; cpsw_tx_watchdog_full_reset(sc); } } sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; CPSW_TX_UNLOCK(sc); /* Schedule another timeout one second from now */ callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); } /* * * ALE support routines. 
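 *
 * The 1024-entry ALE table is reached indirectly: an index (plus a
 * write strobe in bit 31) goes to ALE_TBLCTL and the three 32-bit
 * entry words move through ALE_TBLW0..TBLW2.  The address-entry
 * packing used throughout, in sketch form (bit positions as in the
 * field comments below):
 *
 *	word0 = mac[2]<<24 | mac[3]<<16 | mac[4]<<8 | mac[5];
 *	word1 = type<<28 | vlan<<16 | mac[0]<<8 | mac[1];
 *	word2 = portmask<<2;		(entry bits 68:66)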
* */ static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); } static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); } static void cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; /* First four entries are link address and broadcast. */ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); } } } static int cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, uint8_t *mac) { int free_index = -1, matching_index = -1, i; uint32_t ale_entry[3], ale_type; /* Find a matching entry or a free entry. */ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && (((ale_entry[0] >>24) & 0xFF) == mac[2]) && (((ale_entry[0] >>16) & 0xFF) == mac[3]) && (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (ENOMEM); i = free_index; } if (vlan != -1) ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; else ale_type = ALE_TYPE_ADDR << 28; /* Set MAC address */ ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = mac[0] << 8 | mac[1]; /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */ ale_entry[1] |= ALE_MCAST_FWD | ale_type; /* Set portmask [68:66] */ ale_entry[2] = (portmap & 7) << 2; cpsw_ale_write_entry(sc, i, ale_entry); return 0; } static void cpsw_ale_dump_table(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); switch (ALE_TYPE(ale_entry)) { case ALE_TYPE_VLAN: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); printf("\n"); break; case ALE_TYPE_ADDR: case ALE_TYPE_VLAN_ADDR: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", (ale_entry[1] >> 8) & 0xFF, (ale_entry[1] >> 0) & 0xFF, (ale_entry[0] >>24) & 0xFF, (ale_entry[0] >>16) & 0xFF, (ale_entry[0] >> 8) & 0xFF, (ale_entry[0] >> 0) & 0xFF); printf(ALE_MCAST(ale_entry) ? 
"mcast " : "ucast "); if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("port: %u ", ALE_PORTS(ale_entry)); printf("\n"); break; } } printf("\n"); } static u_int cpswp_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct cpswp_softc *sc = arg; uint32_t portmask; if (sc->swsc->dualemac) portmask = 1 << (sc->unit + 1) | 1 << 0; else portmask = 7; cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR(sdl)); return (1); } static int cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) { uint8_t *mac; uint32_t ale_entry[3], ale_type, portmask; if (sc->swsc->dualemac) { ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; portmask = 1 << (sc->unit + 1) | 1 << 0; } else { ale_type = ALE_TYPE_ADDR << 28; portmask = 7; } /* * Route incoming packets for our MAC address to Port 0 (host). * For simplicity, keep this entry at table index 0 for port 1 and * at index 2 for port 2 in the ALE. */ mac = LLADDR((struct sockaddr_dl *)if_getifaddr(sc->ifp)->ifa_addr); ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ ale_entry[2] = 0; /* port = 0 */ cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); /* Set outgoing MAC Address for slave port. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), mac[5] << 8 | mac[4]); /* Keep the broadcast address at table entry 1 (or 3). */ ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; ale_entry[2] = portmask << 2; cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); /* SIOCDELMULTI doesn't specify the particular address being removed, so we have to remove all and rebuild. */ if (purge) cpsw_ale_remove_all_mc_entries(sc->swsc); /* Set other multicast addrs desired. */ if_foreach_llmaddr(sc->ifp, cpswp_set_maddr, sc); return (0); } static int cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, int untag, int mcregflood, int mcunregflood) { int free_index, i, matching_index; uint32_t ale_entry[3]; free_index = matching_index = -1; /* Find a matching entry or a free entry. */ for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if (ALE_VLAN(ale_entry) == vlan) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (-1); i = free_index; } ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | (mcunregflood & 7) << 8 | (ports & 7); ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); return (0); } /* * * Statistics and Sysctls. 
* */ #if 0 static void cpsw_stats_dump(struct cpsw_softc *sc) { int i; uint32_t r; for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, (intmax_t)sc->shadow_stats[i], r, (intmax_t)sc->shadow_stats[i] + r)); } } #endif static void cpsw_stats_collect(struct cpsw_softc *sc) { int i; uint32_t r; CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); sc->shadow_stats[i] += r; cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r); } } static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct cpsw_stat *stat; uint64_t result; sc = (struct cpsw_softc *)arg1; stat = &cpsw_stat_sysctls[oidp->oid_number]; result = sc->shadow_stats[oidp->oid_number]; result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); return (sysctl_handle_64(oidp, &result, 0, req)); } static int cpsw_stat_attached(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct bintime t; unsigned result; sc = (struct cpsw_softc *)arg1; getbinuptime(&t); bintime_sub(&t, &sc->attach_uptime); result = t.sec; return (sysctl_handle_int(oidp, &result, 0, req)); } static int cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS) { int error; struct cpsw_softc *sc; uint32_t ctrl, intr_per_ms; sc = (struct cpsw_softc *)arg1; error = sysctl_handle_int(oidp, &sc->coal_us, 0, req); if (error != 0 || req->newptr == NULL) return (error); ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); if (sc->coal_us == 0) { /* Disable the interrupt pace hardware. */ cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0); cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0); return (0); } if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX) sc->coal_us = CPSW_WR_C_IMAX_US_MAX; if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN) sc->coal_us = CPSW_WR_C_IMAX_US_MIN; intr_per_ms = 1000 / sc->coal_us; /* Just to make sure... */ if (intr_per_ms > CPSW_WR_C_IMAX_MAX) intr_per_ms = CPSW_WR_C_IMAX_MAX; if (intr_per_ms < CPSW_WR_C_IMAX_MIN) intr_per_ms = CPSW_WR_C_IMAX_MIN; /* Set the prescale to produce 4us pulses from the 125 Mhz clock. */ ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK; /* Enable the interrupt pace hardware. 
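 * The pacer counts in 4 us ticks, so the prescale above is 125 MHz x
 * 4 us = 500 input clocks, and the IMAX registers bound how many
 * interrupts may fire per 1 ms window.  Worked example: with
 * intr_coalesce_us set to 500, intr_per_ms = 1000 / 500 = 2, i.e. at
 * most two paced RX (and two paced TX) interrupts per millisecond.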
*/ cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms); cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms); ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE; cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); return (0); } static int cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *swsc; struct cpswp_softc *sc; struct bintime t; unsigned result; swsc = arg1; sc = device_get_softc(swsc->port[arg2].dev); if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { getbinuptime(&t); bintime_sub(&t, &sc->init_uptime); result = t.sec; } else result = 0; return (sysctl_handle_int(oidp, &result, 0, req)); } static void cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_queue *queue) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", CTLFLAG_RD, &queue->queue_slots, 0, "Total buffers currently assigned to this queue"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", CTLFLAG_RD, &queue->active_queue_len, 0, "Buffers currently registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", CTLFLAG_RD, &queue->max_active_queue_len, 0, "Max value of activeBuffers since last driver reset"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", CTLFLAG_RD, &queue->avail_queue_len, 0, "Buffers allocated to this queue but not currently " "registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", CTLFLAG_RD, &queue->max_avail_queue_len, 0, "Max value of availBuffers since last driver reset"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", CTLFLAG_RD, &queue->queue_adds, 0, "Total buffers added to queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", CTLFLAG_RD, &queue->queue_removes, 0, "Total buffers removed from queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart", CTLFLAG_RD, &queue->queue_restart, 0, "Total times the queue has been restarted"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", CTLFLAG_RD, &queue->longest_chain, 0, "Max buffers used for a single packet"); } static void cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_softc *sc) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", CTLFLAG_RD, &sc->watchdog.resets, 0, "Total number of watchdog resets"); } static void cpsw_add_sysctls(struct cpsw_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *stats_node, *queue_node, *node; struct sysctl_oid_list *parent, *stats_parent, *queue_parent; struct sysctl_oid_list *ports_parent, *port_parent; char port[16]; int i; ctx = device_get_sysctl_ctx(sc->dev); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, cpsw_stat_attached, "IU", "Time since driver attach"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, cpsw_intr_coalesce, "IU", "minimum time between interrupts"); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Ports Statistics"); ports_parent = SYSCTL_CHILDREN(node); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; port[0] = '0' + i; port[1] = '\0'; node = SYSCTL_ADD_NODE(ctx, ports_parent, 
OID_AUTO, port, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Port Statistics"); port_parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i, cpsw_stat_uptime, "IU", "Seconds since driver init"); } stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Statistics"); stats_parent = SYSCTL_CHILDREN(stats_node); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { SYSCTL_ADD_PROC(ctx, stats_parent, i, cpsw_stat_sysctls[i].oid, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, cpsw_stats_sysctl, "IU", cpsw_stat_sysctls[i].oid); } queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Queue Statistics"); queue_parent = SYSCTL_CHILDREN(queue_node); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->tx); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->rx); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Watchdog Statistics"); cpsw_add_watchdog_sysctls(ctx, node, sc); } #ifdef CPSW_ETHERSWITCH static etherswitch_info_t etherswitch_info = { .es_nports = CPSW_PORTS + 1, .es_nvlangroups = CPSW_VLANS, .es_name = "TI Common Platform Ethernet Switch (CPSW)", .es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q, }; static etherswitch_info_t * cpsw_getinfo(device_t dev) { return (ðerswitch_info); } static int cpsw_getport(device_t dev, etherswitch_port_t *p) { int err; struct cpsw_softc *sc; struct cpswp_softc *psc; struct ifmediareq *ifmr; uint32_t reg; if (p->es_port < 0 || p->es_port > CPSW_PORTS) return (ENXIO); err = 0; sc = device_get_softc(dev); if (p->es_port == CPSW_CPU_PORT) { p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; } else { psc = device_get_softc(sc->port[p->es_port - 1].dev); err = ifmedia_ioctl(psc->ifp, &p->es_ifr, &psc->mii->mii_media, SIOCGIFMEDIA); } reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port)); p->es_pvid = reg & ETHERSWITCH_VID_MASK; reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); if (reg & ALE_PORTCTL_DROP_UNTAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; if (reg & ALE_PORTCTL_INGRESS) p->es_flags |= ETHERSWITCH_PORT_INGRESS; return (err); } static int cpsw_setport(device_t dev, etherswitch_port_t *p) { struct cpsw_softc *sc; struct cpswp_softc *psc; struct ifmedia *ifm; uint32_t reg; if (p->es_port < 0 || p->es_port > CPSW_PORTS) return (ENXIO); sc = device_get_softc(dev); if (p->es_pvid != 0) { cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port), p->es_pvid & ETHERSWITCH_VID_MASK); } reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) reg |= ALE_PORTCTL_DROP_UNTAGGED; else reg &= ~ALE_PORTCTL_DROP_UNTAGGED; if (p->es_flags & ETHERSWITCH_PORT_INGRESS) reg |= ALE_PORTCTL_INGRESS; else reg &= ~ALE_PORTCTL_INGRESS; cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg); /* CPU port does not allow media settings. 
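 * For the slave ports these knobs surface through etherswitch(4), so
 * the PVID and flag handling above can be exercised from userland
 * with etherswitchcfg(8), e.g. (illustrative invocation):
 *
 *	# etherswitchcfg port1 pvid 2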
*/ if (p->es_port == CPSW_CPU_PORT) return (0); psc = device_get_softc(sc->port[p->es_port - 1].dev); ifm = &psc->mii->mii_media; return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); } static int cpsw_getconf(device_t dev, etherswitch_conf_t *conf) { /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; return (0); } static int cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { int i, vid; uint32_t ale_entry[3]; struct cpsw_softc *sc; sc = device_get_softc(dev); if (vg->es_vlangroup >= CPSW_VLANS) return (EINVAL); vg->es_vid = 0; vid = cpsw_vgroups[vg->es_vlangroup].vid; if (vid == -1) return (0); for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) continue; if (vid != ALE_VLAN(ale_entry)) continue; vg->es_fid = 0; vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID; vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry); vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry); } return (0); } static void cpsw_remove_vlan(struct cpsw_softc *sc, int vlan) { int i; uint32_t ale_entry[3]; for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) continue; if (vlan != ALE_VLAN(ale_entry)) continue; ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); break; } } static int cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { int i; struct cpsw_softc *sc; sc = device_get_softc(dev); for (i = 0; i < CPSW_VLANS; i++) { /* Is this Vlan ID in use by another vlangroup ? */ if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid) return (EINVAL); } if (vg->es_vid == 0) { if (cpsw_vgroups[vg->es_vlangroup].vid == -1) return (0); cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid); cpsw_vgroups[vg->es_vlangroup].vid = -1; vg->es_untagged_ports = 0; vg->es_member_ports = 0; vg->es_vid = 0; return (0); } vg->es_vid &= ETHERSWITCH_VID_MASK; vg->es_member_ports &= CPSW_PORTS_MASK; vg->es_untagged_ports &= CPSW_PORTS_MASK; if (cpsw_vgroups[vg->es_vlangroup].vid != -1 && cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid) return (EINVAL); cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid; cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports, vg->es_untagged_ports, vg->es_member_ports, 0); return (0); } static int cpsw_readreg(device_t dev, int addr) { /* Not supported. */ return (0); } static int cpsw_writereg(device_t dev, int addr, int value) { /* Not supported. */ return (0); } static int cpsw_readphy(device_t dev, int phy, int reg) { /* Not supported. */ return (0); } static int cpsw_writephy(device_t dev, int phy, int reg, int data) { /* Not supported. */ return (0); } #endif diff --git a/sys/arm/ti/ti_spi.c b/sys/arm/ti/ti_spi.c index 26d6691e5bb5..fd0e0b085363 100644 --- a/sys/arm/ti/ti_spi.c +++ b/sys/arm/ti/ti_spi.c @@ -1,574 +1,577 @@ /*- * Copyright (c) 2016 Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" static void ti_spi_intr(void *); static int ti_spi_detach(device_t); #undef TI_SPI_DEBUG #ifdef TI_SPI_DEBUG #define IRQSTATUSBITS \ "\020\1TX0_EMPTY\2TX0_UNDERFLOW\3RX0_FULL\4RX0_OVERFLOW" \ "\5TX1_EMPTY\6TX1_UNDERFLOW\7RX1_FULL\11TX2_EMPTY" \ "\12TX1_UNDERFLOW\13RX2_FULL\15TX3_EMPTY\16TX3_UNDERFLOW" \ "\17RX3_FULL\22EOW" #define CONFBITS \ "\020\1PHA\2POL\7EPOL\17DMAW\20DMAR\21DPE0\22DPE1\23IS" \ "\24TURBO\25FORCE\30SBE\31SBPOL\34FFEW\35FFER\36CLKG" #define STATBITS \ "\020\1RXS\2TXS\3EOT\4TXFFE\5TXFFF\6RXFFE\7RXFFFF" #define MODULCTRLBITS \ "\020\1SINGLE\2NOSPIEN\3SLAVE\4SYST\10MOA\11FDAA" #define CTRLBITS \ "\020\1ENABLED" static void ti_spi_printr(device_t dev) { int clk, conf, ctrl, div, i, j, wl; struct ti_spi_softc *sc; uint32_t reg; sc = device_get_softc(dev); reg = TI_SPI_READ(sc, MCSPI_SYSCONFIG); device_printf(dev, "SYSCONFIG: %#x\n", reg); reg = TI_SPI_READ(sc, MCSPI_SYSSTATUS); device_printf(dev, "SYSSTATUS: %#x\n", reg); reg = TI_SPI_READ(sc, MCSPI_IRQSTATUS); device_printf(dev, "IRQSTATUS: 0x%b\n", reg, IRQSTATUSBITS); reg = TI_SPI_READ(sc, MCSPI_IRQENABLE); device_printf(dev, "IRQENABLE: 0x%b\n", reg, IRQSTATUSBITS); reg = TI_SPI_READ(sc, MCSPI_MODULCTRL); device_printf(dev, "MODULCTRL: 0x%b\n", reg, MODULCTRLBITS); for (i = 0; i < sc->sc_numcs; i++) { ctrl = TI_SPI_READ(sc, MCSPI_CTRL_CH(i)); conf = TI_SPI_READ(sc, MCSPI_CONF_CH(i)); device_printf(dev, "CH%dCONF: 0x%b\n", i, conf, CONFBITS); if (conf & MCSPI_CONF_CLKG) { div = (conf >> MCSPI_CONF_CLK_SHIFT) & MCSPI_CONF_CLK_MSK; div |= ((ctrl >> MCSPI_CTRL_EXTCLK_SHIFT) & MCSPI_CTRL_EXTCLK_MSK) << 4; } else { div = 1; j = (conf >> MCSPI_CONF_CLK_SHIFT) & MCSPI_CONF_CLK_MSK; while (j-- > 0) div <<= 1; } clk = TI_SPI_GCLK / div; wl = ((conf >> MCSPI_CONF_WL_SHIFT) & MCSPI_CONF_WL_MSK) + 1; device_printf(dev, "wordlen: %-2d clock: %d\n", wl, clk); reg = TI_SPI_READ(sc, MCSPI_STAT_CH(i)); device_printf(dev, "CH%dSTAT: 0x%b\n", i, reg, STATBITS); device_printf(dev, "CH%dCTRL: 0x%b\n", i, ctrl, CTRLBITS); } reg = TI_SPI_READ(sc, MCSPI_XFERLEVEL); device_printf(dev, "XFERLEVEL: %#x\n", reg); } #endif static void ti_spi_set_clock(struct ti_spi_softc *sc, int ch, int freq) { uint32_t clkdiv, conf, div, extclk, reg; clkdiv = TI_SPI_GCLK / freq; if (clkdiv > MCSPI_EXTCLK_MSK) { extclk = 0; clkdiv = 0; div = 1; while (TI_SPI_GCLK / div > freq && clkdiv <= 0xf) { clkdiv++; div <<= 1; } conf = clkdiv << MCSPI_CONF_CLK_SHIFT; } else { extclk = clkdiv >> 4; clkdiv &= MCSPI_CONF_CLK_MSK; conf = MCSPI_CONF_CLKG | clkdiv << MCSPI_CONF_CLK_SHIFT; } reg = TI_SPI_READ(sc, MCSPI_CTRL_CH(ch)); reg &= 
~(MCSPI_CTRL_EXTCLK_MSK << MCSPI_CTRL_EXTCLK_SHIFT); reg |= extclk << MCSPI_CTRL_EXTCLK_SHIFT; TI_SPI_WRITE(sc, MCSPI_CTRL_CH(ch), reg); reg = TI_SPI_READ(sc, MCSPI_CONF_CH(ch)); reg &= ~(MCSPI_CONF_CLKG | MCSPI_CONF_CLK_MSK << MCSPI_CONF_CLK_SHIFT); TI_SPI_WRITE(sc, MCSPI_CONF_CH(ch), reg | conf); } static int ti_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,omap4-mcspi")) return (ENXIO); device_set_desc(dev, "TI McSPI controller"); return (BUS_PROBE_DEFAULT); } static int ti_spi_attach(device_t dev) { int err, i, rid, timeout; struct ti_spi_softc *sc; uint32_t rev; sc = device_get_softc(dev); sc->sc_dev = dev; /* Activate the McSPI module. */ err = ti_sysc_clock_enable(device_get_parent(dev)); if (err) { device_printf(dev, "Error: failed to activate source clock\n"); return (err); } /* Get the number of available channels. */ if ((OF_getencprop(ofw_bus_get_node(dev), "ti,spi-num-cs", &sc->sc_numcs, sizeof(sc->sc_numcs))) <= 0) { sc->sc_numcs = 2; } rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } /* Hook up our interrupt handler. */ if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, ti_spi_intr, sc, &sc->sc_intrhand)) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot setup the interrupt handler\n"); return (ENXIO); } mtx_init(&sc->sc_mtx, "ti_spi", NULL, MTX_DEF); /* Issue a softreset to the controller */ TI_SPI_WRITE(sc, MCSPI_SYSCONFIG, MCSPI_SYSCONFIG_SOFTRESET); timeout = 1000; while (!(TI_SPI_READ(sc, MCSPI_SYSSTATUS) & MCSPI_SYSSTATUS_RESETDONE)) { if (--timeout == 0) { device_printf(dev, "Error: Controller reset operation timed out\n"); ti_spi_detach(dev); return (ENXIO); } DELAY(100); } /* Print the McSPI module revision. */ rev = TI_SPI_READ(sc, ti_sysc_get_rev_address_offset_host(device_get_parent(dev))); device_printf(dev, "scheme: %#x func: %#x rtl: %d rev: %d.%d custom rev: %d\n", (rev >> MCSPI_REVISION_SCHEME_SHIFT) & MCSPI_REVISION_SCHEME_MSK, (rev >> MCSPI_REVISION_FUNC_SHIFT) & MCSPI_REVISION_FUNC_MSK, (rev >> MCSPI_REVISION_RTL_SHIFT) & MCSPI_REVISION_RTL_MSK, (rev >> MCSPI_REVISION_MAJOR_SHIFT) & MCSPI_REVISION_MAJOR_MSK, (rev >> MCSPI_REVISION_MINOR_SHIFT) & MCSPI_REVISION_MINOR_MSK, (rev >> MCSPI_REVISION_CUSTOM_SHIFT) & MCSPI_REVISION_CUSTOM_MSK); /* Set Master mode, single channel. */ TI_SPI_WRITE(sc, MCSPI_MODULCTRL, MCSPI_MODULCTRL_SINGLE); /* Clear pending interrupts and disable interrupts. */ TI_SPI_WRITE(sc, MCSPI_IRQENABLE, 0x0); TI_SPI_WRITE(sc, MCSPI_IRQSTATUS, 0xffff); for (i = 0; i < sc->sc_numcs; i++) { /* * Default to SPI mode 0, CS active low, 8 bits word length and * 500kHz clock. */ TI_SPI_WRITE(sc, MCSPI_CONF_CH(i), MCSPI_CONF_DPE0 | MCSPI_CONF_EPOL | (8 - 1) << MCSPI_CONF_WL_SHIFT); /* Set initial clock - 500kHz. 
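 * Worked example of the divider selection in ti_spi_set_clock(),
 * assuming the usual 48 MHz McSPI functional clock behind
 * TI_SPI_GCLK: 48 MHz / 500 kHz = 96, which fits the one-clock
 * granularity (CLKG) path, so EXTCLK = 96 >> 4 = 6 and the CONF
 * clock field holds 96 & 0xf = 0, i.e. a programmed ratio of 96.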
*/ ti_spi_set_clock(sc, i, 500000); } #ifdef TI_SPI_DEBUG ti_spi_printr(dev); #endif device_add_child(dev, "spibus", DEVICE_UNIT_ANY); bus_attach_children(dev); return (0); } static int ti_spi_detach(device_t dev) { struct ti_spi_softc *sc; + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); sc = device_get_softc(dev); /* Clear pending interrupts and disable interrupts. */ TI_SPI_WRITE(sc, MCSPI_IRQENABLE, 0); TI_SPI_WRITE(sc, MCSPI_IRQSTATUS, 0xffff); /* Reset controller. */ TI_SPI_WRITE(sc, MCSPI_SYSCONFIG, MCSPI_SYSCONFIG_SOFTRESET); - bus_generic_detach(dev); - mtx_destroy(&sc->sc_mtx); if (sc->sc_intrhand) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); return (0); } static int ti_spi_fill_fifo(struct ti_spi_softc *sc) { int bytes, timeout; struct spi_command *cmd; uint32_t written; uint8_t *data; cmd = sc->sc_cmd; bytes = min(sc->sc_len - sc->sc_written, sc->sc_fifolvl); while (bytes-- > 0) { data = (uint8_t *)cmd->tx_cmd; written = sc->sc_written++; if (written >= cmd->tx_cmd_sz) { data = (uint8_t *)cmd->tx_data; written -= cmd->tx_cmd_sz; } if (sc->sc_fifolvl == 1) { /* FIFO disabled. */ timeout = 1000; while (--timeout > 0 && (TI_SPI_READ(sc, MCSPI_STAT_CH(sc->sc_cs)) & MCSPI_STAT_TXS) == 0) { DELAY(100); } if (timeout == 0) return (-1); } TI_SPI_WRITE(sc, MCSPI_TX_CH(sc->sc_cs), data[written]); } return (0); } static int ti_spi_drain_fifo(struct ti_spi_softc *sc) { int bytes, timeout; struct spi_command *cmd; uint32_t read; uint8_t *data; cmd = sc->sc_cmd; bytes = min(sc->sc_len - sc->sc_read, sc->sc_fifolvl); while (bytes-- > 0) { data = (uint8_t *)cmd->rx_cmd; read = sc->sc_read++; if (read >= cmd->rx_cmd_sz) { data = (uint8_t *)cmd->rx_data; read -= cmd->rx_cmd_sz; } if (sc->sc_fifolvl == 1) { /* FIFO disabled. */ timeout = 1000; while (--timeout > 0 && (TI_SPI_READ(sc, MCSPI_STAT_CH(sc->sc_cs)) & MCSPI_STAT_RXS) == 0) { DELAY(100); } if (timeout == 0) return (-1); } data[read] = TI_SPI_READ(sc, MCSPI_RX_CH(sc->sc_cs)); } return (0); } static void ti_spi_intr(void *arg) { struct ti_spi_softc *sc; uint32_t status; sc = (struct ti_spi_softc *)arg; TI_SPI_LOCK(sc); status = TI_SPI_READ(sc, MCSPI_IRQSTATUS); /* * No new TX_empty or RX_full event will be asserted while the CPU has * not performed the number of writes or reads defined by * MCSPI_XFERLEVEL[AEL] and MCSPI_XFERLEVEL[AFL]. It is the * responsibility of the CPU to perform the right number of writes * and reads. */ if (status & MCSPI_IRQ_TX0_EMPTY) ti_spi_fill_fifo(sc); if (status & MCSPI_IRQ_RX0_FULL) ti_spi_drain_fifo(sc); /* Clear interrupt status. */ TI_SPI_WRITE(sc, MCSPI_IRQSTATUS, status); /* Check for end of transfer.
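 *
 * SPI is full duplex - every byte clocked out shifts one byte in - so
 * a transfer is only complete when both counters reach sc_len
 * (tx_cmd_sz + tx_data_sz), not merely when the TX side is done:
 *
 *	done = (sc_written == sc_len) && (sc_read == sc_len);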
*/ if (sc->sc_written == sc->sc_len && sc->sc_read == sc->sc_len) { sc->sc_flags |= TI_SPI_DONE; wakeup(sc->sc_dev); } TI_SPI_UNLOCK(sc); } static int ti_spi_pio_transfer(struct ti_spi_softc *sc) { while (sc->sc_len - sc->sc_written > 0) { if (ti_spi_fill_fifo(sc) == -1) return (EIO); if (ti_spi_drain_fifo(sc) == -1) return (EIO); } return (0); } static int ti_spi_gcd(int a, int b) { int m; while ((m = a % b) != 0) { a = b; b = m; } return (b); } static int ti_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { int err; struct ti_spi_softc *sc; uint32_t clockhz, cs, mode, reg; sc = device_get_softc(dev); KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz, ("TX/RX command sizes should be equal")); KASSERT(cmd->tx_data_sz == cmd->rx_data_sz, ("TX/RX data sizes should be equal")); /* Get the proper chip select for this child. */ spibus_get_cs(child, &cs); spibus_get_clock(child, &clockhz); spibus_get_mode(child, &mode); cs &= ~SPIBUS_CS_HIGH; if (cs > sc->sc_numcs) { device_printf(dev, "Invalid chip select %d requested by %s\n", cs, device_get_nameunit(child)); return (EINVAL); } if (mode > 3) { device_printf(dev, "Invalid mode %d requested by %s\n", mode, device_get_nameunit(child)); return (EINVAL); } TI_SPI_LOCK(sc); /* If the controller is in use wait until it is available. */ while (sc->sc_flags & TI_SPI_BUSY) mtx_sleep(dev, &sc->sc_mtx, 0, "ti_spi", 0); /* Now we have control over SPI controller. */ sc->sc_flags = TI_SPI_BUSY; /* Save the SPI command data. */ sc->sc_cs = cs; sc->sc_cmd = cmd; sc->sc_read = 0; sc->sc_written = 0; sc->sc_len = cmd->tx_cmd_sz + cmd->tx_data_sz; sc->sc_fifolvl = ti_spi_gcd(sc->sc_len, TI_SPI_FIFOSZ); if (sc->sc_fifolvl < 2 || sc->sc_len > 0xffff) sc->sc_fifolvl = 1; /* FIFO disabled. */ /* Disable FIFO for now. */ sc->sc_fifolvl = 1; /* Set the bus frequency. */ ti_spi_set_clock(sc, sc->sc_cs, clockhz); /* Disable the FIFO. */ TI_SPI_WRITE(sc, MCSPI_XFERLEVEL, 0); /* 8 bits word, d0 miso, d1 mosi, mode 0 and CS active low. */ reg = TI_SPI_READ(sc, MCSPI_CONF_CH(sc->sc_cs)); reg &= ~(MCSPI_CONF_FFER | MCSPI_CONF_FFEW | MCSPI_CONF_SBPOL | MCSPI_CONF_SBE | MCSPI_CONF_TURBO | MCSPI_CONF_IS | MCSPI_CONF_DPE1 | MCSPI_CONF_DPE0 | MCSPI_CONF_DMAR | MCSPI_CONF_DMAW | MCSPI_CONF_EPOL); reg |= MCSPI_CONF_DPE0 | MCSPI_CONF_EPOL | MCSPI_CONF_WL8BITS; reg |= mode; /* POL and PHA are the low bits, we can just OR-in mode */ TI_SPI_WRITE(sc, MCSPI_CONF_CH(sc->sc_cs), reg); #if 0 /* Enable channel interrupts. */ reg = TI_SPI_READ(sc, MCSPI_IRQENABLE); reg |= 0xf; TI_SPI_WRITE(sc, MCSPI_IRQENABLE, reg); #endif /* Start the transfer. */ reg = TI_SPI_READ(sc, MCSPI_CTRL_CH(sc->sc_cs)); TI_SPI_WRITE(sc, MCSPI_CTRL_CH(sc->sc_cs), reg | MCSPI_CTRL_ENABLE); /* Force CS on. */ reg = TI_SPI_READ(sc, MCSPI_CONF_CH(sc->sc_cs)); TI_SPI_WRITE(sc, MCSPI_CONF_CH(sc->sc_cs), reg |= MCSPI_CONF_FORCE); err = 0; if (sc->sc_fifolvl == 1) err = ti_spi_pio_transfer(sc); /* Force CS off. */ reg = TI_SPI_READ(sc, MCSPI_CONF_CH(sc->sc_cs)); reg &= ~MCSPI_CONF_FORCE; TI_SPI_WRITE(sc, MCSPI_CONF_CH(sc->sc_cs), reg); /* Disable IRQs. */ reg = TI_SPI_READ(sc, MCSPI_IRQENABLE); reg &= ~0xf; TI_SPI_WRITE(sc, MCSPI_IRQENABLE, reg); TI_SPI_WRITE(sc, MCSPI_IRQSTATUS, 0xf); /* Disable the SPI channel. */ reg = TI_SPI_READ(sc, MCSPI_CTRL_CH(sc->sc_cs)); reg &= ~MCSPI_CTRL_ENABLE; TI_SPI_WRITE(sc, MCSPI_CTRL_CH(sc->sc_cs), reg); /* Disable FIFO. 
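 *
 * (The FIFO never actually engaged in this path: sc_fifolvl starts as
 * gcd(sc_len, TI_SPI_FIFOSZ) so that each FIFO burst divides the
 * transfer evenly - e.g. with a 64-byte FIFO, gcd(96, 64) = 32, while
 * gcd(13, 64) = 1 forces per-byte PIO - and the driver then forces
 * the PIO fallback unconditionally above, so clearing FFER/FFEW here
 * is purely defensive.)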
*/ reg = TI_SPI_READ(sc, MCSPI_CONF_CH(sc->sc_cs)); reg &= ~(MCSPI_CONF_FFER | MCSPI_CONF_FFEW); TI_SPI_WRITE(sc, MCSPI_CONF_CH(sc->sc_cs), reg); /* Release the controller and wakeup the next thread waiting for it. */ sc->sc_flags = 0; wakeup_one(dev); TI_SPI_UNLOCK(sc); return (err); } static phandle_t ti_spi_get_node(device_t bus, device_t dev) { /* Share controller node with spibus. */ return (ofw_bus_get_node(bus)); } static device_method_t ti_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_spi_probe), DEVMETHOD(device_attach, ti_spi_attach), DEVMETHOD(device_detach, ti_spi_detach), /* SPI interface */ DEVMETHOD(spibus_transfer, ti_spi_transfer), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, ti_spi_get_node), DEVMETHOD_END }; static driver_t ti_spi_driver = { "spi", ti_spi_methods, sizeof(struct ti_spi_softc), }; DRIVER_MODULE(ti_spi, simplebus, ti_spi_driver, 0, 0); MODULE_DEPEND(ti_spi, ti_sysc, 1, 1, 1); diff --git a/sys/arm/xilinx/zy7_devcfg.c b/sys/arm/xilinx/zy7_devcfg.c index 1cb7ff9168cf..6df0fba1f56a 100644 --- a/sys/arm/xilinx/zy7_devcfg.c +++ b/sys/arm/xilinx/zy7_devcfg.c @@ -1,843 +1,845 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Thomas Skibo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Zynq-7000 Devcfg driver. This allows programming the PL (FPGA) section * of Zynq. * * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. * (v1.4) November 16, 2012. Xilinx doc UG585. PL Configuration is * covered in section 6.4.5. 
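 *
 * In practice the PL is configured from userland by streaming a
 * bitstream into the character device this driver provides and then
 * checking the DONE signal, e.g. (file name illustrative; the
 * bitstream must already be in the raw, header-less format PCAP
 * expects):
 *
 *	# cat design.bit.bin > /dev/devcfg
 *	# sysctl hw.fpga.pl_done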
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct zy7_devcfg_softc { device_t dev; struct mtx sc_mtx; struct resource *mem_res; struct resource *irq_res; struct cdev *sc_ctl_dev; void *intrhandle; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; int is_open; struct sysctl_ctx_list sysctl_tree; struct sysctl_oid *sysctl_tree_top; }; static struct zy7_devcfg_softc *zy7_devcfg_softc_p; #define FCLK_NUM 4 struct zy7_fclk_config { int source; int frequency; int actual_frequency; }; static struct zy7_fclk_config fclk_configs[FCLK_NUM]; #define DEVCFG_SC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define DEVCFG_SC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define DEVCFG_SC_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \ "zy7_devcfg", MTX_DEF) #define DEVCFG_SC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx); #define DEVCFG_SC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED); #define RD4(sc, off) (bus_read_4((sc)->mem_res, (off))) #define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val))) SYSCTL_NODE(_hw, OID_AUTO, fpga, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Xilinx Zynq-7000 PL (FPGA) section"); static int zy7_devcfg_sysctl_pl_done(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_hw_fpga, OID_AUTO, pl_done, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0, zy7_devcfg_sysctl_pl_done, "I", "PL section config DONE signal"); static int zy7_en_level_shifters = 1; SYSCTL_INT(_hw_fpga, OID_AUTO, en_level_shifters, CTLFLAG_RW, &zy7_en_level_shifters, 0, "Enable PS-PL level shifters after device config"); static int zy7_ps_vers = 0; SYSCTL_INT(_hw, OID_AUTO, ps_vers, CTLFLAG_RD, &zy7_ps_vers, 0, "Zynq-7000 PS version"); static int zy7_devcfg_fclk_sysctl_level_shifters(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_hw_fpga, OID_AUTO, level_shifters, CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_NEEDGIANT, NULL, 0, zy7_devcfg_fclk_sysctl_level_shifters, "I", "Enable/disable level shifters"); /* cdev entry points. */ static int zy7_devcfg_open(struct cdev *, int, int, struct thread *); static int zy7_devcfg_write(struct cdev *, struct uio *, int); static int zy7_devcfg_close(struct cdev *, int, int, struct thread *); struct cdevsw zy7_devcfg_cdevsw = { .d_version = D_VERSION, .d_open = zy7_devcfg_open, .d_write = zy7_devcfg_write, .d_close = zy7_devcfg_close, .d_name = "devcfg", }; /* Devcfg block registers. 
*/ #define ZY7_DEVCFG_CTRL 0x0000 #define ZY7_DEVCFG_CTRL_FORCE_RST (1<<31) #define ZY7_DEVCFG_CTRL_PCFG_PROG_B (1<<30) #define ZY7_DEVCFG_CTRL_PCFG_POR_CNT_4K (1<<29) #define ZY7_DEVCFG_CTRL_PCAP_PR (1<<27) #define ZY7_DEVCFG_CTRL_PCAP_MODE (1<<26) #define ZY7_DEVCFG_CTRL_QTR_PCAP_RATE_EN (1<<25) #define ZY7_DEVCFG_CTRL_MULTIBOOT_EN (1<<24) #define ZY7_DEVCFG_CTRL_JTAG_CHAIN_DIS (1<<23) #define ZY7_DEVCFG_CTRL_USER_MODE (1<<15) #define ZY7_DEVCFG_CTRL_RESVD_WR11 (3<<13) /* always write 11 */ #define ZY7_DEVCFG_CTRL_PCFG_AES_FUSE (1<<12) #define ZY7_DEVCFG_CTRL_PCFG_AES_EN_MASK (7<<9) /* all 1's or 0's */ #define ZY7_DEVCFG_CTRL_SEU_EN (1<<8) #define ZY7_DEVCFG_CTRL_SEC_EN (1<<7) #define ZY7_DEVCFG_CTRL_SPNIDEN (1<<6) #define ZY7_DEVCFG_CTRL_SPIDEN (1<<5) #define ZY7_DEVCFG_CTRL_NIDEN (1<<4) #define ZY7_DEVCFG_CTRL_DBGEN (1<<3) #define ZY7_DEVCFG_CTRL_DAP_EN_MASK (7<<0) /* all 1's to enable */ #define ZY7_DEVCFG_LOCK 0x004 #define ZY7_DEVCFG_LOCK_AES_FUSE_LOCK (1<<4) #define ZY7_DEVCFG_LOCK_AES_EN (1<<3) #define ZY7_DEVCFG_LOCK_SEU_LOCK (1<<2) #define ZY7_DEVCFG_LOCK_SEC_LOCK (1<<1) #define ZY7_DEVCFG_LOCK_DBG_LOCK (1<<0) #define ZY7_DEVCFG_CFG 0x008 #define ZY7_DEVCFG_CFG_RFIFO_TH_MASK (3<<10) #define ZY7_DEVCFG_CFG_WFIFO_TH_MASK (3<<8) #define ZY7_DEVCFG_CFG_RCLK_EDGE (1<<7) #define ZY7_DEVCFG_CFG_WCLK_EDGE (1<<6) #define ZY7_DEVCFG_CFG_DIS_SRC_INC (1<<5) #define ZY7_DEVCFG_CFG_DIS_DST_INC (1<<4) #define ZY7_DEVCFG_INT_STATUS 0x00C #define ZY7_DEVCFG_INT_MASK 0x010 #define ZY7_DEVCFG_INT_PSS_GTS_USR_B (1<<31) #define ZY7_DEVCFG_INT_PSS_FST_CFG_B (1<<30) #define ZY7_DEVCFG_INT_PSS_GPWRDWN_B (1<<29) #define ZY7_DEVCFG_INT_PSS_GTS_CFG_B (1<<28) #define ZY7_DEVCFG_INT_CFG_RESET_B (1<<27) #define ZY7_DEVCFG_INT_AXI_WTO (1<<23) /* axi write timeout */ #define ZY7_DEVCFG_INT_AXI_WERR (1<<22) /* axi write err */ #define ZY7_DEVCFG_INT_AXI_RTO (1<<21) /* axi read timeout */ #define ZY7_DEVCFG_INT_AXI_RERR (1<<20) /* axi read err */ #define ZY7_DEVCFG_INT_RX_FIFO_OV (1<<18) /* rx fifo overflow */ #define ZY7_DEVCFG_INT_WR_FIFO_LVL (1<<17) /* wr fifo < level */ #define ZY7_DEVCFG_INT_RD_FIFO_LVL (1<<16) /* rd fifo >= level */ #define ZY7_DEVCFG_INT_DMA_CMD_ERR (1<<15) #define ZY7_DEVCFG_INT_DMA_Q_OV (1<<14) #define ZY7_DEVCFG_INT_DMA_DONE (1<<13) #define ZY7_DEVCFG_INT_DMA_PCAP_DONE (1<<12) #define ZY7_DEVCFG_INT_P2D_LEN_ERR (1<<11) #define ZY7_DEVCFG_INT_PCFG_HMAC_ERR (1<<6) #define ZY7_DEVCFG_INT_PCFG_SEU_ERR (1<<5) #define ZY7_DEVCFG_INT_PCFG_POR_B (1<<4) #define ZY7_DEVCFG_INT_PCFG_CFG_RST (1<<3) #define ZY7_DEVCFG_INT_PCFG_DONE (1<<2) #define ZY7_DEVCFG_INT_PCFG_INIT_PE (1<<1) #define ZY7_DEVCFG_INT_PCFG_INIT_NE (1<<0) #define ZY7_DEVCFG_INT_ERRORS 0x00f0f860 #define ZY7_DEVCFG_INT_ALL 0xf8f7f87f #define ZY7_DEVCFG_STATUS 0x014 #define ZY7_DEVCFG_STATUS_DMA_CMD_Q_F (1<<31) /* cmd queue full */ #define ZY7_DEVCFG_STATUS_DMA_CMD_Q_E (1<<30) /* cmd queue empty */ #define ZY7_DEVCFG_STATUS_DONE_COUNT_MASK (3<<28) #define ZY7_DEVCFG_STATUS_DONE_COUNT_SHIFT 28 #define ZY7_DEVCFG_STATUS_RX_FIFO_LVL_MASK (0x1f<<20) #define ZY7_DEVCFG_STATUS_RX_FIFO_LVL_SHIFT 20 #define ZY7_DEVCFG_STATUS_TX_FIFO_LVL_MASK (0x7f<<12) #define ZY7_DEVCFG_STATUS_TX_FIFO_LVL_SHIFT 12 #define ZY7_DEVCFG_STATUS_PSS_GTS_USR_B (1<<11) #define ZY7_DEVCFG_STATUS_PSS_FST_CFG_B (1<<10) #define ZY7_DEVCFG_STATUS_PSS_GPWRDWN_B (1<<9) #define ZY7_DEVCFG_STATUS_PSS_GTS_CFG_B (1<<8) #define ZY7_DEVCFG_STATUS_ILL_APB_ACCE (1<<6) #define ZY7_DEVCFG_STATUS_PSS_CFG_RESET_B (1<<5) #define ZY7_DEVCFG_STATUS_PCFG_INIT (1<<4) #define 
ZY7_DEVCFG_STATUS_EFUSE_BBRAM_KEY_DIS (1<<3) #define ZY7_DEVCFG_STATUS_EFUSE_SEC_EN (1<<2) #define ZY7_DEVCFG_STATUS_EFUSE_JTAG_DIS (1<<1) #define ZY7_DEVCFG_DMA_SRC_ADDR 0x018 #define ZY7_DEVCFG_DMA_DST_ADDR 0x01c #define ZY7_DEVCFG_DMA_ADDR_WAIT_PCAP 1 #define ZY7_DEVCFG_DMA_ADDR_ILLEGAL 0xffffffff #define ZY7_DEVCFG_DMA_SRC_LEN 0x020 /* in 4-byte words. */ #define ZY7_DEVCFG_DMA_SRC_LEN_MAX 0x7ffffff #define ZY7_DEVCFG_DMA_DST_LEN 0x024 #define ZY7_DEVCFG_ROM_SHADOW 0x028 #define ZY7_DEVCFG_MULTIBOOT_ADDR 0x02c #define ZY7_DEVCFG_SW_ID 0x030 #define ZY7_DEVCFG_UNLOCK 0x034 #define ZY7_DEVCFG_UNLOCK_MAGIC 0x757bdf0d #define ZY7_DEVCFG_MCTRL 0x080 #define ZY7_DEVCFG_MCTRL_PS_VERS_MASK (0xf<<28) #define ZY7_DEVCFG_MCTRL_PS_VERS_SHIFT 28 #define ZY7_DEVCFG_MCTRL_PCFG_POR_B (1<<8) #define ZY7_DEVCFG_MCTRL_INT_PCAP_LPBK (1<<4) #define ZY7_DEVCFG_XADCIF_CFG 0x100 #define ZY7_DEVCFG_XADCIF_INT_STAT 0x104 #define ZY7_DEVCFG_XADCIF_INT_MASK 0x108 #define ZY7_DEVCFG_XADCIF_MSTS 0x10c #define ZY7_DEVCFG_XADCIF_CMD_FIFO 0x110 #define ZY7_DEVCFG_XADCIF_RD_FIFO 0x114 #define ZY7_DEVCFG_XADCIF_MCTL 0x118 static int zy7_devcfg_fclk_sysctl_source(SYSCTL_HANDLER_ARGS) { char buf[4]; struct zy7_fclk_config *cfg; int unit; int error; cfg = arg1; unit = arg2; switch (cfg->source) { case ZY7_PL_FCLK_SRC_IO: case ZY7_PL_FCLK_SRC_IO_ALT: strncpy(buf, "IO", sizeof(buf)); break; case ZY7_PL_FCLK_SRC_DDR: strncpy(buf, "DDR", sizeof(buf)); break; case ZY7_PL_FCLK_SRC_ARM: strncpy(buf, "ARM", sizeof(buf)); break; default: strncpy(buf, "???", sizeof(buf)); break; } error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (strcasecmp(buf, "io") == 0) cfg->source = ZY7_PL_FCLK_SRC_IO; else if (strcasecmp(buf, "ddr") == 0) cfg->source = ZY7_PL_FCLK_SRC_DDR; else if (strcasecmp(buf, "arm") == 0) cfg->source = ZY7_PL_FCLK_SRC_ARM; else return (EINVAL); zy7_pl_fclk_set_source(unit, cfg->source); if (cfg->frequency > 0) cfg->actual_frequency = zy7_pl_fclk_get_freq(unit); return (0); } static int zy7_devcfg_fclk_sysctl_freq(SYSCTL_HANDLER_ARGS) { struct zy7_fclk_config *cfg; int unit; int error; int freq; int new_actual_freq; cfg = arg1; unit = arg2; freq = cfg->frequency; error = sysctl_handle_int(oidp, &freq, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (freq > 0) { new_actual_freq = zy7_pl_fclk_set_freq(unit, freq); if (new_actual_freq < 0) return (EINVAL); if (!zy7_pl_fclk_enabled(unit)) zy7_pl_fclk_enable(unit); } else { zy7_pl_fclk_disable(unit); new_actual_freq = 0; } cfg->frequency = freq; cfg->actual_frequency = new_actual_freq; return (0); } static int zy7_devcfg_fclk_sysctl_level_shifters(SYSCTL_HANDLER_ARGS) { int error, enabled; enabled = zy7_pl_level_shifters_enabled(); error = sysctl_handle_int(oidp, &enabled, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (enabled) zy7_pl_level_shifters_enable(); else zy7_pl_level_shifters_disable(); return (0); } static int zy7_devcfg_init_fclk_sysctl(struct zy7_devcfg_softc *sc) { struct sysctl_oid *fclk_node; char fclk_num[4]; int i; sysctl_ctx_init(&sc->sysctl_tree); sc->sysctl_tree_top = SYSCTL_ADD_NODE(&sc->sysctl_tree, SYSCTL_STATIC_CHILDREN(_hw_fpga), OID_AUTO, "fclk", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); if (sc->sysctl_tree_top == NULL) { sysctl_ctx_free(&sc->sysctl_tree); return (-1); } for (i = 0; i < FCLK_NUM; i++) { snprintf(fclk_num, sizeof(fclk_num), "%d", i); fclk_node = SYSCTL_ADD_NODE(&sc->sysctl_tree, SYSCTL_CHILDREN(sc->sysctl_tree_top), OID_AUTO, 
fclk_num, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_INT(&sc->sysctl_tree, SYSCTL_CHILDREN(fclk_node), OID_AUTO, "actual_freq", CTLFLAG_RD, &fclk_configs[i].actual_frequency, i, "Actual frequency"); SYSCTL_ADD_PROC(&sc->sysctl_tree, SYSCTL_CHILDREN(fclk_node), OID_AUTO, "freq", CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_NEEDGIANT, &fclk_configs[i], i, zy7_devcfg_fclk_sysctl_freq, "I", "Configured frequency"); SYSCTL_ADD_PROC(&sc->sysctl_tree, SYSCTL_CHILDREN(fclk_node), OID_AUTO, "source", CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_NEEDGIANT, &fclk_configs[i], i, zy7_devcfg_fclk_sysctl_source, "A", "Clock source"); } return (0); } /* Enable programming the PL through PCAP. */ static void zy7_devcfg_init_hw(struct zy7_devcfg_softc *sc) { DEVCFG_SC_ASSERT_LOCKED(sc); /* Set devcfg control register. */ WR4(sc, ZY7_DEVCFG_CTRL, ZY7_DEVCFG_CTRL_PCFG_PROG_B | ZY7_DEVCFG_CTRL_PCAP_PR | ZY7_DEVCFG_CTRL_PCAP_MODE | ZY7_DEVCFG_CTRL_USER_MODE | ZY7_DEVCFG_CTRL_RESVD_WR11 | ZY7_DEVCFG_CTRL_SPNIDEN | ZY7_DEVCFG_CTRL_SPIDEN | ZY7_DEVCFG_CTRL_NIDEN | ZY7_DEVCFG_CTRL_DBGEN | ZY7_DEVCFG_CTRL_DAP_EN_MASK); /* Turn off internal PCAP loopback. */ WR4(sc, ZY7_DEVCFG_MCTRL, RD4(sc, ZY7_DEVCFG_MCTRL) & ~ZY7_DEVCFG_MCTRL_INT_PCAP_LPBK); } /* Clear previous configuration of the PL by asserting PROG_B. */ static int zy7_devcfg_reset_pl(struct zy7_devcfg_softc *sc) { uint32_t devcfg_ctl; int tries, err; DEVCFG_SC_ASSERT_LOCKED(sc); devcfg_ctl = RD4(sc, ZY7_DEVCFG_CTRL); /* Clear sticky bits and set up INIT signal positive edge interrupt. */ WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL); WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_PCFG_INIT_PE); /* Deassert PROG_B (active low). */ devcfg_ctl |= ZY7_DEVCFG_CTRL_PCFG_PROG_B; WR4(sc, ZY7_DEVCFG_CTRL, devcfg_ctl); /* * Wait for INIT to assert. If it is already asserted, we may not get * an edge interrupt so cancel it and continue. */ if ((RD4(sc, ZY7_DEVCFG_STATUS) & ZY7_DEVCFG_STATUS_PCFG_INIT) != 0) { /* Already asserted. Cancel interrupt. */ WR4(sc, ZY7_DEVCFG_INT_MASK, ~0); } else { /* Wait for positive edge interrupt. */ err = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "zy7i1", hz); if (err != 0) return (err); } /* Reassert PROG_B (active low). */ devcfg_ctl &= ~ZY7_DEVCFG_CTRL_PCFG_PROG_B; WR4(sc, ZY7_DEVCFG_CTRL, devcfg_ctl); /* Wait for INIT deasserted. This happens almost instantly. */ tries = 0; while ((RD4(sc, ZY7_DEVCFG_STATUS) & ZY7_DEVCFG_STATUS_PCFG_INIT) != 0) { if (++tries >= 100) return (EIO); DELAY(5); } /* Clear sticky bits and set up INIT positive edge interrupt. */ WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL); WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_PCFG_INIT_PE); /* Deassert PROG_B again. */ devcfg_ctl |= ZY7_DEVCFG_CTRL_PCFG_PROG_B; WR4(sc, ZY7_DEVCFG_CTRL, devcfg_ctl); /* * Wait for INIT asserted indicating FPGA internal initialization * is complete. */ err = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "zy7i2", hz); if (err != 0) return (err); /* Clear sticky DONE bit in interrupt status. */ WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL); return (0); } /* Callback function for bus_dmamap_load(). 
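 *
 * bus_dmamap_load() hands back the resolved physical segments through
 * this callback rather than a return value; with a one-segment tag
 * the idiom is a bus_addr_t out-parameter as the callback argument,
 * exactly as the write path below uses it:
 *
 *	bus_addr_t paddr;
 *	bus_dmamap_load(tag, map, buf, PAGE_SIZE, zy7_dma_cb2,
 *	    &paddr, 0);		// on success paddr = seg[0].ds_addr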
*/ static void zy7_dma_cb2(void *arg, bus_dma_segment_t *seg, int nsegs, int error) { if (!error && nsegs == 1) *(bus_addr_t *)arg = seg[0].ds_addr; } static int zy7_devcfg_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct zy7_devcfg_softc *sc = dev->si_drv1; int err; DEVCFG_SC_LOCK(sc); if (sc->is_open) { DEVCFG_SC_UNLOCK(sc); return (EBUSY); } sc->dma_map = NULL; err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 1, PAGE_SIZE, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->dma_tag); if (err) { DEVCFG_SC_UNLOCK(sc); return (err); } sc->is_open = 1; DEVCFG_SC_UNLOCK(sc); return (0); } static int zy7_devcfg_write(struct cdev *dev, struct uio *uio, int ioflag) { struct zy7_devcfg_softc *sc = dev->si_drv1; void *dma_mem; bus_addr_t dma_physaddr; int segsz, err; DEVCFG_SC_LOCK(sc); /* First write? Reset PL. */ if (uio->uio_offset == 0 && uio->uio_resid > 0) { zy7_devcfg_init_hw(sc); zy7_slcr_preload_pl(); err = zy7_devcfg_reset_pl(sc); if (err != 0) { DEVCFG_SC_UNLOCK(sc); return (err); } } /* Allocate dma memory and load. */ err = bus_dmamem_alloc(sc->dma_tag, &dma_mem, BUS_DMA_NOWAIT, &sc->dma_map); if (err != 0) { DEVCFG_SC_UNLOCK(sc); return (err); } err = bus_dmamap_load(sc->dma_tag, sc->dma_map, dma_mem, PAGE_SIZE, zy7_dma_cb2, &dma_physaddr, 0); if (err != 0) { bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map); DEVCFG_SC_UNLOCK(sc); return (err); } while (uio->uio_resid > 0) { /* If DONE signal has been set, we shouldn't write anymore. */ if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) & ZY7_DEVCFG_INT_PCFG_DONE) != 0) { err = EIO; break; } /* uiomove the data from user buffer to our dma map. */ segsz = MIN(PAGE_SIZE, uio->uio_resid); DEVCFG_SC_UNLOCK(sc); err = uiomove(dma_mem, segsz, uio); DEVCFG_SC_LOCK(sc); if (err != 0) break; /* Flush the cache to memory. */ bus_dmamap_sync(sc->dma_tag, sc->dma_map, BUS_DMASYNC_PREWRITE); /* Program devcfg's DMA engine. The ordering of these * register writes is critical. */ if (uio->uio_resid > segsz) WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR, (uint32_t) dma_physaddr); else WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR, (uint32_t) dma_physaddr | ZY7_DEVCFG_DMA_ADDR_WAIT_PCAP); WR4(sc, ZY7_DEVCFG_DMA_DST_ADDR, ZY7_DEVCFG_DMA_ADDR_ILLEGAL); WR4(sc, ZY7_DEVCFG_DMA_SRC_LEN, (segsz+3)/4); WR4(sc, ZY7_DEVCFG_DMA_DST_LEN, 0); /* Now clear done bit and set up DMA done interrupt. */ WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL); WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_DMA_DONE); /* Wait for DMA done interrupt. */ err = mtx_sleep(sc->dma_map, &sc->sc_mtx, PCATCH, "zy7dma", hz); if (err != 0) break; bus_dmamap_sync(sc->dma_tag, sc->dma_map, BUS_DMASYNC_POSTWRITE); /* Check DONE signal. */ if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) & ZY7_DEVCFG_INT_PCFG_DONE) != 0) zy7_slcr_postload_pl(zy7_en_level_shifters); } bus_dmamap_unload(sc->dma_tag, sc->dma_map); bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map); DEVCFG_SC_UNLOCK(sc); return (err); } static int zy7_devcfg_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct zy7_devcfg_softc *sc = dev->si_drv1; DEVCFG_SC_LOCK(sc); sc->is_open = 0; bus_dma_tag_destroy(sc->dma_tag); DEVCFG_SC_UNLOCK(sc); zy7_slcr_postload_pl(zy7_en_level_shifters); return (0); } static void zy7_devcfg_intr(void *arg) { struct zy7_devcfg_softc *sc = (struct zy7_devcfg_softc *)arg; uint32_t istatus, imask; DEVCFG_SC_LOCK(sc); istatus = RD4(sc, ZY7_DEVCFG_INT_STATUS); imask = ~RD4(sc, ZY7_DEVCFG_INT_MASK); /* Turn interrupt off. 
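 * All sources are masked before the status is examined; the sleeper
 * that gets woken re-arms only the events it still needs (see
 * zy7_devcfg_write() and zy7_devcfg_reset_pl()).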
*/ WR4(sc, ZY7_DEVCFG_INT_MASK, ~0); if ((istatus & imask) == 0) { DEVCFG_SC_UNLOCK(sc); return; } /* DMA done? */ if ((istatus & ZY7_DEVCFG_INT_DMA_DONE) != 0) wakeup(sc->dma_map); /* INIT_B positive edge? */ if ((istatus & ZY7_DEVCFG_INT_PCFG_INIT_PE) != 0) wakeup(sc); DEVCFG_SC_UNLOCK(sc); } /* zy7_devcfg_sysctl_pl_done() returns status of the PL_DONE signal. */ static int zy7_devcfg_sysctl_pl_done(SYSCTL_HANDLER_ARGS) { struct zy7_devcfg_softc *sc = zy7_devcfg_softc_p; int pl_done = 0; if (sc) { DEVCFG_SC_LOCK(sc); /* PCFG_DONE bit is sticky. Clear it before checking it. */ WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_PCFG_DONE); pl_done = ((RD4(sc, ZY7_DEVCFG_INT_STATUS) & ZY7_DEVCFG_INT_PCFG_DONE) != 0); DEVCFG_SC_UNLOCK(sc); } return (sysctl_handle_int(oidp, &pl_done, 0, req)); } static int zy7_devcfg_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "xlnx,zy7_devcfg")) return (ENXIO); device_set_desc(dev, "Zynq devcfg block"); return (0); } static int zy7_devcfg_detach(device_t dev); static int zy7_devcfg_attach(device_t dev) { struct zy7_devcfg_softc *sc = device_get_softc(dev); int i; int rid, err; /* Allow only one attach. */ if (zy7_devcfg_softc_p != NULL) return (ENXIO); sc->dev = dev; DEVCFG_SC_LOCK_INIT(sc); /* Get memory resource. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resources.\n"); zy7_devcfg_detach(dev); return (ENOMEM); } /* Allocate IRQ. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "cannot allocate IRQ\n"); zy7_devcfg_detach(dev); return (ENOMEM); } /* Activate the interrupt. */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, zy7_devcfg_intr, sc, &sc->intrhandle); if (err) { device_printf(dev, "cannot setup IRQ\n"); zy7_devcfg_detach(dev); return (err); } /* Create /dev/devcfg */ sc->sc_ctl_dev = make_dev(&zy7_devcfg_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "devcfg"); if (sc->sc_ctl_dev == NULL) { device_printf(dev, "failed to create /dev/devcfg\n"); zy7_devcfg_detach(dev); return (ENXIO); } sc->sc_ctl_dev->si_drv1 = sc; zy7_devcfg_softc_p = sc; /* Unlock devcfg registers. */ WR4(sc, ZY7_DEVCFG_UNLOCK, ZY7_DEVCFG_UNLOCK_MAGIC); /* Make sure interrupts are completely disabled. */ WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL); WR4(sc, ZY7_DEVCFG_INT_MASK, 0xffffffff); /* Get PS_VERS for SYSCTL. */ zy7_ps_vers = (RD4(sc, ZY7_DEVCFG_MCTRL) & ZY7_DEVCFG_MCTRL_PS_VERS_MASK) >> ZY7_DEVCFG_MCTRL_PS_VERS_SHIFT; for (i = 0; i < FCLK_NUM; i++) { fclk_configs[i].source = zy7_pl_fclk_get_source(i); fclk_configs[i].actual_frequency = zy7_pl_fclk_enabled(i) ? zy7_pl_fclk_get_freq(i) : 0; /* Initially assume the actual frequency is the configured one. */ fclk_configs[i].frequency = fclk_configs[i].actual_frequency; } if (zy7_devcfg_init_fclk_sysctl(sc) < 0) device_printf(dev, "failed to initialize sysctl tree\n"); return (0); } static int zy7_devcfg_detach(device_t dev) { struct zy7_devcfg_softc *sc = device_get_softc(dev); + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); if (sc->sysctl_tree_top != NULL) { sysctl_ctx_free(&sc->sysctl_tree); sc->sysctl_tree_top = NULL; } - if (device_is_attached(dev)) - bus_generic_detach(dev); - /* Get rid of /dev/devcfg. */ if (sc->sc_ctl_dev != NULL) destroy_dev(sc->sc_ctl_dev); /* Teardown and release interrupt.
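 * destroy_dev() above has drained all users of the control device,
 * so no transfer can still be sleeping on this interrupt's wakeups.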
*/ if (sc->irq_res != NULL) { if (sc->intrhandle) bus_teardown_intr(dev, sc->irq_res, sc->intrhandle); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); } /* Release memory resource. */ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); zy7_devcfg_softc_p = NULL; DEVCFG_SC_LOCK_DESTROY(sc); return (0); } static device_method_t zy7_devcfg_methods[] = { /* device_if */ DEVMETHOD(device_probe, zy7_devcfg_probe), DEVMETHOD(device_attach, zy7_devcfg_attach), DEVMETHOD(device_detach, zy7_devcfg_detach), DEVMETHOD_END }; static driver_t zy7_devcfg_driver = { "zy7_devcfg", zy7_devcfg_methods, sizeof(struct zy7_devcfg_softc), }; DRIVER_MODULE(zy7_devcfg, simplebus, zy7_devcfg_driver, 0, 0); MODULE_DEPEND(zy7_devcfg, zy7_slcr, 1, 1, 1); diff --git a/sys/arm/xilinx/zy7_qspi.c b/sys/arm/xilinx/zy7_qspi.c index 53559571f7db..49d4cafd8d61 100644 --- a/sys/arm/xilinx/zy7_qspi.c +++ b/sys/arm/xilinx/zy7_qspi.c @@ -1,747 +1,749 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Thomas Skibo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * This is a driver for the Quad-SPI Flash Controller in the Xilinx * Zynq-7000 SoC. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" static struct ofw_compat_data compat_data[] = { {"xlnx,zy7_qspi", 1}, {"xlnx,zynq-qspi-1.0", 1}, {NULL, 0} }; struct zy7_qspi_softc { device_t dev; device_t child; struct mtx sc_mtx; struct resource *mem_res; struct resource *irq_res; void *intrhandle; uint32_t cfg_reg_shadow; uint32_t lqspi_cfg_shadow; uint32_t spi_clock; uint32_t ref_clock; unsigned int spi_clk_real_freq; unsigned int rx_overflows; unsigned int tx_underflows; unsigned int interrupts; unsigned int stray_ints; struct spi_command *cmd; int tx_bytes; /* tx_cmd_sz + tx_data_sz */ int tx_bytes_sent; int rx_bytes; /* rx_cmd_sz + rx_data_sz */ int rx_bytes_rcvd; int busy; int is_dual; int is_stacked; int is_dio; }; #define ZY7_QSPI_DEFAULT_SPI_CLOCK 50000000 #define QSPI_SC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define QSPI_SC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define QSPI_SC_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), NULL, MTX_DEF) #define QSPI_SC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) #define QSPI_SC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define RD4(sc, off) (bus_read_4((sc)->mem_res, (off))) #define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val))) /* * QSPI device registers. * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. * (v1.12.2) July 1, 2018. Xilinx doc UG585. */ #define ZY7_QSPI_CONFIG_REG 0x0000 #define ZY7_QSPI_CONFIG_IFMODE (1U << 31) #define ZY7_QSPI_CONFIG_ENDIAN (1 << 26) #define ZY7_QSPI_CONFIG_HOLDB_DR (1 << 19) #define ZY7_QSPI_CONFIG_RSVD1 (1 << 17) /* must be 1 */ #define ZY7_QSPI_CONFIG_MANSTRT (1 << 16) #define ZY7_QSPI_CONFIG_MANSTRTEN (1 << 15) #define ZY7_QSPI_CONFIG_SSFORCE (1 << 14) #define ZY7_QSPI_CONFIG_PCS (1 << 10) #define ZY7_QSPI_CONFIG_REF_CLK (1 << 8) #define ZY7_QSPI_CONFIG_FIFO_WIDTH_MASK (3 << 6) #define ZY7_QSPI_CONFIG_FIFO_WIDTH32 (3 << 6) #define ZY7_QSPI_CONFIG_BAUD_RATE_DIV_MASK (7 << 3) #define ZY7_QSPI_CONFIG_BAUD_RATE_DIV_SHIFT 3 #define ZY7_QSPI_CONFIG_BAUD_RATE_DIV(x) ((x) << 3) /* divide by 2<<x */ /* [further ZY7_QSPI_* register definitions elided] */ /* Fill hardware fifo with command and data bytes. */ static void zy7_qspi_write_fifo(struct zy7_qspi_softc *sc, int nbytes) { int n, nvalid; uint32_t data; while (nbytes > 0) { nvalid = MIN(4, nbytes); data = 0xffffffff; /* * A hardware bug forces us to wait until the tx fifo is * empty before writing partial words. We'll come back * next tx interrupt. */ if (nvalid < 4 && (RD4(sc, ZY7_QSPI_INTR_STAT_REG) & ZY7_QSPI_INTR_TX_FIFO_NOT_FULL) == 0) return; if (sc->tx_bytes_sent < sc->cmd->tx_cmd_sz) { /* Writing command. */ n = MIN(nvalid, sc->cmd->tx_cmd_sz - sc->tx_bytes_sent); memcpy(&data, (uint8_t *)sc->cmd->tx_cmd + sc->tx_bytes_sent, n); if (nvalid > n) { /* Writing start of data. */ memcpy((uint8_t *)&data + n, sc->cmd->tx_data, nvalid - n); } } else /* Writing data. */ memcpy(&data, (uint8_t *)sc->cmd->tx_data + (sc->tx_bytes_sent - sc->cmd->tx_cmd_sz), nvalid); switch (nvalid) { case 1: WR4(sc, ZY7_QSPI_TXD1_REG, data); break; case 2: WR4(sc, ZY7_QSPI_TXD2_REG, data); break; case 3: WR4(sc, ZY7_QSPI_TXD3_REG, data); break; case 4: WR4(sc, ZY7_QSPI_TXD0_REG, data); break; } sc->tx_bytes_sent += nvalid; nbytes -= nvalid; } } /* Read hardware fifo data into command response and data buffers. */ static void zy7_qspi_read_fifo(struct zy7_qspi_softc *sc) { int n, nbytes; uint32_t data; do { data = RD4(sc, ZY7_QSPI_RX_DATA_REG); nbytes = MIN(4, sc->rx_bytes - sc->rx_bytes_rcvd); /* * Last word in non-word-multiple transfer is packed * non-intuitively.
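 * E.g. with rx_bytes = 6 the final word has nbytes = 2 and its two
 * valid bytes arrive in the upper half of the 32-bit word, so the
 * shift below moves them down by 8 * (4 - 2) = 16 bits before they
 * are copied out.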
*/ if (nbytes < 4) data >>= 8 * (4 - nbytes); if (sc->rx_bytes_rcvd < sc->cmd->rx_cmd_sz) { /* Reading command. */ n = MIN(nbytes, sc->cmd->rx_cmd_sz - sc->rx_bytes_rcvd); memcpy((uint8_t *)sc->cmd->rx_cmd + sc->rx_bytes_rcvd, &data, n); sc->rx_bytes_rcvd += n; nbytes -= n; data >>= 8 * n; } if (nbytes > 0) { /* Reading data. */ memcpy((uint8_t *)sc->cmd->rx_data + (sc->rx_bytes_rcvd - sc->cmd->rx_cmd_sz), &data, nbytes); sc->rx_bytes_rcvd += nbytes; } } while (sc->rx_bytes_rcvd < sc->rx_bytes && (RD4(sc, ZY7_QSPI_INTR_STAT_REG) & ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY) != 0); } /* End a transfer early by draining rx fifo and disabling interrupts. */ static void zy7_qspi_abort_transfer(struct zy7_qspi_softc *sc) { /* Drain receive fifo. */ while ((RD4(sc, ZY7_QSPI_INTR_STAT_REG) & ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY) != 0) (void)RD4(sc, ZY7_QSPI_RX_DATA_REG); /* Shut down interrupts. */ WR4(sc, ZY7_QSPI_INTR_DIS_REG, ZY7_QSPI_INTR_RX_OVERFLOW | ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY | ZY7_QSPI_INTR_TX_FIFO_NOT_FULL); } static void zy7_qspi_intr(void *arg) { struct zy7_qspi_softc *sc = (struct zy7_qspi_softc *)arg; uint32_t istatus; QSPI_SC_LOCK(sc); sc->interrupts++; istatus = RD4(sc, ZY7_QSPI_INTR_STAT_REG); /* Stray interrupts can happen if a transfer gets interrupted. */ if (!sc->busy) { sc->stray_ints++; QSPI_SC_UNLOCK(sc); return; } if ((istatus & ZY7_QSPI_INTR_RX_OVERFLOW) != 0) { device_printf(sc->dev, "rx fifo overflow!\n"); sc->rx_overflows++; /* Clear status bit. */ WR4(sc, ZY7_QSPI_INTR_STAT_REG, ZY7_QSPI_INTR_RX_OVERFLOW); } /* Empty receive fifo before any more transmit data is sent. */ if (sc->rx_bytes_rcvd < sc->rx_bytes && (istatus & ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY) != 0) { zy7_qspi_read_fifo(sc); if (sc->rx_bytes_rcvd == sc->rx_bytes) /* Disable receive interrupts. */ WR4(sc, ZY7_QSPI_INTR_DIS_REG, ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY | ZY7_QSPI_INTR_RX_OVERFLOW); } /* * Transmit underflows aren't really a bug because a hardware * bug forces us to allow the tx fifo to go empty between full * and partial fifo writes. Why bother counting? */ if ((istatus & ZY7_QSPI_INTR_TX_FIFO_UNDERFLOW) != 0) { sc->tx_underflows++; /* Clear status bit. */ WR4(sc, ZY7_QSPI_INTR_STAT_REG, ZY7_QSPI_INTR_TX_FIFO_UNDERFLOW); } /* Fill transmit fifo. */ if (sc->tx_bytes_sent < sc->tx_bytes && (istatus & ZY7_QSPI_INTR_TX_FIFO_NOT_FULL) != 0) { zy7_qspi_write_fifo(sc, MIN(240, sc->tx_bytes - sc->tx_bytes_sent)); if (sc->tx_bytes_sent == sc->tx_bytes) { /* * Disable transmit FIFO interrupt, enable receive * FIFO interrupt. */ WR4(sc, ZY7_QSPI_INTR_DIS_REG, ZY7_QSPI_INTR_TX_FIFO_NOT_FULL); WR4(sc, ZY7_QSPI_INTR_EN_REG, ZY7_QSPI_INTR_RX_FIFO_NOT_EMPTY); } } /* Finished with transfer? */ if (sc->tx_bytes_sent == sc->tx_bytes && sc->rx_bytes_rcvd == sc->rx_bytes) { /* De-assert CS. */ sc->cfg_reg_shadow |= ZY7_QSPI_CONFIG_PCS; WR4(sc, ZY7_QSPI_CONFIG_REG, sc->cfg_reg_shadow); wakeup(sc->dev); } QSPI_SC_UNLOCK(sc); } /* Initialize hardware. */ static int zy7_qspi_init_hw(struct zy7_qspi_softc *sc) { uint32_t baud_div; /* Configure LQSPI Config register. Disable linear mode. */ sc->lqspi_cfg_shadow = RD4(sc, ZY7_QSPI_LQSPI_CFG_REG); sc->lqspi_cfg_shadow &= ~(ZY7_QSPI_LQSPI_CFG_LINEAR | ZY7_QSPI_LQSPI_CFG_TWO_MEM | ZY7_QSPI_LQSPI_CFG_SEP_BUS); if (sc->is_dual) { sc->lqspi_cfg_shadow |= ZY7_QSPI_LQSPI_CFG_TWO_MEM; if (sc->is_stacked) { sc->lqspi_cfg_shadow &= ~ZY7_QSPI_LQSPI_CFG_INST_CODE_MASK; sc->lqspi_cfg_shadow |= ZY7_QSPI_LQSPI_CFG_INST_CODE(sc->is_dio ? 
CMD_READ_DUAL_IO : CMD_READ_QUAD_OUTPUT); } else sc->lqspi_cfg_shadow |= ZY7_QSPI_LQSPI_CFG_SEP_BUS; } WR4(sc, ZY7_QSPI_LQSPI_CFG_REG, sc->lqspi_cfg_shadow); /* Find best clock divider. */ baud_div = 0; while ((sc->ref_clock >> (baud_div + 1)) > sc->spi_clock && baud_div < 8) baud_div++; if (baud_div >= 8) { device_printf(sc->dev, "cannot configure clock divider: ref=%d" " spi=%d.\n", sc->ref_clock, sc->spi_clock); return (EINVAL); } sc->spi_clk_real_freq = sc->ref_clock >> (baud_div + 1); /* * If divider is 2 (the max speed), use internal loopback master * clock for read data. (See section 12.3.1 in ref man.) */ if (baud_div == 0) WR4(sc, ZY7_QSPI_LPBK_DLY_ADJ_REG, ZY7_QSPI_LPBK_DLY_ADJ_USE_LPBK | ZY7_QSPI_LPBK_DLY_ADJ_DLY1(0) | ZY7_QSPI_LPBK_DLY_ADJ_DLY0(0)); else WR4(sc, ZY7_QSPI_LPBK_DLY_ADJ_REG, 0); /* Set up configuration register. */ sc->cfg_reg_shadow = ZY7_QSPI_CONFIG_IFMODE | ZY7_QSPI_CONFIG_HOLDB_DR | ZY7_QSPI_CONFIG_RSVD1 | ZY7_QSPI_CONFIG_SSFORCE | ZY7_QSPI_CONFIG_PCS | ZY7_QSPI_CONFIG_FIFO_WIDTH32 | ZY7_QSPI_CONFIG_BAUD_RATE_DIV(baud_div) | ZY7_QSPI_CONFIG_MODE_SEL; WR4(sc, ZY7_QSPI_CONFIG_REG, sc->cfg_reg_shadow); /* * Set thresholds. We must use 1 for tx threshold because there * is no fifo empty flag and we need one to implement a bug * workaround. */ WR4(sc, ZY7_QSPI_TX_THRESH_REG, 1); WR4(sc, ZY7_QSPI_RX_THRESH_REG, 1); /* Clear and disable all interrupts. */ WR4(sc, ZY7_QSPI_INTR_STAT_REG, ~0); WR4(sc, ZY7_QSPI_INTR_DIS_REG, ~0); /* Enable SPI. */ WR4(sc, ZY7_QSPI_EN_REG, ZY7_SPI_ENABLE); return (0); } static void zy7_qspi_add_sysctls(device_t dev) { struct zy7_qspi_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "spi_clk_real_freq", CTLFLAG_RD, &sc->spi_clk_real_freq, 0, "SPI clock real frequency"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overflows", CTLFLAG_RD, &sc->rx_overflows, 0, "RX FIFO overflow events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_underflows", CTLFLAG_RD, &sc->tx_underflows, 0, "TX FIFO underflow events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, &sc->interrupts, 0, "interrupt calls"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "stray_ints", CTLFLAG_RD, &sc->stray_ints, 0, "stray interrupts"); } static int zy7_qspi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Zynq Quad-SPI Flash Controller"); return (BUS_PROBE_DEFAULT); } static int zy7_qspi_attach(device_t dev) { struct zy7_qspi_softc *sc; int rid, err; phandle_t node; pcell_t cell; sc = device_get_softc(dev); sc->dev = dev; QSPI_SC_LOCK_INIT(sc); /* Get ref-clock, spi-clock, and other properties. 
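 * ref-clock and spi-clock feed the divider search in
 * zy7_qspi_init_hw() above: the bus clock is ref-clock divided by
 * 2^(baud_div + 1), so e.g. a 200 MHz ref-clock with the default
 * 50 MHz spi-clock selects baud_div 1 (divide by 4).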
*/ node = ofw_bus_get_node(dev); if (OF_getprop(node, "ref-clock", &cell, sizeof(cell)) > 0) sc->ref_clock = fdt32_to_cpu(cell); else { device_printf(dev, "must have ref-clock property\n"); return (ENXIO); } if (OF_getprop(node, "spi-clock", &cell, sizeof(cell)) > 0) sc->spi_clock = fdt32_to_cpu(cell); else sc->spi_clock = ZY7_QSPI_DEFAULT_SPI_CLOCK; if (OF_getprop(node, "is-stacked", &cell, sizeof(cell)) > 0 && fdt32_to_cpu(cell) != 0) { sc->is_dual = 1; sc->is_stacked = 1; } else if (OF_getprop(node, "is-dual", &cell, sizeof(cell)) > 0 && fdt32_to_cpu(cell) != 0) sc->is_dual = 1; if (OF_getprop(node, "is-dio", &cell, sizeof(cell)) > 0 && fdt32_to_cpu(cell) != 0) sc->is_dio = 1; /* Get memory resource. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resources.\n"); zy7_qspi_detach(dev); return (ENOMEM); } /* Allocate IRQ. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "could not allocate IRQ resource.\n"); zy7_qspi_detach(dev); return (ENOMEM); } /* Activate the interrupt. */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, zy7_qspi_intr, sc, &sc->intrhandle); if (err) { device_printf(dev, "could not setup IRQ.\n"); zy7_qspi_detach(dev); return (err); } /* Configure the device. */ err = zy7_qspi_init_hw(sc); if (err) { zy7_qspi_detach(dev); return (err); } sc->child = device_add_child(dev, "spibus", DEVICE_UNIT_ANY); zy7_qspi_add_sysctls(dev); /* Attach spibus driver as a child later when interrupts work. */ bus_delayed_attach_children(dev); return (0); } static int zy7_qspi_detach(device_t dev) { struct zy7_qspi_softc *sc = device_get_softc(dev); + int error; - if (device_is_attached(dev)) - bus_generic_detach(dev); + error = bus_generic_detach(dev); + if (error != 0) + return (error); /* Disable hardware. */ if (sc->mem_res != NULL) { /* Disable SPI. */ WR4(sc, ZY7_QSPI_EN_REG, 0); /* Clear and disable all interrupts. */ WR4(sc, ZY7_QSPI_INTR_STAT_REG, ~0); WR4(sc, ZY7_QSPI_INTR_DIS_REG, ~0); } /* Teardown and release interrupt. */ if (sc->irq_res != NULL) { if (sc->intrhandle) bus_teardown_intr(dev, sc->irq_res, sc->intrhandle); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); } /* Release memory resource. */ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); QSPI_SC_LOCK_DESTROY(sc); return (0); } static phandle_t zy7_qspi_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static int zy7_qspi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct zy7_qspi_softc *sc = device_get_softc(dev); int err = 0; KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz, ("TX/RX command sizes should be equal")); KASSERT(cmd->tx_data_sz == cmd->rx_data_sz, ("TX/RX data sizes should be equal")); if (sc->is_dual && cmd->tx_data_sz % 2 != 0) { device_printf(dev, "driver does not support odd byte data " "transfers in dual mode. (sz=%d)\n", cmd->tx_data_sz); return (EINVAL); } QSPI_SC_LOCK(sc); /* Wait for controller available. */ while (sc->busy != 0) { err = mtx_sleep(dev, &sc->sc_mtx, 0, "zqspi0", 0); if (err) { QSPI_SC_UNLOCK(sc); return (err); } } /* Start transfer. 
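*/

/*
 * Shape of the request being started (a sketch with placeholder
 * buffer names; the KASSERTs above require the TX/RX sizes to match):
 *
 *	struct spi_command cmd = { 0 };
 *	cmd.tx_cmd = txcmd;  cmd.rx_cmd = rxcmd;
 *	cmd.tx_cmd_sz = cmd.rx_cmd_sz = 4;
 *	cmd.tx_data = buf;   cmd.rx_data = buf;
 *	cmd.tx_data_sz = cmd.rx_data_sz = n;
 *	SPIBUS_TRANSFER(device_get_parent(dev), dev, &cmd);
 */

/* Start transfer.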
*/ sc->busy = 1; sc->cmd = cmd; sc->tx_bytes = sc->cmd->tx_cmd_sz + sc->cmd->tx_data_sz; sc->tx_bytes_sent = 0; sc->rx_bytes = sc->cmd->rx_cmd_sz + sc->cmd->rx_data_sz; sc->rx_bytes_rcvd = 0; /* Enable interrupts. zy7_qspi_intr() will handle transfer. */ WR4(sc, ZY7_QSPI_INTR_EN_REG, ZY7_QSPI_INTR_TX_FIFO_NOT_FULL | ZY7_QSPI_INTR_RX_OVERFLOW); #ifdef SPI_XFER_U_PAGE /* XXX: future support for stacked memories. */ if (sc->is_stacked) { if ((cmd->flags & SPI_XFER_U_PAGE) != 0) sc->lqspi_cfg_shadow |= ZY7_QSPI_LQSPI_CFG_U_PAGE; else sc->lqspi_cfg_shadow &= ~ZY7_QSPI_LQSPI_CFG_U_PAGE; WR4(sc, ZY7_QSPI_LQSPI_CFG_REG, sc->lqspi_cfg_shadow); } #endif /* Assert CS. */ sc->cfg_reg_shadow &= ~ZY7_QSPI_CONFIG_PCS; WR4(sc, ZY7_QSPI_CONFIG_REG, sc->cfg_reg_shadow); /* Wait for completion. */ err = mtx_sleep(dev, &sc->sc_mtx, 0, "zqspi1", hz * 2); if (err) zy7_qspi_abort_transfer(sc); /* Release controller. */ sc->busy = 0; wakeup_one(dev); QSPI_SC_UNLOCK(sc); return (err); } static device_method_t zy7_qspi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, zy7_qspi_probe), DEVMETHOD(device_attach, zy7_qspi_attach), DEVMETHOD(device_detach, zy7_qspi_detach), /* SPI interface */ DEVMETHOD(spibus_transfer, zy7_qspi_transfer), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, zy7_qspi_get_node), DEVMETHOD_END }; static driver_t zy7_qspi_driver = { "zy7_qspi", zy7_qspi_methods, sizeof(struct zy7_qspi_softc), }; DRIVER_MODULE(zy7_qspi, simplebus, zy7_qspi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, zy7_qspi, ofw_spibus_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); MODULE_DEPEND(zy7_qspi, ofw_spibus, 1, 1, 1); diff --git a/sys/arm/xilinx/zy7_slcr.c b/sys/arm/xilinx/zy7_slcr.c index 755b5d4806ed..a46ccb59a07b 100644 --- a/sys/arm/xilinx/zy7_slcr.c +++ b/sys/arm/xilinx/zy7_slcr.c @@ -1,708 +1,711 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Thomas Skibo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Zynq-7000 SLCR driver. Provides hooks for cpu_reset and PL control stuff. * In the future, maybe MIO control, clock control, etc. could go here. * * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. * (v1.4) November 16, 2012. Xilinx doc UG585.
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct zy7_slcr_softc { device_t dev; struct mtx sc_mtx; struct resource *mem_res; }; static struct zy7_slcr_softc *zy7_slcr_softc_p; extern void (*zynq7_cpu_reset); #define ZSLCR_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define ZSLCR_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define ZSLCR_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \ "zy7_slcr", MTX_DEF) #define ZSLCR_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define RD4(sc, off) (bus_read_4((sc)->mem_res, (off))) #define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val))) #define ZYNQ_DEFAULT_PS_CLK_FREQUENCY 33333333 /* 33.3 MHz */ SYSCTL_NODE(_hw, OID_AUTO, zynq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Xilinx Zynq-7000"); static char zynq_bootmode[64]; SYSCTL_STRING(_hw_zynq, OID_AUTO, bootmode, CTLFLAG_RD, zynq_bootmode, 0, "Zynq boot mode"); static char zynq_pssid[100]; SYSCTL_STRING(_hw_zynq, OID_AUTO, pssid, CTLFLAG_RD, zynq_pssid, 0, "Zynq PSS IDCODE"); static uint32_t zynq_reboot_status; SYSCTL_INT(_hw_zynq, OID_AUTO, reboot_status, CTLFLAG_RD, &zynq_reboot_status, 0, "Zynq REBOOT_STATUS register"); static int ps_clk_frequency; SYSCTL_INT(_hw_zynq, OID_AUTO, ps_clk_frequency, CTLFLAG_RD, &ps_clk_frequency, 0, "Zynq PS_CLK Frequency"); static int io_pll_frequency; SYSCTL_INT(_hw_zynq, OID_AUTO, io_pll_frequency, CTLFLAG_RD, &io_pll_frequency, 0, "Zynq IO PLL Frequency"); static int arm_pll_frequency; SYSCTL_INT(_hw_zynq, OID_AUTO, arm_pll_frequency, CTLFLAG_RD, &arm_pll_frequency, 0, "Zynq ARM PLL Frequency"); static int ddr_pll_frequency; SYSCTL_INT(_hw_zynq, OID_AUTO, ddr_pll_frequency, CTLFLAG_RD, &ddr_pll_frequency, 0, "Zynq DDR PLL Frequency"); static void zy7_slcr_unlock(struct zy7_slcr_softc *sc) { /* Unlock SLCR with magic number. */ WR4(sc, ZY7_SLCR_UNLOCK, ZY7_SLCR_UNLOCK_MAGIC); } static void zy7_slcr_lock(struct zy7_slcr_softc *sc) { /* Lock SLCR with magic number. */ WR4(sc, ZY7_SLCR_LOCK, ZY7_SLCR_LOCK_MAGIC); } static void zy7_slcr_cpu_reset(void) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; /* Unlock SLCR registers. */ zy7_slcr_unlock(sc); /* This has something to do with a work-around so the fsbl will load * the bitstream after soft-reboot. It's very important. */ WR4(sc, ZY7_SLCR_REBOOT_STAT, RD4(sc, ZY7_SLCR_REBOOT_STAT) & 0xf0ffffff); /* Soft reset */ WR4(sc, ZY7_SLCR_PSS_RST_CTRL, ZY7_SLCR_PSS_RST_CTRL_SOFT_RESET); for (;;) ; } /* Assert PL resets and disable level shifters in preparation for programming * the PL (FPGA) section. Called from zy7_devcfg.c. */ void zy7_slcr_preload_pl(void) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; if (!sc) return; ZSLCR_LOCK(sc); /* Unlock SLCR registers. */ zy7_slcr_unlock(sc); /* Assert top level output resets. */ WR4(sc, ZY7_SLCR_FPGA_RST_CTRL, ZY7_SLCR_FPGA_RST_CTRL_RST_ALL); /* Disable all level shifters. */ WR4(sc, ZY7_SLCR_LVL_SHFTR_EN, 0); /* Lock SLCR registers. */ zy7_slcr_lock(sc); ZSLCR_UNLOCK(sc); } /* After PL configuration, enable level shifters and deassert top-level * PL resets. Called from zy7_devcfg.c. Optionally, the level shifters * can be left disabled but that's rare for an FPGA application. That option * is controlled by a sysctl in the devcfg driver. */ void zy7_slcr_postload_pl(int en_level_shifters) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; if (!sc) return; ZSLCR_LOCK(sc); /* Unlock SLCR registers. */ zy7_slcr_unlock(sc); if (en_level_shifters) /* Enable level shifters.
*/ WR4(sc, ZY7_SLCR_LVL_SHFTR_EN, ZY7_SLCR_LVL_SHFTR_EN_ALL); /* Deassert top level output resets. */ WR4(sc, ZY7_SLCR_FPGA_RST_CTRL, 0); /* Lock SLCR registers. */ zy7_slcr_lock(sc); ZSLCR_UNLOCK(sc); } /* Override cgem_set_ref_clk() in gigabit ethernet driver * (sys/dev/cadence/if_cgem.c). This function is called to * request a change in the gem's reference clock speed. */ int cgem_set_ref_clk(int unit, int frequency) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; int div0, div1; if (!sc) return (-1); /* Find suitable divisor pairs. Round result to nearest kHz * to test for match. */ for (div1 = 1; div1 <= ZY7_SLCR_GEM_CLK_CTRL_DIVISOR1_MAX; div1++) { div0 = (io_pll_frequency + div1 * frequency / 2) / div1 / frequency; if (div0 > 0 && div0 <= ZY7_SLCR_GEM_CLK_CTRL_DIVISOR_MAX && ((io_pll_frequency / div0 / div1) + 500) / 1000 == (frequency + 500) / 1000) break; } if (div1 > ZY7_SLCR_GEM_CLK_CTRL_DIVISOR1_MAX) return (-1); ZSLCR_LOCK(sc); /* Unlock SLCR registers. */ zy7_slcr_unlock(sc); /* Modify GEM reference clock. */ WR4(sc, unit ? ZY7_SLCR_GEM1_CLK_CTRL : ZY7_SLCR_GEM0_CLK_CTRL, (div1 << ZY7_SLCR_GEM_CLK_CTRL_DIVISOR1_SHIFT) | (div0 << ZY7_SLCR_GEM_CLK_CTRL_DIVISOR_SHIFT) | ZY7_SLCR_GEM_CLK_CTRL_SRCSEL_IO_PLL | ZY7_SLCR_GEM_CLK_CTRL_CLKACT); /* Lock SLCR registers. */ zy7_slcr_lock(sc); ZSLCR_UNLOCK(sc); return (0); } /* * PL clock management functions. */ int zy7_pl_fclk_set_source(int unit, int source) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; uint32_t reg; if (!sc) return (-1); ZSLCR_LOCK(sc); /* Unlock SLCR registers. */ zy7_slcr_unlock(sc); /* Modify FPGAx source. */ reg = RD4(sc, ZY7_SLCR_FPGA_CLK_CTRL(unit)); reg &= ~(ZY7_SLCR_FPGA_CLK_CTRL_SRCSEL_MASK); reg |= (source << ZY7_SLCR_FPGA_CLK_CTRL_SRCSEL_SHIFT); WR4(sc, ZY7_SLCR_FPGA_CLK_CTRL(unit), reg); /* Lock SLCR registers. */ zy7_slcr_lock(sc); ZSLCR_UNLOCK(sc); return (0); } int zy7_pl_fclk_get_source(int unit) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; uint32_t reg; int source; if (!sc) return (-1); ZSLCR_LOCK(sc); /* Read the FPGAx clock source. */ reg = RD4(sc, ZY7_SLCR_FPGA_CLK_CTRL(unit)); source = (reg & ZY7_SLCR_FPGA_CLK_CTRL_SRCSEL_MASK) >> ZY7_SLCR_FPGA_CLK_CTRL_SRCSEL_SHIFT; /* ZY7_PL_FCLK_SRC_IO is actually b0x */ if ((source & 2) == 0) source = ZY7_PL_FCLK_SRC_IO; ZSLCR_UNLOCK(sc); return (source); } int zy7_pl_fclk_set_freq(int unit, int frequency) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; int div0, div1; int base_frequency; uint32_t reg; int source; if (!sc) return (-1); source = zy7_pl_fclk_get_source(unit); switch (source) { case ZY7_PL_FCLK_SRC_IO: base_frequency = io_pll_frequency; break; case ZY7_PL_FCLK_SRC_ARM: base_frequency = arm_pll_frequency; break; case ZY7_PL_FCLK_SRC_DDR: base_frequency = ddr_pll_frequency; break; default: return (-1); } /* Find suitable divisor pairs. Round result to nearest kHz * to test for match. */ for (div1 = 1; div1 <= ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR_MAX; div1++) { div0 = (base_frequency + div1 * frequency / 2) / div1 / frequency; if (div0 > 0 && div0 <= ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR_MAX && ((base_frequency / div0 / div1) + 500) / 1000 == (frequency + 500) / 1000) break; } if (div1 > ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR_MAX) return (-1); ZSLCR_LOCK(sc); /* Unlock SLCR registers. */ zy7_slcr_unlock(sc); /* Modify FPGAx reference clock.
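 * A worked case of the divisor search above: base_frequency (IO PLL)
 * of 1 GHz and a requested 125 MHz give, at div1 = 1,
 * div0 = 1062500000 / 125000000 = 8, and 1 GHz / 8 / 1 matches
 * 125 MHz exactly once both are rounded to kHz, so the pair (8, 1)
 * is programmed into the divisor fields below.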
*/ reg = RD4(sc, ZY7_SLCR_FPGA_CLK_CTRL(unit)); reg &= ~(ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR1_MASK | ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR0_MASK); reg |= (div1 << ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR1_SHIFT) | (div0 << ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR0_SHIFT); WR4(sc, ZY7_SLCR_FPGA_CLK_CTRL(unit), reg); /* Lock SLCR registers. */ zy7_slcr_lock(sc); ZSLCR_UNLOCK(sc); return (base_frequency / div0 / div1); } int zy7_pl_fclk_get_freq(int unit) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; int div0, div1; int base_frequency; int frequency; uint32_t reg; int source; if (!sc) return (-1); source = zy7_pl_fclk_get_source(unit); switch (source) { case ZY7_PL_FCLK_SRC_IO: base_frequency = io_pll_frequency; break; case ZY7_PL_FCLK_SRC_ARM: base_frequency = arm_pll_frequency; break; case ZY7_PL_FCLK_SRC_DDR: base_frequency = ddr_pll_frequency; break; default: return (-1); } ZSLCR_LOCK(sc); /* Read the FPGAx clock divisors. */ reg = RD4(sc, ZY7_SLCR_FPGA_CLK_CTRL(unit)); div1 = (reg & ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR1_MASK) >> ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR1_SHIFT; div0 = (reg & ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR0_MASK) >> ZY7_SLCR_FPGA_CLK_CTRL_DIVISOR0_SHIFT; ZSLCR_UNLOCK(sc); if (div0 == 0) div0 = 1; if (div1 == 0) div1 = 1; frequency = (base_frequency / div0 / div1); /* Round to kHz. */ frequency = (frequency + 500) / 1000; frequency = frequency * 1000; return (frequency); } int zy7_pl_fclk_enable(int unit) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; if (!sc) return (-1); ZSLCR_LOCK(sc); /* Unlock SLCR registers. */ zy7_slcr_unlock(sc); WR4(sc, ZY7_SLCR_FPGA_THR_CTRL(unit), 0); WR4(sc, ZY7_SLCR_FPGA_THR_CNT(unit), 0); /* Lock SLCR registers. */ zy7_slcr_lock(sc); ZSLCR_UNLOCK(sc); return (0); } int zy7_pl_fclk_disable(int unit) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; if (!sc) return (-1); ZSLCR_LOCK(sc); /* Unlock SLCR registers. */ zy7_slcr_unlock(sc); WR4(sc, ZY7_SLCR_FPGA_THR_CTRL(unit), 0); WR4(sc, ZY7_SLCR_FPGA_THR_CNT(unit), 1); /* Lock SLCR registers. */ zy7_slcr_lock(sc); ZSLCR_UNLOCK(sc); return (0); } int zy7_pl_fclk_enabled(int unit) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; uint32_t reg; if (!sc) return (-1); ZSLCR_LOCK(sc); reg = RD4(sc, ZY7_SLCR_FPGA_THR_CNT(unit)); ZSLCR_UNLOCK(sc); return !(reg & 1); } int zy7_pl_level_shifters_enabled(void) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; uint32_t reg; if (!sc) return (-1); ZSLCR_LOCK(sc); reg = RD4(sc, ZY7_SLCR_LVL_SHFTR_EN); ZSLCR_UNLOCK(sc); return (reg == ZY7_SLCR_LVL_SHFTR_EN_ALL); } void zy7_pl_level_shifters_enable(void) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; if (!sc) return; ZSLCR_LOCK(sc); zy7_slcr_unlock(sc); WR4(sc, ZY7_SLCR_LVL_SHFTR_EN, ZY7_SLCR_LVL_SHFTR_EN_ALL); zy7_slcr_lock(sc); ZSLCR_UNLOCK(sc); } void zy7_pl_level_shifters_disable(void) { struct zy7_slcr_softc *sc = zy7_slcr_softc_p; if (!sc) return; ZSLCR_LOCK(sc); zy7_slcr_unlock(sc); WR4(sc, ZY7_SLCR_LVL_SHFTR_EN, 0); zy7_slcr_lock(sc); ZSLCR_UNLOCK(sc); } static int zy7_slcr_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "xlnx,zy7_slcr")) return (ENXIO); device_set_desc(dev, "Zynq-7000 slcr block"); return (0); } static int zy7_slcr_attach(device_t dev) { struct zy7_slcr_softc *sc = device_get_softc(dev); int rid; phandle_t node; pcell_t cell; uint32_t bootmode; uint32_t pss_idcode; uint32_t arm_pll_ctrl; uint32_t ddr_pll_ctrl; uint32_t io_pll_ctrl; static char *bootdev_names[] = { "JTAG", "Quad-SPI", "NOR", "(3?)", "NAND", "SD Card", "(6?)", "(7?)" }; /* Allow only one attach.
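*/

/*
 * The PLL arithmetic further down reduces to PS_CLK * FDIV unless the
 * PLL is bypassed; e.g. with the default 33.333 MHz PS_CLK, an FDIV
 * of 30 yields a 1 GHz PLL.  Whether "bypassed" means the FORCE bit
 * or the boot-mode pin depends on the BYPASS_QUAL bit, which is what
 * the three nearly identical if-clauses below decode.
 */

/* Allow only one attach.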
*/ if (zy7_slcr_softc_p != NULL) return (ENXIO); sc->dev = dev; ZSLCR_LOCK_INIT(sc); /* Get memory resource. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resources.\n"); return (ENOMEM); } /* Hook up cpu_reset. */ zy7_slcr_softc_p = sc; zynq7_cpu_reset = zy7_slcr_cpu_reset; /* Read info and set sysctls. */ bootmode = RD4(sc, ZY7_SLCR_BOOT_MODE); snprintf(zynq_bootmode, sizeof(zynq_bootmode), "0x%x: boot device: %s", bootmode, bootdev_names[bootmode & ZY7_SLCR_BOOT_MODE_BOOTDEV_MASK]); pss_idcode = RD4(sc, ZY7_SLCR_PSS_IDCODE); snprintf(zynq_pssid, sizeof(zynq_pssid), "0x%x: manufacturer: 0x%x device: 0x%x " "family: 0x%x sub-family: 0x%x rev: 0x%x", pss_idcode, (pss_idcode & ZY7_SLCR_PSS_IDCODE_MNFR_ID_MASK) >> ZY7_SLCR_PSS_IDCODE_MNFR_ID_SHIFT, (pss_idcode & ZY7_SLCR_PSS_IDCODE_DEVICE_MASK) >> ZY7_SLCR_PSS_IDCODE_DEVICE_SHIFT, (pss_idcode & ZY7_SLCR_PSS_IDCODE_FAMILY_MASK) >> ZY7_SLCR_PSS_IDCODE_FAMILY_SHIFT, (pss_idcode & ZY7_SLCR_PSS_IDCODE_SUB_FAMILY_MASK) >> ZY7_SLCR_PSS_IDCODE_SUB_FAMILY_SHIFT, (pss_idcode & ZY7_SLCR_PSS_IDCODE_REVISION_MASK) >> ZY7_SLCR_PSS_IDCODE_REVISION_SHIFT); zynq_reboot_status = RD4(sc, ZY7_SLCR_REBOOT_STAT); /* Derive PLL frequencies from PS_CLK. */ node = ofw_bus_get_node(dev); if (OF_getencprop(node, "clock-frequency", &cell, sizeof(cell)) > 0) ps_clk_frequency = cell; else ps_clk_frequency = ZYNQ_DEFAULT_PS_CLK_FREQUENCY; arm_pll_ctrl = RD4(sc, ZY7_SLCR_ARM_PLL_CTRL); ddr_pll_ctrl = RD4(sc, ZY7_SLCR_DDR_PLL_CTRL); io_pll_ctrl = RD4(sc, ZY7_SLCR_IO_PLL_CTRL); /* Determine ARM PLL frequency. */ if (((arm_pll_ctrl & ZY7_SLCR_PLL_CTRL_BYPASS_QUAL) == 0 && (arm_pll_ctrl & ZY7_SLCR_PLL_CTRL_BYPASS_FORCE) != 0) || ((arm_pll_ctrl & ZY7_SLCR_PLL_CTRL_BYPASS_QUAL) != 0 && (bootmode & ZY7_SLCR_BOOT_MODE_PLL_BYPASS) != 0)) /* PLL is bypassed. */ arm_pll_frequency = ps_clk_frequency; else arm_pll_frequency = ps_clk_frequency * ((arm_pll_ctrl & ZY7_SLCR_PLL_CTRL_FDIV_MASK) >> ZY7_SLCR_PLL_CTRL_FDIV_SHIFT); /* Determine DDR PLL frequency. */ if (((ddr_pll_ctrl & ZY7_SLCR_PLL_CTRL_BYPASS_QUAL) == 0 && (ddr_pll_ctrl & ZY7_SLCR_PLL_CTRL_BYPASS_FORCE) != 0) || ((ddr_pll_ctrl & ZY7_SLCR_PLL_CTRL_BYPASS_QUAL) != 0 && (bootmode & ZY7_SLCR_BOOT_MODE_PLL_BYPASS) != 0)) /* PLL is bypassed. */ ddr_pll_frequency = ps_clk_frequency; else ddr_pll_frequency = ps_clk_frequency * ((ddr_pll_ctrl & ZY7_SLCR_PLL_CTRL_FDIV_MASK) >> ZY7_SLCR_PLL_CTRL_FDIV_SHIFT); /* Determine IO PLL frequency. */ if (((io_pll_ctrl & ZY7_SLCR_PLL_CTRL_BYPASS_QUAL) == 0 && (io_pll_ctrl & ZY7_SLCR_PLL_CTRL_BYPASS_FORCE) != 0) || ((io_pll_ctrl & ZY7_SLCR_PLL_CTRL_BYPASS_QUAL) != 0 && (bootmode & ZY7_SLCR_BOOT_MODE_PLL_BYPASS) != 0)) /* PLL is bypassed. */ io_pll_frequency = ps_clk_frequency; else io_pll_frequency = ps_clk_frequency * ((io_pll_ctrl & ZY7_SLCR_PLL_CTRL_FDIV_MASK) >> ZY7_SLCR_PLL_CTRL_FDIV_SHIFT); /* Lock SLCR registers. */ zy7_slcr_lock(sc); return (0); } static int zy7_slcr_detach(device_t dev) { struct zy7_slcr_softc *sc = device_get_softc(dev); + int error; - bus_generic_detach(dev); + error = bus_generic_detach(dev); + if (error != 0) + return (error); /* Release memory resource. 
*/ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); zy7_slcr_softc_p = NULL; zynq7_cpu_reset = NULL; ZSLCR_LOCK_DESTROY(sc); return (0); } static device_method_t zy7_slcr_methods[] = { /* device_if */ DEVMETHOD(device_probe, zy7_slcr_probe), DEVMETHOD(device_attach, zy7_slcr_attach), DEVMETHOD(device_detach, zy7_slcr_detach), DEVMETHOD_END }; static driver_t zy7_slcr_driver = { "zy7_slcr", zy7_slcr_methods, sizeof(struct zy7_slcr_softc), }; DRIVER_MODULE(zy7_slcr, simplebus, zy7_slcr_driver, 0, 0); MODULE_VERSION(zy7_slcr, 1); diff --git a/sys/arm/xilinx/zy7_spi.c b/sys/arm/xilinx/zy7_spi.c index d032deabf8b7..bb5bb7f46767 100644 --- a/sys/arm/xilinx/zy7_spi.c +++ b/sys/arm/xilinx/zy7_spi.c @@ -1,586 +1,588 @@ /*- * Copyright (c) 2018 Thomas Skibo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" static struct ofw_compat_data compat_data[] = { {"xlnx,zy7_spi", 1}, {"xlnx,zynq-spi-1.0", 1}, {"cdns,spi-r1p6", 1}, {NULL, 0} }; struct zy7_spi_softc { device_t dev; device_t child; struct mtx sc_mtx; struct resource *mem_res; struct resource *irq_res; void *intrhandle; uint32_t cfg_reg_shadow; uint32_t spi_clock; uint32_t ref_clock; unsigned int spi_clk_real_freq; unsigned int rx_overflows; unsigned int tx_underflows; unsigned int interrupts; unsigned int stray_ints; struct spi_command *cmd; int tx_bytes; /* tx_cmd_sz + tx_data_sz */ int tx_bytes_sent; int rx_bytes; /* rx_cmd_sz + rx_data_sz */ int rx_bytes_rcvd; int busy; }; #define ZY7_SPI_DEFAULT_SPI_CLOCK 50000000 #define SPI_SC_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define SPI_SC_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define SPI_SC_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), NULL, MTX_DEF) #define SPI_SC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx) #define SPI_SC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define RD4(sc, off) (bus_read_4((sc)->mem_res, (off))) #define WR4(sc, off, val) (bus_write_4((sc)->mem_res, (off), (val))) /* * SPI device registers. * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. 
* (v1.12.1) December 6, 2017. Xilinx doc UG585. */ #define ZY7_SPI_CONFIG_REG 0x0000 #define ZY7_SPI_CONFIG_MODEFAIL_GEN_EN (1 << 17) #define ZY7_SPI_CONFIG_MAN_STRT (1 << 16) #define ZY7_SPI_CONFIG_MAN_STRT_EN (1 << 15) #define ZY7_SPI_CONFIG_MAN_CS (1 << 14) #define ZY7_SPI_CONFIG_CS_MASK (0xf << 10) #define ZY7_SPI_CONFIG_CS(x) ((0xf ^ (1 << (x))) << 10) #define ZY7_SPI_CONFIG_PERI_SEL (1 << 9) #define ZY7_SPI_CONFIG_REF_CLK (1 << 8) #define ZY7_SPI_CONFIG_BAUD_RATE_DIV_MASK (7 << 3) #define ZY7_SPI_CONFIG_BAUD_RATE_DIV_SHIFT 3 #define ZY7_SPI_CONFIG_BAUD_RATE_DIV(x) ((x) << 3) /* divide by 2<<x */ /* [further ZY7_SPI_* register definitions elided] */ /* Fill hardware fifo with command and data bytes. */ static void zy7_spi_write_fifo(struct zy7_spi_softc *sc, int nbytes) { uint8_t byte; while (nbytes > 0) { if (sc->tx_bytes_sent < sc->cmd->tx_cmd_sz) /* Writing command. */ byte = *((uint8_t *)sc->cmd->tx_cmd + sc->tx_bytes_sent); else /* Writing data. */ byte = *((uint8_t *)sc->cmd->tx_data + (sc->tx_bytes_sent - sc->cmd->tx_cmd_sz)); WR4(sc, ZY7_SPI_TX_DATA_REG, (uint32_t)byte); sc->tx_bytes_sent++; nbytes--; } } /* Read hardware fifo data into command response and data buffers. */ static void zy7_spi_read_fifo(struct zy7_spi_softc *sc) { uint8_t byte; do { byte = RD4(sc, ZY7_SPI_RX_DATA_REG) & 0xff; if (sc->rx_bytes_rcvd < sc->cmd->rx_cmd_sz) /* Reading command. */ *((uint8_t *)sc->cmd->rx_cmd + sc->rx_bytes_rcvd) = byte; else /* Reading data. */ *((uint8_t *)sc->cmd->rx_data + (sc->rx_bytes_rcvd - sc->cmd->rx_cmd_sz)) = byte; sc->rx_bytes_rcvd++; } while (sc->rx_bytes_rcvd < sc->rx_bytes && (RD4(sc, ZY7_SPI_INTR_STAT_REG) & ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY) != 0); } /* End a transfer early by draining rx fifo and disabling interrupts. */ static void zy7_spi_abort_transfer(struct zy7_spi_softc *sc) { /* Drain receive fifo. */ while ((RD4(sc, ZY7_SPI_INTR_STAT_REG) & ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY) != 0) (void)RD4(sc, ZY7_SPI_RX_DATA_REG); /* Shut down interrupts. */ WR4(sc, ZY7_SPI_INTR_DIS_REG, ZY7_SPI_INTR_RX_OVERFLOW | ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY | ZY7_SPI_INTR_TX_FIFO_NOT_FULL); } static void zy7_spi_intr(void *arg) { struct zy7_spi_softc *sc = (struct zy7_spi_softc *)arg; uint32_t istatus; SPI_SC_LOCK(sc); sc->interrupts++; istatus = RD4(sc, ZY7_SPI_INTR_STAT_REG); /* Stray interrupts can happen if a transfer gets interrupted. */ if (!sc->busy) { sc->stray_ints++; SPI_SC_UNLOCK(sc); return; } if ((istatus & ZY7_SPI_INTR_RX_OVERFLOW) != 0) { device_printf(sc->dev, "rx fifo overflow!\n"); sc->rx_overflows++; /* Clear status bit. */ WR4(sc, ZY7_SPI_INTR_STAT_REG, ZY7_SPI_INTR_RX_OVERFLOW); } /* Empty receive fifo before any more transmit data is sent. */ if (sc->rx_bytes_rcvd < sc->rx_bytes && (istatus & ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY) != 0) { zy7_spi_read_fifo(sc); if (sc->rx_bytes_rcvd == sc->rx_bytes) /* Disable receive interrupts. */ WR4(sc, ZY7_SPI_INTR_DIS_REG, ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY | ZY7_SPI_INTR_RX_OVERFLOW); } /* Count tx underflows. They probably shouldn't happen. */ if ((istatus & ZY7_SPI_INTR_TX_FIFO_UNDERFLOW) != 0) { sc->tx_underflows++; /* Clear status bit. */ WR4(sc, ZY7_SPI_INTR_STAT_REG, ZY7_SPI_INTR_TX_FIFO_UNDERFLOW); } /* Fill transmit fifo. */ if (sc->tx_bytes_sent < sc->tx_bytes && (istatus & ZY7_SPI_INTR_TX_FIFO_NOT_FULL) != 0) { zy7_spi_write_fifo(sc, MIN(96, sc->tx_bytes - sc->tx_bytes_sent)); if (sc->tx_bytes_sent == sc->tx_bytes) { /* Disable transmit FIFO interrupt, enable receive * FIFO interrupt. */ WR4(sc, ZY7_SPI_INTR_DIS_REG, ZY7_SPI_INTR_TX_FIFO_NOT_FULL); WR4(sc, ZY7_SPI_INTR_EN_REG, ZY7_SPI_INTR_RX_FIFO_NOT_EMPTY); } } /* Finished with transfer? */ if (sc->tx_bytes_sent == sc->tx_bytes && sc->rx_bytes_rcvd == sc->rx_bytes) { /* De-assert CS.
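*/

/*
 * The CS field is an active-low one-hot nibble: ZY7_SPI_CONFIG_CS(x)
 * pulls only line x low (slave 1 -> field value 0xd), and writing
 * ZY7_SPI_CONFIG_CS_MASK back, as below, drives all four selects
 * high again.
 */

/* De-assert CS.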
*/ sc->cfg_reg_shadow &= ~(ZY7_SPI_CONFIG_CLK_PH | ZY7_SPI_CONFIG_CLK_POL); sc->cfg_reg_shadow |= ZY7_SPI_CONFIG_CS_MASK; WR4(sc, ZY7_SPI_CONFIG_REG, sc->cfg_reg_shadow); wakeup(sc->dev); } SPI_SC_UNLOCK(sc); } /* Initialize hardware. */ static int zy7_spi_init_hw(struct zy7_spi_softc *sc) { uint32_t baud_div; /* Find best clock divider. Divide by 2 not supported. */ baud_div = 1; while ((sc->ref_clock >> (baud_div + 1)) > sc->spi_clock && baud_div < 8) baud_div++; if (baud_div >= 8) { device_printf(sc->dev, "cannot configure clock divider: ref=%d" " spi=%d.\n", sc->ref_clock, sc->spi_clock); return (EINVAL); } sc->spi_clk_real_freq = sc->ref_clock >> (baud_div + 1); /* Set up configuration register. */ sc->cfg_reg_shadow = ZY7_SPI_CONFIG_MAN_CS | ZY7_SPI_CONFIG_CS_MASK | ZY7_SPI_CONFIG_BAUD_RATE_DIV(baud_div) | ZY7_SPI_CONFIG_MODE_SEL; WR4(sc, ZY7_SPI_CONFIG_REG, sc->cfg_reg_shadow); /* Set thresholds. */ WR4(sc, ZY7_SPI_TX_THRESH_REG, 32); WR4(sc, ZY7_SPI_RX_THRESH_REG, 1); /* Clear and disable all interrupts. */ WR4(sc, ZY7_SPI_INTR_STAT_REG, ~0); WR4(sc, ZY7_SPI_INTR_DIS_REG, ~0); /* Enable SPI. */ WR4(sc, ZY7_SPI_EN_REG, ZY7_SPI_ENABLE); return (0); } static void zy7_spi_add_sysctls(device_t dev) { struct zy7_spi_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "spi_clk_real_freq", CTLFLAG_RD, &sc->spi_clk_real_freq, 0, "SPI clock real frequency"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overflows", CTLFLAG_RD, &sc->rx_overflows, 0, "RX FIFO overflow events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_underflows", CTLFLAG_RD, &sc->tx_underflows, 0, "TX FIFO underflow events"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, &sc->interrupts, 0, "interrupt calls"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "stray_ints", CTLFLAG_RD, &sc->stray_ints, 0, "stray interrupts"); } static int zy7_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Zynq SPI Controller"); return (BUS_PROBE_DEFAULT); } static int zy7_spi_detach(device_t); static int zy7_spi_attach(device_t dev) { struct zy7_spi_softc *sc; int rid, err; phandle_t node; pcell_t cell; sc = device_get_softc(dev); sc->dev = dev; SPI_SC_LOCK_INIT(sc); /* Get ref-clock and spi-clock properties. */ node = ofw_bus_get_node(dev); if (OF_getprop(node, "ref-clock", &cell, sizeof(cell)) > 0) sc->ref_clock = fdt32_to_cpu(cell); else { device_printf(dev, "must have ref-clock property\n"); return (ENXIO); } if (OF_getprop(node, "spi-clock", &cell, sizeof(cell)) > 0) sc->spi_clock = fdt32_to_cpu(cell); else sc->spi_clock = ZY7_SPI_DEFAULT_SPI_CLOCK; /* Get memory resource. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resources.\n"); zy7_spi_detach(dev); return (ENOMEM); } /* Allocate IRQ. */ rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "could not allocate IRQ resource.\n"); zy7_spi_detach(dev); return (ENOMEM); } /* Activate the interrupt. 
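*/

/*
 * Note that the divider search in zy7_spi_init_hw() above starts at
 * baud_div = 1 because divide-by-2 is unsupported on this block, so
 * the fastest achievable bit clock is ref-clock / 4.
 */

/* Activate the interrupt.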
*/ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, zy7_spi_intr, sc, &sc->intrhandle); if (err) { device_printf(dev, "could not setup IRQ.\n"); zy7_spi_detach(dev); return (err); } /* Configure the device. */ err = zy7_spi_init_hw(sc); if (err) { zy7_spi_detach(dev); return (err); } sc->child = device_add_child(dev, "spibus", DEVICE_UNIT_ANY); zy7_spi_add_sysctls(dev); /* Attach spibus driver as a child later when interrupts work. */ bus_delayed_attach_children(dev); return (0); } static int zy7_spi_detach(device_t dev) { struct zy7_spi_softc *sc = device_get_softc(dev); + int error; - if (device_is_attached(dev)) - bus_generic_detach(dev); + error = bus_generic_detach(dev); + if (error != 0) + return (error); /* Disable hardware. */ if (sc->mem_res != NULL) { /* Disable SPI. */ WR4(sc, ZY7_SPI_EN_REG, 0); /* Clear and disable all interrupts. */ WR4(sc, ZY7_SPI_INTR_STAT_REG, ~0); WR4(sc, ZY7_SPI_INTR_DIS_REG, ~0); } /* Teardown and release interrupt. */ if (sc->irq_res != NULL) { if (sc->intrhandle) bus_teardown_intr(dev, sc->irq_res, sc->intrhandle); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); } /* Release memory resource. */ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); SPI_SC_LOCK_DESTROY(sc); return (0); } static phandle_t zy7_spi_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } static int zy7_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct zy7_spi_softc *sc = device_get_softc(dev); uint32_t cs; uint32_t mode; int err = 0; KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz, ("TX/RX command sizes should be equal")); KASSERT(cmd->tx_data_sz == cmd->rx_data_sz, ("TX/RX data sizes should be equal")); /* Get chip select and mode for this child. */ spibus_get_cs(child, &cs); cs &= ~SPIBUS_CS_HIGH; if (cs > 2) { device_printf(dev, "Invalid chip select %d requested by %s", cs, device_get_nameunit(child)); return (EINVAL); } spibus_get_mode(child, &mode); SPI_SC_LOCK(sc); /* Wait for controller available. */ while (sc->busy != 0) { err = mtx_sleep(dev, &sc->sc_mtx, 0, "zspi0", 0); if (err) { SPI_SC_UNLOCK(sc); return (err); } } /* Start transfer. */ sc->busy = 1; sc->cmd = cmd; sc->tx_bytes = sc->cmd->tx_cmd_sz + sc->cmd->tx_data_sz; sc->tx_bytes_sent = 0; sc->rx_bytes = sc->cmd->rx_cmd_sz + sc->cmd->rx_data_sz; sc->rx_bytes_rcvd = 0; /* Enable interrupts. zy7_spi_intr() will handle transfer. */ WR4(sc, ZY7_SPI_INTR_EN_REG, ZY7_SPI_INTR_TX_FIFO_NOT_FULL | ZY7_SPI_INTR_RX_OVERFLOW); /* Handle polarity and phase. */ if (mode == SPIBUS_MODE_CPHA || mode == SPIBUS_MODE_CPOL_CPHA) sc->cfg_reg_shadow |= ZY7_SPI_CONFIG_CLK_PH; if (mode == SPIBUS_MODE_CPOL || mode == SPIBUS_MODE_CPOL_CPHA) sc->cfg_reg_shadow |= ZY7_SPI_CONFIG_CLK_POL; /* Assert CS. */ sc->cfg_reg_shadow &= ~ZY7_SPI_CONFIG_CS_MASK; sc->cfg_reg_shadow |= ZY7_SPI_CONFIG_CS(cs); WR4(sc, ZY7_SPI_CONFIG_REG, sc->cfg_reg_shadow); /* Wait for completion. */ err = mtx_sleep(dev, &sc->sc_mtx, 0, "zspi1", hz * 2); if (err) zy7_spi_abort_transfer(sc); /* Release controller. 
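*/

/*
 * Polarity/phase mapping used above: mode 1 (CPHA) sets
 * ZY7_SPI_CONFIG_CLK_PH, mode 2 (CPOL) sets ZY7_SPI_CONFIG_CLK_POL,
 * mode 3 sets both and mode 0 neither; zy7_spi_intr() clears both
 * bits again when the transfer completes.
 */

/* Release controller.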
*/ sc->busy = 0; wakeup_one(dev); SPI_SC_UNLOCK(sc); return (err); } static device_method_t zy7_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, zy7_spi_probe), DEVMETHOD(device_attach, zy7_spi_attach), DEVMETHOD(device_detach, zy7_spi_detach), /* SPI interface */ DEVMETHOD(spibus_transfer, zy7_spi_transfer), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, zy7_spi_get_node), DEVMETHOD_END }; static driver_t zy7_spi_driver = { "zy7_spi", zy7_spi_methods, sizeof(struct zy7_spi_softc), }; DRIVER_MODULE(zy7_spi, simplebus, zy7_spi_driver, 0, 0); DRIVER_MODULE(ofw_spibus, zy7_spi, ofw_spibus_driver, 0, 0); SIMPLEBUS_PNP_INFO(compat_data); MODULE_DEPEND(zy7_spi, ofw_spibus, 1, 1, 1); diff --git a/sys/dev/etherswitch/felix/felix.c b/sys/dev/etherswitch/felix/felix.c index b6fb8cbb67c8..098767ee063e 100644 --- a/sys/dev/etherswitch/felix/felix.c +++ b/sys/dev/etherswitch/felix/felix.c @@ -1,1004 +1,1005 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alstom Group. * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "etherswitch_if.h" #include "miibus_if.h" MALLOC_DECLARE(M_FELIX); MALLOC_DEFINE(M_FELIX, "felix", "felix switch"); static device_probe_t felix_probe; static device_attach_t felix_attach; static device_detach_t felix_detach; static etherswitch_info_t* felix_getinfo(device_t); static int felix_getconf(device_t, etherswitch_conf_t *); static int felix_setconf(device_t, etherswitch_conf_t *); static void felix_lock(device_t); static void felix_unlock(device_t); static int felix_getport(device_t, etherswitch_port_t *); static int felix_setport(device_t, etherswitch_port_t *); static int felix_readreg_wrapper(device_t, int); static int felix_writereg_wrapper(device_t, int, int); static int felix_readphy(device_t, int, int); static int felix_writephy(device_t, int, int, int); static int felix_setvgroup(device_t, etherswitch_vlangroup_t *); static int felix_getvgroup(device_t, etherswitch_vlangroup_t *); static int felix_parse_port_fdt(felix_softc_t, phandle_t, int *); static int felix_setup(felix_softc_t); static void felix_setup_port(felix_softc_t, int); static void felix_tick(void *); static int felix_ifmedia_upd(if_t); static void felix_ifmedia_sts(if_t, struct ifmediareq *); static void felix_get_port_cfg(felix_softc_t, etherswitch_port_t *); static void felix_set_port_cfg(felix_softc_t, etherswitch_port_t *); static bool felix_is_phyport(felix_softc_t, int); static struct mii_data *felix_miiforport(felix_softc_t, unsigned int); static struct felix_pci_id felix_pci_ids[] = { {PCI_VENDOR_FREESCALE, FELIX_DEV_ID, FELIX_DEV_NAME}, {0, 0, NULL} }; static device_method_t felix_methods[] = { /* device interface */ DEVMETHOD(device_probe, felix_probe), DEVMETHOD(device_attach, felix_attach), DEVMETHOD(device_detach, felix_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, felix_getinfo), DEVMETHOD(etherswitch_getconf, felix_getconf), DEVMETHOD(etherswitch_setconf, felix_setconf), DEVMETHOD(etherswitch_lock, felix_lock), DEVMETHOD(etherswitch_unlock, felix_unlock), DEVMETHOD(etherswitch_getport, felix_getport), DEVMETHOD(etherswitch_setport, felix_setport), DEVMETHOD(etherswitch_readreg, felix_readreg_wrapper), DEVMETHOD(etherswitch_writereg, felix_writereg_wrapper), DEVMETHOD(etherswitch_readphyreg, felix_readphy), DEVMETHOD(etherswitch_writephyreg, felix_writephy), DEVMETHOD(etherswitch_setvgroup, felix_setvgroup), DEVMETHOD(etherswitch_getvgroup, felix_getvgroup), /* miibus interface */ DEVMETHOD(miibus_readreg, felix_readphy), DEVMETHOD(miibus_writereg, felix_writephy), DEVMETHOD_END }; DEFINE_CLASS_0(felix, felix_driver, felix_methods, sizeof(struct felix_softc)); DRIVER_MODULE_ORDERED(felix, pci, felix_driver, NULL, NULL, SI_ORDER_ANY); DRIVER_MODULE(miibus, felix, miibus_fdt_driver, NULL, NULL); DRIVER_MODULE(etherswitch, felix, etherswitch_driver, NULL, NULL); 
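/*
 * The detach routines in this change share one shape: children are
 * detached first and any error is honored before driver state is torn
 * down.  A minimal sketch of that pattern (hypothetical foo_detach;
 * resource teardown elided):
 */
#if 0	/* illustrative only */
static int
foo_detach(device_t dev)
{
	int error;

	/* Children may still be using the parent's resources. */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	/* Only now release interrupts, memory ranges, and locks. */
	return (0);
}
#endif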
static int
felix_probe(device_t dev)
{
	struct felix_pci_id *id;
	felix_softc_t sc;

	sc = device_get_softc(dev);
	sc->dev = dev;

	for (id = felix_pci_ids; id->vendor != 0; ++id) {
		if (pci_get_device(dev) != id->device ||
		    pci_get_vendor(dev) != id->vendor)
			continue;

		device_set_desc(dev, id->desc);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
felix_parse_port_fdt(felix_softc_t sc, phandle_t child, int *pport)
{
	uint32_t port, status;
	phandle_t node;

	if (OF_getencprop(child, "reg", (void *)&port, sizeof(port)) < 0) {
		device_printf(sc->dev, "Port node doesn't have reg property\n");
		return (ENXIO);
	}

	*pport = port;

	node = OF_getproplen(child, "ethernet");
	if (node <= 0)
		sc->ports[port].cpu_port = false;
	else
		sc->ports[port].cpu_port = true;

	node = ofw_bus_find_child(child, "fixed-link");
	if (node <= 0) {
		sc->ports[port].fixed_port = false;
		return (0);
	}

	sc->ports[port].fixed_port = true;

	if (OF_getencprop(node, "speed", &status, sizeof(status)) <= 0) {
		device_printf(sc->dev,
		    "Port has fixed-link node without link speed specified\n");
		return (ENXIO);
	}

	switch (status) {
	case 2500:
		status = IFM_2500_T;
		break;
	case 1000:
		status = IFM_1000_T;
		break;
	case 100:
		status = IFM_100_T;
		break;
	case 10:
		status = IFM_10_T;
		break;
	default:
		device_printf(sc->dev,
		    "Unsupported link speed value of %d\n", status);
		return (ENXIO);
	}

	if (OF_hasprop(node, "full-duplex"))
		status |= IFM_FDX;
	else
		status |= IFM_HDX;

	status |= IFM_ETHER;
	sc->ports[port].fixed_link_status = status;
	return (0);
}

static int
felix_init_interface(felix_softc_t sc, int port)
{
	char name[IFNAMSIZ];

	snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev));

	sc->ports[port].ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ports[port].ifp, sc);
	if_setflags(sc->ports[port].ifp, IFF_UP | IFF_BROADCAST |
	    IFF_MULTICAST | IFF_DRV_RUNNING | IFF_SIMPLEX);
	sc->ports[port].ifname = malloc(strlen(name) + 1, M_FELIX, M_NOWAIT);
	if (sc->ports[port].ifname == NULL) {
		if_free(sc->ports[port].ifp);
		return (ENOMEM);
	}

	memcpy(sc->ports[port].ifname, name, strlen(name) + 1);
	if_initname(sc->ports[port].ifp, sc->ports[port].ifname, port);
	return (0);
}

static void
felix_setup_port(felix_softc_t sc, int port)
{
	/* The link speed always has to be set to 1000 in the clock register. */
	FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_CLK_CFG,
	    FELIX_DEVGMII_CLK_CFG_SPEED_1000);

	FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_MAC_CFG,
	    FELIX_DEVGMII_MAC_CFG_TX_ENA | FELIX_DEVGMII_MAC_CFG_RX_ENA);

	FELIX_WR4(sc, FELIX_QSYS_PORT_MODE(port),
	    FELIX_QSYS_PORT_MODE_PORT_ENA);

	/*
	 * Enable "VLANMTU". Each port has a configurable MTU.
	 * Accept frames that are 8 and 4 bytes longer than it for
	 * double and single tagged frames respectively.
	 * Since the etherswitch API doesn't provide an option to change
	 * the MTU, don't touch it for now.
	 */
	FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_VLAN_CFG,
	    FELIX_DEVGMII_VLAN_CFG_ENA |
	    FELIX_DEVGMII_VLAN_CFG_LEN_ENA |
	    FELIX_DEVGMII_VLAN_CFG_DOUBLE_ENA);
}
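
/*
 * One-time switch bring-up: soft-reset the core, initialize the internal
 * RAM, enable the core and then configure every port.
 */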
static int
felix_setup(felix_softc_t sc)
{
	int timeout, i;
	uint32_t reg;

	/* Trigger soft reset; the bit is self-clearing, with a 5 s timeout. */
	FELIX_WR4(sc, FELIX_DEVCPU_GCB_RST, FELIX_DEVCPU_GCB_RST_EN);
	timeout = FELIX_INIT_TIMEOUT;
	do {
		DELAY(1000);
		reg = FELIX_RD4(sc, FELIX_DEVCPU_GCB_RST);
		if ((reg & FELIX_DEVCPU_GCB_RST_EN) == 0)
			break;
	} while (timeout-- > 0);
	/* timeout is -1 here iff the loop expired without the bit clearing. */
	if (timeout < 0) {
		device_printf(sc->dev,
		    "Timeout while waiting for switch to reset\n");
		return (ETIMEDOUT);
	}

	FELIX_WR4(sc, FELIX_SYS_RAM_CTRL, FELIX_SYS_RAM_CTRL_INIT);
	timeout = FELIX_INIT_TIMEOUT;
	do {
		DELAY(1000);
		reg = FELIX_RD4(sc, FELIX_SYS_RAM_CTRL);
		if ((reg & FELIX_SYS_RAM_CTRL_INIT) == 0)
			break;
	} while (timeout-- > 0);
	if (timeout < 0) {
		device_printf(sc->dev,
		    "Timeout while waiting for switch RAM init.\n");
		return (ETIMEDOUT);
	}

	FELIX_WR4(sc, FELIX_SYS_CFG, FELIX_SYS_CFG_CORE_EN);

	for (i = 0; i < sc->info.es_nports; i++)
		felix_setup_port(sc, i);

	return (0);
}

static int
felix_timer_rate(SYSCTL_HANDLER_ARGS)
{
	felix_softc_t sc;
	int error, value, old;

	sc = arg1;

	old = value = sc->timer_ticks;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (value < 0)
		return (EINVAL);

	if (value == old)
		return (0);

	FELIX_LOCK(sc);
	sc->timer_ticks = value;
	callout_reset(&sc->tick_callout, sc->timer_ticks, felix_tick, sc);
	FELIX_UNLOCK(sc);

	return (0);
}

static int
felix_attach(device_t dev)
{
	phandle_t child, ports, node;
	int error, port, rid;
	felix_softc_t sc;
	uint32_t phy_addr;
	ssize_t size;

	sc = device_get_softc(dev);
	sc->info.es_nports = 0;
	sc->info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q;
	strlcpy(sc->info.es_name, "Felix TSN Switch", sizeof(sc->info.es_name));

	rid = PCIR_BAR(FELIX_BAR_MDIO);
	sc->mdio = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mdio == NULL) {
		device_printf(dev, "Failed to allocate MDIO registers.\n");
		return (ENXIO);
	}

	rid = PCIR_BAR(FELIX_BAR_REGS);
	sc->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->regs == NULL) {
		device_printf(dev, "Failed to allocate registers BAR.\n");
		error = ENXIO;
		goto out_fail;
	}

	mtx_init(&sc->mtx, "felix lock", NULL, MTX_DEF);
	callout_init_mtx(&sc->tick_callout, &sc->mtx, 0);

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		error = ENXIO;
		goto out_fail;
	}

	ports = ofw_bus_find_child(node, "ports");
	if (ports == 0) {
		device_printf(dev,
		    "Failed to find \"ports\" property in DTS.\n");
		error = ENXIO;
		goto out_fail;
	}

	for (child = OF_child(ports); child != 0; child = OF_peer(child)) {
		/* Do not parse disabled ports. */
		if (ofw_bus_node_status_okay(child) == 0)
			continue;

		error = felix_parse_port_fdt(sc, child, &port);
		if (error != 0)
			goto out_fail;

		error = felix_init_interface(sc, port);
		if (error != 0) {
			device_printf(sc->dev,
			    "Failed to initialize interface.\n");
			goto out_fail;
		}

		if (sc->ports[port].fixed_port) {
			sc->info.es_nports++;
			continue;
		}

		size = OF_getencprop(child, "phy-handle", &node, sizeof(node));
		if (size <= 0) {
			device_printf(sc->dev,
			    "Failed to acquire PHY handle from FDT.\n");
			error = ENXIO;
			goto out_fail;
		}

		node = OF_node_from_xref(node);
		size = OF_getencprop(node, "reg", &phy_addr, sizeof(phy_addr));
		if (size <= 0) {
			device_printf(sc->dev,
			    "Failed to obtain PHY address.\n");
			error = ENXIO;
			goto out_fail;
		}
		sc->ports[port].phyaddr = phy_addr;

		sc->ports[port].miibus = NULL;
		error = mii_attach(dev, &sc->ports[port].miibus,
		    sc->ports[port].ifp, felix_ifmedia_upd, felix_ifmedia_sts,
		    BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, 0);
		if (error != 0)
			goto out_fail;

		sc->info.es_nports++;
	}

	error = felix_setup(sc);
	if (error != 0)
		goto out_fail;

	sc->timer_ticks = hz;	/* Default to 1s. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "timer_ticks", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    felix_timer_rate, "I",
	    "Number of ticks between timer invocations");

	/* The tick routine has to be called with the lock held. */
	FELIX_LOCK(sc);
	felix_tick(sc);
	FELIX_UNLOCK(sc);

	/* Allow etherswitch to attach as our child. */
	bus_identify_children(dev);
	bus_attach_children(dev);

	return (0);

out_fail:
	felix_detach(dev);
	return (error);
}

static int
felix_detach(device_t dev)
{
	felix_softc_t sc;
	int error;
	int i;

-	error = 0;
	sc = device_get_softc(dev);
-	bus_generic_detach(dev);
+	error = bus_generic_detach(dev);
+	if (error != 0)
+		return (error);

	mtx_lock(&sc->mtx);
	callout_stop(&sc->tick_callout);
	mtx_unlock(&sc->mtx);
	mtx_destroy(&sc->mtx);

	/*
	 * If we were fully attached, do a soft reset so that the switch
	 * is left in unmanaged mode after the driver is unloaded.
	 */
	if (device_is_attached(dev))
		felix_setup(sc);

	for (i = 0; i < sc->info.es_nports; i++) {
		if (sc->ports[i].ifp != NULL)
			if_free(sc->ports[i].ifp);
		if (sc->ports[i].ifname != NULL)
			free(sc->ports[i].ifname, M_FELIX);
	}

	if (sc->regs != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->regs), sc->regs);

	if (sc->mdio != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mdio), sc->mdio);

	return (error);
}

static etherswitch_info_t*
felix_getinfo(device_t dev)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);
	return (&sc->info);
}

static int
felix_getconf(device_t dev, etherswitch_conf_t *conf)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);

	/* Return the VLAN mode. */
	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = sc->vlan_mode;

	return (0);
}

static int
felix_init_vlan(felix_softc_t sc)
{
	int timeout = FELIX_INIT_TIMEOUT;
	uint32_t reg;
	int i;

	/* Flush the VLAN table in hardware. */
	FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_RESET);
	do {
		DELAY(1000);
		reg = FELIX_RD4(sc, FELIX_ANA_VT);
		if ((reg & FELIX_ANA_VT_STS) == FELIX_ANA_VT_IDLE)
			break;
	} while (timeout-- > 0);
	/* timeout is -1 here iff the loop expired without going idle. */
	if (timeout < 0) {
		device_printf(sc->dev, "Timeout during VLAN table reset.\n");
		return (ETIMEDOUT);
	}

	/* Flush the VLAN table in sc. */
	for (i = 0; i < sc->info.es_nvlangroups; i++)
		sc->vlans[i] = 0;

	/*
	 * Make all ports VLAN aware: read the VID from incoming frames
	 * and use it for port grouping purposes.
	 * Don't set this if a pvid is already set.
	 */
	for (i = 0; i < sc->info.es_nports; i++) {
		reg = FELIX_ANA_PORT_RD4(sc, i, FELIX_ANA_PORT_VLAN_CFG);
		if ((reg & FELIX_ANA_PORT_VLAN_CFG_VID_MASK) != 0)
			continue;

		reg |= FELIX_ANA_PORT_VLAN_CFG_VID_AWARE;
		FELIX_ANA_PORT_WR4(sc, i, FELIX_ANA_PORT_VLAN_CFG, reg);
	}
	return (0);
}
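
/*
 * Only 802.1Q VLAN mode is supported; selecting it (re)initializes the
 * hardware VLAN table.
 */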
static int
felix_setconf(device_t dev, etherswitch_conf_t *conf)
{
	felix_softc_t sc;
	int error;

	error = 0;
	/* Set the VLAN mode. */
	sc = device_get_softc(dev);
	FELIX_LOCK(sc);
	if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) {
		switch (conf->vlan_mode) {
		case ETHERSWITCH_VLAN_DOT1Q:
			sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;
			sc->info.es_nvlangroups = FELIX_NUM_VLANS;
			error = felix_init_vlan(sc);
			break;
		default:
			error = EINVAL;
		}
	}
	FELIX_UNLOCK(sc);
	return (error);
}

static void
felix_lock(device_t dev)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);
	FELIX_LOCK_ASSERT(sc, MA_NOTOWNED);
	FELIX_LOCK(sc);
}

static void
felix_unlock(device_t dev)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);
	FELIX_LOCK_ASSERT(sc, MA_OWNED);
	FELIX_UNLOCK(sc);
}

static void
felix_get_port_cfg(felix_softc_t sc, etherswitch_port_t *p)
{
	uint32_t reg;

	p->es_flags = 0;

	reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG);
	if (reg & FELIX_ANA_PORT_DROP_CFG_TAGGED)
		p->es_flags |= ETHERSWITCH_PORT_DROPTAGGED;

	if (reg & FELIX_ANA_PORT_DROP_CFG_UNTAGGED)
		p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;

	reg = FELIX_DEVGMII_PORT_RD4(sc, p->es_port, FELIX_DEVGMII_VLAN_CFG);
	if (reg & FELIX_DEVGMII_VLAN_CFG_DOUBLE_ENA)
		p->es_flags |= ETHERSWITCH_PORT_DOUBLE_TAG;

	reg = FELIX_REW_PORT_RD4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG);
	if (reg & FELIX_REW_PORT_TAG_CFG_ALL)
		p->es_flags |= ETHERSWITCH_PORT_ADDTAG;

	reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG);
	if (reg & FELIX_ANA_PORT_VLAN_CFG_POP)
		p->es_flags |= ETHERSWITCH_PORT_STRIPTAGINGRESS;

	p->es_pvid = reg & FELIX_ANA_PORT_VLAN_CFG_VID_MASK;
}

static int
felix_getport(device_t dev, etherswitch_port_t *p)
{
	struct ifmediareq *ifmr;
	struct mii_data *mii;
	felix_softc_t sc;
	int error;

	error = 0;
	sc = device_get_softc(dev);
	FELIX_LOCK_ASSERT(sc, MA_NOTOWNED);

	if (p->es_port >= sc->info.es_nports || p->es_port < 0)
		return (EINVAL);

	FELIX_LOCK(sc);
	felix_get_port_cfg(sc, p);
	if (sc->ports[p->es_port].fixed_port) {
		ifmr = &p->es_ifmr;
		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
		ifmr->ifm_count = 0;
		ifmr->ifm_active = sc->ports[p->es_port].fixed_link_status;
		ifmr->ifm_current = ifmr->ifm_active;
		ifmr->ifm_mask = 0;
	} else {
		mii = felix_miiforport(sc, p->es_port);
		error = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
		    &mii->mii_media, SIOCGIFMEDIA);
	}
	FELIX_UNLOCK(sc);
	return (error);
}
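
/*
 * Translate etherswitch port flags into the per-port DROP_CFG, TAG_CFG
 * and VLAN_CFG register bits.
 */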
static void
felix_set_port_cfg(felix_softc_t sc, etherswitch_port_t *p)
{
	uint32_t reg;

	reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG);
	if (p->es_flags & ETHERSWITCH_PORT_DROPTAGGED)
		reg |= FELIX_ANA_PORT_DROP_CFG_TAGGED;
	else
		reg &= ~FELIX_ANA_PORT_DROP_CFG_TAGGED;

	if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
		reg |= FELIX_ANA_PORT_DROP_CFG_UNTAGGED;
	else
		reg &= ~FELIX_ANA_PORT_DROP_CFG_UNTAGGED;

	FELIX_ANA_PORT_WR4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG, reg);

	reg = FELIX_REW_PORT_RD4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG);
	if (p->es_flags & ETHERSWITCH_PORT_ADDTAG)
		reg |= FELIX_REW_PORT_TAG_CFG_ALL;
	else
		reg &= ~FELIX_REW_PORT_TAG_CFG_ALL;

	FELIX_REW_PORT_WR4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG, reg);

	reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG);
	if (p->es_flags & ETHERSWITCH_PORT_STRIPTAGINGRESS)
		reg |= FELIX_ANA_PORT_VLAN_CFG_POP;
	else
		reg &= ~FELIX_ANA_PORT_VLAN_CFG_POP;

	reg &= ~FELIX_ANA_PORT_VLAN_CFG_VID_MASK;
	reg |= p->es_pvid & FELIX_ANA_PORT_VLAN_CFG_VID_MASK;
	/*
	 * If a port VID is set, use it for VLAN classification instead
	 * of the frame VID.
	 * By default the frame tag takes precedence;
	 * force the switch to ignore it.
	 */
	if (p->es_pvid != 0)
		reg &= ~FELIX_ANA_PORT_VLAN_CFG_VID_AWARE;
	else
		reg |= FELIX_ANA_PORT_VLAN_CFG_VID_AWARE;

	FELIX_ANA_PORT_WR4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG, reg);
}

static int
felix_setport(device_t dev, etherswitch_port_t *p)
{
	felix_softc_t sc;
	struct mii_data *mii;
	int error;

	error = 0;
	sc = device_get_softc(dev);
	FELIX_LOCK_ASSERT(sc, MA_NOTOWNED);

	if (p->es_port >= sc->info.es_nports || p->es_port < 0)
		return (EINVAL);

	FELIX_LOCK(sc);
	felix_set_port_cfg(sc, p);
	if (felix_is_phyport(sc, p->es_port)) {
		mii = felix_miiforport(sc, p->es_port);
		error = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr,
		    &mii->mii_media, SIOCSIFMEDIA);
	}
	FELIX_UNLOCK(sc);
	return (error);
}

static int
felix_readreg_wrapper(device_t dev, int addr_reg)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);
	if (addr_reg > rman_get_size(sc->regs))
		return (UINT32_MAX);	/* Can't return errors here. */

	return (FELIX_RD4(sc, addr_reg));
}

static int
felix_writereg_wrapper(device_t dev, int addr_reg, int val)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);
	if (addr_reg > rman_get_size(sc->regs))
		return (EINVAL);

	FELIX_WR4(sc, addr_reg, val);
	return (0);
}

static int
felix_readphy(device_t dev, int phy, int reg)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);

	return (enetc_mdio_read(sc->mdio, FELIX_MDIO_BASE, phy, reg));
}

static int
felix_writephy(device_t dev, int phy, int reg, int data)
{
	felix_softc_t sc;

	sc = device_get_softc(dev);

	return (enetc_mdio_write(sc->mdio, FELIX_MDIO_BASE, phy, reg, data));
}

static int
felix_set_dot1q_vlan(felix_softc_t sc, etherswitch_vlangroup_t *vg)
{
	uint32_t reg;
	int i, vid;

	vid = vg->es_vid & ETHERSWITCH_VID_MASK;

	/* Tagged mode is not supported. */
	if (vg->es_member_ports != vg->es_untagged_ports)
		return (EINVAL);

	/*
	 * The hardware supports 4096 groups, indexed by VID, while the
	 * etherswitch group index is a separate, smaller namespace, so we
	 * can't assume group_id == vid; sc->vlans[] maps each group index
	 * to its VID. Note that hw_group_id == vid.
	 */
	if (vid == 0) {
		/* Clear the VLAN table entry using the old VID. */
		FELIX_WR4(sc, FELIX_ANA_VTIDX, sc->vlans[vg->es_vlangroup]);
		FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_WRITE);
		sc->vlans[vg->es_vlangroup] = 0;
		return (0);
	}

	/* The VID is already used in a different group. */
	for (i = 0; i < sc->info.es_nvlangroups; i++)
		if (i != vg->es_vlangroup && vid == sc->vlans[i])
			return (EINVAL);

	/* This group already uses a different VID. */
	if (sc->vlans[vg->es_vlangroup] != 0 &&
	    sc->vlans[vg->es_vlangroup] != vid)
		return (EINVAL);

	sc->vlans[vg->es_vlangroup] = vid;

	/* Assign members to the given group. */
	reg = vg->es_member_ports & FELIX_ANA_VT_PORTMASK_MASK;
	reg <<= FELIX_ANA_VT_PORTMASK_SHIFT;
	reg |= FELIX_ANA_VT_WRITE;

	FELIX_WR4(sc, FELIX_ANA_VTIDX, vid);
	FELIX_WR4(sc, FELIX_ANA_VT, reg);

	/*
	 * According to the documentation, read and write commands
	 * are instant; add a small delay just to be safe.
	 */
	mb();
	DELAY(100);
	reg = FELIX_RD4(sc, FELIX_ANA_VT);
	if ((reg & FELIX_ANA_VT_STS) != FELIX_ANA_VT_IDLE)
		return (ENXIO);

	return (0);
}

static int
felix_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	felix_softc_t sc;
	int error;

	sc = device_get_softc(dev);

	FELIX_LOCK(sc);
	if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q)
		error = felix_set_dot1q_vlan(sc, vg);
	else
		error = EINVAL;

	FELIX_UNLOCK(sc);
	return (error);
}
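
/*
 * Read back the member port mask for this group's VID from the hardware
 * VLAN table.
 */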
static int
felix_get_dot1q_vlan(felix_softc_t sc, etherswitch_vlangroup_t *vg)
{
	uint32_t reg;
	int vid;

	vid = sc->vlans[vg->es_vlangroup];

	if (vid == 0)
		return (0);

	FELIX_WR4(sc, FELIX_ANA_VTIDX, vid);
	FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_READ);

	/*
	 * According to the documentation, read and write commands
	 * are instant; add a small delay just to be safe.
	 */
	mb();
	DELAY(100);
	reg = FELIX_RD4(sc, FELIX_ANA_VT);
	if ((reg & FELIX_ANA_VT_STS) != FELIX_ANA_VT_IDLE)
		return (ENXIO);

	reg >>= FELIX_ANA_VT_PORTMASK_SHIFT;
	reg &= FELIX_ANA_VT_PORTMASK_MASK;

	vg->es_untagged_ports = vg->es_member_ports = reg;
	vg->es_fid = 0;
	vg->es_vid = vid | ETHERSWITCH_VID_VALID;
	return (0);
}

static int
felix_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	felix_softc_t sc;
	int error;

	sc = device_get_softc(dev);

	FELIX_LOCK(sc);
	if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q)
		error = felix_get_dot1q_vlan(sc, vg);
	else
		error = EINVAL;

	FELIX_UNLOCK(sc);
	return (error);
}

static void
felix_tick(void *arg)
{
	struct mii_data *mii;
	felix_softc_t sc;
	int port;

	sc = arg;

	FELIX_LOCK_ASSERT(sc, MA_OWNED);

	for (port = 0; port < sc->info.es_nports; port++) {
		if (!felix_is_phyport(sc, port))
			continue;

		mii = felix_miiforport(sc, port);
		MPASS(mii != NULL);
		mii_tick(mii);
	}
	if (sc->timer_ticks != 0)
		callout_reset(&sc->tick_callout, sc->timer_ticks,
		    felix_tick, sc);
}

static int
felix_ifmedia_upd(if_t ifp)
{
	struct mii_data *mii;
	felix_softc_t sc;

	sc = if_getsoftc(ifp);
	mii = felix_miiforport(sc, if_getdunit(ifp));
	if (mii == NULL)
		return (ENXIO);

	mii_mediachg(mii);
	return (0);
}

static void
felix_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	felix_softc_t sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = felix_miiforport(sc, if_getdunit(ifp));
	if (mii == NULL)
		return;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static bool
felix_is_phyport(felix_softc_t sc, int port)
{

	return (!sc->ports[port].fixed_port);
}

static struct mii_data*
felix_miiforport(felix_softc_t sc, unsigned int port)
{

	if (!felix_is_phyport(sc, port))
		return (NULL);

	return (device_get_softc(sc->ports[port].miibus));
}