diff --git a/sys/dev/age/if_age.c b/sys/dev/age/if_age.c
index 8ed176eec768..ff3b8c6a22db 100644
--- a/sys/dev/age/if_age.c
+++ b/sys/dev/age/if_age.c
@@ -1,3343 +1,3343 @@
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/age/if_agereg.h>
#include <dev/age/if_agevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define	AGE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(age, pci, 1, 1, 1);
MODULE_DEPEND(age, ether, 1, 1, 1);
MODULE_DEPEND(age, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.age.msi_disable", &msi_disable);
TUNABLE_INT("hw.age.msix_disable", &msix_disable);

/*
 * Devices supported by this driver.
*/ static struct age_dev { uint16_t age_vendorid; uint16_t age_deviceid; const char *age_name; } age_devs[] = { { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1, "Attansic Technology Corp, L1 Gigabit Ethernet" }, }; static int age_miibus_readreg(device_t, int, int); static int age_miibus_writereg(device_t, int, int, int); static void age_miibus_statchg(device_t); static void age_mediastatus(struct ifnet *, struct ifmediareq *); static int age_mediachange(struct ifnet *); static int age_probe(device_t); static void age_get_macaddr(struct age_softc *); static void age_phy_reset(struct age_softc *); static int age_attach(device_t); static int age_detach(device_t); static void age_sysctl_node(struct age_softc *); static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int age_check_boundary(struct age_softc *); static int age_dma_alloc(struct age_softc *); static void age_dma_free(struct age_softc *); static int age_shutdown(device_t); static void age_setwol(struct age_softc *); static int age_suspend(device_t); static int age_resume(device_t); static int age_encap(struct age_softc *, struct mbuf **); static void age_start(struct ifnet *); static void age_start_locked(struct ifnet *); static void age_watchdog(struct age_softc *); static int age_ioctl(struct ifnet *, u_long, caddr_t); static void age_mac_config(struct age_softc *); static void age_link_task(void *, int); static void age_stats_update(struct age_softc *); static int age_intr(void *); static void age_int_task(void *, int); static void age_txintr(struct age_softc *, int); static void age_rxeof(struct age_softc *sc, struct rx_rdesc *); static int age_rxintr(struct age_softc *, int, int); static void age_tick(void *); static void age_reset(struct age_softc *); static void age_init(void *); static void age_init_locked(struct age_softc *); static void age_stop(struct age_softc *); static void age_stop_txmac(struct age_softc *); static void age_stop_rxmac(struct age_softc *); static void age_init_tx_ring(struct age_softc *); static int age_init_rx_ring(struct age_softc *); static void age_init_rr_ring(struct age_softc *); static void age_init_cmb_block(struct age_softc *); static void age_init_smb_block(struct age_softc *); #ifndef __NO_STRICT_ALIGNMENT static struct mbuf *age_fixup_rx(struct ifnet *, struct mbuf *); #endif static int age_newbuf(struct age_softc *, struct age_rxdesc *); static void age_rxvlan(struct age_softc *); static void age_rxfilter(struct age_softc *); static int sysctl_age_stats(SYSCTL_HANDLER_ARGS); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS); static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS); static device_method_t age_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, age_probe), DEVMETHOD(device_attach, age_attach), DEVMETHOD(device_detach, age_detach), DEVMETHOD(device_shutdown, age_shutdown), DEVMETHOD(device_suspend, age_suspend), DEVMETHOD(device_resume, age_resume), /* MII interface. 
*/ DEVMETHOD(miibus_readreg, age_miibus_readreg), DEVMETHOD(miibus_writereg, age_miibus_writereg), DEVMETHOD(miibus_statchg, age_miibus_statchg), { NULL, NULL } }; static driver_t age_driver = { "age", age_methods, sizeof(struct age_softc) }; static devclass_t age_devclass; DRIVER_MODULE(age, pci, age_driver, age_devclass, 0, 0); DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, 0, 0); static struct resource_spec age_res_spec_mem[] = { { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, { -1, 0, 0 } }; static struct resource_spec age_irq_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec age_irq_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static struct resource_spec age_irq_spec_msix[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; /* * Read a PHY register on the MII of the L1. */ static int age_miibus_readreg(device_t dev, int phy, int reg) { struct age_softc *sc; uint32_t v; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); for (i = AGE_PHY_TIMEOUT; i > 0; i--) { DELAY(1); v = CSR_READ_4(sc, AGE_MDIO); if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) break; } if (i == 0) { device_printf(sc->age_dev, "phy read timeout : %d\n", reg); return (0); } return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); } /* * Write a PHY register on the MII of the L1. */ static int age_miibus_writereg(device_t dev, int phy, int reg, int val) { struct age_softc *sc; uint32_t v; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); for (i = AGE_PHY_TIMEOUT; i > 0; i--) { DELAY(1); v = CSR_READ_4(sc, AGE_MDIO); if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) break; } if (i == 0) device_printf(sc->age_dev, "phy write timeout : %d\n", reg); return (0); } /* * Callback from MII layer when media changes. */ static void age_miibus_statchg(device_t dev) { struct age_softc *sc; sc = device_get_softc(dev); taskqueue_enqueue(taskqueue_swi, &sc->age_link_task); } /* * Get the current interface media status. */ static void age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct age_softc *sc; struct mii_data *mii; sc = ifp->if_softc; AGE_LOCK(sc); mii = device_get_softc(sc->age_miibus); mii_pollstat(mii); ifmr->ifm_status = mii->mii_media_status; ifmr->ifm_active = mii->mii_media_active; AGE_UNLOCK(sc); } /* * Set hardware to newly-selected media. */ static int age_mediachange(struct ifnet *ifp) { struct age_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = ifp->if_softc; AGE_LOCK(sc); mii = device_get_softc(sc->age_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); AGE_UNLOCK(sc); return (error); } static int age_probe(device_t dev) { struct age_dev *sp; int i; uint16_t vendor, devid; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); sp = age_devs; for (i = 0; i < nitems(age_devs); i++, sp++) { if (vendor == sp->age_vendorid && devid == sp->age_deviceid) { device_set_desc(dev, sp->age_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void age_get_macaddr(struct age_softc *sc) { uint32_t ea[2], reg; int i, vpdc; reg = CSR_READ_4(sc, AGE_SPI_CTRL); if ((reg & SPI_VPD_ENB) != 0) { /* Get VPD stored in TWSI EEPROM. 
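 */

/*
 * A stand-alone sketch of the idiom age_miibus_readreg() and
 * age_miibus_writereg() above share: start an MDIO operation, busy-wait
 * with DELAY(1) until the EXECUTE/BUSY bits clear, and treat a counter
 * that reaches zero as a timeout. The flag values and csr_read() below
 * are made-up stand-ins for the AGE_MDIO register, not the real
 * hardware layout.
 */
#include <stdint.h>
#include <stdio.h>

#define	OP_EXECUTE	0x00000001u	/* assumed flag bits, illustrative */
#define	OP_BUSY		0x00000002u
#define	PHY_TIMEOUT	1000		/* plays the role of AGE_PHY_TIMEOUT */

static int busy_polls = 3;		/* simulated: busy for three polls */

static uint32_t
csr_read(void)
{
	return (busy_polls-- > 0 ? OP_BUSY : 0);
}

static int
mdio_wait_idle(void)
{
	int i;

	for (i = PHY_TIMEOUT; i > 0; i--) {
		/* The driver sleeps with DELAY(1) between polls. */
		if ((csr_read() & (OP_EXECUTE | OP_BUSY)) == 0)
			break;
	}
	/* i == 0 means every poll still showed the engine busy. */
	return (i == 0 ? -1 : 0);
}

int
main(void)
{
	printf("mdio idle: %d\n", mdio_wait_idle());
	return (0);
}

/*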
*/ reg &= ~SPI_VPD_ENB; CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); } if (pci_find_cap(sc->age_dev, PCIY_VPD, &vpdc) == 0) { /* * PCI VPD capability found, let TWSI reload EEPROM. * This will set ethernet address of controller. */ CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) | TWSI_CTRL_SW_LD_START); for (i = 100; i > 0; i--) { DELAY(1000); reg = CSR_READ_4(sc, AGE_TWSI_CTRL); if ((reg & TWSI_CTRL_SW_LD_START) == 0) break; } if (i == 0) device_printf(sc->age_dev, "reloading EEPROM timeout!\n"); } else { if (bootverbose) device_printf(sc->age_dev, "PCI VPD capability not found!\n"); } ea[0] = CSR_READ_4(sc, AGE_PAR0); ea[1] = CSR_READ_4(sc, AGE_PAR1); sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF; sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF; sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF; sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF; sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF; sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF; } static void age_phy_reset(struct age_softc *sc) { uint16_t reg, pn; int i, linkup; /* Reset PHY. */ CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST); DELAY(2000); CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR); DELAY(2000); #define ATPHY_DBG_ADDR 0x1D #define ATPHY_DBG_DATA 0x1E #define ATPHY_CDTC 0x16 #define PHY_CDTC_ENB 0x0001 #define PHY_CDTC_POFF 8 #define ATPHY_CDTS 0x1C #define PHY_CDTS_STAT_OK 0x0000 #define PHY_CDTS_STAT_SHORT 0x0100 #define PHY_CDTS_STAT_OPEN 0x0200 #define PHY_CDTS_STAT_INVAL 0x0300 #define PHY_CDTS_STAT_MASK 0x0300 /* Check power saving mode. Magic from Linux. */ age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET); for (linkup = 0, pn = 0; pn < 4; pn++) { age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_CDTC, (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB); for (i = 200; i > 0; i--) { DELAY(1000); reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr, ATPHY_CDTC); if ((reg & PHY_CDTC_ENB) == 0) break; } DELAY(1000); reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr, ATPHY_CDTS); if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) { linkup++; break; } } age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); if (linkup == 0) { age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_DBG_ADDR, 0); age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_DBG_DATA, 0x124E); age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_DBG_ADDR, 1); reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr, ATPHY_DBG_DATA); age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_DBG_DATA, reg | 0x03); /* XXX */ DELAY(1500 * 1000); age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_DBG_ADDR, 0); age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_DBG_DATA, 0x024E); } #undef ATPHY_DBG_ADDR #undef ATPHY_DBG_DATA #undef ATPHY_CDTC #undef PHY_CDTC_ENB #undef PHY_CDTC_POFF #undef ATPHY_CDTS #undef PHY_CDTS_STAT_OK #undef PHY_CDTS_STAT_SHORT #undef PHY_CDTS_STAT_OPEN #undef PHY_CDTS_STAT_INVAL #undef PHY_CDTS_STAT_MASK } static int age_attach(device_t dev) { struct age_softc *sc; struct ifnet *ifp; uint16_t burst; int error, i, msic, msixc, pmc; error = 0; sc = device_get_softc(dev); sc->age_dev = dev; mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0); TASK_INIT(&sc->age_int_task, 0, age_int_task, sc); TASK_INIT(&sc->age_link_task, 0, age_link_task, sc); /* Map the device. 
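 */

/*
 * A self-contained sketch of the byte unpacking age_get_macaddr() above
 * performs: the 6-byte station address is reassembled from the two
 * 32-bit AGE_PAR0/AGE_PAR1 registers, with the two most significant
 * bytes in the low half of PAR1. The register values here are made up
 * for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static void
unpack_eaddr(uint32_t par0, uint32_t par1, uint8_t eaddr[6])
{
	/* Same shifts as the driver: PAR1 holds bytes 0-1, PAR0 bytes 2-5. */
	eaddr[0] = (par1 >> 8) & 0xFF;
	eaddr[1] = (par1 >> 0) & 0xFF;
	eaddr[2] = (par0 >> 24) & 0xFF;
	eaddr[3] = (par0 >> 16) & 0xFF;
	eaddr[4] = (par0 >> 8) & 0xFF;
	eaddr[5] = (par0 >> 0) & 0xFF;
}

int
main(void)
{
	uint8_t ea[6];

	unpack_eaddr(0x33445566, 0x00001122, ea);
	/* Prints 11:22:33:44:55:66. */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	    ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]);
	return (0);
}

/*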
 */
	pci_enable_busmaster(dev);
	sc->age_res_spec = age_res_spec_mem;
	sc->age_irq_spec = age_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = pci_get_revid(dev);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->age_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->age_chip_rev);
	}

	/*
	 * XXX
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx FIFO length. It seems that
	 * an unplugged cable puts the hardware into automatic power-down
	 * mode, which in turn returns an invalid chip revision.
	 */
	if (sc->age_chip_rev == 0xFFFF) {
		device_printf(dev, "invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->age_chip_rev);
		error = ENXIO;
		goto fail;
	}
	device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
	    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
	    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msic == AGE_MSIX_MESSAGES) {
				device_printf(dev,
				    "Using %d MSIX messages.\n", msixc);
				sc->age_flags |= AGE_FLAG_MSIX;
				sc->age_irq_spec = age_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 &&
		    msic == AGE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == AGE_MSI_MESSAGES) {
				device_printf(dev,
				    "Using %d MSI messages.\n", msic);
				sc->age_flags |= AGE_FLAG_MSI;
				sc->age_irq_spec = age_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Get DMA parameters from PCIe device control register. */
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->age_flags |= AGE_FLAG_PCIE;
		burst = pci_read_config(dev, i + 0x08, 2);
		/* Max read request size. */
		sc->age_dma_rd_burst = ((burst >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->age_dma_wr_burst = ((burst >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
	} else {
		sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}

	/* Create device sysctl node. */
	age_sysctl_node(sc);

	if ((error = age_dma_alloc(sc)) != 0)
		goto fail;

	/*
	 * Load station address.
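 */

/*
 * A minimal sketch of the PCIe device control decoding above: the
 * maximum read request size lives in bits 14:12 and the maximum
 * payload size in bits 7:5, each encoded as a power-of-two multiple
 * of 128 bytes, so decoding is just 128 << field. The register value
 * below is made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t burst = 0x2810;	/* example device control value */

	/* Max read request size, bits 14:12: prints 512 bytes. */
	printf("read request : %d bytes\n", 128 << ((burst >> 12) & 0x07));
	/* Max TLP payload size, bits 7:5: prints 128 bytes. */
	printf("payload      : %d bytes\n", 128 << ((burst >> 5) & 0x07));
	return (0);
}

/*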
*/ age_get_macaddr(sc); ifp = sc->age_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet structure.\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = age_ioctl; ifp->if_start = age_start; ifp->if_init = age_init; ifp->if_snd.ifq_drv_maxlen = AGE_TX_RING_CNT - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; ifp->if_hwassist = AGE_CSUM_FEATURES | CSUM_TSO; if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) { sc->age_flags |= AGE_FLAG_PMCAP; ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST; } ifp->if_capenable = ifp->if_capabilities; /* Set up MII bus. */ error = mii_attach(dev, &sc->age_miibus, ifp, age_mediachange, age_mediastatus, BMSR_DEFCAPMASK, sc->age_phyaddr, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } ether_ifattach(ifp, sc->age_eaddr); /* VLAN capability setup. */ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO; ifp->if_capenable = ifp->if_capabilities; /* Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* Create local taskq. */ sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->age_tq); if (sc->age_tq == NULL) { device_printf(dev, "could not create taskqueue.\n"); ether_ifdetach(ifp); error = ENXIO; goto fail; } taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->age_dev)); if ((sc->age_flags & AGE_FLAG_MSIX) != 0) msic = AGE_MSIX_MESSAGES; else if ((sc->age_flags & AGE_FLAG_MSI) != 0) msic = AGE_MSI_MESSAGES; else msic = 1; for (i = 0; i < msic; i++) { error = bus_setup_intr(dev, sc->age_irq[i], INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc, &sc->age_intrhand[i]); if (error != 0) break; } if (error != 0) { device_printf(dev, "could not set up interrupt handler.\n"); taskqueue_free(sc->age_tq); sc->age_tq = NULL; ether_ifdetach(ifp); goto fail; } fail: if (error != 0) age_detach(dev); return (error); } static int age_detach(device_t dev) { struct age_softc *sc; struct ifnet *ifp; int i, msic; sc = device_get_softc(dev); ifp = sc->age_ifp; if (device_is_attached(dev)) { AGE_LOCK(sc); sc->age_flags |= AGE_FLAG_DETACH; age_stop(sc); AGE_UNLOCK(sc); callout_drain(&sc->age_tick_ch); taskqueue_drain(sc->age_tq, &sc->age_int_task); taskqueue_drain(taskqueue_swi, &sc->age_link_task); ether_ifdetach(ifp); } if (sc->age_tq != NULL) { taskqueue_drain(sc->age_tq, &sc->age_int_task); taskqueue_free(sc->age_tq); sc->age_tq = NULL; } if (sc->age_miibus != NULL) { device_delete_child(dev, sc->age_miibus); sc->age_miibus = NULL; } bus_generic_detach(dev); age_dma_free(sc); if (ifp != NULL) { if_free(ifp); sc->age_ifp = NULL; } if ((sc->age_flags & AGE_FLAG_MSIX) != 0) msic = AGE_MSIX_MESSAGES; else if ((sc->age_flags & AGE_FLAG_MSI) != 0) msic = AGE_MSI_MESSAGES; else msic = 1; for (i = 0; i < msic; i++) { if (sc->age_intrhand[i] != NULL) { bus_teardown_intr(dev, sc->age_irq[i], sc->age_intrhand[i]); sc->age_intrhand[i] = NULL; } } bus_release_resources(dev, sc->age_irq_spec, sc->age_irq); if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0) pci_release_msi(dev); bus_release_resources(dev, sc->age_res_spec, sc->age_res); mtx_destroy(&sc->age_mtx); return (0); } static void age_sysctl_node(struct 
age_softc *sc)
{
	int error;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats, "I",
	    "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
	    sysctl_hw_age_int_mod, "I", "age interrupt moderation");

	/* Pull in device tunables. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
	if (error == 0) {
		if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
		    sc->age_int_mod > AGE_IM_TIMER_MAX) {
			device_printf(sc->age_dev,
			    "int_mod value out of range; using default: %d\n",
			    AGE_IM_TIMER_DEFAULT);
			sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
		}
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->age_process_limit,
	    0, sysctl_hw_age_proc_limit, "I",
	    "max number of Rx events to process");

	/* Pull in device tunables. */
	sc->age_process_limit = AGE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "process_limit",
	    &sc->age_process_limit);
	if (error == 0) {
		if (sc->age_process_limit < AGE_PROC_MIN ||
		    sc->age_process_limit > AGE_PROC_MAX) {
			device_printf(sc->age_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", AGE_PROC_DEFAULT);
			sc->age_process_limit = AGE_PROC_DEFAULT;
		}
	}
}

struct age_dmamap_arg {
	bus_addr_t	age_busaddr;
};

static void
age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct age_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct age_dmamap_arg *)arg;
	ctx->age_busaddr = segs[0].ds_addr;
}

/*
 * The Attansic L1 controller has a single register to specify the high
 * address part of DMA blocks, so all descriptor structures and DMA
 * memory blocks must share the same high 32 bits of the 64-bit address
 * space (i.e. crossing a 4GB boundary is not allowed).
 */
static int
age_check_boundary(struct age_softc *sc)
{
	bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
	bus_addr_t cmb_block_end, smb_block_end;

	/*
	 * The Tx/Rx descriptor queues should reside within the 4GB boundary.
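 */

/*
 * A stand-alone sketch of the comparison age_check_boundary() performs
 * below: a region crosses a 4GB boundary exactly when the upper 32 bits
 * of its start and end addresses differ. addr_hi() plays the role of
 * AGE_ADDR_HI(); the addresses are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
addr_hi(uint64_t paddr)
{
	return ((uint32_t)(paddr >> 32));
}

static int
crosses_4g(uint64_t start, uint64_t len)
{
	/* EFBIG-style failure when start and end have different hi words. */
	return (addr_hi(start) != addr_hi(start + len));
}

int
main(void)
{
	/* 0xFFFFFF00 + 0x200 wraps past the 4GB line; 0x1000 does not. */
	printf("%d %d\n", crosses_4g(0xFFFFFF00ULL, 0x200),
	    crosses_4g(0x1000ULL, 0x200));	/* prints "1 0" */
	return (0);
}

/*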
 */
	tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
	rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
	rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
	cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
	smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;

	if ((AGE_ADDR_HI(tx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
	    (AGE_ADDR_HI(rx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
	    (AGE_ADDR_HI(rr_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
	    (AGE_ADDR_HI(cmb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
	    (AGE_ADDR_HI(smb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))
		return (EFBIG);

	if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
		return (EFBIG);

	return (0);
}

static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	bus_addr_t lowaddr;
	struct age_dmamap_arg ctx;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
again:
	/* Create parent ring/DMA block tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->age_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_parent_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx return ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/*
	 * Create tag for coalescing message block.
*/ error = bus_dma_tag_create( sc->age_cdata.age_parent_tag, /* parent */ AGE_CMB_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AGE_CMB_BLOCK_SZ, /* maxsize */ 1, /* nsegments */ AGE_CMB_BLOCK_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->age_cdata.age_cmb_block_tag); if (error != 0) { device_printf(sc->age_dev, "could not create CMB DMA tag.\n"); goto fail; } /* Create tag for statistics message block. */ error = bus_dma_tag_create( sc->age_cdata.age_parent_tag, /* parent */ AGE_SMB_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AGE_SMB_BLOCK_SZ, /* maxsize */ 1, /* nsegments */ AGE_SMB_BLOCK_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->age_cdata.age_smb_block_tag); if (error != 0) { device_printf(sc->age_dev, "could not create SMB DMA tag.\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map. */ error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag, (void **)&sc->age_rdata.age_tx_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->age_cdata.age_tx_ring_map); if (error != 0) { device_printf(sc->age_dev, "could not allocate DMA'able memory for Tx ring.\n"); goto fail; } ctx.age_busaddr = 0; error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag, sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0); if (error != 0 || ctx.age_busaddr == 0) { device_printf(sc->age_dev, "could not load DMA'able memory for Tx ring.\n"); goto fail; } sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr; /* Rx ring */ error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag, (void **)&sc->age_rdata.age_rx_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->age_cdata.age_rx_ring_map); if (error != 0) { device_printf(sc->age_dev, "could not allocate DMA'able memory for Rx ring.\n"); goto fail; } ctx.age_busaddr = 0; error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag, sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0); if (error != 0 || ctx.age_busaddr == 0) { device_printf(sc->age_dev, "could not load DMA'able memory for Rx ring.\n"); goto fail; } sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr; /* Rx return ring */ error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag, (void **)&sc->age_rdata.age_rr_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->age_cdata.age_rr_ring_map); if (error != 0) { device_printf(sc->age_dev, "could not allocate DMA'able memory for Rx return ring.\n"); goto fail; } ctx.age_busaddr = 0; error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag, sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, age_dmamap_cb, &ctx, 0); if (error != 0 || ctx.age_busaddr == 0) { device_printf(sc->age_dev, "could not load DMA'able memory for Rx return ring.\n"); goto fail; } sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr; /* CMB block */ error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag, (void **)&sc->age_rdata.age_cmb_block, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->age_cdata.age_cmb_block_map); if (error != 0) { device_printf(sc->age_dev, "could not allocate DMA'able memory for CMB block.\n"); goto fail; } ctx.age_busaddr = 0; error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag, sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0); 
if (error != 0 || ctx.age_busaddr == 0) { device_printf(sc->age_dev, "could not load DMA'able memory for CMB block.\n"); goto fail; } sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr; /* SMB block */ error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag, (void **)&sc->age_rdata.age_smb_block, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->age_cdata.age_smb_block_map); if (error != 0) { device_printf(sc->age_dev, "could not allocate DMA'able memory for SMB block.\n"); goto fail; } ctx.age_busaddr = 0; error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag, sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0); if (error != 0 || ctx.age_busaddr == 0) { device_printf(sc->age_dev, "could not load DMA'able memory for SMB block.\n"); goto fail; } sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr; /* * All ring buffer and DMA blocks should have the same * high address part of 64bit DMA address space. */ if (lowaddr != BUS_SPACE_MAXADDR_32BIT && (error = age_check_boundary(sc)) != 0) { device_printf(sc->age_dev, "4GB boundary crossed, " "switching to 32bit DMA addressing mode.\n"); age_dma_free(sc); /* Limit DMA address space to 32bit and try again. */ lowaddr = BUS_SPACE_MAXADDR_32BIT; goto again; } /* * Create Tx/Rx buffer parent tag. * L1 supports full 64bit DMA addressing in Tx/Rx buffers * so it needs separate parent DMA tag. * XXX * It seems enabling 64bit DMA causes data corruption. Limit * DMA address space to 32bit. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->age_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->age_cdata.age_buffer_tag); if (error != 0) { device_printf(sc->age_dev, "could not create parent buffer DMA tag.\n"); goto fail; } /* Create tag for Tx buffers. */ error = bus_dma_tag_create( sc->age_cdata.age_buffer_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AGE_TSO_MAXSIZE, /* maxsize */ AGE_MAXTXSEGS, /* nsegments */ AGE_TSO_MAXSEGSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->age_cdata.age_tx_tag); if (error != 0) { device_printf(sc->age_dev, "could not create Tx DMA tag.\n"); goto fail; } /* Create tag for Rx buffers. */ error = bus_dma_tag_create( sc->age_cdata.age_buffer_tag, /* parent */ AGE_RX_BUF_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->age_cdata.age_rx_tag); if (error != 0) { device_printf(sc->age_dev, "could not create Rx DMA tag.\n"); goto fail; } /* Create DMA maps for Tx buffers. */ for (i = 0; i < AGE_TX_RING_CNT; i++) { txd = &sc->age_cdata.age_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->age_dev, "could not create Tx dmamap.\n"); goto fail; } } /* Create DMA maps for Rx buffers. 
*/ if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0, &sc->age_cdata.age_rx_sparemap)) != 0) { device_printf(sc->age_dev, "could not create spare Rx dmamap.\n"); goto fail; } for (i = 0; i < AGE_RX_RING_CNT; i++) { rxd = &sc->age_cdata.age_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->age_dev, "could not create Rx dmamap.\n"); goto fail; } } fail: return (error); } static void age_dma_free(struct age_softc *sc) { struct age_txdesc *txd; struct age_rxdesc *rxd; int i; /* Tx buffers */ if (sc->age_cdata.age_tx_tag != NULL) { for (i = 0; i < AGE_TX_RING_CNT; i++) { txd = &sc->age_cdata.age_txdesc[i]; if (txd->tx_dmamap != NULL) { bus_dmamap_destroy(sc->age_cdata.age_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->age_cdata.age_tx_tag); sc->age_cdata.age_tx_tag = NULL; } /* Rx buffers */ if (sc->age_cdata.age_rx_tag != NULL) { for (i = 0; i < AGE_RX_RING_CNT; i++) { rxd = &sc->age_cdata.age_rxdesc[i]; if (rxd->rx_dmamap != NULL) { bus_dmamap_destroy(sc->age_cdata.age_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc->age_cdata.age_rx_sparemap != NULL) { bus_dmamap_destroy(sc->age_cdata.age_rx_tag, sc->age_cdata.age_rx_sparemap); sc->age_cdata.age_rx_sparemap = NULL; } bus_dma_tag_destroy(sc->age_cdata.age_rx_tag); sc->age_cdata.age_rx_tag = NULL; } /* Tx ring. */ if (sc->age_cdata.age_tx_ring_tag != NULL) { if (sc->age_rdata.age_tx_ring_paddr != 0) bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag, sc->age_cdata.age_tx_ring_map); if (sc->age_rdata.age_tx_ring != NULL) bus_dmamem_free(sc->age_cdata.age_tx_ring_tag, sc->age_rdata.age_tx_ring, sc->age_cdata.age_tx_ring_map); sc->age_rdata.age_tx_ring_paddr = 0; sc->age_rdata.age_tx_ring = NULL; bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag); sc->age_cdata.age_tx_ring_tag = NULL; } /* Rx ring. */ if (sc->age_cdata.age_rx_ring_tag != NULL) { if (sc->age_rdata.age_rx_ring_paddr != 0) bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag, sc->age_cdata.age_rx_ring_map); if (sc->age_rdata.age_rx_ring != NULL) bus_dmamem_free(sc->age_cdata.age_rx_ring_tag, sc->age_rdata.age_rx_ring, sc->age_cdata.age_rx_ring_map); sc->age_rdata.age_rx_ring_paddr = 0; sc->age_rdata.age_rx_ring = NULL; bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag); sc->age_cdata.age_rx_ring_tag = NULL; } /* Rx return ring. 
 */
	if (sc->age_cdata.age_rr_ring_tag != NULL) {
		if (sc->age_rdata.age_rr_ring_paddr != 0)
			bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
			    sc->age_cdata.age_rr_ring_map);
		if (sc->age_rdata.age_rr_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
			    sc->age_rdata.age_rr_ring,
			    sc->age_cdata.age_rr_ring_map);
		sc->age_rdata.age_rr_ring_paddr = 0;
		sc->age_rdata.age_rr_ring = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
		sc->age_cdata.age_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->age_cdata.age_cmb_block_tag != NULL) {
		if (sc->age_rdata.age_cmb_block_paddr != 0)
			bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
			    sc->age_cdata.age_cmb_block_map);
		if (sc->age_rdata.age_cmb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
			    sc->age_rdata.age_cmb_block,
			    sc->age_cdata.age_cmb_block_map);
		sc->age_rdata.age_cmb_block_paddr = 0;
		sc->age_rdata.age_cmb_block = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
		sc->age_cdata.age_cmb_block_tag = NULL;
	}
	/* SMB block */
	if (sc->age_cdata.age_smb_block_tag != NULL) {
		if (sc->age_rdata.age_smb_block_paddr != 0)
			bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
			    sc->age_cdata.age_smb_block_map);
		if (sc->age_rdata.age_smb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
			    sc->age_rdata.age_smb_block,
			    sc->age_cdata.age_smb_block_map);
		sc->age_rdata.age_smb_block_paddr = 0;
		sc->age_rdata.age_smb_block = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
		sc->age_cdata.age_smb_block_tag = NULL;
	}
	if (sc->age_cdata.age_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
		sc->age_cdata.age_buffer_tag = NULL;
	}
	if (sc->age_cdata.age_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
		sc->age_cdata.age_parent_tag = NULL;
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
age_shutdown(device_t dev)
{

	return (age_suspend(dev));
}

static void
age_setwol(struct age_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int aneg, i, pmc;

	AGE_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->age_dev, PCIY_PMG, &pmc) != 0) {
		CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
		/*
		 * No PME capability, PHY power down.
		 * XXX
		 * For an unknown reason, powering down the PHY resulted
		 * in unexpected results such as inaccessibility of the
		 * hardware on a freshly rebooted system. Disable powering
		 * down the PHY until more information on Attansic/Atheros
		 * PHY hardware is available.
		 */
#ifdef notyet
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
#endif
		return;
	}

	ifp = sc->age_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/*
		 * Note, this driver resets the link speed to 10/100Mbps
		 * with auto-negotiation, but we don't know whether that
		 * operation will succeed, as the driver has no control
		 * after powering off. If the renegotiation fails, WOL
		 * may not work. Running at 1Gbps draws more power than
		 * the 375mA at 3.3V specified in the PCI specification,
		 * and that would result in completely shutting down
		 * power to the ethernet controller.
		 *
		 * TODO
		 * Save the currently negotiated media speed/duplex/
		 * flow-control to the softc and restore the same link
		 * again after resuming. PHY handling such as power
		 * down/resetting to 100Mbps may be better handled in
		 * the phy driver's suspend method.
*/ mii = device_get_softc(sc->age_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & IFM_AVALID) != 0) { switch IFM_SUBTYPE(mii->mii_media_active) { case IFM_10_T: case IFM_100_TX: goto got_link; case IFM_1000_T: aneg++; default: break; } } age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_100T2CR, 0); age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* Poll link state until age(4) get a 10/100 link. */ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & IFM_AVALID) != 0) { switch (IFM_SUBTYPE( mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: age_mac_config(sc); goto got_link; default: break; } } AGE_UNLOCK(sc); pause("agelnk", hz); AGE_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->age_dev, "establishing link failed, " "WOL may not work!"); } /* * No link, force MAC to have 100Mbps, full-duplex link. * This is the last resort and may/may not work. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; age_mac_config(sc); } got_link: pmcs = 0; if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs); reg = CSR_READ_4(sc, AGE_MAC_CFG); reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC); reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST); if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) { reg |= MAC_CFG_RX_ENB; CSR_WRITE_4(sc, AGE_MAC_CFG, reg); } /* Request PME. */ pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); #ifdef notyet /* See above for powering down PHY issues. */ if ((ifp->if_capenable & IFCAP_WOL) == 0) { /* No WOL, PHY power down. */ age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_PDOWN); } #endif } static int age_suspend(device_t dev) { struct age_softc *sc; sc = device_get_softc(dev); AGE_LOCK(sc); age_stop(sc); age_setwol(sc); AGE_UNLOCK(sc); return (0); } static int age_resume(device_t dev) { struct age_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); AGE_LOCK(sc); age_phy_reset(sc); ifp = sc->age_ifp; if ((ifp->if_flags & IFF_UP) != 0) age_init_locked(sc); AGE_UNLOCK(sc); return (0); } static int age_encap(struct age_softc *sc, struct mbuf **m_head) { struct age_txdesc *txd, *txd_last; struct tx_desc *desc; struct mbuf *m; struct ip *ip; struct tcphdr *tcp; bus_dma_segment_t txsegs[AGE_MAXTXSEGS]; bus_dmamap_t map; uint32_t cflags, hdrlen, ip_off, poff, vtag; int error, i, nsegs, prod, si; AGE_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); m = *m_head; ip = NULL; tcp = NULL; cflags = vtag = 0; ip_off = poff = 0; if ((m->m_pkthdr.csum_flags & (AGE_CSUM_FEATURES | CSUM_TSO)) != 0) { /* * L1 requires offset of TCP/UDP payload in its Tx * descriptor to perform hardware Tx checksum offload. * Additionally, TSO requires IP/TCP header size and * modification of IP/TCP header in order to make TSO * engine work. This kind of operation takes many CPU * cycles on FreeBSD so fast host CPU is needed to get * smooth TSO performance. 
*/ struct ether_header *eh; if (M_WRITABLE(m) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_NOWAIT); /* Release original mbufs. */ m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } ip_off = sizeof(struct ether_header); m = m_pullup(m, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } eh = mtod(m, struct ether_header *); /* * Check if hardware VLAN insertion is off. * Additional check for LLC/SNAP frame? */ if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ip_off = sizeof(struct ether_vlan_header); m = m_pullup(m, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } } m = m_pullup(m, ip_off + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, char *) + ip_off); poff = ip_off + (ip->ip_hl << 2); if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { m = m_pullup(m, poff + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } tcp = (struct tcphdr *)(mtod(m, char *) + poff); m = m_pullup(m, poff + (tcp->th_off << 2)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } /* * L1 requires IP/TCP header size and offset as * well as TCP pseudo checksum which complicates * TSO configuration. I guess this comes from the * adherence to Microsoft NDIS Large Send * specification which requires insertion of * pseudo checksum by upper stack. The pseudo * checksum that NDIS refers to doesn't include * TCP payload length so age(4) should recompute * the pseudo checksum here. Hopefully this wouldn't * be much burden on modern CPUs. * Reset IP checksum and recompute TCP pseudo * checksum as NDIS specification said. */ ip = (struct ip *)(mtod(m, char *) + ip_off); tcp = (struct tcphdr *)(mtod(m, char *) + poff); ip->ip_sum = 0; tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); } *m_head = m; } si = prod = sc->age_cdata.age_tx_prod; txd = &sc->age_cdata.age_txdesc[prod]; txd_last = txd; map = txd->tx_dmamap; error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map, *m_head, txsegs, &nsegs, 0); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, AGE_MAXTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map, *m_head, txsegs, &nsegs, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check descriptor overrun. */ if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { bus_dmamap_unload(sc->age_cdata.age_tx_tag, map); return (ENOBUFS); } m = *m_head; /* Configure VLAN hardware tag insertion. */ if ((m->m_flags & M_VLANTAG) != 0) { vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag); vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK); cflags |= AGE_TD_INSERT_VLAN_TAG; } desc = NULL; i = 0; if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* Request TSO and set MSS. */ cflags |= AGE_TD_TSO_IPV4; cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM; cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << AGE_TD_TSO_MSS_SHIFT); /* Set IP/TCP header size. */ cflags |= ip->ip_hl << AGE_TD_IPHDR_LEN_SHIFT; cflags |= tcp->th_off << AGE_TD_TSO_TCPHDR_LEN_SHIFT; /* * L1 requires the first buffer should only hold IP/TCP * header data. TCP payload should be handled in other * descriptors. 
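 */

/*
 * A sketch of the arithmetic the TSO path above relies on: the driver
 * clears ip_sum and seeds th_sum with in_pseudo(src, dst,
 * htons(IPPROTO_TCP)) -- a pseudo-header checksum that deliberately
 * omits the TCP length, NDIS large-send style, since the hardware
 * accounts for the length of each segment it generates. Below is a
 * simplified, endianness-naive illustration of the one's-complement
 * arithmetic involved (real code must sum the fields as they sit in
 * network byte order).
 */
#include <stdint.h>
#include <stdio.h>

/* Fold a wide one's-complement accumulator down to 16 bits. */
static uint16_t
csum_fold(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return ((uint16_t)sum);
}

/* Pseudo-header sum over src/dst/protocol, with no TCP length term. */
static uint16_t
pseudo_sum_nolen(uint32_t src, uint32_t dst, uint16_t proto)
{
	uint64_t sum;

	sum = (src >> 16) + (src & 0xFFFF) +
	    (dst >> 16) + (dst & 0xFFFF) + proto;
	return (csum_fold(sum));
}

int
main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, protocol 6 (TCP); prints 0x840a. */
	printf("0x%04x\n", pseudo_sum_nolen(0xC0000201, 0xC0000202, 6));
	return (0);
}

/*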
*/ hdrlen = poff + (tcp->th_off << 2); desc = &sc->age_rdata.age_tx_ring[prod]; desc->addr = htole64(txsegs[0].ds_addr); desc->len = htole32(AGE_TX_BYTES(hdrlen) | vtag); desc->flags = htole32(cflags); sc->age_cdata.age_tx_cnt++; AGE_DESC_INC(prod, AGE_TX_RING_CNT); if (m->m_len - hdrlen > 0) { /* Handle remaining payload of the 1st fragment. */ desc = &sc->age_rdata.age_tx_ring[prod]; desc->addr = htole64(txsegs[0].ds_addr + hdrlen); desc->len = htole32(AGE_TX_BYTES(m->m_len - hdrlen) | vtag); desc->flags = htole32(cflags); sc->age_cdata.age_tx_cnt++; AGE_DESC_INC(prod, AGE_TX_RING_CNT); } /* Handle remaining fragments. */ i = 1; } else if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) { /* Configure Tx IP/TCP/UDP checksum offload. */ cflags |= AGE_TD_CSUM; if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) cflags |= AGE_TD_TCPCSUM; if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) cflags |= AGE_TD_UDPCSUM; /* Set checksum start offset. */ cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT); /* Set checksum insertion position of TCP/UDP. */ cflags |= ((poff + m->m_pkthdr.csum_data) << AGE_TD_CSUM_XSUMOFFSET_SHIFT); } for (; i < nsegs; i++) { desc = &sc->age_rdata.age_tx_ring[prod]; desc->addr = htole64(txsegs[i].ds_addr); desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag); desc->flags = htole32(cflags); sc->age_cdata.age_tx_cnt++; AGE_DESC_INC(prod, AGE_TX_RING_CNT); } /* Update producer index. */ sc->age_cdata.age_tx_prod = prod; /* Set EOP on the last descriptor. */ prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT; desc = &sc->age_rdata.age_tx_ring[prod]; desc->flags |= htole32(AGE_TD_EOP); /* Lastly set TSO header and modify IP/TCP header for TSO operation. */ if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { desc = &sc->age_rdata.age_tx_ring[si]; desc->flags |= htole32(AGE_TD_TSO_HDR); } /* Swap dmamap of the first and the last. */ txd = &sc->age_cdata.age_txdesc[prod]; map = txd_last->tx_dmamap; txd_last->tx_dmamap = txd->tx_dmamap; txd->tx_dmamap = map; txd->tx_m = m; /* Sync descriptors. */ bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void age_start(struct ifnet *ifp) { struct age_softc *sc; sc = ifp->if_softc; AGE_LOCK(sc); age_start_locked(ifp); AGE_UNLOCK(sc); } static void age_start_locked(struct ifnet *ifp) { struct age_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; AGE_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->age_flags & AGE_FLAG_LINK) == 0) return; for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (age_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { /* Update mbox. */ AGE_COMMIT_MBOX(sc); /* Set a timeout in case the chip goes out to lunch. 
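 */

/*
 * A tiny stand-alone sketch of the ring index arithmetic age_encap()
 * above uses: AGE_DESC_INC() is a modular increment, and the EOP bit
 * is set one slot behind the new producer index. RING_CNT is a
 * stand-in for AGE_TX_RING_CNT.
 */
#include <stdio.h>

#define	RING_CNT	256

int
main(void)
{
	int prod = RING_CNT - 1;	/* producer sitting on the last slot */

	/* AGE_DESC_INC-style advance wraps to 0 at the ring size... */
	prod = (prod + 1) % RING_CNT;
	/* ...and the last descriptor written is one behind the producer. */
	printf("new prod %d, EOP slot %d\n", prod,
	    (prod + RING_CNT - 1) % RING_CNT);	/* prints 0 and 255 */
	return (0);
}

/*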
*/ sc->age_watchdog_timer = AGE_TX_TIMEOUT; } } static void age_watchdog(struct age_softc *sc) { struct ifnet *ifp; AGE_LOCK_ASSERT(sc); if (sc->age_watchdog_timer == 0 || --sc->age_watchdog_timer) return; ifp = sc->age_ifp; if ((sc->age_flags & AGE_FLAG_LINK) == 0) { if_printf(sc->age_ifp, "watchdog timeout (missed link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; age_init_locked(sc); return; } if (sc->age_cdata.age_tx_cnt == 0) { if_printf(sc->age_ifp, "watchdog timeout (missed Tx interrupts) -- recovering\n"); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) age_start_locked(ifp); return; } if_printf(sc->age_ifp, "watchdog timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; age_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) age_start_locked(ifp); } static int age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct age_softc *sc; struct ifreq *ifr; struct mii_data *mii; uint32_t reg; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU) error = EINVAL; else if (ifp->if_mtu != ifr->ifr_mtu) { AGE_LOCK(sc); ifp->if_mtu = ifr->ifr_mtu; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; age_init_locked(sc); } AGE_UNLOCK(sc); } break; case SIOCSIFFLAGS: AGE_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->age_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) age_rxfilter(sc); } else { if ((sc->age_flags & AGE_FLAG_DETACH) == 0) age_init_locked(sc); } } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) age_stop(sc); } sc->age_if_flags = ifp->if_flags; AGE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: AGE_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) age_rxfilter(sc); AGE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->age_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: AGE_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= AGE_CSUM_FEATURES; else ifp->if_hwassist &= ~AGE_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; reg = CSR_READ_4(sc, AGE_MAC_CFG); reg &= ~MAC_CFG_RXCSUM_ENB; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) reg |= MAC_CFG_RXCSUM_ENB; CSR_WRITE_4(sc, AGE_MAC_CFG, reg); } if ((mask & IFCAP_TSO4) != 0 && (ifp->if_capabilities & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((ifp->if_capenable & IFCAP_TSO4) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; } if ((mask & IFCAP_WOL_MCAST) != 0 && (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_MCAST; if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; if ((mask & IFCAP_VLAN_HWCSUM) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= 
IFCAP_VLAN_HWTAGGING; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; age_rxvlan(sc); } AGE_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void age_mac_config(struct age_softc *sc) { struct mii_data *mii; uint32_t reg; AGE_LOCK_ASSERT(sc); mii = device_get_softc(sc->age_miibus); reg = CSR_READ_4(sc, AGE_MAC_CFG); reg &= ~MAC_CFG_FULL_DUPLEX; reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); reg &= ~MAC_CFG_SPEED_MASK; /* Reprogram MAC with resolved speed/duplex. */ switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: reg |= MAC_CFG_SPEED_10_100; break; case IFM_1000_T: reg |= MAC_CFG_SPEED_1000; break; } if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { reg |= MAC_CFG_FULL_DUPLEX; #ifdef notyet if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) reg |= MAC_CFG_TX_FC; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) reg |= MAC_CFG_RX_FC; #endif } CSR_WRITE_4(sc, AGE_MAC_CFG, reg); } static void age_link_task(void *arg, int pending) { struct age_softc *sc; struct mii_data *mii; struct ifnet *ifp; uint32_t reg; sc = (struct age_softc *)arg; AGE_LOCK(sc); mii = device_get_softc(sc->age_miibus); ifp = sc->age_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { AGE_UNLOCK(sc); return; } sc->age_flags &= ~AGE_FLAG_LINK; if ((mii->mii_media_status & IFM_AVALID) != 0) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: case IFM_1000_T: sc->age_flags |= AGE_FLAG_LINK; break; default: break; } } /* Stop Rx/Tx MACs. */ age_stop_rxmac(sc); age_stop_txmac(sc); /* Program MACs with resolved speed/duplex/flow-control. */ if ((sc->age_flags & AGE_FLAG_LINK) != 0) { age_mac_config(sc); reg = CSR_READ_4(sc, AGE_MAC_CFG); /* Restart DMA engine and Tx/Rx MAC. */ CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; CSR_WRITE_4(sc, AGE_MAC_CFG, reg); } AGE_UNLOCK(sc); } static void age_stats_update(struct age_softc *sc) { struct age_stats *stat; struct smb *smb; struct ifnet *ifp; AGE_LOCK_ASSERT(sc); stat = &sc->age_stat; bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, sc->age_cdata.age_smb_block_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); smb = sc->age_rdata.age_smb_block; if (smb->updated == 0) return; ifp = sc->age_ifp; /* Rx stats. */ stat->rx_frames += smb->rx_frames; stat->rx_bcast_frames += smb->rx_bcast_frames; stat->rx_mcast_frames += smb->rx_mcast_frames; stat->rx_pause_frames += smb->rx_pause_frames; stat->rx_control_frames += smb->rx_control_frames; stat->rx_crcerrs += smb->rx_crcerrs; stat->rx_lenerrs += smb->rx_lenerrs; stat->rx_bytes += smb->rx_bytes; stat->rx_runts += smb->rx_runts; stat->rx_fragments += smb->rx_fragments; stat->rx_pkts_64 += smb->rx_pkts_64; stat->rx_pkts_65_127 += smb->rx_pkts_65_127; stat->rx_pkts_128_255 += smb->rx_pkts_128_255; stat->rx_pkts_256_511 += smb->rx_pkts_256_511; stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; stat->rx_pkts_truncated += smb->rx_pkts_truncated; stat->rx_fifo_oflows += smb->rx_fifo_oflows; stat->rx_desc_oflows += smb->rx_desc_oflows; stat->rx_alignerrs += smb->rx_alignerrs; stat->rx_bcast_bytes += smb->rx_bcast_bytes; stat->rx_mcast_bytes += smb->rx_mcast_bytes; stat->rx_pkts_filtered += smb->rx_pkts_filtered; /* Tx stats. 
*/ stat->tx_frames += smb->tx_frames; stat->tx_bcast_frames += smb->tx_bcast_frames; stat->tx_mcast_frames += smb->tx_mcast_frames; stat->tx_pause_frames += smb->tx_pause_frames; stat->tx_excess_defer += smb->tx_excess_defer; stat->tx_control_frames += smb->tx_control_frames; stat->tx_deferred += smb->tx_deferred; stat->tx_bytes += smb->tx_bytes; stat->tx_pkts_64 += smb->tx_pkts_64; stat->tx_pkts_65_127 += smb->tx_pkts_65_127; stat->tx_pkts_128_255 += smb->tx_pkts_128_255; stat->tx_pkts_256_511 += smb->tx_pkts_256_511; stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; stat->tx_single_colls += smb->tx_single_colls; stat->tx_multi_colls += smb->tx_multi_colls; stat->tx_late_colls += smb->tx_late_colls; stat->tx_excess_colls += smb->tx_excess_colls; stat->tx_underrun += smb->tx_underrun; stat->tx_desc_underrun += smb->tx_desc_underrun; stat->tx_lenerrs += smb->tx_lenerrs; stat->tx_pkts_truncated += smb->tx_pkts_truncated; stat->tx_bcast_bytes += smb->tx_bcast_bytes; stat->tx_mcast_bytes += smb->tx_mcast_bytes; /* Update counters in ifnet. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, smb->tx_frames); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls + smb->tx_multi_colls + smb->tx_late_colls + smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT); if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_excess_colls + smb->tx_late_colls + smb->tx_underrun + smb->tx_pkts_truncated); if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames); if_inc_counter(ifp, IFCOUNTER_IERRORS, smb->rx_crcerrs + smb->rx_lenerrs + smb->rx_runts + smb->rx_pkts_truncated + smb->rx_fifo_oflows + smb->rx_desc_oflows + smb->rx_alignerrs); /* Update done, clear. */ smb->updated = 0; bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, sc->age_cdata.age_smb_block_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static int age_intr(void *arg) { struct age_softc *sc; uint32_t status; sc = (struct age_softc *)arg; status = CSR_READ_4(sc, AGE_INTR_STATUS); if (status == 0 || (status & AGE_INTRS) == 0) return (FILTER_STRAY); /* Disable interrupts. */ CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); taskqueue_enqueue(sc->age_tq, &sc->age_int_task); return (FILTER_HANDLED); } static void age_int_task(void *arg, int pending) { struct age_softc *sc; struct ifnet *ifp; struct cmb *cmb; uint32_t status; sc = (struct age_softc *)arg; AGE_LOCK(sc); bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cmb = sc->age_rdata.age_cmb_block; status = le32toh(cmb->intr_status); if (sc->age_morework != 0) status |= INTR_CMB_RX; if ((status & AGE_INTRS) == 0) goto done; sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >> TPD_CONS_SHIFT; sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >> RRD_PROD_SHIFT; /* Let hardware know CMB was served. 
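 */

/*
 * A sketch of the control flow around age_intr() above: the filter
 * handler masks further interrupts and hands the real work to
 * age_int_task(), which, after processing, re-reads the CMB status
 * and re-enqueues itself instead of unmasking when new events arrived
 * in the meantime. Modeled stand-alone with a fake status word.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pending = 1;	/* pretend one event is outstanding */

/* Serve whatever is pending and report whether there was anything. */
static uint32_t
serve_events(void)
{
	uint32_t had_work = pending;

	pending = 0;
	return (had_work);
}

int
main(void)
{
	int passes = 0;

	/* Filter part: interrupts are masked here, task enqueued once. */
	do {
		passes++;		/* one age_int_task() invocation */
	} while (serve_events() != 0);	/* re-check; nonzero => re-enqueue */
	/* Only now re-enable interrupts (a CSR write in the driver). */
	printf("task ran %d time(s)\n", passes);
	return (0);
}

/*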
*/ cmb->intr_status = 0; bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); #if 0 printf("INTR: 0x%08x\n", status); status &= ~INTR_DIS_DMA; CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); #endif ifp = sc->age_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if ((status & INTR_CMB_RX) != 0) sc->age_morework = age_rxintr(sc, sc->age_rr_prod, sc->age_process_limit); if ((status & INTR_CMB_TX) != 0) age_txintr(sc, sc->age_tpd_cons); if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) { if ((status & INTR_DMA_RD_TO_RST) != 0) device_printf(sc->age_dev, "DMA read error! -- resetting\n"); if ((status & INTR_DMA_WR_TO_RST) != 0) device_printf(sc->age_dev, "DMA write error! -- resetting\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; age_init_locked(sc); } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) age_start_locked(ifp); if ((status & INTR_SMB) != 0) age_stats_update(sc); } /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */ bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); status = le32toh(cmb->intr_status); if (sc->age_morework != 0 || (status & AGE_INTRS) != 0) { taskqueue_enqueue(sc->age_tq, &sc->age_int_task); AGE_UNLOCK(sc); return; } done: /* Re-enable interrupts. */ CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); AGE_UNLOCK(sc); } static void age_txintr(struct age_softc *sc, int tpd_cons) { struct ifnet *ifp; struct age_txdesc *txd; int cons, prog; AGE_LOCK_ASSERT(sc); ifp = sc->age_ifp; bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * Go through our Tx list and free mbufs for those * frames which have been transmitted. */ cons = sc->age_cdata.age_tx_cons; for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { if (sc->age_cdata.age_tx_cnt <= 0) break; prog++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->age_cdata.age_tx_cnt--; txd = &sc->age_cdata.age_txdesc[cons]; /* * Clear Tx descriptors, it's not required but would * help debugging in case of Tx issues. */ txd->tx_desc->addr = 0; txd->tx_desc->len = 0; txd->tx_desc->flags = 0; if (txd->tx_m == NULL) continue; /* Reclaim transmitted mbufs. */ bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } if (prog > 0) { sc->age_cdata.age_tx_cons = cons; /* * Unarm watchdog timer only when there are no pending * Tx descriptors in queue. */ if (sc->age_cdata.age_tx_cnt == 0) sc->age_watchdog_timer = 0; bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } } #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * age_fixup_rx(struct ifnet *ifp, struct mbuf *m) { struct mbuf *n; int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - 3; if (m->m_next == NULL) { for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= 6; return (m); } /* * Append a new mbuf to received mbuf chain and copy ethernet * header from the mbuf chain. This can save lots of CPU * cycles for jumbo frame. 
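	 *
	 * For the single-mbuf case above, the arithmetic works out as
	 * follows (assuming AGE_RX_BUF_ALIGN is 8): the controller wants
	 * Rx buffers 8-byte aligned, so the frame starts at offset 8n and
	 * the IP header lands at 8n + 14, which is only 2-byte aligned.
	 * Sliding the frame back 6 bytes with the uint16_t copy loop puts
	 * the IP header at 8n + 8, a boundary that strict-alignment CPUs
	 * can load from directly.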
*/ MGETHDR(n, M_NOWAIT, MT_DATA); if (n == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); m_freem(m); return (NULL); } bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; return (n); } #endif /* Receive a frame. */ static void age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) { struct age_rxdesc *rxd; struct ifnet *ifp; struct mbuf *mp, *m; uint32_t status, index, vtag; int count, nsegs; int rx_cons; AGE_LOCK_ASSERT(sc); ifp = sc->age_ifp; status = le32toh(rxrd->flags); index = le32toh(rxrd->index); rx_cons = AGE_RX_CONS(index); nsegs = AGE_RX_NSEGS(index); sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); if ((status & (AGE_RRD_ERROR | AGE_RRD_LENGTH_NOK)) != 0) { /* * We want to pass the following frames to upper * layer regardless of error status of Rx return * ring. * * o IP/TCP/UDP checksum is bad. * o frame length and protocol specific length * does not match. */ status |= AGE_RRD_IPCSUM_NOK | AGE_RRD_TCP_UDPCSUM_NOK; if ((status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) return; } for (count = 0; count < nsegs; count++, AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { rxd = &sc->age_cdata.age_rxdesc[rx_cons]; mp = rxd->rx_m; /* Add a new receive buffer to the ring. */ if (age_newbuf(sc, rxd) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); /* Reuse Rx buffers. */ if (sc->age_cdata.age_rxhead != NULL) m_freem(sc->age_cdata.age_rxhead); break; } /* * Assume we've received a full sized frame. * Actual size is fixed when we encounter the end of * multi-segmented frame. */ mp->m_len = AGE_RX_BUF_SIZE; /* Chain received mbufs. */ if (sc->age_cdata.age_rxhead == NULL) { sc->age_cdata.age_rxhead = mp; sc->age_cdata.age_rxtail = mp; } else { mp->m_flags &= ~M_PKTHDR; sc->age_cdata.age_rxprev_tail = sc->age_cdata.age_rxtail; sc->age_cdata.age_rxtail->m_next = mp; sc->age_cdata.age_rxtail = mp; } if (count == nsegs - 1) { /* Last desc. for this frame. */ m = sc->age_cdata.age_rxhead; m->m_flags |= M_PKTHDR; /* * It seems that L1 controller has no way * to tell hardware to strip CRC bytes. */ m->m_pkthdr.len = sc->age_cdata.age_rxlen - ETHER_CRC_LEN; if (nsegs > 1) { /* Set last mbuf size. */ mp->m_len = sc->age_cdata.age_rxlen - ((nsegs - 1) * AGE_RX_BUF_SIZE); /* Remove the CRC bytes in chained mbufs. */ if (mp->m_len <= ETHER_CRC_LEN) { sc->age_cdata.age_rxtail = sc->age_cdata.age_rxprev_tail; sc->age_cdata.age_rxtail->m_len -= (ETHER_CRC_LEN - mp->m_len); sc->age_cdata.age_rxtail->m_next = NULL; m_freem(mp); } else { mp->m_len -= ETHER_CRC_LEN; } } else m->m_len = m->m_pkthdr.len; m->m_pkthdr.rcvif = ifp; /* * Set checksum information. * It seems that L1 controller can compute partial * checksum. The partial checksum value can be used * to accelerate checksum computation for fragmented * TCP/UDP packets. Upper network stack already * takes advantage of the partial checksum value in * IP reassembly stage. But I'm not sure the * correctness of the partial hardware checksum * assistance due to lack of data sheet. If it is * proven to work on L1 I'll enable it. 
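			 *
			 * For reference, the fully-verified case below uses
			 * the stock FreeBSD idiom: CSUM_DATA_VALID |
			 * CSUM_PSEUDO_HDR together with csum_data = 0xffff
			 * tells the stack that the TCP/UDP checksum,
			 * including the pseudo-header, already verified
			 * correctly, so the software recomputation is
			 * skipped entirely.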
*/ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && (status & AGE_RRD_IPV4) != 0) { if ((status & AGE_RRD_IPCSUM_NOK) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } /* * Don't mark bad checksum for TCP/UDP frames * as fragmented frames may always have set * bad checksummed bit of descriptor status. */ } /* Check for VLAN tagged frames. */ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (status & AGE_RRD_VLAN) != 0) { vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); m->m_pkthdr.ether_vtag = AGE_RX_VLAN_TAG(vtag); m->m_flags |= M_VLANTAG; } #ifndef __NO_STRICT_ALIGNMENT m = age_fixup_rx(ifp, m); if (m != NULL) #endif { /* Pass it on. */ AGE_UNLOCK(sc); (*ifp->if_input)(ifp, m); AGE_LOCK(sc); } } } /* Reset mbuf chains. */ AGE_RXCHAIN_RESET(sc); } static int age_rxintr(struct age_softc *sc, int rr_prod, int count) { struct rx_rdesc *rxrd; int rr_cons, nsegs, pktlen, prog; AGE_LOCK_ASSERT(sc); rr_cons = sc->age_cdata.age_rr_cons; if (rr_cons == rr_prod) return (0); bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, sc->age_cdata.age_rr_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag, sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_POSTWRITE); for (prog = 0; rr_cons != rr_prod; prog++) { if (count-- <= 0) break; rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; nsegs = AGE_RX_NSEGS(le32toh(rxrd->index)); if (nsegs == 0) break; /* * Check number of segments against received bytes. * Non-matching value would indicate that hardware * is still trying to update Rx return descriptors. * I'm not sure whether this check is really needed. */ pktlen = AGE_RX_BYTES(le32toh(rxrd->len)); - if (nsegs != (pktlen + (AGE_RX_BUF_SIZE - 1)) / AGE_RX_BUF_SIZE) + if (nsegs != howmany(pktlen, AGE_RX_BUF_SIZE)) break; /* Received a frame. */ age_rxeof(sc, rxrd); /* Clear return ring. */ rxrd->index = 0; AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); sc->age_cdata.age_rx_cons += nsegs; sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; } if (prog > 0) { /* Update the consumer index. */ sc->age_cdata.age_rr_cons = rr_cons; bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag, sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE); /* Sync descriptors. */ bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, sc->age_cdata.age_rr_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Notify hardware availability of new Rx buffers. */ AGE_COMMIT_MBOX(sc); } return (count > 0 ? 0 : EAGAIN); } static void age_tick(void *arg) { struct age_softc *sc; struct mii_data *mii; sc = (struct age_softc *)arg; AGE_LOCK_ASSERT(sc); mii = device_get_softc(sc->age_miibus); mii_tick(mii); age_watchdog(sc); callout_reset(&sc->age_tick_ch, hz, age_tick, sc); } static void age_reset(struct age_softc *sc) { uint32_t reg; int i; CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); CSR_READ_4(sc, AGE_MASTER_CFG); DELAY(1000); for (i = AGE_RESET_TIMEOUT; i > 0; i--) { if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) break; DELAY(10); } if (i == 0) device_printf(sc->age_dev, "reset timeout(0x%08x)!\n", reg); /* Initialize PCIe module. From Linux. 
 */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

static void
age_init(void *xsc)
{
	struct age_softc *sc;

	sc = (struct age_softc *)xsc;
	AGE_LOCK(sc);
	age_init_locked(sc);
	AGE_UNLOCK(sc);
}

static void
age_init_locked(struct age_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	AGE_LOCK_ASSERT(sc);

	ifp = sc->age_ifp;
	mii = device_get_softc(sc->age_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->age_dev, "no memory for Rx buffers.\n");
		age_stop(sc);
		return;
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, AGE_PAR0, eaddr[2] << 24 | eaddr[3] << 16 |
	    eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) & DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register.  However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it is
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management we could have independent
	 * Tx/Rx handlers, and in turn the Rx handler could run without
	 * any locking.
	 */
	AGE_COMMIT_MBOX(sc);
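	/*
	 * For orientation: one doorbell write suffices because a single
	 * 32-bit mailbox word carries the Tx producer and the Rx indices
	 * side by side.  A minimal sketch of the packing; the MB_* names
	 * and the AGE_MBOX offset here are hypothetical, the real shifts
	 * and masks live in the AGE_COMMIT_MBOX() macro in if_agereg.h.
	 */
#if 0
	uint32_t mbox;

	mbox = ((sc->age_cdata.age_rr_cons << MB_RRD_CONS_SHIFT) &
	    MB_RRD_CONS_MASK) |
	    ((sc->age_cdata.age_rx_cons << MB_RD_PROD_SHIFT) &
	    MB_RD_PROD_MASK) |
	    ((sc->age_cdata.age_tx_prod << MB_TPD_PROD_SHIFT) &
	    MB_TPD_PROD_MASK);
	CSR_WRITE_4(sc, AGE_MBOX, mbox);	/* one write posts all indices */
#endif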
	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (bootverbose)
		device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
		    sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/* Set the maximum frame size; never use an MTU below ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) << RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) &
	    RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT << RXQ_JUMBO_CFG_LKAH_SHIFT) &
	    RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters.  From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change.  From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Should understand the pause parameter relationships between
	 * the FIFO size and the number of Rx descriptors and Rx return
	 * descriptors.
	 *
	 * Magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));
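	/*
	 * Reading the watermarks just programmed: each register holds a
	 * low/high pair and the MAC uses them as hysteresis marks for
	 * generating pause frames.  Worked through the default branch
	 * above with an assumed AGE_SRAM_RX_FIFO_LEN reading of 2048:
	 * rxf_lo = 2048 / 16 = 128, raised to the 192 floor, and
	 * rxf_hi = 2048 * 7 / 8 = 1792.  The fixed per-revision constants
	 * in the other branch are unexplained magic inherited from Linux.
	 */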
	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT << TXQ_CFG_TPD_FETCH_THRESH_SHIFT) &
	    TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
	    (((fsize / sizeof(uint64_t)) << TX_JUMBO_TPD_TH_SHIFT) &
	    TX_JUMBO_TPD_TH_MASK) |
	    ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
	    TX_JUMBO_TPD_IPG_MASK));

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timers and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request an SMB update every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL,
	    CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits, as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media.  Actual reconfiguration
	 * of the MAC follows link establishment.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));

	/* Set up the receive filter. */
	age_rxfilter(sc);
	age_rxvlan(sc);

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		reg |= MAC_CFG_RXCSUM_ENB;

	/* Ack and clear all pending interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);

	/* Finally enable Tx/Rx MAC. */
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);

	sc->age_flags &= ~AGE_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);

	callout_reset(&sc->age_tick_ch, hz, age_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
age_stop(struct age_softc *sc)
{
	struct ifnet *ifp;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	AGE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->age_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->age_flags &= ~AGE_FLAG_LINK;
	callout_stop(&sc->age_tick_ch);
	sc->age_watchdog_timer = 0;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);
	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
	/* Stop TxQ/RxQ.
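	 * Teardown mirrors age_init_locked() in reverse: interrupts were
	 * masked first, then CMB/SMB updates and the Rx/Tx MACs stopped,
	 * then the DMA engines, and finally the TxQ/RxQ below, so nothing
	 * left running can DMA into buffers that the mbuf-reclaim loops
	 * further down are about to free.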
*/ CSR_WRITE_4(sc, AGE_TXQ_CFG, CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); CSR_WRITE_4(sc, AGE_RXQ_CFG, CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); for (i = AGE_RESET_TIMEOUT; i > 0; i--) { if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) break; DELAY(10); } if (i == 0) device_printf(sc->age_dev, "stopping Rx/Tx MACs timed out(0x%08x)!\n", reg); /* Reclaim Rx buffers that have been processed. */ if (sc->age_cdata.age_rxhead != NULL) m_freem(sc->age_cdata.age_rxhead); AGE_RXCHAIN_RESET(sc); /* * Free RX and TX mbufs still in the queues. */ for (i = 0; i < AGE_RX_RING_CNT; i++) { rxd = &sc->age_cdata.age_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < AGE_TX_RING_CNT; i++) { txd = &sc->age_cdata.age_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } } static void age_stop_txmac(struct age_softc *sc) { uint32_t reg; int i; AGE_LOCK_ASSERT(sc); reg = CSR_READ_4(sc, AGE_MAC_CFG); if ((reg & MAC_CFG_TX_ENB) != 0) { reg &= ~MAC_CFG_TX_ENB; CSR_WRITE_4(sc, AGE_MAC_CFG, reg); } /* Stop Tx DMA engine. */ reg = CSR_READ_4(sc, AGE_DMA_CFG); if ((reg & DMA_CFG_RD_ENB) != 0) { reg &= ~DMA_CFG_RD_ENB; CSR_WRITE_4(sc, AGE_DMA_CFG, reg); } for (i = AGE_RESET_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0) break; DELAY(10); } if (i == 0) device_printf(sc->age_dev, "stopping TxMAC timeout!\n"); } static void age_stop_rxmac(struct age_softc *sc) { uint32_t reg; int i; AGE_LOCK_ASSERT(sc); reg = CSR_READ_4(sc, AGE_MAC_CFG); if ((reg & MAC_CFG_RX_ENB) != 0) { reg &= ~MAC_CFG_RX_ENB; CSR_WRITE_4(sc, AGE_MAC_CFG, reg); } /* Stop Rx DMA engine. 
*/ reg = CSR_READ_4(sc, AGE_DMA_CFG); if ((reg & DMA_CFG_WR_ENB) != 0) { reg &= ~DMA_CFG_WR_ENB; CSR_WRITE_4(sc, AGE_DMA_CFG, reg); } for (i = AGE_RESET_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0) break; DELAY(10); } if (i == 0) device_printf(sc->age_dev, "stopping RxMAC timeout!\n"); } static void age_init_tx_ring(struct age_softc *sc) { struct age_ring_data *rd; struct age_txdesc *txd; int i; AGE_LOCK_ASSERT(sc); sc->age_cdata.age_tx_prod = 0; sc->age_cdata.age_tx_cons = 0; sc->age_cdata.age_tx_cnt = 0; rd = &sc->age_rdata; bzero(rd->age_tx_ring, AGE_TX_RING_SZ); for (i = 0; i < AGE_TX_RING_CNT; i++) { txd = &sc->age_cdata.age_txdesc[i]; txd->tx_desc = &rd->age_tx_ring[i]; txd->tx_m = NULL; } bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static int age_init_rx_ring(struct age_softc *sc) { struct age_ring_data *rd; struct age_rxdesc *rxd; int i; AGE_LOCK_ASSERT(sc); sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1; sc->age_morework = 0; rd = &sc->age_rdata; bzero(rd->age_rx_ring, AGE_RX_RING_SZ); for (i = 0; i < AGE_RX_RING_CNT; i++) { rxd = &sc->age_cdata.age_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_desc = &rd->age_rx_ring[i]; if (age_newbuf(sc, rxd) != 0) return (ENOBUFS); } bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag, sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE); return (0); } static void age_init_rr_ring(struct age_softc *sc) { struct age_ring_data *rd; AGE_LOCK_ASSERT(sc); sc->age_cdata.age_rr_cons = 0; AGE_RXCHAIN_RESET(sc); rd = &sc->age_rdata; bzero(rd->age_rr_ring, AGE_RR_RING_SZ); bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, sc->age_cdata.age_rr_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void age_init_cmb_block(struct age_softc *sc) { struct age_ring_data *rd; AGE_LOCK_ASSERT(sc); rd = &sc->age_rdata; bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ); bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void age_init_smb_block(struct age_softc *sc) { struct age_ring_data *rd; AGE_LOCK_ASSERT(sc); rd = &sc->age_rdata; bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ); bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, sc->age_cdata.age_smb_block_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static int age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd) { struct rx_desc *desc; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; AGE_LOCK_ASSERT(sc); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; #ifndef __NO_STRICT_ALIGNMENT m_adj(m, AGE_RX_BUF_ALIGN); #endif if (bus_dmamap_load_mbuf_sg(sc->age_cdata.age_rx_tag, sc->age_cdata.age_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap; sc->age_cdata.age_rx_sparemap = map; bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; desc = rxd->rx_desc; desc->addr = htole64(segs[0].ds_addr); desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) << AGE_RD_LEN_SHIFT); return (0); } static void age_rxvlan(struct age_softc *sc) { struct ifnet *ifp; 
uint32_t reg; AGE_LOCK_ASSERT(sc); ifp = sc->age_ifp; reg = CSR_READ_4(sc, AGE_MAC_CFG); reg &= ~MAC_CFG_VLAN_TAG_STRIP; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) reg |= MAC_CFG_VLAN_TAG_STRIP; CSR_WRITE_4(sc, AGE_MAC_CFG, reg); } static void age_rxfilter(struct age_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; uint32_t crc; uint32_t mchash[2]; uint32_t rxcfg; AGE_LOCK_ASSERT(sc); ifp = sc->age_ifp; rxcfg = CSR_READ_4(sc, AGE_MAC_CFG); rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); if ((ifp->if_flags & IFF_BROADCAST) != 0) rxcfg |= MAC_CFG_BCAST; if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { if ((ifp->if_flags & IFF_PROMISC) != 0) rxcfg |= MAC_CFG_PROMISC; if ((ifp->if_flags & IFF_ALLMULTI) != 0) rxcfg |= MAC_CFG_ALLMULTI; CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF); CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg); return; } /* Program new filter. */ bzero(mchash, sizeof(mchash)); if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &sc->age_ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN); mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); } if_maddr_runlock(ifp); CSR_WRITE_4(sc, AGE_MAR0, mchash[0]); CSR_WRITE_4(sc, AGE_MAR1, mchash[1]); CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg); } static int sysctl_age_stats(SYSCTL_HANDLER_ARGS) { struct age_softc *sc; struct age_stats *stats; int error, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (result != 1) return (error); sc = (struct age_softc *)arg1; stats = &sc->age_stat; printf("%s statistics:\n", device_get_nameunit(sc->age_dev)); printf("Transmit good frames : %ju\n", (uintmax_t)stats->tx_frames); printf("Transmit good broadcast frames : %ju\n", (uintmax_t)stats->tx_bcast_frames); printf("Transmit good multicast frames : %ju\n", (uintmax_t)stats->tx_mcast_frames); printf("Transmit pause control frames : %u\n", stats->tx_pause_frames); printf("Transmit control frames : %u\n", stats->tx_control_frames); printf("Transmit frames with excessive deferrals : %u\n", stats->tx_excess_defer); printf("Transmit deferrals : %u\n", stats->tx_deferred); printf("Transmit good octets : %ju\n", (uintmax_t)stats->tx_bytes); printf("Transmit good broadcast octets : %ju\n", (uintmax_t)stats->tx_bcast_bytes); printf("Transmit good multicast octets : %ju\n", (uintmax_t)stats->tx_mcast_bytes); printf("Transmit frames 64 bytes : %ju\n", (uintmax_t)stats->tx_pkts_64); printf("Transmit frames 65 to 127 bytes : %ju\n", (uintmax_t)stats->tx_pkts_65_127); printf("Transmit frames 128 to 255 bytes : %ju\n", (uintmax_t)stats->tx_pkts_128_255); printf("Transmit frames 256 to 511 bytes : %ju\n", (uintmax_t)stats->tx_pkts_256_511); printf("Transmit frames 512 to 1024 bytes : %ju\n", (uintmax_t)stats->tx_pkts_512_1023); printf("Transmit frames 1024 to 1518 bytes : %ju\n", (uintmax_t)stats->tx_pkts_1024_1518); printf("Transmit frames 1519 to MTU bytes : %ju\n", (uintmax_t)stats->tx_pkts_1519_max); printf("Transmit single collisions : %u\n", stats->tx_single_colls); printf("Transmit multiple collisions : %u\n", stats->tx_multi_colls); printf("Transmit late collisions : %u\n", stats->tx_late_colls); printf("Transmit abort due to excessive collisions : %u\n", stats->tx_excess_colls); printf("Transmit underruns due to FIFO underruns : %u\n", stats->tx_underrun); printf("Transmit descriptor write-back errors : %u\n", 
	    stats->tx_desc_underrun);
	printf("Transmit frames with length mismatched frame size : %u\n",
	    stats->tx_lenerrs);
	printf("Transmit frames truncated due to MTU size : %u\n",
	    stats->tx_pkts_truncated);
	printf("Receive good frames : %ju\n",
	    (uintmax_t)stats->rx_frames);
	printf("Receive good broadcast frames : %ju\n",
	    (uintmax_t)stats->rx_bcast_frames);
	printf("Receive good multicast frames : %ju\n",
	    (uintmax_t)stats->rx_mcast_frames);
	printf("Receive pause control frames : %u\n",
	    stats->rx_pause_frames);
	printf("Receive control frames : %u\n",
	    stats->rx_control_frames);
	printf("Receive CRC errors : %u\n", stats->rx_crcerrs);
	printf("Receive frames with length errors : %u\n",
	    stats->rx_lenerrs);
	printf("Receive good octets : %ju\n",
	    (uintmax_t)stats->rx_bytes);
	printf("Receive good broadcast octets : %ju\n",
	    (uintmax_t)stats->rx_bcast_bytes);
	printf("Receive good multicast octets : %ju\n",
	    (uintmax_t)stats->rx_mcast_bytes);
	printf("Receive frames too short : %u\n", stats->rx_runts);
	printf("Receive fragmented frames : %ju\n",
	    (uintmax_t)stats->rx_fragments);
	printf("Receive frames 64 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_64);
	printf("Receive frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_65_127);
	printf("Receive frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_128_255);
	printf("Receive frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_256_511);
	printf("Receive frames 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_512_1023);
	printf("Receive frames 1024 to 1518 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_1024_1518);
	printf("Receive frames 1519 to MTU bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_1519_max);
	printf("Receive frames too long : %ju\n",
	    (uintmax_t)stats->rx_pkts_truncated);
	printf("Receive frames with FIFO overflow : %u\n",
	    stats->rx_fifo_oflows);
	printf("Receive frames with return descriptor overflow : %u\n",
	    stats->rx_desc_oflows);
	printf("Receive frames with alignment errors : %u\n",
	    stats->rx_alignerrs);
	printf("Receive frames dropped due to address filtering : %ju\n",
	    (uintmax_t)stats->rx_pkts_filtered);

	return (error);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

static int
sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    AGE_PROC_MIN, AGE_PROC_MAX));
}

static int
sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,
	    AGE_IM_TIMER_MAX));
}
diff --git a/sys/dev/amr/amr.c b/sys/dev/amr/amr.c
index 0b52d9f8f065..16cbeec35e31 100644
--- a/sys/dev/amr/amr.c
+++ b/sys/dev/amr/amr.c
@@ -1,2454 +1,2454 @@
/*-
 * Copyright (c) 1999,2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2005 Scott Long
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002 Eric Moore * Copyright (c) 2002, 2004 LSI Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The party using or redistributing the source code and binary forms * agrees to the disclaimer below and the terms and conditions set forth * herein. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the AMI MegaRaid family of controllers. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define AMR_DEFINE_TABLES #include SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters"); static d_open_t amr_open; static d_close_t amr_close; static d_ioctl_t amr_ioctl; static struct cdevsw amr_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = amr_open, .d_close = amr_close, .d_ioctl = amr_ioctl, .d_name = "amr", }; int linux_no_adapter = 0; /* * Initialisation, bus interface. */ static void amr_startup(void *arg); /* * Command wrappers */ static int amr_query_controller(struct amr_softc *sc); static void *amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status); static void amr_completeio(struct amr_command *ac); static int amr_support_ext_cdb(struct amr_softc *sc); /* * Command buffer allocation. */ static void amr_alloccmd_cluster(struct amr_softc *sc); static void amr_freecmd_cluster(struct amr_command_cluster *acc); /* * Command processing. 
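 *
 * For orientation, the normal I/O path through the functions below runs
 * roughly:
 *
 *	amr_submit_bio() queues the bio and kicks amr_startio();
 *	amr_bio_command() turns the head of the bio queue into an
 *	amr_command; amr_start() maps it and writes the mailbox; the
 *	completion path runs amr_complete(), which ends in
 *	amr_completeio() finishing the bio and releasing the command.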
*/ static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp); static int amr_wait_command(struct amr_command *ac) __unused; static int amr_mapcmd(struct amr_command *ac); static void amr_unmapcmd(struct amr_command *ac); static int amr_start(struct amr_command *ac); static void amr_complete(void *context, ac_qhead_t *head); static void amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error); static void amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error); static void amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error); static void amr_abort_load(struct amr_command *ac); /* * Interface-specific shims */ static int amr_quartz_submit_command(struct amr_command *ac); static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave); static int amr_quartz_poll_command(struct amr_command *ac); static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac); static int amr_std_submit_command(struct amr_command *ac); static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave); static int amr_std_poll_command(struct amr_command *ac); static void amr_std_attach_mailbox(struct amr_softc *sc); #ifdef AMR_BOARD_INIT static int amr_quartz_init(struct amr_softc *sc); static int amr_std_init(struct amr_softc *sc); #endif /* * Debugging */ static void amr_describe_controller(struct amr_softc *sc); #ifdef AMR_DEBUG #if 0 static void amr_printcommand(struct amr_command *ac); #endif #endif static void amr_init_sysctl(struct amr_softc *sc); static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td); static MALLOC_DEFINE(M_AMR, "amr", "AMR memory"); /******************************************************************************** ******************************************************************************** Inline Glue ******************************************************************************** ********************************************************************************/ /******************************************************************************** ******************************************************************************** Public Interfaces ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Initialise the controller and softc. */ int amr_attach(struct amr_softc *sc) { device_t child; debug_called(1); /* * Initialise per-controller queues. */ amr_init_qhead(&sc->amr_freecmds); amr_init_qhead(&sc->amr_ready); TAILQ_INIT(&sc->amr_cmd_clusters); bioq_init(&sc->amr_bioq); debug(2, "queue init done"); /* * Configure for this controller type. */ if (AMR_IS_QUARTZ(sc)) { sc->amr_submit_command = amr_quartz_submit_command; sc->amr_get_work = amr_quartz_get_work; sc->amr_poll_command = amr_quartz_poll_command; sc->amr_poll_command1 = amr_quartz_poll_command1; } else { sc->amr_submit_command = amr_std_submit_command; sc->amr_get_work = amr_std_get_work; sc->amr_poll_command = amr_std_poll_command; amr_std_attach_mailbox(sc); } #ifdef AMR_BOARD_INIT if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc))) return(ENXIO); #endif /* * Allocate initial commands. */ amr_alloccmd_cluster(sc); /* * Quiz controller for features and limits. 
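     * The query fills in the limits the rest of attach depends on, most
     * importantly sc->amr_maxio; the preallocation loop below then calls
     * amr_alloccmd_cluster() until sc->amr_nextslot reaches that limit,
     * so command allocation never has to sleep once I/O is flowing.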
*/ if (amr_query_controller(sc)) return(ENXIO); debug(2, "controller query complete"); /* * preallocate the remaining commands. */ while (sc->amr_nextslot < sc->amr_maxio) amr_alloccmd_cluster(sc); /* * Setup sysctls. */ amr_init_sysctl(sc); /* * Attach our 'real' SCSI channels to CAM. */ child = device_add_child(sc->amr_dev, "amrp", -1); sc->amr_pass = child; if (child != NULL) { device_set_softc(child, sc); device_set_desc(child, "SCSI Passthrough Bus"); bus_generic_attach(sc->amr_dev); } /* * Create the control device. */ sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev)); sc->amr_dev_t->si_drv1 = sc; linux_no_adapter++; if (device_get_unit(sc->amr_dev) == 0) make_dev_alias(sc->amr_dev_t, "megadev0"); /* * Schedule ourselves to bring the controller up once interrupts are * available. */ bzero(&sc->amr_ich, sizeof(struct intr_config_hook)); sc->amr_ich.ich_func = amr_startup; sc->amr_ich.ich_arg = sc; if (config_intrhook_establish(&sc->amr_ich) != 0) { device_printf(sc->amr_dev, "can't establish configuration hook\n"); return(ENOMEM); } /* * Print a little information about the controller. */ amr_describe_controller(sc); debug(2, "attach complete"); return(0); } /******************************************************************************** * Locate disk resources and attach children to them. */ static void amr_startup(void *arg) { struct amr_softc *sc = (struct amr_softc *)arg; struct amr_logdrive *dr; int i, error; debug_called(1); /* pull ourselves off the intrhook chain */ if (sc->amr_ich.ich_func) config_intrhook_disestablish(&sc->amr_ich); sc->amr_ich.ich_func = NULL; /* get up-to-date drive information */ if (amr_query_controller(sc)) { device_printf(sc->amr_dev, "can't scan controller for drives\n"); return; } /* iterate over available drives */ for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) { /* are we already attached to this drive? */ if (dr->al_disk == 0) { /* generate geometry information */ if (dr->al_size > 0x200000) { /* extended translation? 
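	     * The 0x200000-sector threshold is 1GB of 512-byte blocks:
	     * larger drives get the "extended" 255-head/63-sector
	     * translation, smaller ones the classic 64/32.  E.g. a 4GB
	     * drive (0x800000 sectors) works out below to
	     * 8388608 / (255 * 63) = 522 cylinders.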
*/ dr->al_heads = 255; dr->al_sectors = 63; } else { dr->al_heads = 64; dr->al_sectors = 32; } dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors); dr->al_disk = device_add_child(sc->amr_dev, NULL, -1); if (dr->al_disk == 0) device_printf(sc->amr_dev, "device_add_child failed\n"); device_set_ivars(dr->al_disk, dr); } } if ((error = bus_generic_attach(sc->amr_dev)) != 0) device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error); /* mark controller back up */ sc->amr_state &= ~AMR_STATE_SHUTDOWN; /* interrupts will be enabled before we do anything more */ sc->amr_state |= AMR_STATE_INTEN; return; } static void amr_init_sysctl(struct amr_softc *sc) { SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)), OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0, ""); SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)), OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0, ""); SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)), OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0, ""); SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)), OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0, ""); } /******************************************************************************* * Free resources associated with a controller instance */ void amr_free(struct amr_softc *sc) { struct amr_command_cluster *acc; /* detach from CAM */ if (sc->amr_pass != NULL) device_delete_child(sc->amr_dev, sc->amr_pass); /* throw away any command buffers */ while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) { TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link); amr_freecmd_cluster(acc); } /* destroy control device */ if( sc->amr_dev_t != (struct cdev *)NULL) destroy_dev(sc->amr_dev_t); if (mtx_initialized(&sc->amr_hw_lock)) mtx_destroy(&sc->amr_hw_lock); if (mtx_initialized(&sc->amr_list_lock)) mtx_destroy(&sc->amr_list_lock); } /******************************************************************************* * Receive a bio structure from a child device and queue it on a particular * disk resource, then poke the disk resource to start as much work as it can. */ int amr_submit_bio(struct amr_softc *sc, struct bio *bio) { debug_called(2); mtx_lock(&sc->amr_list_lock); amr_enqueue_bio(sc, bio); amr_startio(sc); mtx_unlock(&sc->amr_list_lock); return(0); } /******************************************************************************** * Accept an open operation on the control device. 
 */
static int
amr_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
    int			unit = dev2unit(dev);
    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    sc->amr_state |= AMR_STATE_OPEN;
    return(0);
}

#ifdef LSI
static int
amr_del_ld(struct amr_softc *sc, int drv_no, int status)
{

    debug_called(1);

    sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
    sc->amr_state &= ~AMR_STATE_LD_DELETE;
    sc->amr_state |= AMR_STATE_REMAP_LD;
    debug(1, "State Set");

    if (!status) {
	debug(1, "disk begin destroyed %d", drv_no);
	if (--amr_disks_registered == 0)
	    cdevsw_remove(&amrddisk_cdevsw);
	debug(1, "disk begin destroyed success");
    }
    return 0;
}

static int
amr_prepare_ld_delete(struct amr_softc *sc)
{

    debug_called(1);
    if (sc->ld_del_supported == 0)
	return(ENOIOCTL);

    sc->amr_state |= AMR_STATE_QUEUE_FRZN;
    sc->amr_state |= AMR_STATE_LD_DELETE;

    /* Wait up to a minute for all outstanding commands to be flushed. */
    tsleep((void *)&sc->ld_del_supported, PCATCH | PRIBIO,
	"delete_logical_drv", hz * 60 * 1);
    if (sc->amr_busyslots)
	return(ENOIOCTL);

    return 0;
}
#endif

/********************************************************************************
 * Accept the last close on the control device.
 */
static int
amr_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
    int			unit = dev2unit(dev);
    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    sc->amr_state &= ~AMR_STATE_OPEN;
    return (0);
}

/********************************************************************************
 * Handle controller-specific control operations.
 */
static void
amr_rescan_drives(struct cdev *dev)
{
    struct amr_softc	*sc = (struct amr_softc *)dev->si_drv1;
    int			i, error = 0;

    sc->amr_state |= AMR_STATE_REMAP_LD;
    while (sc->amr_busyslots) {
	device_printf(sc->amr_dev, "idle controller\n");
	amr_done(sc);
    }

    /* mark ourselves as in-shutdown */
    sc->amr_state |= AMR_STATE_SHUTDOWN;

    /* flush controller */
    device_printf(sc->amr_dev, "flushing cache...");
    printf("%s\n", amr_flush(sc) ? "failed" : "done");

    /* delete all our child devices */
    for (i = 0; i < AMR_MAXLD; i++) {
	if (sc->amr_drive[i].al_disk != 0) {
	    if ((error = device_delete_child(sc->amr_dev,
		sc->amr_drive[i].al_disk)) != 0)
		goto shutdown_out;
	    sc->amr_drive[i].al_disk = 0;
	}
    }

shutdown_out:
    amr_startup(sc);
}

/*
 * Bug-for-bug compatibility with Linux!
 * Some apps will send commands with inlen and outlen set to 0,
 * even though they expect data to be transferred to them from the
 * card.  Linux accidentally allows this by allocating a 4KB
 * buffer for the transfer anyway, but it then throws it away
 * without copying it back to the app.
 *
 * The amr(4) firmware relies on this feature.  In fact, it assumes
 * the buffer is always a power of 2 up to a max of 64k.  There is
 * also at least one case where it assumes a buffer less than 16k is
 * greater than 16k.  However, forcing all buffers to a size of 32k
 * causes stalls in the firmware.  Force each command smaller than
 * 64k up to the next power of two except that commands between 8k
 * and 16k are rounded up to 32k instead of 16k.
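 *
 * Concretely, amr_ioctl_buffer_length() below maps request sizes as:
 *
 *	0 - 4096	-> 4096
 *	4097 - 8192	-> 8192
 *	8193 - 32768	-> 32768	(so 8k-16k rounds to 32k, not 16k)
 *	32769 - 65536	-> 65536
 *	> 65536		-> returned unchanged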
*/ static unsigned long amr_ioctl_buffer_length(unsigned long len) { if (len <= 4 * 1024) return (4 * 1024); if (len <= 8 * 1024) return (8 * 1024); if (len <= 32 * 1024) return (32 * 1024); if (len <= 64 * 1024) return (64 * 1024); return (len); } int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct amr_softc *sc = (struct amr_softc *)dev->si_drv1; struct amr_command *ac; struct amr_mailbox *mb; struct amr_linux_ioctl ali; void *dp, *temp; int error; int len, ac_flags = 0; int logical_drives_changed = 0; u_int32_t linux_version = 0x02100000; u_int8_t status; struct amr_passthrough *ap; /* 60 bytes */ error = 0; dp = NULL; ac = NULL; ap = NULL; if ((error = copyin(addr, &ali, sizeof(ali))) != 0) return (error); switch (ali.ui.fcs.opcode) { case 0x82: switch(ali.ui.fcs.subopcode) { case 'e': copyout(&linux_version, (void *)(uintptr_t)ali.data, sizeof(linux_version)); error = 0; break; case 'm': copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data, sizeof(linux_no_adapter)); td->td_retval[0] = linux_no_adapter; error = 0; break; default: printf("Unknown subopcode\n"); error = ENOIOCTL; break; } break; case 0x80: case 0x81: if (ali.ui.fcs.opcode == 0x80) len = max(ali.outlen, ali.inlen); else len = ali.ui.fcs.length; mb = (void *)&ali.mbox[0]; if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */ (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */ if (sc->amr_allow_vol_config == 0) { error = EPERM; break; } logical_drives_changed = 1; } if (ali.mbox[0] == AMR_CMD_PASS) { mtx_lock(&sc->amr_list_lock); while ((ac = amr_alloccmd(sc)) == NULL) msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz); mtx_unlock(&sc->amr_list_lock); ap = &ac->ac_ccb->ccb_pthru; error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap, sizeof(struct amr_passthrough)); if (error) break; if (ap->ap_data_transfer_length) dp = malloc(ap->ap_data_transfer_length, M_AMR, M_WAITOK | M_ZERO); if (ali.inlen) { error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address, dp, ap->ap_data_transfer_length); if (error) break; } ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB; bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox)); ac->ac_mailbox.mb_command = AMR_CMD_PASS; ac->ac_flags = ac_flags; ac->ac_data = dp; ac->ac_length = ap->ap_data_transfer_length; temp = (void *)(uintptr_t)ap->ap_data_transfer_address; mtx_lock(&sc->amr_list_lock); error = amr_wait_command(ac); mtx_unlock(&sc->amr_list_lock); if (error) break; status = ac->ac_status; error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status)); if (error) break; if (ali.outlen) { error = copyout(dp, temp, ap->ap_data_transfer_length); if (error) break; } error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length); if (error) break; error = 0; break; } else if (ali.mbox[0] == AMR_CMD_PASS_64) { printf("No AMR_CMD_PASS_64\n"); error = ENOIOCTL; break; } else if (ali.mbox[0] == AMR_CMD_EXTPASS) { printf("No AMR_CMD_EXTPASS\n"); error = ENOIOCTL; break; } else { len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen)); dp = malloc(len, M_AMR, M_WAITOK | M_ZERO); if (ali.inlen) { error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len); if (error) break; } mtx_lock(&sc->amr_list_lock); while ((ac = amr_alloccmd(sc)) == NULL) msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz); ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT; 
bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox)); bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox)); ac->ac_length = len; ac->ac_data = dp; ac->ac_flags = ac_flags; error = amr_wait_command(ac); mtx_unlock(&sc->amr_list_lock); if (error) break; status = ac->ac_status; error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status)); if (ali.outlen) { error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen); if (error) break; } error = 0; if (logical_drives_changed) amr_rescan_drives(dev); break; } break; default: debug(1, "unknown linux ioctl 0x%lx", cmd); printf("unknown linux ioctl 0x%lx\n", cmd); error = ENOIOCTL; break; } /* * At this point, we know that there is a lock held and that these * objects have been allocated. */ mtx_lock(&sc->amr_list_lock); if (ac != NULL) amr_releasecmd(ac); mtx_unlock(&sc->amr_list_lock); if (dp != NULL) free(dp, M_AMR); return(error); } static int amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct amr_softc *sc = (struct amr_softc *)dev->si_drv1; union { void *_p; struct amr_user_ioctl *au; #ifdef AMR_IO_COMMAND32 struct amr_user_ioctl32 *au32; #endif int *result; } arg; struct amr_command *ac; struct amr_mailbox_ioctl *mbi; void *dp, *au_buffer; unsigned long au_length, real_length; unsigned char *au_cmd; int *au_statusp; int error; struct amr_passthrough *ap; /* 60 bytes */ int logical_drives_changed = 0; debug_called(1); arg._p = (void *)addr; error = 0; dp = NULL; ac = NULL; ap = NULL; switch(cmd) { case AMR_IO_VERSION: debug(1, "AMR_IO_VERSION"); *arg.result = AMR_IO_VERSION_NUMBER; return(0); #ifdef AMR_IO_COMMAND32 /* * Accept ioctl-s from 32-bit binaries on non-32-bit * platforms, such as AMD. LSI's MEGAMGR utility is * the only example known today... 
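     *
     * The 32-bit variant of the ioctl structure differs only in the
     * width of the user buffer pointer; sketched from its use below
     * (assumed layout -- the authoritative definition lives in the amr
     * ioctl header):
     *
     *	struct amr_user_ioctl32 {
     *		unsigned char	au_cmd[...];	-- as in amr_user_ioctl
     *		u_int32_t	au_buffer;	-- user VA, zero-extended
     *		u_int32_t	au_length;
     *		...
     *	};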
-mi */ case AMR_IO_COMMAND32: debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]); au_cmd = arg.au32->au_cmd; au_buffer = (void *)(u_int64_t)arg.au32->au_buffer; au_length = arg.au32->au_length; au_statusp = &arg.au32->au_status; break; #endif case AMR_IO_COMMAND: debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]); au_cmd = arg.au->au_cmd; au_buffer = (void *)arg.au->au_buffer; au_length = arg.au->au_length; au_statusp = &arg.au->au_status; break; case 0xc0046d00: case 0xc06e6d00: /* Linux emulation */ { devclass_t devclass; struct amr_linux_ioctl ali; int adapter, error; devclass = devclass_find("amr"); if (devclass == NULL) return (ENOENT); error = copyin(addr, &ali, sizeof(ali)); if (error) return (error); if (ali.ui.fcs.opcode == 0x82) adapter = 0; else adapter = (ali.ui.fcs.adapno) ^ 'm' << 8; sc = devclass_get_softc(devclass, adapter); if (sc == NULL) return (ENOENT); return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, td)); } default: debug(1, "unknown ioctl 0x%lx", cmd); return(ENOIOCTL); } if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */ (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */ if (sc->amr_allow_vol_config == 0) { error = EPERM; goto out; } logical_drives_changed = 1; #ifdef LSI if ((error = amr_prepare_ld_delete(sc)) != 0) return (error); #endif } /* handle inbound data buffer */ real_length = amr_ioctl_buffer_length(au_length); dp = malloc(real_length, M_AMR, M_WAITOK|M_ZERO); if (au_length != 0 && au_cmd[0] != 0x06) { if ((error = copyin(au_buffer, dp, au_length)) != 0) { free(dp, M_AMR); return (error); } debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp); } /* Allocate this now before the mutex gets held */ mtx_lock(&sc->amr_list_lock); while ((ac = amr_alloccmd(sc)) == NULL) msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz); /* handle SCSI passthrough command */ if (au_cmd[0] == AMR_CMD_PASS) { int len; ap = &ac->ac_ccb->ccb_pthru; bzero(ap, sizeof(struct amr_passthrough)); /* copy cdb */ len = au_cmd[2]; ap->ap_cdb_length = len; bcopy(au_cmd + 3, ap->ap_cdb, len); /* build passthrough */ ap->ap_timeout = au_cmd[len + 3] & 0x07; ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0; ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0; ap->ap_logical_drive_no = au_cmd[len + 4]; ap->ap_channel = au_cmd[len + 5]; ap->ap_scsi_id = au_cmd[len + 6]; ap->ap_request_sense_length = 14; ap->ap_data_transfer_length = au_length; /* XXX what about the request-sense area? does the caller want it? */ /* build command */ ac->ac_mailbox.mb_command = AMR_CMD_PASS; ac->ac_flags = AMR_CMD_CCB; } else { /* direct command to controller */ mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox; /* copy pertinent mailbox items */ mbi->mb_command = au_cmd[0]; mbi->mb_channel = au_cmd[1]; mbi->mb_param = au_cmd[2]; mbi->mb_pad[0] = au_cmd[3]; mbi->mb_drive = au_cmd[4]; ac->ac_flags = 0; } /* build the command */ ac->ac_data = dp; ac->ac_length = real_length; ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT; /* run the command */ error = amr_wait_command(ac); mtx_unlock(&sc->amr_list_lock); if (error) goto out; /* copy out data and set status */ if (au_length != 0) { error = copyout(dp, au_buffer, au_length); } debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer); debug(2, "%p status 0x%x", dp, ac->ac_status); *au_statusp = ac->ac_status; out: /* * At this point, we know that there is a lock held and that these * objects have been allocated. 
*/ mtx_lock(&sc->amr_list_lock); if (ac != NULL) amr_releasecmd(ac); mtx_unlock(&sc->amr_list_lock); if (dp != NULL) free(dp, M_AMR); #ifndef LSI if (logical_drives_changed) amr_rescan_drives(dev); #endif return(error); } /******************************************************************************** ******************************************************************************** Command Wrappers ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Interrogate the controller for the operational parameters we require. */ static int amr_query_controller(struct amr_softc *sc) { struct amr_enquiry3 *aex; struct amr_prodinfo *ap; struct amr_enquiry *ae; int ldrv; int status; /* * Greater than 10 byte cdb support */ sc->support_ext_cdb = amr_support_ext_cdb(sc); if(sc->support_ext_cdb) { debug(2,"supports extended CDBs."); } /* * Try to issue an ENQUIRY3 command */ if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3, AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) { /* * Fetch current state of logical drives. */ for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) { sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv]; sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv]; sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv]; debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size, sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties); } free(aex, M_AMR); /* * Get product info for channel count. */ if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) { device_printf(sc->amr_dev, "can't obtain product data from controller\n"); return(1); } sc->amr_maxdrives = 40; sc->amr_maxchan = ap->ap_nschan; sc->amr_maxio = ap->ap_maxio; sc->amr_type |= AMR_TYPE_40LD; free(ap, M_AMR); ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status); if (ap != NULL) free(ap, M_AMR); if (!status) { sc->amr_ld_del_supported = 1; device_printf(sc->amr_dev, "delete logical drives supported by controller\n"); } } else { /* failed, try the 8LD ENQUIRY commands */ if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) { if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) { device_printf(sc->amr_dev, "can't obtain configuration data from controller\n"); return(1); } ae->ae_signature = 0; } /* * Fetch current state of logical drives. */ for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) { sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv]; sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv]; sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv]; debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size, sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties); } sc->amr_maxdrives = 8; sc->amr_maxchan = ae->ae_adapter.aa_channels; sc->amr_maxio = ae->ae_adapter.aa_maxio; free(ae, M_AMR); } /* * Mark remaining drives as unused. */ for (; ldrv < AMR_MAXLD; ldrv++) sc->amr_drive[ldrv].al_size = 0xffffffff; /* * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust * the controller's reported value, and lockups have been seen when we do. 
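 * For example, a controller advertising 255 concurrent commands is
 * still clamped below to imin(255, AMR_LIMITCMD); the effective
 * ceiling is whatever amrvar.h defines AMR_LIMITCMD to be.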
*/ sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD); return(0); } /******************************************************************************** * Run a generic enquiry-style command. */ static void * amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status) { struct amr_command *ac; void *result; u_int8_t *mbox; int error; debug_called(1); error = 1; result = NULL; /* get ourselves a command buffer */ mtx_lock(&sc->amr_list_lock); ac = amr_alloccmd(sc); mtx_unlock(&sc->amr_list_lock); if (ac == NULL) goto out; /* allocate the response structure */ if ((result = malloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL) goto out; /* set command flags */ ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN; /* point the command at our data */ ac->ac_data = result; ac->ac_length = bufsize; /* build the command proper */ mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */ mbox[0] = cmd; mbox[2] = cmdsub; mbox[3] = cmdqual; *status = 0; /* can't assume that interrupts are going to work here, so play it safe */ if (sc->amr_poll_command(ac)) goto out; error = ac->ac_status; *status = ac->ac_status; out: mtx_lock(&sc->amr_list_lock); if (ac != NULL) amr_releasecmd(ac); mtx_unlock(&sc->amr_list_lock); if ((error != 0) && (result != NULL)) { free(result, M_AMR); result = NULL; } return(result); } /******************************************************************************** * Flush the controller's internal cache, return status. */ int amr_flush(struct amr_softc *sc) { struct amr_command *ac; int error; /* get ourselves a command buffer */ error = 1; mtx_lock(&sc->amr_list_lock); ac = amr_alloccmd(sc); mtx_unlock(&sc->amr_list_lock); if (ac == NULL) goto out; /* set command flags */ ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT; /* build the command proper */ ac->ac_mailbox.mb_command = AMR_CMD_FLUSH; /* we have to poll, as the system may be going down or otherwise damaged */ if (sc->amr_poll_command(ac)) goto out; error = ac->ac_status; out: mtx_lock(&sc->amr_list_lock); if (ac != NULL) amr_releasecmd(ac); mtx_unlock(&sc->amr_list_lock); return(error); } /******************************************************************************** * Detect extended CDB (greater than 10 byte CDB) support. * Returns 1 if the controller supports extended CDBs, 0 if it does not. */ static int amr_support_ext_cdb(struct amr_softc *sc) { struct amr_command *ac; u_int8_t *mbox; int error; /* get ourselves a command buffer */ error = 0; mtx_lock(&sc->amr_list_lock); ac = amr_alloccmd(sc); mtx_unlock(&sc->amr_list_lock); if (ac == NULL) goto out; /* set command flags */ ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT; /* build the command proper */ mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */ mbox[0] = 0xA4; mbox[2] = 0x16; /* we have to poll, as the system may be going down or otherwise damaged */ if (sc->amr_poll_command(ac)) goto out; if( ac->ac_status == AMR_STATUS_SUCCESS ) { error = 1; } out: mtx_lock(&sc->amr_list_lock); if (ac != NULL) amr_releasecmd(ac); mtx_unlock(&sc->amr_list_lock); return(error); } /******************************************************************************** * Try to find I/O work for the controller from one or more of the work queues. * * We make the assumption that if the controller is not ready to take a command * at some given time, it will generate an interrupt at some later time when * it is.
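* (A command that fails amr_start() is simply requeued; the completion path, amr_complete(), unfreezes the queue and calls back into amr_startio() to retry it.)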
*/ void amr_startio(struct amr_softc *sc) { struct amr_command *ac; /* spin until something prevents us from doing any work */ for (;;) { /* Don't bother to queue commands if no bounce buffers are available. */ if (sc->amr_state & AMR_STATE_QUEUE_FRZN) break; /* try to get a ready command */ ac = amr_dequeue_ready(sc); /* if that failed, build a command from a bio */ if (ac == NULL) (void)amr_bio_command(sc, &ac); /* if that failed, build a command from a ccb */ if ((ac == NULL) && (sc->amr_cam_command != NULL)) sc->amr_cam_command(sc, &ac); /* if we don't have anything to do, give up */ if (ac == NULL) break; /* try to give the command to the controller; if this fails save it for later and give up */ if (amr_start(ac)) { debug(2, "controller busy, command deferred"); amr_requeue_ready(ac); /* XXX schedule retry very soon? */ break; } } } /******************************************************************************** * Handle completion of an I/O command. */ static void amr_completeio(struct amr_command *ac) { struct amrd_softc *sc = ac->ac_bio->bio_disk->d_drv1; static struct timeval lastfail; static int curfail; if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */ ac->ac_bio->bio_error = EIO; ac->ac_bio->bio_flags |= BIO_ERROR; if (ppsratecheck(&lastfail, &curfail, 1)) device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status); /* amr_printcommand(ac);*/ } amrd_intr(ac->ac_bio); mtx_lock(&ac->ac_sc->amr_list_lock); amr_releasecmd(ac); mtx_unlock(&ac->ac_sc->amr_list_lock); } /******************************************************************************** ******************************************************************************** Command Processing ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Convert a bio off the top of the bio queue into a command.
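* (In the switch below, BIO_READ maps to AMR_CMD_LREAD/LREAD64, BIO_WRITE to AMR_CMD_LWRITE/LWRITE64 and BIO_FLUSH to AMR_CMD_FLUSH, with the 64-bit variants chosen when AMR_IS_SG64() is true.)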
*/ static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp) { struct amr_command *ac; struct amrd_softc *amrd; struct bio *bio; int error; int blkcount; int driveno; int cmd; ac = NULL; error = 0; /* get a command */ if ((ac = amr_alloccmd(sc)) == NULL) return (ENOMEM); /* get a bio to work on */ if ((bio = amr_dequeue_bio(sc)) == NULL) { amr_releasecmd(ac); return (0); } /* connect the bio to the command */ ac->ac_complete = amr_completeio; ac->ac_bio = bio; ac->ac_data = bio->bio_data; ac->ac_length = bio->bio_bcount; cmd = 0; switch (bio->bio_cmd) { case BIO_READ: ac->ac_flags |= AMR_CMD_DATAIN; if (AMR_IS_SG64(sc)) { cmd = AMR_CMD_LREAD64; ac->ac_flags |= AMR_CMD_SG64; } else cmd = AMR_CMD_LREAD; break; case BIO_WRITE: ac->ac_flags |= AMR_CMD_DATAOUT; if (AMR_IS_SG64(sc)) { cmd = AMR_CMD_LWRITE64; ac->ac_flags |= AMR_CMD_SG64; } else cmd = AMR_CMD_LWRITE; break; case BIO_FLUSH: ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT; cmd = AMR_CMD_FLUSH; break; } amrd = (struct amrd_softc *)bio->bio_disk->d_drv1; driveno = amrd->amrd_drive - sc->amr_drive; - blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE; + blkcount = howmany(bio->bio_bcount, AMR_BLKSIZE); ac->ac_mailbox.mb_command = cmd; if (bio->bio_cmd == BIO_READ || bio->bio_cmd == BIO_WRITE) { ac->ac_mailbox.mb_blkcount = blkcount; ac->ac_mailbox.mb_lba = bio->bio_pblkno; if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size) { device_printf(sc->amr_dev, "I/O beyond end of unit (%lld,%d > %lu)\n", (long long)bio->bio_pblkno, blkcount, (u_long)sc->amr_drive[driveno].al_size); } } ac->ac_mailbox.mb_drive = driveno; if (sc->amr_state & AMR_STATE_REMAP_LD) ac->ac_mailbox.mb_drive |= 0x80; /* we fill in the s/g related data when the command is mapped */ *acp = ac; return(error); } /******************************************************************************** * Take a command, submit it to the controller and sleep until it completes * or fails. Interrupts must be enabled, returns nonzero on error. */ static int amr_wait_command(struct amr_command *ac) { int error = 0; struct amr_softc *sc = ac->ac_sc; debug_called(1); ac->ac_complete = NULL; ac->ac_flags |= AMR_CMD_SLEEP; if ((error = amr_start(ac)) != 0) { return(error); } while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) { error = msleep(ac,&sc->amr_list_lock, PRIBIO, "amrwcmd", 0); } return(error); } /******************************************************************************** * Take a command, submit it to the controller and busy-wait for it to return. * Returns nonzero on error. Can be safely called with interrupts enabled. */ static int amr_std_poll_command(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; int error, count; debug_called(2); ac->ac_complete = NULL; if ((error = amr_start(ac)) != 0) return(error); count = 0; do { /* * Poll for completion, although the interrupt handler may beat us to it. * Note that the timeout here is somewhat arbitrary. 
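* (1000 iterations of DELAY(1000) below bound the busy-wait at roughly one second.)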
*/ amr_done(sc); DELAY(1000); } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000)); if (!(ac->ac_flags & AMR_CMD_BUSY)) { error = 0; } else { /* XXX the slot is now marked permanently busy */ error = EIO; device_printf(sc->amr_dev, "polled command timeout\n"); } return(error); } static void amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { struct amr_command *ac = arg; struct amr_softc *sc = ac->ac_sc; int mb_channel; if (err) { device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__); ac->ac_status = AMR_STATUS_ABORTED; return; } amr_setup_sg(arg, segs, nsegs, err); /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */ mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel; if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) || (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG))) ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments; ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments; ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr; if (AC_IS_SG64(ac)) { ac->ac_sg64_hi = 0; ac->ac_sg64_lo = ac->ac_sgbusaddr; } sc->amr_poll_command1(sc, ac); } /******************************************************************************** * Take a command, submit it to the controller and busy-wait for it to return. * Returns nonzero on error. Can be safely called with interrupts enabled. */ static int amr_quartz_poll_command(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; int error; debug_called(2); error = 0; if (AC_IS_SG64(ac)) { ac->ac_tag = sc->amr_buffer64_dmat; ac->ac_datamap = ac->ac_dma64map; } else { ac->ac_tag = sc->amr_buffer_dmat; ac->ac_datamap = ac->ac_dmamap; } /* now we have a slot, we can map the command (unmapped in amr_complete) */ if (ac->ac_data != 0) { if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data, ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) { error = 1; } } else { error = amr_quartz_poll_command1(sc, ac); } return (error); } static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac) { int count, error; mtx_lock(&sc->amr_hw_lock); if ((sc->amr_state & AMR_STATE_INTEN) == 0) { count=0; while (sc->amr_busyslots) { msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz); if(count++>10) { break; } } if(sc->amr_busyslots) { device_printf(sc->amr_dev, "adapter is busy\n"); mtx_unlock(&sc->amr_hw_lock); if (ac->ac_data != NULL) { bus_dmamap_unload(ac->ac_tag, ac->ac_datamap); } ac->ac_status=0; return(1); } } bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE); /* clear the poll/ack fields in the mailbox */ sc->amr_mailbox->mb_ident = 0xFE; sc->amr_mailbox->mb_nstatus = 0xFF; sc->amr_mailbox->mb_status = 0xFF; sc->amr_mailbox->mb_poll = 0; sc->amr_mailbox->mb_ack = 0; sc->amr_mailbox->mb_busy = 1; AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT); while(sc->amr_mailbox->mb_nstatus == 0xFF) DELAY(1); while(sc->amr_mailbox->mb_status == 0xFF) DELAY(1); ac->ac_status=sc->amr_mailbox->mb_status; error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 
1:0; while(sc->amr_mailbox->mb_poll != 0x77) DELAY(1); sc->amr_mailbox->mb_poll = 0; sc->amr_mailbox->mb_ack = 0x77; /* acknowledge that we have the commands */ AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK); while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK) DELAY(1); mtx_unlock(&sc->amr_hw_lock); /* unmap the command's data buffer */ if (ac->ac_flags & AMR_CMD_DATAIN) { bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD); } if (ac->ac_flags & AMR_CMD_DATAOUT) { bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ac->ac_tag, ac->ac_datamap); return(error); } static __inline int amr_freeslot(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; int slot; debug_called(3); slot = ac->ac_slot; if (sc->amr_busycmd[slot] == NULL) panic("amr: slot %d not busy?\n", slot); sc->amr_busycmd[slot] = NULL; atomic_subtract_int(&sc->amr_busyslots, 1); return (0); } /******************************************************************************** * Map/unmap (ac)'s data in the controller's addressable space as required. * * These functions may be safely called multiple times on a given command. */ static void amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct amr_command *ac = (struct amr_command *)arg; struct amr_sgentry *sg; struct amr_sg64entry *sg64; int flags, i; debug_called(3); /* get base address of s/g table */ sg = ac->ac_sg.sg32; sg64 = ac->ac_sg.sg64; if (AC_IS_SG64(ac)) { ac->ac_nsegments = nsegments; ac->ac_mb_physaddr = 0xffffffff; for (i = 0; i < nsegments; i++, sg64++) { sg64->sg_addr = segs[i].ds_addr; sg64->sg_count = segs[i].ds_len; } } else { /* decide whether we need to populate the s/g table */ if (nsegments < 2) { ac->ac_nsegments = 0; ac->ac_mb_physaddr = segs[0].ds_addr; } else { ac->ac_nsegments = nsegments; ac->ac_mb_physaddr = ac->ac_sgbusaddr; for (i = 0; i < nsegments; i++, sg++) { sg->sg_addr = segs[i].ds_addr; sg->sg_count = segs[i].ds_len; } } } flags = 0; if (ac->ac_flags & AMR_CMD_DATAIN) flags |= BUS_DMASYNC_PREREAD; if (ac->ac_flags & AMR_CMD_DATAOUT) flags |= BUS_DMASYNC_PREWRITE; bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags); ac->ac_flags |= AMR_CMD_MAPPED; } static void amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { struct amr_command *ac = arg; struct amr_softc *sc = ac->ac_sc; int mb_channel; if (err) { device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__); amr_abort_load(ac); return; } amr_setup_sg(arg, segs, nsegs, err); /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */ mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel; if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) || (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG))) ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments; ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments; ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr; if (AC_IS_SG64(ac)) { ac->ac_sg64_hi = 0; ac->ac_sg64_lo = ac->ac_sgbusaddr; } if (sc->amr_submit_command(ac) == EBUSY) { amr_freeslot(ac); amr_requeue_ready(ac); } } static void amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { struct amr_command *ac = arg; struct amr_softc *sc = ac->ac_sc; struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru; struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru; if (err) { device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__); amr_abort_load(ac); return; } /* Set up the mailbox portion of the command to 
point at the ccb */ ac->ac_mailbox.mb_nsgelem = 0; ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr; amr_setup_sg(arg, segs, nsegs, err); switch (ac->ac_mailbox.mb_command) { case AMR_CMD_EXTPASS: aep->ap_no_sg_elements = ac->ac_nsegments; aep->ap_data_transfer_address = ac->ac_mb_physaddr; break; case AMR_CMD_PASS: ap->ap_no_sg_elements = ac->ac_nsegments; ap->ap_data_transfer_address = ac->ac_mb_physaddr; break; default: panic("Unknown ccb command"); } if (sc->amr_submit_command(ac) == EBUSY) { amr_freeslot(ac); amr_requeue_ready(ac); } } static int amr_mapcmd(struct amr_command *ac) { bus_dmamap_callback_t *cb; struct amr_softc *sc = ac->ac_sc; debug_called(3); if (AC_IS_SG64(ac)) { ac->ac_tag = sc->amr_buffer64_dmat; ac->ac_datamap = ac->ac_dma64map; } else { ac->ac_tag = sc->amr_buffer_dmat; ac->ac_datamap = ac->ac_dmamap; } if (ac->ac_flags & AMR_CMD_CCB) cb = amr_setup_ccb; else cb = amr_setup_data; /* if the command involves data at all, and hasn't been mapped */ if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) { /* map the data buffers into bus space and build the s/g list */ if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data, ac->ac_length, cb, ac, 0) == EINPROGRESS) { sc->amr_state |= AMR_STATE_QUEUE_FRZN; } } else { if (sc->amr_submit_command(ac) == EBUSY) { amr_freeslot(ac); amr_requeue_ready(ac); } } return (0); } static void amr_unmapcmd(struct amr_command *ac) { int flag; debug_called(3); /* if the command involved data at all and was mapped */ if (ac->ac_flags & AMR_CMD_MAPPED) { if (ac->ac_data != NULL) { flag = 0; if (ac->ac_flags & AMR_CMD_DATAIN) flag |= BUS_DMASYNC_POSTREAD; if (ac->ac_flags & AMR_CMD_DATAOUT) flag |= BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag); bus_dmamap_unload(ac->ac_tag, ac->ac_datamap); } ac->ac_flags &= ~AMR_CMD_MAPPED; } } static void amr_abort_load(struct amr_command *ac) { ac_qhead_t head; struct amr_softc *sc = ac->ac_sc; mtx_assert(&sc->amr_list_lock, MA_OWNED); ac->ac_status = AMR_STATUS_ABORTED; amr_init_qhead(&head); amr_enqueue_completed(ac, &head); mtx_unlock(&sc->amr_list_lock); amr_complete(sc, &head); mtx_lock(&sc->amr_list_lock); } /******************************************************************************** * Take a command and give it to the controller, returns 0 if successful, or * EBUSY if the command should be retried later. */ static int amr_start(struct amr_command *ac) { struct amr_softc *sc; int error = 0; int slot; debug_called(3); /* mark command as busy so that polling consumer can tell */ sc = ac->ac_sc; ac->ac_flags |= AMR_CMD_BUSY; /* get a command slot (freed in amr_done) */ slot = ac->ac_slot; if (sc->amr_busycmd[slot] != NULL) panic("amr: slot %d busy?\n", slot); sc->amr_busycmd[slot] = ac; atomic_add_int(&sc->amr_busyslots, 1); /* Now we have a slot, we can map the command (unmapped in amr_complete). */ if ((error = amr_mapcmd(ac)) == ENOMEM) { /* * Memory resources are short, so free the slot and let this be tried * later. */ amr_freeslot(ac); } return (error); } /******************************************************************************** * Extract one or more completed commands from the controller (sc) * * Returns nonzero if any commands on the work queue were marked as completed.
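* (The ident in each completion entry is the slot number plus one, as written by the submit paths, so the lookup below is idx = mb_completed[i] - 1.)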
*/ int amr_done(struct amr_softc *sc) { ac_qhead_t head; struct amr_command *ac; struct amr_mailbox mbox; int i, idx, result; debug_called(3); /* See if there's anything for us to do */ result = 0; amr_init_qhead(&head); /* loop collecting completed commands */ for (;;) { /* poll for a completed command's identifier and status */ if (sc->amr_get_work(sc, &mbox)) { result = 1; /* iterate over completed commands in this result */ for (i = 0; i < mbox.mb_nstatus; i++) { /* get pointer to busy command */ idx = mbox.mb_completed[i] - 1; ac = sc->amr_busycmd[idx]; /* really a busy command? */ if (ac != NULL) { /* pull the command from the busy index */ amr_freeslot(ac); /* save status for later use */ ac->ac_status = mbox.mb_status; amr_enqueue_completed(ac, &head); debug(3, "completed command with status %x", mbox.mb_status); } else { device_printf(sc->amr_dev, "bad slot %d completed\n", idx); } } } else break; /* no work */ } /* handle completion and timeouts */ amr_complete(sc, &head); return(result); } /******************************************************************************** * Do completion processing on done commands on (sc) */ static void amr_complete(void *context, ac_qhead_t *head) { struct amr_softc *sc = (struct amr_softc *)context; struct amr_command *ac; debug_called(3); /* pull completed commands off the queue */ for (;;) { ac = amr_dequeue_completed(sc, head); if (ac == NULL) break; /* unmap the command's data buffer */ amr_unmapcmd(ac); /* * Is there a completion handler? */ if (ac->ac_complete != NULL) { /* unbusy the command */ ac->ac_flags &= ~AMR_CMD_BUSY; ac->ac_complete(ac); /* * Is someone sleeping on this one? */ } else { mtx_lock(&sc->amr_list_lock); ac->ac_flags &= ~AMR_CMD_BUSY; if (ac->ac_flags & AMR_CMD_SLEEP) { /* unbusy the command */ wakeup(ac); } mtx_unlock(&sc->amr_list_lock); } if(!sc->amr_busyslots) { wakeup(sc); } } mtx_lock(&sc->amr_list_lock); sc->amr_state &= ~AMR_STATE_QUEUE_FRZN; amr_startio(sc); mtx_unlock(&sc->amr_list_lock); } /******************************************************************************** ******************************************************************************** Command Buffer Management ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Get a new command buffer. * * This may return NULL in low-memory cases. * * If possible, we recycle a command buffer that's been used before. */ struct amr_command * amr_alloccmd(struct amr_softc *sc) { struct amr_command *ac; debug_called(3); ac = amr_dequeue_free(sc); if (ac == NULL) { sc->amr_state |= AMR_STATE_QUEUE_FRZN; return(NULL); } /* clear out significant fields */ ac->ac_status = 0; bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox)); ac->ac_flags = 0; ac->ac_bio = NULL; ac->ac_data = NULL; ac->ac_complete = NULL; ac->ac_retries = 0; ac->ac_tag = NULL; ac->ac_datamap = NULL; return(ac); } /******************************************************************************** * Release a command buffer for recycling. */ void amr_releasecmd(struct amr_command *ac) { debug_called(3); amr_enqueue_free(ac); } /******************************************************************************** * Allocate a new command cluster and initialise it. 
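* (Each slot is given a fixed AMR_NSEG-entry slice of the shared s/g table, at amr_sgbusaddr + slot * AMR_NSEG * sizeof(entry); see the loop below.)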
*/ static void amr_alloccmd_cluster(struct amr_softc *sc) { struct amr_command_cluster *acc; struct amr_command *ac; int i, nextslot; /* * If we haven't found the real limit yet, let us have a couple of * commands in order to be able to probe. */ if (sc->amr_maxio == 0) sc->amr_maxio = 2; if (sc->amr_nextslot > sc->amr_maxio) return; acc = malloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO); if (acc != NULL) { nextslot = sc->amr_nextslot; mtx_lock(&sc->amr_list_lock); TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link); mtx_unlock(&sc->amr_list_lock); for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) { ac = &acc->acc_command[i]; ac->ac_sc = sc; ac->ac_slot = nextslot; /* * The SG table for each slot is a fixed size and is assumed to * hold 64-bit s/g objects when the driver is configured to do * 64-bit DMA. 32-bit DMA commands still use the same table, but * cast down to 32-bit objects. */ if (AMR_IS_SG64(sc)) { ac->ac_sgbusaddr = sc->amr_sgbusaddr + (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry)); ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG); } else { ac->ac_sgbusaddr = sc->amr_sgbusaddr + (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry)); ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG); } ac->ac_ccb = sc->amr_ccb + ac->ac_slot; ac->ac_ccb_busaddr = sc->amr_ccb_busaddr + (ac->ac_slot * sizeof(union amr_ccb)); if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap)) break; if (AMR_IS_SG64(sc) && (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map))) break; amr_releasecmd(ac); if (++nextslot > sc->amr_maxio) break; } sc->amr_nextslot = nextslot; } } /******************************************************************************** * Free a command cluster */ static void amr_freecmd_cluster(struct amr_command_cluster *acc) { struct amr_softc *sc = acc->acc_command[0].ac_sc; int i; for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) { if (acc->acc_command[i].ac_sc == NULL) break; bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap); if (AMR_IS_SG64(sc)) bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map); } free(acc, M_AMR); } /******************************************************************************** ******************************************************************************** Interface-specific Shims ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Tell the controller that the mailbox contains a valid command */ static int amr_quartz_submit_command(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; static struct timeval lastfail; static int curfail; int i = 0; mtx_lock(&sc->amr_hw_lock); while (sc->amr_mailbox->mb_busy && (i++ < 10)) { DELAY(1); /* This is a no-op read that flushes pending mailbox updates */ AMR_QGET_ODB(sc); } if (sc->amr_mailbox->mb_busy) { mtx_unlock(&sc->amr_hw_lock); if (ac->ac_retries++ > 1000) { if (ppsratecheck(&lastfail, &curfail, 1)) device_printf(sc->amr_dev, "Too many retries on command %p. " "Controller is likely dead\n", ac); ac->ac_retries = 0; } return (EBUSY); } /* * Save the slot number so that we can locate this command when complete. * Note that ident = 0 seems to be special, so we don't use it.
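* (The submit sequence below is: copy the 14-byte mailbox image, set the busy/poll/ack fields, load the 64-bit s/g address words, then ring the doorbell with AMR_QIDB_SUBMIT.)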
*/ ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */ bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14); sc->amr_mailbox->mb_busy = 1; sc->amr_mailbox->mb_poll = 0; sc->amr_mailbox->mb_ack = 0; sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi; sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo; AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT); mtx_unlock(&sc->amr_hw_lock); return(0); } static int amr_std_submit_command(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; static struct timeval lastfail; static int curfail; mtx_lock(&sc->amr_hw_lock); if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) { mtx_unlock(&sc->amr_hw_lock); if (ac->ac_retries++ > 1000) { if (ppsratecheck(&lastfail, &curfail, 1)) device_printf(sc->amr_dev, "Too many retries on command %p. " "Controller is likely dead\n", ac); ac->ac_retries = 0; } return (EBUSY); } /* * Save the slot number so that we can locate this command when complete. * Note that ident = 0 seems to be special, so we don't use it. */ ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */ bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14); sc->amr_mailbox->mb_busy = 1; sc->amr_mailbox->mb_poll = 0; sc->amr_mailbox->mb_ack = 0; AMR_SPOST_COMMAND(sc); mtx_unlock(&sc->amr_hw_lock); return(0); } /******************************************************************************** * Claim any work that the controller has completed; acknowledge completion, * save details of the completion in (mbsave) */ static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave) { int worked, i; u_int32_t outd; u_int8_t nstatus; u_int8_t completed[46]; debug_called(3); worked = 0; /* work waiting for us? */ if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) { /* acknowledge interrupt */ AMR_QPUT_ODB(sc, AMR_QODB_READY); while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff) DELAY(1); sc->amr_mailbox->mb_nstatus = 0xff; /* wait until fw wrote out all completions */ for (i = 0; i < nstatus; i++) { while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff) DELAY(1); sc->amr_mailbox->mb_completed[i] = 0xff; } /* Save information for later processing */ mbsave->mb_nstatus = nstatus; mbsave->mb_status = sc->amr_mailbox->mb_status; sc->amr_mailbox->mb_status = 0xff; for (i = 0; i < nstatus; i++) mbsave->mb_completed[i] = completed[i]; /* acknowledge that we have the commands */ AMR_QPUT_IDB(sc, AMR_QIDB_ACK); #if 0 #ifndef AMR_QUARTZ_GOFASTER /* * This waits for the controller to notice that we've taken the * command from it. It's very inefficient, and we shouldn't do it, * but if we remove this code, we stop completing commands under * load. * * Peter J says we shouldn't do this. The documentation says we * should. Who is right? */ while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK) ; /* XXX aiee! what if it dies?
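* (If this were ever re-enabled, a bounded loop with a retry cap, like the one in amr_quartz_submit_command(), would at least avoid spinning forever on dead hardware.)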
*/ #endif #endif worked = 1; /* got some work */ } return(worked); } static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave) { int worked; u_int8_t istat; debug_called(3); worked = 0; /* check for valid interrupt status */ istat = AMR_SGET_ISTAT(sc); if ((istat & AMR_SINTR_VALID) != 0) { AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */ /* save mailbox, which contains a list of completed commands */ bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave)); AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */ worked = 1; } return(worked); } /******************************************************************************** * Notify the controller of the mailbox location. */ static void amr_std_attach_mailbox(struct amr_softc *sc) { /* program the mailbox physical address */ AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff); AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff); AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff); AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff); AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR); /* clear any outstanding interrupt and enable interrupts proper */ AMR_SACK_INTERRUPT(sc); AMR_SENABLE_INTR(sc); } #ifdef AMR_BOARD_INIT /******************************************************************************** * Initialise the controller */ static int amr_quartz_init(struct amr_softc *sc) { int status, ostatus; device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc)); AMR_QRESET(sc); ostatus = 0xff; while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) { if (status != ostatus) { device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status)); ostatus = status; } switch (status) { case AMR_QINIT_NOMEM: return(ENOMEM); case AMR_QINIT_SCAN: /* XXX we could print channel/target here */ break; } } return(0); } static int amr_std_init(struct amr_softc *sc) { int status, ostatus; device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc)); AMR_SRESET(sc); ostatus = 0xff; while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) { if (status != ostatus) { device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status)); ostatus = status; } switch (status) { case AMR_SINIT_NOMEM: return(ENOMEM); case AMR_SINIT_INPROG: /* XXX we could print channel/target here? */ break; } } return(0); } #endif /******************************************************************************** ******************************************************************************** Debugging ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Identify the controller and print some information about it. */ static void amr_describe_controller(struct amr_softc *sc) { struct amr_prodinfo *ap; struct amr_enquiry *ae; char *prod; int status; /* * Try to get 40LD product info, which tells us what the card is labelled as. */ if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) { device_printf(sc->amr_dev, "<%.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n", ap->ap_product, ap->ap_firmware, ap->ap_bios, ap->ap_memsize); free(ap, M_AMR); return; } /* * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
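* (Fallback order below: EXT_ENQUIRY2 yields a signature for the adapter-type table; failing that, plain ENQUIRY plus the PCI device ID (0x9010 = Series 428, 0x9060 = Series 434) is used to guess.)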
*/ if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) { prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature); } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) { /* * Try to work it out based on the PCI signatures. */ switch (pci_get_device(sc->amr_dev)) { case 0x9010: prod = "Series 428"; break; case 0x9060: prod = "Series 434"; break; default: prod = "unknown controller"; break; } } else { device_printf(sc->amr_dev, "<unsupported controller>\n"); return; } /* * HP NetRaid controllers have a special encoding of the firmware and * BIOS versions. The AMI version seems to have it as strings whereas * the HP version does it with a leading uppercase character and two * binary numbers. */ if(ae->ae_adapter.aa_firmware[2] >= 'A' && ae->ae_adapter.aa_firmware[2] <= 'Z' && ae->ae_adapter.aa_firmware[1] < ' ' && ae->ae_adapter.aa_firmware[0] < ' ' && ae->ae_adapter.aa_bios[2] >= 'A' && ae->ae_adapter.aa_bios[2] <= 'Z' && ae->ae_adapter.aa_bios[1] < ' ' && ae->ae_adapter.aa_bios[0] < ' ') { /* this looks like we have an HP NetRaid version of the MegaRaid */ if(ae->ae_signature == AMR_SIG_438) { /* the AMI 438 is a NetRaid 3si in HP-land */ prod = "HP NetRaid 3si"; } device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n", prod, ae->ae_adapter.aa_firmware[2], ae->ae_adapter.aa_firmware[1], ae->ae_adapter.aa_firmware[0], ae->ae_adapter.aa_bios[2], ae->ae_adapter.aa_bios[1], ae->ae_adapter.aa_bios[0], ae->ae_adapter.aa_memorysize); } else { device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n", prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios, ae->ae_adapter.aa_memorysize); } free(ae, M_AMR); } int amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks) { struct amr_command *ac; int error = EIO; debug_called(1); sc->amr_state |= AMR_STATE_INTEN; /* get ourselves a command buffer */ if ((ac = amr_alloccmd(sc)) == NULL) goto out; /* set command flags */ ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT; /* point the command at our data */ ac->ac_data = data; ac->ac_length = blks * AMR_BLKSIZE; /* build the command proper */ ac->ac_mailbox.mb_command = AMR_CMD_LWRITE; ac->ac_mailbox.mb_blkcount = blks; ac->ac_mailbox.mb_lba = lba; ac->ac_mailbox.mb_drive = unit; /* can't assume that interrupts are going to work here, so play it safe */ if (sc->amr_poll_command(ac)) goto out; error = ac->ac_status; out: if (ac != NULL) amr_releasecmd(ac); sc->amr_state &= ~AMR_STATE_INTEN; return (error); } #ifdef AMR_DEBUG /******************************************************************************** * Print the command (ac) in human-readable format */ #if 0 static void amr_printcommand(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; struct amr_sgentry *sg; int i; device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n", ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive); device_printf(sc->amr_dev, "blkcount %d lba %d\n", ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba); device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length); device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n", ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem); device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio); /* get base address of s/g table */ sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG); for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count); } #endif #endif diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c index a7b04800fcfd..644bb2d93e4d 100644 --- a/sys/dev/bwn/if_bwn.c +++ b/sys/dev/bwn/if_bwn.c @@ -1,14087 +1,14087 @@ /*- * Copyright (c) 2009-2010 Weongyo Jeong * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * The Broadcom Wireless LAN controller driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0, "Broadcom driver parameters"); /* * Tunable & sysctl variables. */ #ifdef BWN_DEBUG static int bwn_debug = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0, "Broadcom debugging printfs"); enum { BWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ BWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ BWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ BWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */ BWN_DEBUG_RESET = 0x00000010, /* reset processing */ BWN_DEBUG_OPS = 0x00000020, /* bwn_ops processing */ BWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ BWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */ BWN_DEBUG_INTR = 0x00000100, /* ISR */ BWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */ BWN_DEBUG_NODE = 0x00000400, /* node management */ BWN_DEBUG_LED = 0x00000800, /* led management */ BWN_DEBUG_CMD = 0x00001000, /* cmd submission */ BWN_DEBUG_LO = 0x00002000, /* LO */ BWN_DEBUG_FW = 0x00004000, /* firmware */ BWN_DEBUG_WME = 0x00008000, /* WME */ BWN_DEBUG_RF = 0x00010000, /* RF */ BWN_DEBUG_FATAL = 0x80000000, /* fatal errors */ BWN_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) 
do { (void) sc; } while (0) #endif static int bwn_bfp = 0; /* use "Bad Frames Preemption" */ SYSCTL_INT(_hw_bwn, OID_AUTO, bfp, CTLFLAG_RW, &bwn_bfp, 0, "uses Bad Frames Preemption"); static int bwn_bluetooth = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, bluetooth, CTLFLAG_RW, &bwn_bluetooth, 0, "turns on Bluetooth Coexistence"); static int bwn_hwpctl = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, hwpctl, CTLFLAG_RW, &bwn_hwpctl, 0, "uses H/W power control"); static int bwn_msi_disable = 0; /* MSI disabled */ TUNABLE_INT("hw.bwn.msi_disable", &bwn_msi_disable); static int bwn_usedma = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, usedma, CTLFLAG_RD, &bwn_usedma, 0, "uses DMA"); TUNABLE_INT("hw.bwn.usedma", &bwn_usedma); static int bwn_wme = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, wme, CTLFLAG_RW, &bwn_wme, 0, "uses WME support"); static void bwn_attach_pre(struct bwn_softc *); static int bwn_attach_post(struct bwn_softc *); static void bwn_sprom_bugfixes(device_t); static int bwn_init(struct bwn_softc *); static void bwn_parent(struct ieee80211com *); static void bwn_start(struct bwn_softc *); static int bwn_transmit(struct ieee80211com *, struct mbuf *); static int bwn_attach_core(struct bwn_mac *); static void bwn_reset_core(struct bwn_mac *, uint32_t); static int bwn_phy_getinfo(struct bwn_mac *, int); static int bwn_chiptest(struct bwn_mac *); static int bwn_setup_channels(struct bwn_mac *, int, int); static int bwn_phy_g_attach(struct bwn_mac *); static void bwn_phy_g_detach(struct bwn_mac *); static void bwn_phy_g_init_pre(struct bwn_mac *); static int bwn_phy_g_prepare_hw(struct bwn_mac *); static int bwn_phy_g_init(struct bwn_mac *); static void bwn_phy_g_exit(struct bwn_mac *); static uint16_t bwn_phy_g_read(struct bwn_mac *, uint16_t); static void bwn_phy_g_write(struct bwn_mac *, uint16_t, uint16_t); static uint16_t bwn_phy_g_rf_read(struct bwn_mac *, uint16_t); static void bwn_phy_g_rf_write(struct bwn_mac *, uint16_t, uint16_t); static int bwn_phy_g_hwpctl(struct bwn_mac *); static void bwn_phy_g_rf_onoff(struct bwn_mac *, int); static int bwn_phy_g_switch_channel(struct bwn_mac *, uint32_t); static uint32_t bwn_phy_g_get_default_chan(struct bwn_mac *); static void bwn_phy_g_set_antenna(struct bwn_mac *, int); static int bwn_phy_g_im(struct bwn_mac *, int); static int bwn_phy_g_recalc_txpwr(struct bwn_mac *, int); static void bwn_phy_g_set_txpwr(struct bwn_mac *); static void bwn_phy_g_task_15s(struct bwn_mac *); static void bwn_phy_g_task_60s(struct bwn_mac *); static uint16_t bwn_phy_g_txctl(struct bwn_mac *); static void bwn_phy_switch_analog(struct bwn_mac *, int); static uint16_t bwn_shm_read_2(struct bwn_mac *, uint16_t, uint16_t); static void bwn_shm_write_2(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static uint32_t bwn_shm_read_4(struct bwn_mac *, uint16_t, uint16_t); static void bwn_shm_write_4(struct bwn_mac *, uint16_t, uint16_t, uint32_t); static void bwn_shm_ctlword(struct bwn_mac *, uint16_t, uint16_t); static void bwn_addchannels(struct ieee80211_channel [], int, int *, const struct bwn_channelinfo *, int); static int bwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void bwn_updateslot(struct ieee80211com *); static void bwn_update_promisc(struct ieee80211com *); static void bwn_wme_init(struct bwn_mac *); static int bwn_wme_update(struct ieee80211com *); static void bwn_wme_clear(struct bwn_softc *); static void bwn_wme_load(struct bwn_mac *); static void bwn_wme_loadparams(struct bwn_mac *, const struct wmeParams *, uint16_t); static void 
bwn_scan_start(struct ieee80211com *); static void bwn_scan_end(struct ieee80211com *); static void bwn_set_channel(struct ieee80211com *); static struct ieee80211vap *bwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void bwn_vap_delete(struct ieee80211vap *); static void bwn_stop(struct bwn_softc *); static int bwn_core_init(struct bwn_mac *); static void bwn_core_start(struct bwn_mac *); static void bwn_core_exit(struct bwn_mac *); static void bwn_bt_disable(struct bwn_mac *); static int bwn_chip_init(struct bwn_mac *); static uint64_t bwn_hf_read(struct bwn_mac *); static void bwn_hf_write(struct bwn_mac *, uint64_t); static void bwn_set_txretry(struct bwn_mac *, int, int); static void bwn_rate_init(struct bwn_mac *); static void bwn_set_phytxctl(struct bwn_mac *); static void bwn_spu_setdelay(struct bwn_mac *, int); static void bwn_bt_enable(struct bwn_mac *); static void bwn_set_macaddr(struct bwn_mac *); static void bwn_crypt_init(struct bwn_mac *); static void bwn_chip_exit(struct bwn_mac *); static int bwn_fw_fillinfo(struct bwn_mac *); static int bwn_fw_loaducode(struct bwn_mac *); static int bwn_gpio_init(struct bwn_mac *); static int bwn_fw_loadinitvals(struct bwn_mac *); static int bwn_phy_init(struct bwn_mac *); static void bwn_set_txantenna(struct bwn_mac *, int); static void bwn_set_opmode(struct bwn_mac *); static void bwn_rate_write(struct bwn_mac *, uint16_t, int); static uint8_t bwn_plcp_getcck(const uint8_t); static uint8_t bwn_plcp_getofdm(const uint8_t); static void bwn_pio_init(struct bwn_mac *); static uint16_t bwn_pio_idx2base(struct bwn_mac *, int); static void bwn_pio_set_txqueue(struct bwn_mac *, struct bwn_pio_txqueue *, int); static void bwn_pio_setupqueue_rx(struct bwn_mac *, struct bwn_pio_rxqueue *, int); static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *); static uint16_t bwn_pio_read_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t); static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *); static int bwn_pio_rx(struct bwn_pio_rxqueue *); static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *); static void bwn_pio_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *, uint16_t); static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *, uint16_t); static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *, uint16_t, uint16_t); static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *, uint16_t, uint32_t); static int bwn_pio_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_select(struct bwn_mac *, uint8_t); static uint32_t bwn_pio_write_multi_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint32_t, const void *, int); static void bwn_pio_write_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, uint32_t); static uint16_t bwn_pio_write_multi_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, const void *, int); static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_parse_cookie(struct bwn_mac *, uint16_t, struct bwn_pio_txpkt **); static void bwn_dma_init(struct bwn_mac *); static void bwn_dma_rxdirectfifo(struct bwn_mac *, int, uint8_t); static int bwn_dma_mask2type(uint64_t); static uint64_t bwn_dma_mask(struct bwn_mac *); static uint16_t bwn_dma_base(int, int); static void bwn_dma_ringfree(struct 
bwn_dma_ring **); static void bwn_dma_32_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_32_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_32_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_32_suspend(struct bwn_dma_ring *); static void bwn_dma_32_resume(struct bwn_dma_ring *); static int bwn_dma_32_get_curslot(struct bwn_dma_ring *); static void bwn_dma_32_set_curslot(struct bwn_dma_ring *, int); static void bwn_dma_64_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_64_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_64_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_64_suspend(struct bwn_dma_ring *); static void bwn_dma_64_resume(struct bwn_dma_ring *); static int bwn_dma_64_get_curslot(struct bwn_dma_ring *); static void bwn_dma_64_set_curslot(struct bwn_dma_ring *, int); static int bwn_dma_allocringmemory(struct bwn_dma_ring *); static void bwn_dma_setup(struct bwn_dma_ring *); static void bwn_dma_free_ringmemory(struct bwn_dma_ring *); static void bwn_dma_cleanup(struct bwn_dma_ring *); static void bwn_dma_free_descbufs(struct bwn_dma_ring *); static int bwn_dma_tx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_rx(struct bwn_dma_ring *); static int bwn_dma_rx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_free_descbuf(struct bwn_dma_ring *, struct bwn_dmadesc_meta *); static void bwn_dma_set_redzone(struct bwn_dma_ring *, struct mbuf *); static int bwn_dma_gettype(struct bwn_mac *); static void bwn_dma_ring_addr(void *, bus_dma_segment_t *, int, int); static int bwn_dma_freeslot(struct bwn_dma_ring *); static int bwn_dma_nextslot(struct bwn_dma_ring *, int); static void bwn_dma_rxeof(struct bwn_dma_ring *, int *); static int bwn_dma_newbuf(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, struct bwn_dmadesc_meta *, int); static void bwn_dma_buf_addr(void *, bus_dma_segment_t *, int, bus_size_t, int); static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *, struct mbuf *); static void bwn_dma_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static int bwn_dma_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static int bwn_dma_getslot(struct bwn_dma_ring *); static struct bwn_dma_ring *bwn_dma_select(struct bwn_mac *, uint8_t); static int bwn_dma_attach(struct bwn_mac *); static struct bwn_dma_ring *bwn_dma_ringsetup(struct bwn_mac *, int, int, int); static struct bwn_dma_ring *bwn_dma_parse_cookie(struct bwn_mac *, const struct bwn_txstatus *, uint16_t, int *); static void bwn_dma_free(struct bwn_mac *); static void bwn_phy_g_init_sub(struct bwn_mac *); static uint8_t bwn_has_hwpctl(struct bwn_mac *); static void bwn_phy_init_b5(struct bwn_mac *); static void bwn_phy_init_b6(struct bwn_mac *); static void bwn_phy_init_a(struct bwn_mac *); static void bwn_loopback_calcgain(struct bwn_mac *); static uint16_t bwn_rf_init_bcm2050(struct bwn_mac *); static void bwn_lo_g_init(struct bwn_mac *); static void bwn_lo_g_adjust(struct bwn_mac *); static void bwn_lo_get_powervector(struct bwn_mac *); static struct bwn_lo_calib *bwn_lo_calibset(struct bwn_mac *, const struct bwn_bbatt *, const struct bwn_rfatt *); static void bwn_lo_write(struct bwn_mac *, struct bwn_loctl *); static void bwn_phy_hwpctl_init(struct bwn_mac *); static 
void bwn_phy_g_switch_chan(struct bwn_mac *, int, uint8_t); static void bwn_phy_g_set_txpwr_sub(struct bwn_mac *, const struct bwn_bbatt *, const struct bwn_rfatt *, uint8_t); static void bwn_phy_g_set_bbatt(struct bwn_mac *, uint16_t); static uint16_t bwn_rf_2050_rfoverval(struct bwn_mac *, uint16_t, uint32_t); static void bwn_spu_workaround(struct bwn_mac *, uint8_t); static void bwn_wa_init(struct bwn_mac *); static void bwn_ofdmtab_write_2(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static void bwn_dummy_transmission(struct bwn_mac *, int, int); static void bwn_ofdmtab_write_4(struct bwn_mac *, uint16_t, uint16_t, uint32_t); static void bwn_gtab_write(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static void bwn_ram_write(struct bwn_mac *, uint16_t, uint32_t); static void bwn_mac_suspend(struct bwn_mac *); static void bwn_mac_enable(struct bwn_mac *); static void bwn_psctl(struct bwn_mac *, uint32_t); static int16_t bwn_nrssi_read(struct bwn_mac *, uint16_t); static void bwn_nrssi_offset(struct bwn_mac *); static void bwn_nrssi_threshold(struct bwn_mac *); static void bwn_nrssi_slope_11g(struct bwn_mac *); static void bwn_set_all_gains(struct bwn_mac *, int16_t, int16_t, int16_t); static void bwn_set_original_gains(struct bwn_mac *); static void bwn_hwpctl_early_init(struct bwn_mac *); static void bwn_hwpctl_init_gphy(struct bwn_mac *); static uint16_t bwn_phy_g_chan2freq(uint8_t); static int bwn_fw_gets(struct bwn_mac *, enum bwn_fwtype); static int bwn_fw_get(struct bwn_mac *, enum bwn_fwtype, const char *, struct bwn_fwfile *); static void bwn_release_firmware(struct bwn_mac *); static void bwn_do_release_fw(struct bwn_fwfile *); static uint16_t bwn_fwcaps_read(struct bwn_mac *); static int bwn_fwinitvals_write(struct bwn_mac *, const struct bwn_fwinitvals *, size_t, size_t); static int bwn_switch_channel(struct bwn_mac *, int); static uint16_t bwn_ant2phy(int); static void bwn_mac_write_bssid(struct bwn_mac *); static void bwn_mac_setfilter(struct bwn_mac *, uint16_t, const uint8_t *); static void bwn_key_dowrite(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *, size_t, const uint8_t *); static void bwn_key_macwrite(struct bwn_mac *, uint8_t, const uint8_t *); static void bwn_key_write(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *); static void bwn_phy_exit(struct bwn_mac *); static void bwn_core_stop(struct bwn_mac *); static int bwn_switch_band(struct bwn_softc *, struct ieee80211_channel *); static void bwn_phy_reset(struct bwn_mac *); static int bwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void bwn_set_pretbtt(struct bwn_mac *); static int bwn_intr(void *); static void bwn_intrtask(void *, int); static void bwn_restart(struct bwn_mac *, const char *); static void bwn_intr_ucode_debug(struct bwn_mac *); static void bwn_intr_tbtt_indication(struct bwn_mac *); static void bwn_intr_atim_end(struct bwn_mac *); static void bwn_intr_beacon(struct bwn_mac *); static void bwn_intr_pmq(struct bwn_mac *); static void bwn_intr_noise(struct bwn_mac *); static void bwn_intr_txeof(struct bwn_mac *); static void bwn_hwreset(void *, int); static void bwn_handle_fwpanic(struct bwn_mac *); static void bwn_load_beacon0(struct bwn_mac *); static void bwn_load_beacon1(struct bwn_mac *); static uint32_t bwn_jssi_read(struct bwn_mac *); static void bwn_noise_gensample(struct bwn_mac *); static void bwn_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static void bwn_rxeof(struct bwn_mac *, struct mbuf *, const void *); static void 
bwn_phy_txpower_check(struct bwn_mac *, uint32_t); static int bwn_tx_start(struct bwn_softc *, struct ieee80211_node *, struct mbuf *); static int bwn_tx_isfull(struct bwn_softc *, struct mbuf *); static int bwn_set_txhdr(struct bwn_mac *, struct ieee80211_node *, struct mbuf *, struct bwn_txhdr *, uint16_t); static void bwn_plcp_genhdr(struct bwn_plcp4 *, const uint16_t, const uint8_t); static uint8_t bwn_antenna_sanitize(struct bwn_mac *, uint8_t); static uint8_t bwn_get_fbrate(uint8_t); static int bwn_phy_shm_tssi_read(struct bwn_mac *, uint16_t); static void bwn_phy_g_setatt(struct bwn_mac *, int *, int *); static void bwn_phy_lock(struct bwn_mac *); static void bwn_phy_unlock(struct bwn_mac *); static void bwn_rf_lock(struct bwn_mac *); static void bwn_rf_unlock(struct bwn_mac *); static void bwn_txpwr(void *, int); static void bwn_tasks(void *); static void bwn_task_15s(struct bwn_mac *); static void bwn_task_30s(struct bwn_mac *); static void bwn_task_60s(struct bwn_mac *); static int bwn_plcp_get_ofdmrate(struct bwn_mac *, struct bwn_plcp6 *, uint8_t); static int bwn_plcp_get_cckrate(struct bwn_mac *, struct bwn_plcp6 *); static void bwn_rx_radiotap(struct bwn_mac *, struct mbuf *, const struct bwn_rxhdr4 *, struct bwn_plcp6 *, int, int, int); static void bwn_tsf_read(struct bwn_mac *, uint64_t *); static void bwn_phy_g_dc_lookup_init(struct bwn_mac *, uint8_t); static void bwn_set_slot_time(struct bwn_mac *, uint16_t); static void bwn_watchdog(void *); static void bwn_dma_stop(struct bwn_mac *); static void bwn_pio_stop(struct bwn_mac *); static void bwn_dma_ringstop(struct bwn_dma_ring **); static void bwn_led_attach(struct bwn_mac *); static void bwn_led_newstate(struct bwn_mac *, enum ieee80211_state); static void bwn_led_event(struct bwn_mac *, int); static void bwn_led_blink_start(struct bwn_mac *, int, int); static void bwn_led_blink_next(void *); static void bwn_led_blink_end(void *); static void bwn_rfswitch(void *); static void bwn_rf_turnon(struct bwn_mac *); static void bwn_rf_turnoff(struct bwn_mac *); static void bwn_phy_lp_init_pre(struct bwn_mac *); static int bwn_phy_lp_init(struct bwn_mac *); static uint16_t bwn_phy_lp_read(struct bwn_mac *, uint16_t); static void bwn_phy_lp_write(struct bwn_mac *, uint16_t, uint16_t); static void bwn_phy_lp_maskset(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static uint16_t bwn_phy_lp_rf_read(struct bwn_mac *, uint16_t); static void bwn_phy_lp_rf_write(struct bwn_mac *, uint16_t, uint16_t); static void bwn_phy_lp_rf_onoff(struct bwn_mac *, int); static int bwn_phy_lp_switch_channel(struct bwn_mac *, uint32_t); static uint32_t bwn_phy_lp_get_default_chan(struct bwn_mac *); static void bwn_phy_lp_set_antenna(struct bwn_mac *, int); static void bwn_phy_lp_task_60s(struct bwn_mac *); static void bwn_phy_lp_readsprom(struct bwn_mac *); static void bwn_phy_lp_bbinit(struct bwn_mac *); static void bwn_phy_lp_txpctl_init(struct bwn_mac *); static void bwn_phy_lp_calib(struct bwn_mac *); static void bwn_phy_lp_switch_analog(struct bwn_mac *, int); static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *, uint8_t); static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_anafilter(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_gaintbl(struct bwn_mac *, uint32_t); static void bwn_phy_lp_digflt_save(struct bwn_mac *); static void bwn_phy_lp_get_txpctlmode(struct bwn_mac *); static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *, uint8_t); static void bwn_phy_lp_bugfix(struct bwn_mac 
*); static void bwn_phy_lp_digflt_restore(struct bwn_mac *); static void bwn_phy_lp_tblinit(struct bwn_mac *); static void bwn_phy_lp_bbinit_r2(struct bwn_mac *); static void bwn_phy_lp_bbinit_r01(struct bwn_mac *); static void bwn_phy_lp_b2062_init(struct bwn_mac *); static void bwn_phy_lp_b2063_init(struct bwn_mac *); static void bwn_phy_lp_rxcal_r2(struct bwn_mac *); static void bwn_phy_lp_rccal_r12(struct bwn_mac *); static void bwn_phy_lp_set_rccap(struct bwn_mac *); static uint32_t bwn_phy_lp_roundup(uint32_t, uint32_t, uint8_t); static void bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *); static void bwn_phy_lp_b2062_vco_calib(struct bwn_mac *); static void bwn_tab_write_multi(struct bwn_mac *, uint32_t, int, const void *); static void bwn_tab_read_multi(struct bwn_mac *, uint32_t, int, void *); static struct bwn_txgain bwn_phy_lp_get_txgain(struct bwn_mac *); static uint8_t bwn_phy_lp_get_bbmult(struct bwn_mac *); static void bwn_phy_lp_set_txgain(struct bwn_mac *, struct bwn_txgain *); static void bwn_phy_lp_set_bbmult(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_trsw_over(struct bwn_mac *, uint8_t, uint8_t); static void bwn_phy_lp_set_rxgain(struct bwn_mac *, uint32_t); static void bwn_phy_lp_set_deaf(struct bwn_mac *, uint8_t); static int bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *, uint16_t); static void bwn_phy_lp_clear_deaf(struct bwn_mac *, uint8_t); static void bwn_phy_lp_tblinit_r01(struct bwn_mac *); static void bwn_phy_lp_tblinit_r2(struct bwn_mac *); static void bwn_phy_lp_tblinit_txgain(struct bwn_mac *); static void bwn_tab_write(struct bwn_mac *, uint32_t, uint32_t); static void bwn_phy_lp_b2062_tblinit(struct bwn_mac *); static void bwn_phy_lp_b2063_tblinit(struct bwn_mac *); static int bwn_phy_lp_loopback(struct bwn_mac *); static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *, uint16_t); static void bwn_phy_lp_ddfs_turnon(struct bwn_mac *, int, int, int, int, int); static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *, uint16_t, uint8_t, struct bwn_phy_lp_iq_est *); static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *); static uint32_t bwn_tab_read(struct bwn_mac *, uint32_t); static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *, uint16_t); static void bwn_phy_lp_set_txgain_pa(struct bwn_mac *, uint16_t); static void bwn_phy_lp_set_txgain_override(struct bwn_mac *); static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *); static uint8_t bwn_nbits(int32_t); static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *, int, int, struct bwn_txgain_entry *); static void bwn_phy_lp_gaintbl_write(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_sysctl_node(struct bwn_softc *); static struct resource_spec bwn_res_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec bwn_res_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static const struct bwn_channelinfo bwn_chantable_bg = { .channels = { { 2412, 1, 30 }, { 2417, 2, 30 }, { 2422, 3, 30 }, { 2427, 4, 30 }, { 2432, 5, 30 }, { 2437, 6, 30 }, { 2442, 7, 30 }, { 2447, 8, 30 }, { 2452, 9, 30 }, { 2457, 10, 30 }, { 2462, 11, 30 }, { 2467, 12, 30 }, { 2472, 13, 30 }, { 2484, 14, 30 } }, .nchannels = 14 }; static const struct bwn_channelinfo bwn_chantable_a = { .channels = { { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 
30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5260, 52, 30 }, { 5280, 56, 30 }, { 5300, 60, 30 }, { 5320, 64, 30 }, { 5500, 100, 30 }, { 5520, 104, 30 }, { 5540, 108, 30 }, { 5560, 112, 30 }, { 5580, 116, 30 }, { 5600, 120, 30 }, { 5620, 124, 30 }, { 5640, 128, 30 }, { 5660, 132, 30 }, { 5680, 136, 30 }, { 5700, 140, 30 }, { 5745, 149, 30 }, { 5765, 153, 30 }, { 5785, 157, 30 }, { 5805, 161, 30 }, { 5825, 165, 30 }, { 5920, 184, 30 }, { 5940, 188, 30 }, { 5960, 192, 30 }, { 5980, 196, 30 }, { 6000, 200, 30 }, { 6020, 204, 30 }, { 6040, 208, 30 }, { 6060, 212, 30 }, { 6080, 216, 30 } }, .nchannels = 37 }; static const struct bwn_channelinfo bwn_chantable_n = { .channels = { { 5160, 32, 30 }, { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5250, 50, 30 }, { 5260, 52, 30 }, { 5270, 54, 30 }, { 5280, 56, 30 }, { 5290, 58, 30 }, { 5300, 60, 30 }, { 5310, 62, 30 }, { 5320, 64, 30 }, { 5330, 66, 30 }, { 5340, 68, 30 }, { 5350, 70, 30 }, { 5360, 72, 30 }, { 5370, 74, 30 }, { 5380, 76, 30 }, { 5390, 78, 30 }, { 5400, 80, 30 }, { 5410, 82, 30 }, { 5420, 84, 30 }, { 5430, 86, 30 }, { 5440, 88, 30 }, { 5450, 90, 30 }, { 5460, 92, 30 }, { 5470, 94, 30 }, { 5480, 96, 30 }, { 5490, 98, 30 }, { 5500, 100, 30 }, { 5510, 102, 30 }, { 5520, 104, 30 }, { 5530, 106, 30 }, { 5540, 108, 30 }, { 5550, 110, 30 }, { 5560, 112, 30 }, { 5570, 114, 30 }, { 5580, 116, 30 }, { 5590, 118, 30 }, { 5600, 120, 30 }, { 5610, 122, 30 }, { 5620, 124, 30 }, { 5630, 126, 30 }, { 5640, 128, 30 }, { 5650, 130, 30 }, { 5660, 132, 30 }, { 5670, 134, 30 }, { 5680, 136, 30 }, { 5690, 138, 30 }, { 5700, 140, 30 }, { 5710, 142, 30 }, { 5720, 144, 30 }, { 5725, 145, 30 }, { 5730, 146, 30 }, { 5735, 147, 30 }, { 5740, 148, 30 }, { 5745, 149, 30 }, { 5750, 150, 30 }, { 5755, 151, 30 }, { 5760, 152, 30 }, { 5765, 153, 30 }, { 5770, 154, 30 }, { 5775, 155, 30 }, { 5780, 156, 30 }, { 5785, 157, 30 }, { 5790, 158, 30 }, { 5795, 159, 30 }, { 5800, 160, 30 }, { 5805, 161, 30 }, { 5810, 162, 30 }, { 5815, 163, 30 }, { 5820, 164, 30 }, { 5825, 165, 30 }, { 5830, 166, 30 }, { 5840, 168, 30 }, { 5850, 170, 30 }, { 5860, 172, 30 }, { 5870, 174, 30 }, { 5880, 176, 30 }, { 5890, 178, 30 }, { 5900, 180, 30 }, { 5910, 182, 30 }, { 5920, 184, 30 }, { 5930, 186, 30 }, { 5940, 188, 30 }, { 5950, 190, 30 }, { 5960, 192, 30 }, { 5970, 194, 30 }, { 5980, 196, 30 }, { 5990, 198, 30 }, { 6000, 200, 30 }, { 6010, 202, 30 }, { 6020, 204, 30 }, { 6030, 206, 30 }, { 6040, 208, 30 }, { 6050, 210, 30 }, { 6060, 212, 30 }, { 6070, 214, 30 }, { 6080, 216, 30 }, { 6090, 218, 30 }, { 6100, 220, 30 }, { 6110, 222, 30 }, { 6120, 224, 30 }, { 6130, 226, 30 }, { 6140, 228, 30 } }, .nchannels = 110 }; static const uint8_t bwn_b2063_chantable_data[33][12] = { { 0x6f, 0x3c, 0x3c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6f, 0x2c, 0x2c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6f, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6e, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6e, 0xc, 0xc, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6a, 0xc, 0xc, 0, 0x2, 0x5, 0xd, 0xd, 0x77, 0x80, 0x20, 0 }, { 0x6a, 0xc, 0xc, 0, 0x1, 0x5, 0xd, 0xc, 0x77, 0x80, 0x20, 0 }, { 0x6a, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x80, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x70, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xb, 0xc, 0x77, 0x70, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x4, 0xb, 0xb, 0x77, 
0x60, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xb, 0x77, 0x60, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xa, 0x77, 0x60, 0x20, 0 }, { 0x68, 0xc, 0xc, 0, 0, 0x2, 0x9, 0x9, 0x77, 0x60, 0x20, 0 }, { 0x68, 0xc, 0xc, 0, 0, 0x1, 0x8, 0x8, 0x77, 0x50, 0x10, 0 }, { 0x67, 0xc, 0xc, 0, 0, 0, 0x8, 0x8, 0x77, 0x50, 0x10, 0 }, { 0x64, 0xc, 0xc, 0, 0, 0, 0x2, 0x1, 0x77, 0x20, 0, 0 }, { 0x64, 0xc, 0xc, 0, 0, 0, 0x1, 0x1, 0x77, 0x20, 0, 0 }, { 0x63, 0xc, 0xc, 0, 0, 0, 0x1, 0, 0x77, 0x10, 0, 0 }, { 0x63, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 }, { 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 }, { 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x61, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x60, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x6e, 0xc, 0xc, 0, 0x9, 0xe, 0xf, 0xf, 0x77, 0xc0, 0x50, 0 }, { 0x6e, 0xc, 0xc, 0, 0x9, 0xd, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 }, { 0x6e, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xb, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xa, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x7, 0x9, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x6, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x5, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 } }; static const struct bwn_b206x_chan bwn_b2063_chantable[] = { { 1, 2412, bwn_b2063_chantable_data[0] }, { 2, 2417, bwn_b2063_chantable_data[0] }, { 3, 2422, bwn_b2063_chantable_data[0] }, { 4, 2427, bwn_b2063_chantable_data[1] }, { 5, 2432, bwn_b2063_chantable_data[1] }, { 6, 2437, bwn_b2063_chantable_data[1] }, { 7, 2442, bwn_b2063_chantable_data[1] }, { 8, 2447, bwn_b2063_chantable_data[1] }, { 9, 2452, bwn_b2063_chantable_data[2] }, { 10, 2457, bwn_b2063_chantable_data[2] }, { 11, 2462, bwn_b2063_chantable_data[3] }, { 12, 2467, bwn_b2063_chantable_data[3] }, { 13, 2472, bwn_b2063_chantable_data[3] }, { 14, 2484, bwn_b2063_chantable_data[4] }, { 34, 5170, bwn_b2063_chantable_data[5] }, { 36, 5180, bwn_b2063_chantable_data[6] }, { 38, 5190, bwn_b2063_chantable_data[7] }, { 40, 5200, bwn_b2063_chantable_data[8] }, { 42, 5210, bwn_b2063_chantable_data[9] }, { 44, 5220, bwn_b2063_chantable_data[10] }, { 46, 5230, bwn_b2063_chantable_data[11] }, { 48, 5240, bwn_b2063_chantable_data[12] }, { 52, 5260, bwn_b2063_chantable_data[13] }, { 56, 5280, bwn_b2063_chantable_data[14] }, { 60, 5300, bwn_b2063_chantable_data[14] }, { 64, 5320, bwn_b2063_chantable_data[15] }, { 100, 5500, bwn_b2063_chantable_data[16] }, { 104, 5520, bwn_b2063_chantable_data[17] }, { 108, 5540, bwn_b2063_chantable_data[18] }, { 112, 5560, bwn_b2063_chantable_data[19] }, { 116, 5580, bwn_b2063_chantable_data[20] }, { 120, 5600, bwn_b2063_chantable_data[21] }, { 124, 5620, bwn_b2063_chantable_data[21] }, { 128, 5640, bwn_b2063_chantable_data[22] }, { 132, 5660, bwn_b2063_chantable_data[22] }, { 136, 5680, bwn_b2063_chantable_data[22] }, { 140, 5700, bwn_b2063_chantable_data[23] }, { 149, 5745, bwn_b2063_chantable_data[23] }, { 153, 5765, bwn_b2063_chantable_data[23] }, { 157, 5785, bwn_b2063_chantable_data[23] }, { 161, 5805, bwn_b2063_chantable_data[23] }, { 165, 5825, bwn_b2063_chantable_data[23] }, { 184, 4920, bwn_b2063_chantable_data[24] }, { 188, 4940, bwn_b2063_chantable_data[25] }, { 192, 4960, bwn_b2063_chantable_data[26] }, { 196, 4980, bwn_b2063_chantable_data[27] }, { 200, 5000, bwn_b2063_chantable_data[28] }, { 204, 5020, bwn_b2063_chantable_data[29] }, { 208, 5040, bwn_b2063_chantable_data[30] }, { 212, 5060, 
bwn_b2063_chantable_data[31] }, { 216, 5080, bwn_b2063_chantable_data[32] } }; static const uint8_t bwn_b2062_chantable_data[22][12] = { { 0xff, 0xff, 0xb5, 0x1b, 0x24, 0x32, 0x32, 0x88, 0x88, 0, 0, 0 }, { 0, 0x22, 0x20, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x10, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x20, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x10, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x63, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x62, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x30, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x20, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x10, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0x55, 0x77, 0x90, 0xf7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x44, 0x77, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x44, 0x66, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x33, 0x66, 0x70, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x55, 0x60, 0xd7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x55, 0x60, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x44, 0x50, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x11, 0x44, 0x50, 0xa5, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x44, 0x40, 0xb6, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 } }; static const struct bwn_b206x_chan bwn_b2062_chantable[] = { { 1, 2412, bwn_b2062_chantable_data[0] }, { 2, 2417, bwn_b2062_chantable_data[0] }, { 3, 2422, bwn_b2062_chantable_data[0] }, { 4, 2427, bwn_b2062_chantable_data[0] }, { 5, 2432, bwn_b2062_chantable_data[0] }, { 6, 2437, bwn_b2062_chantable_data[0] }, { 7, 2442, bwn_b2062_chantable_data[0] }, { 8, 2447, bwn_b2062_chantable_data[0] }, { 9, 2452, bwn_b2062_chantable_data[0] }, { 10, 2457, bwn_b2062_chantable_data[0] }, { 11, 2462, bwn_b2062_chantable_data[0] }, { 12, 2467, bwn_b2062_chantable_data[0] }, { 13, 2472, bwn_b2062_chantable_data[0] }, { 14, 2484, bwn_b2062_chantable_data[0] }, { 34, 5170, bwn_b2062_chantable_data[1] }, { 38, 5190, bwn_b2062_chantable_data[2] }, { 42, 5210, bwn_b2062_chantable_data[2] }, { 46, 5230, bwn_b2062_chantable_data[3] }, { 36, 5180, bwn_b2062_chantable_data[4] }, { 40, 5200, bwn_b2062_chantable_data[5] }, { 44, 5220, bwn_b2062_chantable_data[6] }, { 48, 5240, bwn_b2062_chantable_data[3] }, { 52, 5260, bwn_b2062_chantable_data[3] }, { 56, 5280, bwn_b2062_chantable_data[3] }, { 60, 5300, bwn_b2062_chantable_data[7] }, { 64, 5320, bwn_b2062_chantable_data[8] }, { 100, 5500, bwn_b2062_chantable_data[9] }, { 104, 5520, bwn_b2062_chantable_data[10] }, { 108, 5540, bwn_b2062_chantable_data[10] }, { 112, 5560, bwn_b2062_chantable_data[10] }, { 116, 5580, bwn_b2062_chantable_data[11] }, { 120, 5600, bwn_b2062_chantable_data[12] }, { 124, 5620, bwn_b2062_chantable_data[12] }, { 128, 5640, bwn_b2062_chantable_data[12] }, { 132, 5660, bwn_b2062_chantable_data[12] }, { 136, 5680, bwn_b2062_chantable_data[12] }, { 140, 5700, bwn_b2062_chantable_data[12] }, { 149, 5745, bwn_b2062_chantable_data[12] }, { 153, 5765, bwn_b2062_chantable_data[12] }, { 157, 5785, bwn_b2062_chantable_data[12] }, { 161, 5805, bwn_b2062_chantable_data[12] }, { 165, 5825, bwn_b2062_chantable_data[12] }, { 184, 4920, bwn_b2062_chantable_data[13] }, { 188, 4940, bwn_b2062_chantable_data[14] }, { 192, 4960, bwn_b2062_chantable_data[15] }, { 
196, 4980, bwn_b2062_chantable_data[16] }, { 200, 5000, bwn_b2062_chantable_data[17] }, { 204, 5020, bwn_b2062_chantable_data[18] }, { 208, 5040, bwn_b2062_chantable_data[19] }, { 212, 5060, bwn_b2062_chantable_data[20] }, { 216, 5080, bwn_b2062_chantable_data[21] } }; /* for LP PHY */ static const struct bwn_rxcompco bwn_rxcompco_5354[] = { { 1, -66, 15 }, { 2, -66, 15 }, { 3, -66, 15 }, { 4, -66, 15 }, { 5, -66, 15 }, { 6, -66, 15 }, { 7, -66, 14 }, { 8, -66, 14 }, { 9, -66, 14 }, { 10, -66, 14 }, { 11, -66, 14 }, { 12, -66, 13 }, { 13, -66, 13 }, { 14, -66, 13 }, }; /* for LP PHY */ static const struct bwn_rxcompco bwn_rxcompco_r12[] = { { 1, -64, 13 }, { 2, -64, 13 }, { 3, -64, 13 }, { 4, -64, 13 }, { 5, -64, 12 }, { 6, -64, 12 }, { 7, -64, 12 }, { 8, -64, 12 }, { 9, -64, 12 }, { 10, -64, 11 }, { 11, -64, 11 }, { 12, -64, 11 }, { 13, -64, 11 }, { 14, -64, 10 }, { 34, -62, 24 }, { 38, -62, 24 }, { 42, -62, 24 }, { 46, -62, 23 }, { 36, -62, 24 }, { 40, -62, 24 }, { 44, -62, 23 }, { 48, -62, 23 }, { 52, -62, 23 }, { 56, -62, 22 }, { 60, -62, 22 }, { 64, -62, 22 }, { 100, -62, 16 }, { 104, -62, 16 }, { 108, -62, 15 }, { 112, -62, 14 }, { 116, -62, 14 }, { 120, -62, 13 }, { 124, -62, 12 }, { 128, -62, 12 }, { 132, -62, 12 }, { 136, -62, 11 }, { 140, -62, 10 }, { 149, -61, 9 }, { 153, -61, 9 }, { 157, -61, 9 }, { 161, -61, 8 }, { 165, -61, 8 }, { 184, -62, 25 }, { 188, -62, 25 }, { 192, -62, 25 }, { 196, -62, 25 }, { 200, -62, 25 }, { 204, -62, 25 }, { 208, -62, 25 }, { 212, -62, 25 }, { 216, -62, 26 }, }; static const struct bwn_rxcompco bwn_rxcompco_r2 = { 0, -64, 0 }; static const uint8_t bwn_tab_sigsq_tbl[] = { 0xde, 0xdc, 0xda, 0xd8, 0xd6, 0xd4, 0xd2, 0xcf, 0xcd, 0xca, 0xc7, 0xc4, 0xc1, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0x00, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd, 0xcf, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, }; static const uint8_t bwn_tab_pllfrac_tbl[] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, }; static const uint16_t bwn_tabl_iqlocal_tbl[] = { 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const uint16_t bwn_tab_noise_g1[] = BWN_TAB_NOISE_G1; static const uint16_t bwn_tab_noise_g2[] = BWN_TAB_NOISE_G2; static const uint16_t bwn_tab_noisescale_g1[] = BWN_TAB_NOISESCALE_G1; static const uint16_t bwn_tab_noisescale_g2[] = BWN_TAB_NOISESCALE_G2; static const uint16_t bwn_tab_noisescale_g3[] = BWN_TAB_NOISESCALE_G3; const uint8_t bwn_bitrev_table[256] = BWN_BITREV_TABLE; #define VENDOR_LED_ACT(vendor) \ { \ .vid = PCI_VENDOR_##vendor, \ .led_act = { BWN_VENDOR_LED_ACT_##vendor } \ } static const struct { 
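	/*
	 * Per-board LED wiring: the PCI subsystem vendor ID selects a
	 * led_act mapping; boards from vendors not listed here fall back
	 * to bwn_default_led_act below.
	 */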
uint16_t vid; uint8_t led_act[BWN_LED_MAX]; } bwn_vendor_led_act[] = { VENDOR_LED_ACT(COMPAQ), VENDOR_LED_ACT(ASUSTEK) }; static const uint8_t bwn_default_led_act[BWN_LED_MAX] = { BWN_VENDOR_LED_ACT_DEFAULT }; #undef VENDOR_LED_ACT static const struct { int on_dur; int off_dur; } bwn_led_duration[109] = { [0] = { 400, 100 }, [2] = { 150, 75 }, [4] = { 90, 45 }, [11] = { 66, 34 }, [12] = { 53, 26 }, [18] = { 42, 21 }, [22] = { 35, 17 }, [24] = { 32, 16 }, [36] = { 21, 10 }, [48] = { 16, 8 }, [72] = { 11, 5 }, [96] = { 9, 4 }, [108] = { 7, 3 } }; static const uint16_t bwn_wme_shm_offsets[] = { [0] = BWN_WME_BESTEFFORT, [1] = BWN_WME_BACKGROUND, [2] = BWN_WME_VOICE, [3] = BWN_WME_VIDEO, }; static const struct siba_devid bwn_devs[] = { SIBA_DEV(BROADCOM, 80211, 5, "Revision 5"), SIBA_DEV(BROADCOM, 80211, 6, "Revision 6"), SIBA_DEV(BROADCOM, 80211, 7, "Revision 7"), SIBA_DEV(BROADCOM, 80211, 9, "Revision 9"), SIBA_DEV(BROADCOM, 80211, 10, "Revision 10"), SIBA_DEV(BROADCOM, 80211, 11, "Revision 11"), SIBA_DEV(BROADCOM, 80211, 13, "Revision 13"), SIBA_DEV(BROADCOM, 80211, 15, "Revision 15"), SIBA_DEV(BROADCOM, 80211, 16, "Revision 16") }; static int bwn_probe(device_t dev) { int i; for (i = 0; i < nitems(bwn_devs); i++) { if (siba_get_vendor(dev) == bwn_devs[i].sd_vendor && siba_get_device(dev) == bwn_devs[i].sd_device && siba_get_revid(dev) == bwn_devs[i].sd_rev) return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int bwn_attach(device_t dev) { struct bwn_mac *mac; struct bwn_softc *sc = device_get_softc(dev); int error, i, msic, reg; sc->sc_dev = dev; #ifdef BWN_DEBUG sc->sc_debug = bwn_debug; #endif if ((sc->sc_flags & BWN_FLAG_ATTACHED) == 0) { bwn_attach_pre(sc); bwn_sprom_bugfixes(dev); sc->sc_flags |= BWN_FLAG_ATTACHED; } if (!TAILQ_EMPTY(&sc->sc_maclist)) { if (siba_get_pci_device(dev) != 0x4313 && siba_get_pci_device(dev) != 0x431a && siba_get_pci_device(dev) != 0x4321) { device_printf(sc->sc_dev, "skip 802.11 cores\n"); return (ENODEV); } } mac = malloc(sizeof(*mac), M_DEVBUF, M_WAITOK | M_ZERO); mac->mac_sc = sc; mac->mac_status = BWN_MAC_STATUS_UNINIT; if (bwn_bfp != 0) mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP; TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac); TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac); TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac); error = bwn_attach_core(mac); if (error) goto fail0; bwn_led_attach(mac); device_printf(sc->sc_dev, "WLAN (chipid %#x rev %u) " "PHY (analog %d type %d rev %d) RADIO (manuf %#x ver %#x rev %d)\n", siba_get_chipid(sc->sc_dev), siba_get_revid(sc->sc_dev), mac->mac_phy.analog, mac->mac_phy.type, mac->mac_phy.rev, mac->mac_phy.rf_manuf, mac->mac_phy.rf_ver, mac->mac_phy.rf_rev); if (mac->mac_flags & BWN_MAC_FLAG_DMA) device_printf(sc->sc_dev, "DMA (%d bits)\n", mac->mac_method.dma.dmatype); else device_printf(sc->sc_dev, "PIO\n"); /* * setup PCI resources and interrupt. 
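 * MSI is attempted only when the device exposes a PCI Express
 * capability, exactly BWN_MSI_MESSAGES messages are available and
 * bwn_msi_disable is not set; otherwise the shareable legacy INTx
 * spec (bwn_res_spec_legacy) is used.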
*/ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { msic = pci_msi_count(dev); if (bootverbose) device_printf(sc->sc_dev, "MSI count : %d\n", msic); } else msic = 0; mac->mac_intr_spec = bwn_res_spec_legacy; if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) { if (pci_alloc_msi(dev, &msic) == 0) { device_printf(sc->sc_dev, "Using %d MSI messages\n", msic); mac->mac_intr_spec = bwn_res_spec_msi; mac->mac_msi = 1; } } error = bus_alloc_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (error) { device_printf(sc->sc_dev, "couldn't allocate IRQ resources (%d)\n", error); goto fail1; } if (mac->mac_msi == 0) error = bus_setup_intr(dev, mac->mac_res_irq[0], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[0]); else { for (i = 0; i < BWN_MSI_MESSAGES; i++) { error = bus_setup_intr(dev, mac->mac_res_irq[i], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[i]); if (error != 0) { device_printf(sc->sc_dev, "couldn't set up interrupt (%d)\n", error); break; } } } TAILQ_INSERT_TAIL(&sc->sc_maclist, mac, mac_list); /* * call the attach-post routine */ if ((sc->sc_flags & BWN_FLAG_ATTACHED) != 0) bwn_attach_post(sc); return (0); fail1: if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) pci_release_msi(dev); fail0: free(mac, M_DEVBUF); return (error); } static int bwn_is_valid_ether_addr(uint8_t *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) return (FALSE); return (TRUE); } static int bwn_attach_post(struct bwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ ; ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */ IEEE80211_ADDR_COPY(ic->ic_macaddr, bwn_is_valid_ether_addr(siba_sprom_get_mac_80211a(sc->sc_dev)) ? siba_sprom_get_mac_80211a(sc->sc_dev) : siba_sprom_get_mac_80211bg(sc->sc_dev)); /* call MI attach routine.
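 * ieee80211_ifattach() consumes ic_macaddr and the capability flags
 * set above; the net80211 methods the driver overrides are patched in
 * right afterwards, before the device is announced.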
*/ ieee80211_ifattach(ic); ic->ic_headroom = sizeof(struct bwn_txhdr); /* override default methods */ ic->ic_raw_xmit = bwn_raw_xmit; ic->ic_updateslot = bwn_updateslot; ic->ic_update_promisc = bwn_update_promisc; ic->ic_wme.wme_update = bwn_wme_update; ic->ic_scan_start = bwn_scan_start; ic->ic_scan_end = bwn_scan_end; ic->ic_set_channel = bwn_set_channel; ic->ic_vap_create = bwn_vap_create; ic->ic_vap_delete = bwn_vap_delete; ic->ic_transmit = bwn_transmit; ic->ic_parent = bwn_parent; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), BWN_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), BWN_RX_RADIOTAP_PRESENT); bwn_sysctl_node(sc); if (bootverbose) ieee80211_announce(ic); return (0); } static void bwn_phy_detach(struct bwn_mac *mac) { if (mac->mac_phy.detach != NULL) mac->mac_phy.detach(mac); } static int bwn_detach(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); struct bwn_mac *mac = sc->sc_curmac; struct ieee80211com *ic = &sc->sc_ic; int i; sc->sc_flags |= BWN_FLAG_INVALID; if (device_is_attached(sc->sc_dev)) { BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); bwn_dma_free(mac); callout_drain(&sc->sc_led_blink_ch); callout_drain(&sc->sc_rfswitch_ch); callout_drain(&sc->sc_task_ch); callout_drain(&sc->sc_watchdog_ch); bwn_phy_detach(mac); ieee80211_draintask(ic, &mac->mac_hwreset); ieee80211_draintask(ic, &mac->mac_txpower); ieee80211_ifdetach(ic); } taskqueue_drain(sc->sc_tq, &mac->mac_intrtask); taskqueue_free(sc->sc_tq); for (i = 0; i < BWN_MSI_MESSAGES; i++) { if (mac->mac_intrhand[i] != NULL) { bus_teardown_intr(dev, mac->mac_res_irq[i], mac->mac_intrhand[i]); mac->mac_intrhand[i] = NULL; } } bus_release_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (mac->mac_msi != 0) pci_release_msi(dev); mbufq_drain(&sc->sc_snd); BWN_LOCK_DESTROY(sc); return (0); } static void bwn_attach_pre(struct bwn_softc *sc) { BWN_LOCK_INIT(sc); TAILQ_INIT(&sc->sc_maclist); callout_init_mtx(&sc->sc_rfswitch_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_task_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); } static void bwn_sprom_bugfixes(device_t dev) { #define BWN_ISDEV(_vendor, _device, _subvendor, _subdevice) \ ((siba_get_pci_vendor(dev) == PCI_VENDOR_##_vendor) && \ (siba_get_pci_device(dev) == _device) && \ (siba_get_pci_subvendor(dev) == PCI_VENDOR_##_subvendor) && \ (siba_get_pci_subdevice(dev) == _subdevice)) if (siba_get_pci_subvendor(dev) == PCI_VENDOR_APPLE && siba_get_pci_subdevice(dev) == 0x4e && siba_get_pci_revid(dev) > 0x40) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_PACTRL); if (siba_get_pci_subvendor(dev) == SIBA_BOARDVENDOR_DELL && siba_get_chipid(dev) == 0x4301 && siba_get_pci_revid(dev) == 0x74) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_BTCOEXIST); if (siba_get_type(dev) == SIBA_TYPE_PCI) { if (BWN_ISDEV(BROADCOM, 0x4318, ASUSTEK, 0x100f) || BWN_ISDEV(BROADCOM, 0x4320, DELL, 0x0003) || BWN_ISDEV(BROADCOM, 0x4320, HP, 0x12f8) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0013) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0014) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0015) || BWN_ISDEV(BROADCOM, 0x4320, MOTOROLA, 0x7010)) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) & ~BWN_BFL_BTCOEXIST); } #undef BWN_ISDEV } static void bwn_parent(struct 
ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; int startall = 0; BWN_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { bwn_init(sc); startall = 1; } else bwn_update_promisc(ic); } else if (sc->sc_flags & BWN_FLAG_RUNNING) bwn_stop(sc); BWN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int bwn_transmit(struct ieee80211com *ic, struct mbuf *m) { struct bwn_softc *sc = ic->ic_softc; int error; BWN_LOCK(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { BWN_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { BWN_UNLOCK(sc); return (error); } bwn_start(sc); BWN_UNLOCK(sc); return (0); } static void bwn_start(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct ieee80211_key *k; struct mbuf *m; BWN_ASSERT_LOCKED(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac == NULL || mac->mac_status < BWN_MAC_STATUS_STARTED) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { if (bwn_tx_isfull(sc, m)) break; ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ni == NULL) { device_printf(sc->sc_dev, "unexpected NULL ni\n"); m_freem(m); counter_u64_add(sc->sc_ic.ic_oerrors, 1); continue; } wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); m_freem(m); continue; } } wh = NULL; /* Catch any invalid use */ if (bwn_tx_start(sc, ni, m) != 0) { if (ni != NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); } continue; } sc->sc_watchdog_timer = 5; } } static int bwn_tx_isfull(struct bwn_softc *sc, struct mbuf *m) { struct bwn_dma_ring *dr; struct bwn_mac *mac = sc->sc_curmac; struct bwn_pio_txqueue *tq; int pktlen = roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); BWN_ASSERT_LOCKED(sc); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { dr = bwn_dma_select(mac, M_WME_GETAC(m)); if (dr->dr_stop == 1 || bwn_dma_freeslot(dr) < BWN_TX_SLOTS_PER_FRAME) { dr->dr_stop = 1; goto full; } } else { tq = bwn_pio_select(mac, M_WME_GETAC(m)); if (tq->tq_free == 0 || pktlen > tq->tq_size || pktlen > (tq->tq_size - tq->tq_used)) goto full; } return (0); full: mbufq_prepend(&sc->sc_snd, m); return (1); } static int bwn_tx_start(struct bwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_mac *mac = sc->sc_curmac; int error; BWN_ASSERT_LOCKED(sc); if (m->m_pkthdr.len < IEEE80211_MIN_LEN || mac == NULL) { m_freem(m); return (ENXIO); } error = (mac->mac_flags & BWN_MAC_FLAG_DMA) ? 
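/* DMA-capable cores use the descriptor rings, the rest the PIO queues. */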
bwn_dma_tx_start(mac, ni, m) : bwn_pio_tx_start(mac, ni, m); if (error) { m_freem(m); return (error); } return (0); } static int bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_pio_txpkt *tp; struct bwn_pio_txqueue *tq = bwn_pio_select(mac, M_WME_GETAC(m)); struct bwn_softc *sc = mac->mac_sc; struct bwn_txhdr txhdr; struct mbuf *m_new; uint32_t ctl32; int error; uint16_t ctl16; BWN_ASSERT_LOCKED(sc); /* XXX TODO send packets after DTIM */ KASSERT(!TAILQ_EMPTY(&tq->tq_pktlist), ("%s: fail", __func__)); tp = TAILQ_FIRST(&tq->tq_pktlist); tp->tp_ni = ni; tp->tp_m = m; error = bwn_set_txhdr(mac, ni, m, &txhdr, BWN_PIO_COOKIE(tq, tp)); if (error) { device_printf(sc->sc_dev, "tx fail\n"); return (error); } TAILQ_REMOVE(&tq->tq_pktlist, tp, tp_list); tq->tq_used += roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free--; if (siba_get_revid(sc->sc_dev) >= 8) { /* * XXX please remove m_defrag(9) */ m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); return (ENOBUFS); } if (m_new->m_next != NULL) device_printf(sc->sc_dev, "TODO: fragmented packets for PIO\n"); tp->tp_m = m_new; /* send HEADER */ ctl32 = bwn_pio_write_multi_4(mac, tq, (BWN_PIO_READ_4(mac, tq, BWN_PIO8_TXCTL) | BWN_PIO8_TXCTL_FRAMEREADY) & ~BWN_PIO8_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); /* send BODY */ ctl32 = bwn_pio_write_multi_4(mac, tq, ctl32, mtod(m_new, const void *), m_new->m_pkthdr.len); bwn_pio_write_4(mac, tq, BWN_PIO_TXCTL, ctl32 | BWN_PIO8_TXCTL_EOF); } else { ctl16 = bwn_pio_write_multi_2(mac, tq, (bwn_pio_read_2(mac, tq, BWN_PIO_TXCTL) | BWN_PIO_TXCTL_FRAMEREADY) & ~BWN_PIO_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); ctl16 = bwn_pio_write_mbuf_2(mac, tq, ctl16, m); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl16 | BWN_PIO_TXCTL_EOF); } return (0); } static struct bwn_pio_txqueue * bwn_pio_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (&mac->mac_method.pio.wme[WME_AC_BE]); switch (prio) { case 0: return (&mac->mac_method.pio.wme[WME_AC_BE]); case 1: return (&mac->mac_method.pio.wme[WME_AC_BK]); case 2: return (&mac->mac_method.pio.wme[WME_AC_VI]); case 3: return (&mac->mac_method.pio.wme[WME_AC_VO]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { #define BWN_GET_TXHDRCACHE(slot) \ &(txhdr_cache[(slot / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac)]) struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr = bwn_dma_select(mac, M_WME_GETAC(m)); struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; uint8_t *txhdr_cache = (uint8_t *)dr->dr_txhdr_cache; int error, slot, backup[2] = { dr->dr_curslot, dr->dr_usedslot }; BWN_ASSERT_LOCKED(sc); KASSERT(!dr->dr_stop, ("%s:%d: fail", __func__, __LINE__)); /* XXX send after DTIM */ slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_HEADER, ("%s:%d: fail", __func__, __LINE__)); error = bwn_set_txhdr(dr->dr_mac, ni, m, (struct bwn_txhdr *)BWN_GET_TXHDRCACHE(slot), BWN_DMA_COOKIE(dr, slot)); if (error) goto fail; error = bus_dmamap_load(dr->dr_txring_dtag, mt->mt_dmap, BWN_GET_TXHDRCACHE(slot), BWN_HDRSIZE(mac), bwn_dma_ring_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto
fail; } bus_dmamap_sync(dr->dr_txring_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, BWN_HDRSIZE(mac), 1, 0, 0); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_BODY && mt->mt_islast == 1, ("%s:%d: fail", __func__, __LINE__)); mt->mt_m = m; mt->mt_ni = ni; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto fail; } if (error) { /* error == EFBIG */ struct mbuf *m_new; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); error = ENOBUFS; goto fail; } else { m = m_new; } mt->mt_m = m; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (2) %d\n", __func__, error); goto fail; } } bus_dmamap_sync(dma->txbuf_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, m->m_pkthdr.len, 0, 1, 1); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); /* XXX send after DTIM */ dr->start_transfer(dr, bwn_dma_nextslot(dr, slot)); return (0); fail: dr->dr_curslot = backup[0]; dr->dr_usedslot = backup[1]; return (error); #undef BWN_GET_TXHDRCACHE } static void bwn_watchdog(void *arg) { struct bwn_softc *sc = arg; if (sc->sc_watchdog_timer != 0 && --sc->sc_watchdog_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(sc->sc_ic.ic_oerrors, 1); } callout_schedule(&sc->sc_watchdog_ch, hz); } static int bwn_attach_core(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error, have_bg = 0, have_a = 0; uint32_t high; KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("unsupported revision %d", siba_get_revid(sc->sc_dev))); siba_powerup(sc->sc_dev, 0); high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); bwn_reset_core(mac, (high & BWN_TGSHIGH_HAVE_2GHZ) ? BWN_TGSLOW_SUPPORT_G : 0); error = bwn_phy_getinfo(mac, high); if (error) goto fail; have_a = (high & BWN_TGSHIGH_HAVE_5GHZ) ? 1 : 0; have_bg = (high & BWN_TGSHIGH_HAVE_2GHZ) ? 
1 : 0; if (siba_get_pci_device(sc->sc_dev) != 0x4312 && siba_get_pci_device(sc->sc_dev) != 0x4319 && siba_get_pci_device(sc->sc_dev) != 0x4324) { have_a = have_bg = 0; if (mac->mac_phy.type == BWN_PHYTYPE_A) have_a = 1; else if (mac->mac_phy.type == BWN_PHYTYPE_G || mac->mac_phy.type == BWN_PHYTYPE_N || mac->mac_phy.type == BWN_PHYTYPE_LP) have_bg = 1; else KASSERT(0 == 1, ("%s: unknown phy type (%d)", __func__, mac->mac_phy.type)); } /* XXX turns off PHY A because it's not supported */ if (mac->mac_phy.type != BWN_PHYTYPE_LP && mac->mac_phy.type != BWN_PHYTYPE_N) { have_a = 0; have_bg = 1; } if (mac->mac_phy.type == BWN_PHYTYPE_G) { mac->mac_phy.attach = bwn_phy_g_attach; mac->mac_phy.detach = bwn_phy_g_detach; mac->mac_phy.prepare_hw = bwn_phy_g_prepare_hw; mac->mac_phy.init_pre = bwn_phy_g_init_pre; mac->mac_phy.init = bwn_phy_g_init; mac->mac_phy.exit = bwn_phy_g_exit; mac->mac_phy.phy_read = bwn_phy_g_read; mac->mac_phy.phy_write = bwn_phy_g_write; mac->mac_phy.rf_read = bwn_phy_g_rf_read; mac->mac_phy.rf_write = bwn_phy_g_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_g_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_g_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_switch_analog; mac->mac_phy.switch_channel = bwn_phy_g_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_g_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_g_set_antenna; mac->mac_phy.set_im = bwn_phy_g_im; mac->mac_phy.recalc_txpwr = bwn_phy_g_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_g_set_txpwr; mac->mac_phy.task_15s = bwn_phy_g_task_15s; mac->mac_phy.task_60s = bwn_phy_g_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_LP) { mac->mac_phy.init_pre = bwn_phy_lp_init_pre; mac->mac_phy.init = bwn_phy_lp_init; mac->mac_phy.phy_read = bwn_phy_lp_read; mac->mac_phy.phy_write = bwn_phy_lp_write; mac->mac_phy.phy_maskset = bwn_phy_lp_maskset; mac->mac_phy.rf_read = bwn_phy_lp_rf_read; mac->mac_phy.rf_write = bwn_phy_lp_rf_write; mac->mac_phy.rf_onoff = bwn_phy_lp_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_lp_switch_analog; mac->mac_phy.switch_channel = bwn_phy_lp_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_lp_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_lp_set_antenna; mac->mac_phy.task_60s = bwn_phy_lp_task_60s; } else { device_printf(sc->sc_dev, "unsupported PHY type (%d)\n", mac->mac_phy.type); error = ENXIO; goto fail; } mac->mac_phy.gmode = have_bg; if (mac->mac_phy.attach != NULL) { error = mac->mac_phy.attach(mac); if (error) { device_printf(sc->sc_dev, "failed to attach PHY\n"); goto fail; } } bwn_reset_core(mac, have_bg ?
BWN_TGSLOW_SUPPORT_G : 0); error = bwn_chiptest(mac); if (error) goto fail; error = bwn_setup_channels(mac, have_bg, have_a); if (error) { device_printf(sc->sc_dev, "failed to setup channels\n"); goto fail; } if (sc->sc_curmac == NULL) sc->sc_curmac = mac; error = bwn_dma_attach(mac); if (error != 0) { device_printf(sc->sc_dev, "failed to initialize DMA\n"); goto fail; } mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); fail: siba_powerdown(sc->sc_dev); return (error); } static void bwn_reset_core(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; uint32_t low, ctl; flags |= (BWN_TGSLOW_PHYCLOCK_ENABLE | BWN_TGSLOW_PHYRESET); siba_dev_up(sc->sc_dev, flags); DELAY(2000); low = (siba_read_4(sc->sc_dev, SIBA_TGSLOW) | SIBA_TGSLOW_FGC) & ~BWN_TGSLOW_PHYRESET; siba_write_4(sc->sc_dev, SIBA_TGSLOW, low); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(1000); siba_write_4(sc->sc_dev, SIBA_TGSLOW, low & ~SIBA_TGSLOW_FGC); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(1000); if (mac->mac_phy.switch_analog != NULL) mac->mac_phy.switch_analog(mac, 1); ctl = BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GMODE; if (flags & BWN_TGSLOW_SUPPORT_G) ctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, ctl | BWN_MACCTL_IHR_ON); } static int bwn_phy_getinfo(struct bwn_mac *mac, int tgshigh) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; /* PHY */ tmp = BWN_READ_2(mac, BWN_PHYVER); phy->gmode = (tgshigh & BWN_TGSHIGH_HAVE_2GHZ) ? 1 : 0; phy->rf_on = 1; phy->analog = (tmp & BWN_PHYVER_ANALOG) >> 12; phy->type = (tmp & BWN_PHYVER_TYPE) >> 8; phy->rev = (tmp & BWN_PHYVER_VERSION); if ((phy->type == BWN_PHYTYPE_A && phy->rev >= 4) || (phy->type == BWN_PHYTYPE_B && phy->rev != 2 && phy->rev != 4 && phy->rev != 6 && phy->rev != 7) || (phy->type == BWN_PHYTYPE_G && phy->rev > 9) || (phy->type == BWN_PHYTYPE_N && phy->rev > 4) || (phy->type == BWN_PHYTYPE_LP && phy->rev > 2)) goto unsupphy; /* RADIO */ if (siba_get_chipid(sc->sc_dev) == 0x4317) { if (siba_get_chiprev(sc->sc_dev) == 0) tmp = 0x3205017f; else if (siba_get_chiprev(sc->sc_dev) == 1) tmp = 0x4205017f; else tmp = 0x5205017f; } else { BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp = BWN_READ_2(mac, BWN_RFDATALO); BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp |= (uint32_t)BWN_READ_2(mac, BWN_RFDATAHI) << 16; } phy->rf_rev = (tmp & 0xf0000000) >> 28; phy->rf_ver = (tmp & 0x0ffff000) >> 12; phy->rf_manuf = (tmp & 0x00000fff); if (phy->rf_manuf != 0x17f) /* 0x17f is broadcom */ goto unsupradio; if ((phy->type == BWN_PHYTYPE_A && (phy->rf_ver != 0x2060 || phy->rf_rev != 1 || phy->rf_manuf != 0x17f)) || (phy->type == BWN_PHYTYPE_B && (phy->rf_ver & 0xfff0) != 0x2050) || (phy->type == BWN_PHYTYPE_G && phy->rf_ver != 0x2050) || (phy->type == BWN_PHYTYPE_N && phy->rf_ver != 0x2055 && phy->rf_ver != 0x2056) || (phy->type == BWN_PHYTYPE_LP && phy->rf_ver != 0x2062 && phy->rf_ver != 0x2063)) goto unsupradio; return (0); unsupphy: device_printf(sc->sc_dev, "unsupported PHY (type %#x, rev %#x, " "analog %#x)\n", phy->type, phy->rev, phy->analog); return (ENXIO); unsupradio: device_printf(sc->sc_dev, "unsupported radio (manuf %#x, ver %#x, " "rev %#x)\n", phy->rf_manuf, phy->rf_ver, phy->rf_rev); return (ENXIO); } static int bwn_chiptest(struct bwn_mac *mac) { #define TESTVAL0 0x55aaaa55 #define TESTVAL1 0xaa5555aa struct bwn_softc *sc = mac->mac_sc; uint32_t v, backup; BWN_LOCK(sc); backup = bwn_shm_read_4(mac, BWN_SHARED, 0); bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL0); if (bwn_shm_read_4(mac, 
BWN_SHARED, 0) != TESTVAL0) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL1); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL1) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, backup); if ((siba_get_revid(sc->sc_dev) >= 3) && (siba_get_revid(sc->sc_dev) <= 10)) { BWN_WRITE_2(mac, BWN_TSF_CFP_START, 0xaaaa); BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0xccccbbbb); if (BWN_READ_2(mac, BWN_TSF_CFP_START_LOW) != 0xbbbb) goto error; if (BWN_READ_2(mac, BWN_TSF_CFP_START_HIGH) != 0xcccc) goto error; } BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0); v = BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_GMODE; if (v != (BWN_MACCTL_GMODE | BWN_MACCTL_IHR_ON)) goto error; BWN_UNLOCK(sc); return (0); error: BWN_UNLOCK(sc); device_printf(sc->sc_dev, "failed to validate the chipaccess\n"); return (ENODEV); } #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT | IEEE80211_CHAN_G) #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT | IEEE80211_CHAN_A) static int bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; if (have_bg) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_bg, IEEE80211_CHAN_G); if (mac->mac_phy.type == BWN_PHYTYPE_N) { if (have_a) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_n, IEEE80211_CHAN_HTA); } else { if (have_a) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_a, IEEE80211_CHAN_A); } mac->mac_phy.supports_2ghz = have_bg; mac->mac_phy.supports_5ghz = have_a; return (ic->ic_nchans == 0 ? ENXIO : 0); } static uint32_t bwn_shm_read_4(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); ret <<= 16; bwn_shm_ctlword(mac, way, (offset >> 2) + 1); ret |= BWN_READ_2(mac, BWN_SHM_DATA); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_4(mac, BWN_SHM_DATA); out: return (ret); } static uint16_t bwn_shm_read_2(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint16_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_2(mac, BWN_SHM_DATA); out: return (ret); } static void bwn_shm_ctlword(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t control; control = way; control <<= 16; control |= offset; BWN_WRITE_4(mac, BWN_SHM_CONTROL, control); } static void bwn_shm_write_4(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint32_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, (value >> 16) & 0xffff); bwn_shm_ctlword(mac, way, (offset >> 2) + 1); BWN_WRITE_2(mac, BWN_SHM_DATA, value & 0xffff); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_4(mac, BWN_SHM_DATA, value); } static void bwn_shm_write_2(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint16_t value) { 
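	/*
	 * 16-bit SHM write.  As in the accessors above, the control word is
	 * (way << 16) | offset; a shared-memory offset that is not 32-bit
	 * aligned is reached through the BWN_SHM_DATA_UNALIGNED port.
	 */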
BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, value); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_2(mac, BWN_SHM_DATA, value); } static void bwn_addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow) { c->ic_freq = freq; c->ic_flags = flags; c->ic_ieee = ieee; c->ic_minpower = 0; c->ic_maxpower = 2 * txpow; c->ic_maxregpower = txpow; } static void bwn_addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, const struct bwn_channelinfo *ci, int flags) { struct ieee80211_channel *c; int i; c = &chans[*nchans]; for (i = 0; i < ci->nchannels; i++) { const struct bwn_channel *hc; hc = &ci->channels[i]; if (*nchans >= maxchans) break; bwn_addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow); c++, (*nchans)++; if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) { /* g channels have a separate b-only entry */ if (*nchans >= maxchans) break; c[0] = c[-1]; c[-1].ic_flags = IEEE80211_CHAN_B; c++, (*nchans)++; } if (flags == IEEE80211_CHAN_HTG) { /* HT g channels have a separate g-only entry */ if (*nchans >= maxchans) break; c[-1].ic_flags = IEEE80211_CHAN_G; c[0] = c[-1]; c[0].ic_flags &= ~IEEE80211_CHAN_HT; c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */ c++, (*nchans)++; } if (flags == IEEE80211_CHAN_HTA) { /* HT a channels have a separate a-only entry */ if (*nchans >= maxchans) break; c[-1].ic_flags = IEEE80211_CHAN_A; c[0] = c[-1]; c[0].ic_flags &= ~IEEE80211_CHAN_HT; c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */ c++, (*nchans)++; } } } static int bwn_phy_g_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; unsigned int i; int16_t pab0, pab1, pab2; static int8_t bwn_phy_g_tssi2dbm_table[] = BWN_PHY_G_TSSI2DBM_TABLE; int8_t bg; bg = (int8_t)siba_sprom_get_tssi_bg(sc->sc_dev); pab0 = (int16_t)siba_sprom_get_pa0b0(sc->sc_dev); pab1 = (int16_t)siba_sprom_get_pa0b1(sc->sc_dev); pab2 = (int16_t)siba_sprom_get_pa0b2(sc->sc_dev); if ((siba_get_chipid(sc->sc_dev) == 0x4301) && (phy->rf_ver != 0x2050)) device_printf(sc->sc_dev, "not supported anymore\n"); pg->pg_flags = 0; if (pab0 == 0 || pab1 == 0 || pab2 == 0 || pab0 == -1 || pab1 == -1 || pab2 == -1) { pg->pg_idletssi = 52; pg->pg_tssi2dbm = bwn_phy_g_tssi2dbm_table; return (0); } pg->pg_idletssi = (bg == 0 || bg == -1) ?
62 : bg; pg->pg_tssi2dbm = (uint8_t *)malloc(64, M_DEVBUF, M_NOWAIT | M_ZERO); if (pg->pg_tssi2dbm == NULL) { device_printf(sc->sc_dev, "failed to allocate buffer\n"); return (ENOMEM); } for (i = 0; i < 64; i++) { int32_t m1, m2, f, q, delta; int8_t j = 0; m1 = BWN_TSSI2DBM(16 * pab0 + i * pab1, 32); m2 = MAX(BWN_TSSI2DBM(32768 + i * pab2, 256), 1); f = 256; do { if (j > 15) { device_printf(sc->sc_dev, "failed to generate tssi2dBm\n"); free(pg->pg_tssi2dbm, M_DEVBUF); return (ENOMEM); } q = BWN_TSSI2DBM(f * 4096 - BWN_TSSI2DBM(m2 * f, 16) * f, 2048); delta = abs(q - f); f = q; j++; } while (delta >= 2); pg->pg_tssi2dbm[i] = MIN(MAX(BWN_TSSI2DBM(m1 * f, 8192), -127), 128); } pg->pg_flags |= BWN_PHY_G_FLAG_TSSITABLE_ALLOC; return (0); } static void bwn_phy_g_detach(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; if (pg->pg_flags & BWN_PHY_G_FLAG_TSSITABLE_ALLOC) { free(pg->pg_tssi2dbm, M_DEVBUF); pg->pg_tssi2dbm = NULL; } pg->pg_flags = 0; } static void bwn_phy_g_init_pre(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; void *tssi2dbm; int idletssi; unsigned int i; tssi2dbm = pg->pg_tssi2dbm; idletssi = pg->pg_idletssi; memset(pg, 0, sizeof(*pg)); pg->pg_tssi2dbm = tssi2dbm; pg->pg_idletssi = idletssi; memset(pg->pg_minlowsig, 0xff, sizeof(pg->pg_minlowsig)); for (i = 0; i < N(pg->pg_nrssi); i++) pg->pg_nrssi[i] = -1000; for (i = 0; i < N(pg->pg_nrssi_lt); i++) pg->pg_nrssi_lt[i] = i; pg->pg_lofcal = 0xffff; pg->pg_initval = 0xffff; pg->pg_immode = BWN_IMMODE_NONE; pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_UNKNOWN; pg->pg_avgtssi = 0xff; pg->pg_loctl.tx_bias = 0xff; TAILQ_INIT(&pg->pg_loctl.calib_list); } static int bwn_phy_g_prepare_hw(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; static const struct bwn_rfatt rfatt0[] = { { 3, 0 }, { 1, 0 }, { 5, 0 }, { 7, 0 }, { 9, 0 }, { 2, 0 }, { 0, 0 }, { 4, 0 }, { 6, 0 }, { 8, 0 }, { 1, 1 }, { 2, 1 }, { 3, 1 }, { 4, 1 } }; static const struct bwn_rfatt rfatt1[] = { { 2, 1 }, { 4, 1 }, { 6, 1 }, { 8, 1 }, { 10, 1 }, { 12, 1 }, { 14, 1 } }; static const struct bwn_rfatt rfatt2[] = { { 0, 1 }, { 2, 1 }, { 4, 1 }, { 6, 1 }, { 8, 1 }, { 9, 1 }, { 9, 1 } }; static const struct bwn_bbatt bbatt_0[] = { { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 6 }, { 7 }, { 8 } }; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); if (phy->rf_ver == 0x2050 && phy->rf_rev < 6) pg->pg_bbatt.att = 0; else pg->pg_bbatt.att = 2; /* prepare Radio Attenuation */ pg->pg_rfatt.padmix = 0; if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G) { if (siba_get_pci_revid(sc->sc_dev) < 0x43) { pg->pg_rfatt.att = 2; goto done; } else if (siba_get_pci_revid(sc->sc_dev) < 0x51) { pg->pg_rfatt.att = 3; goto done; } } if (phy->type == BWN_PHYTYPE_A) { pg->pg_rfatt.att = 0x60; goto done; } switch (phy->rf_ver) { case 0x2050: switch (phy->rf_rev) { case 0: pg->pg_rfatt.att = 5; goto done; case 1: if (phy->type == BWN_PHYTYPE_G) { if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G && siba_get_pci_revid(sc->sc_dev) >= 30) pg->pg_rfatt.att = 3; else if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) pg->pg_rfatt.att = 3; else pg->pg_rfatt.att = 1; } else { if 
(siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G && siba_get_pci_revid(sc->sc_dev) >= 30) pg->pg_rfatt.att = 7; else pg->pg_rfatt.att = 6; } goto done; case 2: if (phy->type == BWN_PHYTYPE_G) { if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G && siba_get_pci_revid(sc->sc_dev) >= 30) pg->pg_rfatt.att = 3; else if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) pg->pg_rfatt.att = 5; else if (siba_get_chipid(sc->sc_dev) == 0x4320) pg->pg_rfatt.att = 4; else pg->pg_rfatt.att = 3; } else pg->pg_rfatt.att = 6; goto done; case 3: pg->pg_rfatt.att = 5; goto done; case 4: case 5: pg->pg_rfatt.att = 1; goto done; case 6: case 7: pg->pg_rfatt.att = 5; goto done; case 8: pg->pg_rfatt.att = 0xa; pg->pg_rfatt.padmix = 1; goto done; case 9: default: pg->pg_rfatt.att = 5; goto done; } break; case 0x2053: switch (phy->rf_rev) { case 1: pg->pg_rfatt.att = 6; goto done; } break; } pg->pg_rfatt.att = 5; done: pg->pg_txctl = (bwn_phy_g_txctl(mac) << 4); if (!bwn_has_hwpctl(mac)) { lo->rfatt.array = rfatt0; lo->rfatt.len = N(rfatt0); lo->rfatt.min = 0; lo->rfatt.max = 9; goto genbbatt; } if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) { lo->rfatt.array = rfatt1; lo->rfatt.len = N(rfatt1); lo->rfatt.min = 0; lo->rfatt.max = 14; goto genbbatt; } lo->rfatt.array = rfatt2; lo->rfatt.len = N(rfatt2); lo->rfatt.min = 0; lo->rfatt.max = 9; genbbatt: lo->bbatt.array = bbatt_0; lo->bbatt.len = N(bbatt_0); lo->bbatt.min = 0; lo->bbatt.max = 8; BWN_READ_4(mac, BWN_MACCTL); if (phy->rev == 1) { phy->gmode = 0; bwn_reset_core(mac, 0); bwn_phy_g_init_sub(mac); phy->gmode = 1; bwn_reset_core(mac, BWN_TGSLOW_SUPPORT_G); } return (0); } static uint16_t bwn_phy_g_txctl(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; if (phy->rf_ver != 0x2050) return (0); if (phy->rf_rev == 1) return (BWN_TXCTL_PA2DB | BWN_TXCTL_TXMIX); if (phy->rf_rev < 6) return (BWN_TXCTL_PA2DB); if (phy->rf_rev == 8) return (BWN_TXCTL_TXMIX); return (0); } static int bwn_phy_g_init(struct bwn_mac *mac) { bwn_phy_g_init_sub(mac); return (0); } static void bwn_phy_g_exit(struct bwn_mac *mac) { struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl; struct bwn_lo_calib *cal, *tmp; if (lo == NULL) return; TAILQ_FOREACH_SAFE(cal, &lo->calib_list, list, tmp) { TAILQ_REMOVE(&lo->calib_list, cal, list); free(cal, M_DEVBUF); } } static uint16_t bwn_phy_g_read(struct bwn_mac *mac, uint16_t reg) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); return (BWN_READ_2(mac, BWN_PHYDATA)); } static void bwn_phy_g_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, value); } static uint16_t bwn_phy_g_rf_read(struct bwn_mac *mac, uint16_t reg) { KASSERT(reg != 1, ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_2(mac, BWN_RFCTL, reg | 0x80); return (BWN_READ_2(mac, BWN_RFDATALO)); } static void bwn_phy_g_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { KASSERT(reg != 1, ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_2(mac, BWN_RFCTL, reg); BWN_WRITE_2(mac, BWN_RFDATALO, value); } static int bwn_phy_g_hwpctl(struct bwn_mac *mac) { return (mac->mac_phy.rev >= 6); } static void bwn_phy_g_rf_onoff(struct bwn_mac *mac, int on) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; unsigned int channel; uint16_t rfover, rfoverval; if (on) { if (phy->rf_on) return; 
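		/*
		 * Power the radio back up, restore the RF override registers
		 * saved at switch-off time and bounce through channel 6,
		 * apparently to force a clean retune of the current channel.
		 */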
BWN_PHY_WRITE(mac, 0x15, 0x8000); BWN_PHY_WRITE(mac, 0x15, 0xcc00); BWN_PHY_WRITE(mac, 0x15, (phy->gmode ? 0xc0 : 0x0)); if (pg->pg_flags & BWN_PHY_G_FLAG_RADIOCTX_VALID) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, pg->pg_radioctx_over); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, pg->pg_radioctx_overval); pg->pg_flags &= ~BWN_PHY_G_FLAG_RADIOCTX_VALID; } channel = phy->chan; bwn_phy_g_switch_chan(mac, 6, 1); bwn_phy_g_switch_chan(mac, channel, 0); return; } rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER); rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); pg->pg_radioctx_over = rfover; pg->pg_radioctx_overval = rfoverval; pg->pg_flags |= BWN_PHY_G_FLAG_RADIOCTX_VALID; BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, rfover | 0x008c); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfoverval & 0xff73); } static int bwn_phy_g_switch_channel(struct bwn_mac *mac, uint32_t newchan) { if ((newchan < 1) || (newchan > 14)) return (EINVAL); bwn_phy_g_switch_chan(mac, newchan, 0); return (0); } static uint32_t bwn_phy_g_get_default_chan(struct bwn_mac *mac) { return (1); } static void bwn_phy_g_set_antenna(struct bwn_mac *mac, int antenna) { struct bwn_phy *phy = &mac->mac_phy; uint64_t hf; int autodiv = 0; uint16_t tmp; if (antenna == BWN_ANTAUTO0 || antenna == BWN_ANTAUTO1) autodiv = 1; hf = bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER; bwn_hf_write(mac, hf); BWN_PHY_WRITE(mac, BWN_PHY_BBANDCFG, (BWN_PHY_READ(mac, BWN_PHY_BBANDCFG) & ~BWN_PHY_BBANDCFG_RXANT) | ((autodiv ? BWN_ANTAUTO1 : antenna) << BWN_PHY_BBANDCFG_RXANT_SHIFT)); if (autodiv) { tmp = BWN_PHY_READ(mac, BWN_PHY_ANTDWELL); if (antenna == BWN_ANTAUTO1) tmp &= ~BWN_PHY_ANTDWELL_AUTODIV1; else tmp |= BWN_PHY_ANTDWELL_AUTODIV1; BWN_PHY_WRITE(mac, BWN_PHY_ANTDWELL, tmp); } tmp = BWN_PHY_READ(mac, BWN_PHY_ANTWRSETT); if (autodiv) tmp |= BWN_PHY_ANTWRSETT_ARXDIV; else tmp &= ~BWN_PHY_ANTWRSETT_ARXDIV; BWN_PHY_WRITE(mac, BWN_PHY_ANTWRSETT, tmp); if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_OFDM61, BWN_PHY_READ(mac, BWN_PHY_OFDM61) | BWN_PHY_OFDM61_10); BWN_PHY_WRITE(mac, BWN_PHY_DIVSRCHGAINBACK, (BWN_PHY_READ(mac, BWN_PHY_DIVSRCHGAINBACK) & 0xff00) | 0x15); if (phy->rev == 2) BWN_PHY_WRITE(mac, BWN_PHY_ADIVRELATED, 8); else BWN_PHY_WRITE(mac, BWN_PHY_ADIVRELATED, (BWN_PHY_READ(mac, BWN_PHY_ADIVRELATED) & 0xff00) | 8); } if (phy->rev >= 6) BWN_PHY_WRITE(mac, BWN_PHY_OFDM9B, 0xdc); hf |= BWN_HF_UCODE_ANTDIV_HELPER; bwn_hf_write(mac, hf); } static int bwn_phy_g_im(struct bwn_mac *mac, int mode) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__)); KASSERT(mode == BWN_IMMODE_NONE, ("%s: fail", __func__)); if (phy->rev == 0 || !phy->gmode) return (ENODEV); pg->pg_aci_wlan_automatic = 0; return (0); } static int bwn_phy_g_recalc_txpwr(struct bwn_mac *mac, int ignore_tssi) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; unsigned int tssi; int cck, ofdm; int power; int rfatt, bbatt; unsigned int max; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__)); cck = bwn_phy_shm_tssi_read(mac, BWN_SHARED_TSSI_CCK); ofdm = bwn_phy_shm_tssi_read(mac, BWN_SHARED_TSSI_OFDM_G); if (cck < 0 && ofdm < 0) { if (ignore_tssi == 0) return (BWN_TXPWR_RES_DONE); cck = 0; ofdm = 0; } tssi = (cck < 0) ? ofdm : ((ofdm < 0) ? 
cck : (cck + ofdm) / 2); if (pg->pg_avgtssi != 0xff) tssi = (tssi + pg->pg_avgtssi) / 2; pg->pg_avgtssi = tssi; KASSERT(tssi < BWN_TSSI_MAX, ("%s:%d: fail", __func__, __LINE__)); max = siba_sprom_get_maxpwr_bg(sc->sc_dev); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) max -= 3; if (max >= 120) { device_printf(sc->sc_dev, "invalid max TX-power value\n"); max = 80; siba_sprom_set_maxpwr_bg(sc->sc_dev, max); } power = MIN(MAX((phy->txpower < 0) ? 0 : (phy->txpower << 2), 0), max) - (pg->pg_tssi2dbm[MIN(MAX(pg->pg_idletssi - pg->pg_curtssi + tssi, 0x00), 0x3f)]); if (power == 0) return (BWN_TXPWR_RES_DONE); rfatt = -((power + 7) / 8); bbatt = (-(power / 2)) - (4 * rfatt); if ((rfatt == 0) && (bbatt == 0)) return (BWN_TXPWR_RES_DONE); pg->pg_bbatt_delta = bbatt; pg->pg_rfatt_delta = rfatt; return (BWN_TXPWR_RES_NEED_ADJUST); } static void bwn_phy_g_set_txpwr(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; int rfatt, bbatt; uint8_t txctl; bwn_mac_suspend(mac); BWN_ASSERT_LOCKED(sc); bbatt = pg->pg_bbatt.att; bbatt += pg->pg_bbatt_delta; rfatt = pg->pg_rfatt.att; rfatt += pg->pg_rfatt_delta; bwn_phy_g_setatt(mac, &bbatt, &rfatt); txctl = pg->pg_txctl; if ((phy->rf_ver == 0x2050) && (phy->rf_rev == 2)) { if (rfatt <= 1) { if (txctl == 0) { txctl = BWN_TXCTL_PA2DB | BWN_TXCTL_TXMIX; rfatt += 2; bbatt += 2; } else if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) { bbatt += 4 * (rfatt - 2); rfatt = 2; } } else if (rfatt > 4 && txctl) { txctl = 0; if (bbatt < 3) { rfatt -= 3; bbatt += 2; } else { rfatt -= 2; bbatt -= 2; } } } pg->pg_txctl = txctl; bwn_phy_g_setatt(mac, &bbatt, &rfatt); pg->pg_rfatt.att = rfatt; pg->pg_bbatt.att = bbatt; DPRINTF(sc, BWN_DEBUG_TXPOW, "%s: adjust TX power\n", __func__); bwn_phy_lock(mac); bwn_rf_lock(mac); bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt, pg->pg_txctl); bwn_rf_unlock(mac); bwn_phy_unlock(mac); bwn_mac_enable(mac); } static void bwn_phy_g_task_15s(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; unsigned long expire, now; struct bwn_lo_calib *cal, *tmp; uint8_t expired = 0; bwn_mac_suspend(mac); if (lo == NULL) goto fail; BWN_GETTIME(now); if (bwn_has_hwpctl(mac)) { expire = now - BWN_LO_PWRVEC_EXPIRE; if (ieee80211_time_before(lo->pwr_vec_read_time, expire)) { bwn_lo_get_powervector(mac); bwn_phy_g_dc_lookup_init(mac, 0); } goto fail; } expire = now - BWN_LO_CALIB_EXPIRE; TAILQ_FOREACH_SAFE(cal, &lo->calib_list, list, tmp) { if (!ieee80211_time_before(cal->calib_time, expire)) continue; if (BWN_BBATTCMP(&cal->bbatt, &pg->pg_bbatt) && BWN_RFATTCMP(&cal->rfatt, &pg->pg_rfatt)) { KASSERT(!expired, ("%s:%d: fail", __func__, __LINE__)); expired = 1; } DPRINTF(sc, BWN_DEBUG_LO, "expired BB %u RF %u %u I %d Q %d\n", cal->bbatt.att, cal->rfatt.att, cal->rfatt.padmix, cal->ctl.i, cal->ctl.q); TAILQ_REMOVE(&lo->calib_list, cal, list); free(cal, M_DEVBUF); } if (expired || TAILQ_EMPTY(&lo->calib_list)) { cal = bwn_lo_calibset(mac, &pg->pg_bbatt, &pg->pg_rfatt); if (cal == NULL) { device_printf(sc->sc_dev, "failed to recalibrate LO\n"); goto fail; } TAILQ_INSERT_TAIL(&lo->calib_list, cal, list); bwn_lo_write(mac, &cal->ctl); } fail: bwn_mac_enable(mac); } static void bwn_phy_g_task_60s(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint8_t old = phy->chan; if 
(!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI)) return; bwn_mac_suspend(mac); bwn_nrssi_slope_11g(mac); if ((phy->rf_ver == 0x2050) && (phy->rf_rev == 8)) { bwn_switch_channel(mac, (old >= 8) ? 1 : 13); bwn_switch_channel(mac, old); } bwn_mac_enable(mac); } static void bwn_phy_switch_analog(struct bwn_mac *mac, int on) { BWN_WRITE_2(mac, BWN_PHY0, on ? 0 : 0xf4); } static int bwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac->mac_status < BWN_MAC_STATUS_STARTED) { m_freem(m); return (ENETDOWN); } BWN_LOCK(sc); if (bwn_tx_isfull(sc, m)) { m_freem(m); BWN_UNLOCK(sc); return (ENOBUFS); } error = bwn_tx_start(sc, ni, m); if (error == 0) sc->sc_watchdog_timer = 5; BWN_UNLOCK(sc); return (error); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. */ static void bwn_updateslot(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); if (sc->sc_flags & BWN_FLAG_RUNNING) { mac = (struct bwn_mac *)sc->sc_curmac; bwn_set_slot_time(mac, IEEE80211_GET_SLOTTIME(ic)); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer after a promiscuous mode change. * Note this interface does not check the operating mode as this * is an internal callback and we are expected to honor the current * state (e.g. this is used for setting the interface in promiscuous * mode when operating in hostap mode to do ACS). */ static void bwn_update_promisc(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { if (ic->ic_promisc > 0) sc->sc_filters |= BWN_MACCTL_PROMISC; else sc->sc_filters &= ~BWN_MACCTL_PROMISC; bwn_set_opmode(mac); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer to update WME parameters. 
*/ static int bwn_wme_update(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct wmeParams *wmep; int i; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) { wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[i]; bwn_wme_loadparams(mac, wmep, bwn_wme_shm_offsets[i]); } bwn_mac_enable(mac); } BWN_UNLOCK(sc); return (0); } static void bwn_scan_start(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters |= BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); /* disable CFP update during scan */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_scan_end(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters &= ~BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_set_channel(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct bwn_phy *phy = &mac->mac_phy; int chan, error; BWN_LOCK(sc); error = bwn_switch_band(sc, ic->ic_curchan); if (error) goto fail; bwn_mac_suspend(mac); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); chan = ieee80211_chan2ieee(ic, ic->ic_curchan); if (chan != phy->chan) bwn_switch_channel(mac, chan); /* TX power level */ if (ic->ic_curchan->ic_maxpower != 0 && ic->ic_curchan->ic_maxpower != phy->txpower) { phy->txpower = ic->ic_curchan->ic_maxpower / 2; bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME | BWN_TXPWR_IGNORE_TSSI); } bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); if (sc->sc_rf_enabled != phy->rf_on) { if (sc->sc_rf_enabled) { bwn_rf_turnon(mac); if (!(mac->mac_flags & BWN_MAC_FLAG_RADIO_ON)) device_printf(sc->sc_dev, "please turn on the RF switch\n"); } else bwn_rf_turnoff(mac); } bwn_mac_enable(mac); fail: /* * Setup radio tap channel freq and flags */ sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = htole16(ic->ic_curchan->ic_flags & 0xffff); BWN_UNLOCK(sc); } static struct ieee80211vap * bwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211vap *vap; struct bwn_vap *bvp; switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: case IEEE80211_M_WDS: case IEEE80211_M_MONITOR: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: break; default: return (NULL); } bvp = malloc(sizeof(struct bwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &bvp->bv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ bvp->bv_newstate = vap->iv_newstate; vap->iv_newstate = bwn_newstate; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = BWN_STAID_MAX; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); return (vap); } static void 
bwn_vap_delete(struct ieee80211vap *vap) { struct bwn_vap *bvp = BWN_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(bvp, M_80211_VAP); } static int bwn_init(struct bwn_softc *sc) { struct bwn_mac *mac; int error; BWN_ASSERT_LOCKED(sc); bzero(sc->sc_bssid, IEEE80211_ADDR_LEN); sc->sc_flags |= BWN_FLAG_NEED_BEACON_TP; sc->sc_filters = 0; bwn_wme_clear(sc); sc->sc_beacons[0] = sc->sc_beacons[1] = 0; sc->sc_rf_enabled = 1; mac = sc->sc_curmac; if (mac->mac_status == BWN_MAC_STATUS_UNINIT) { error = bwn_core_init(mac); if (error != 0) return (error); } if (mac->mac_status == BWN_MAC_STATUS_INITED) bwn_core_start(mac); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); sc->sc_flags |= BWN_FLAG_RUNNING; callout_reset(&sc->sc_rfswitch_ch, hz, bwn_rfswitch, sc); callout_reset(&sc->sc_watchdog_ch, hz, bwn_watchdog, sc); return (0); } static void bwn_stop(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; BWN_ASSERT_LOCKED(sc); if (mac->mac_status >= BWN_MAC_STATUS_INITED) { /* XXX FIXME opmode not based on VAP */ bwn_set_opmode(mac); bwn_set_macaddr(mac); } if (mac->mac_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; bwn_core_exit(mac); sc->sc_rf_enabled = 0; sc->sc_flags &= ~BWN_FLAG_RUNNING; } static void bwn_wme_clear(struct bwn_softc *sc) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct wmeParams *p; unsigned int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: fail", __func__, __LINE__)); for (i = 0; i < N(sc->sc_wmeParams); i++) { p = &(sc->sc_wmeParams[i]); switch (bwn_wme_shm_offsets[i]) { case BWN_WME_VOICE: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_VIDEO: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_BESTEFFORT: p->wmep_txopLimit = 0; p->wmep_aifsn = 3; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; case BWN_WME_BACKGROUND: p->wmep_txopLimit = 0; p->wmep_aifsn = 7; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } } static int bwn_core_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; int error; KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); siba_powerup(sc->sc_dev, 0); if (!siba_dev_isup(sc->sc_dev)) bwn_reset_core(mac, mac->mac_phy.gmode ? BWN_TGSLOW_SUPPORT_G : 0); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; mac->mac_phy.hwpctl = (bwn_hwpctl) ? 
1 : 0; BWN_GETTIME(mac->mac_phy.nexttime); mac->mac_phy.txerrors = BWN_TXERROR_MAX; bzero(&mac->mac_stats, sizeof(mac->mac_stats)); mac->mac_stats.link_noise = -95; mac->mac_reason_intr = 0; bzero(mac->mac_reason, sizeof(mac->mac_reason)); mac->mac_intr_mask = BWN_INTR_MASKTEMPLATE; #ifdef BWN_DEBUG if (sc->sc_debug & BWN_DEBUG_XMIT) mac->mac_intr_mask &= ~BWN_INTR_PHY_TXERR; #endif mac->mac_suspended = 1; mac->mac_task_state = 0; memset(&mac->mac_noise, 0, sizeof(mac->mac_noise)); mac->mac_phy.init_pre(mac); siba_pcicore_intr(sc->sc_dev); siba_fix_imcfglobug(sc->sc_dev); bwn_bt_disable(mac); if (mac->mac_phy.prepare_hw) { error = mac->mac_phy.prepare_hw(mac); if (error) goto fail0; } error = bwn_chip_init(mac); if (error) goto fail0; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_COREREV, siba_get_revid(sc->sc_dev)); hf = bwn_hf_read(mac); if (mac->mac_phy.type == BWN_PHYTYPE_G) { hf |= BWN_HF_GPHY_SYM_WORKAROUND; if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) hf |= BWN_HF_PAGAINBOOST_OFDM_ON; if (mac->mac_phy.rev == 1) hf |= BWN_HF_GPHY_DC_CANCELFILTER; } if (mac->mac_phy.rf_ver == 0x2050) { if (mac->mac_phy.rf_rev < 6) hf |= BWN_HF_FORCE_VCO_RECALC; if (mac->mac_phy.rf_rev == 6) hf |= BWN_HF_4318_TSSI; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW) hf |= BWN_HF_SLOWCLOCK_REQ_OFF; if ((siba_get_type(sc->sc_dev) == SIBA_TYPE_PCI) && (siba_get_pcicore_revid(sc->sc_dev) <= 10)) hf |= BWN_HF_PCI_SLOWCLOCK_WORKAROUND; hf &= ~BWN_HF_SKIP_CFP_UPDATE; bwn_hf_write(mac, hf); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SHORT_RETRY_FALLBACK, 3); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_LONG_RETRY_FALLBACK, 2); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_MAXTIME, 1); bwn_rate_init(mac); bwn_set_phytxctl(mac); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MIN, (mac->mac_phy.type == BWN_PHYTYPE_B) ? 
0x1f : 0xf); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MAX, 0x3ff); if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0) bwn_pio_init(mac); else bwn_dma_init(mac); bwn_wme_init(mac); bwn_spu_setdelay(mac, 1); bwn_bt_enable(mac); siba_powerup(sc->sc_dev, !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW)); bwn_set_macaddr(mac); bwn_crypt_init(mac); /* XXX LED initialization */ mac->mac_status = BWN_MAC_STATUS_INITED; return (error); fail0: siba_powerdown(sc->sc_dev); KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); return (error); } static void bwn_core_start(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; KASSERT(mac->mac_status == BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (siba_get_revid(sc->sc_dev) < 5) return; while (1) { tmp = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(tmp & 0x00000001)) break; tmp = BWN_READ_4(mac, BWN_XMITSTAT_1); } bwn_mac_enable(mac); BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); mac->mac_status = BWN_MAC_STATUS_STARTED; } static void bwn_core_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t macctl; BWN_ASSERT_LOCKED(mac->mac_sc); KASSERT(mac->mac_status <= BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_status != BWN_MAC_STATUS_INITED) return; mac->mac_status = BWN_MAC_STATUS_UNINIT; macctl = BWN_READ_4(mac, BWN_MACCTL); macctl &= ~BWN_MACCTL_MCODE_RUN; macctl |= BWN_MACCTL_MCODE_JMP0; BWN_WRITE_4(mac, BWN_MACCTL, macctl); bwn_dma_stop(mac); bwn_pio_stop(mac); bwn_chip_exit(mac); mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); siba_powerdown(sc->sc_dev); } static void bwn_bt_disable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; (void)sc; /* XXX do nothing yet */ } static int bwn_chip_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; uint32_t macctl; int error; macctl = BWN_MACCTL_IHR_ON | BWN_MACCTL_SHM_ON | BWN_MACCTL_STA; if (phy->gmode) macctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, macctl); error = bwn_fw_fillinfo(mac); if (error) return (error); error = bwn_fw_loaducode(mac); if (error) return (error); error = bwn_gpio_init(mac); if (error) return (error); error = bwn_fw_loadinitvals(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } phy->switch_analog(mac, 1); error = bwn_phy_init(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } if (phy->set_im) phy->set_im(mac, BWN_IMMODE_NONE); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->type == BWN_PHYTYPE_B) BWN_WRITE_2(mac, 0x005e, BWN_READ_2(mac, 0x005e) | 0x0004); BWN_WRITE_4(mac, 0x0100, 0x01000000); if (siba_get_revid(sc->sc_dev) < 5) BWN_WRITE_4(mac, 0x010c, 0x01000000); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_STA); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_STA); bwn_shm_write_2(mac, BWN_SHARED, 0x0074, 0x0000); bwn_set_opmode(mac); if (siba_get_revid(sc->sc_dev) < 3) { BWN_WRITE_2(mac, 0x060e, 0x0000); BWN_WRITE_2(mac, 0x0610, 0x8000); BWN_WRITE_2(mac, 0x0604, 0x0000); BWN_WRITE_2(mac, 0x0606, 0x0200); } else { BWN_WRITE_4(mac, 0x0188, 0x80000000); BWN_WRITE_4(mac, 0x018c, 0x02000000); } BWN_WRITE_4(mac, BWN_INTR_REASON, 0x00004000); BWN_WRITE_4(mac, BWN_DMA0_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA1_INTR_MASK, 0x0000dc00);
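/* Note: queues 0 and 3 above and below take mask 0x0001dc00 rather than 0x0000dc00, i.e. they unmask one extra DMA status bit. */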
BWN_WRITE_4(mac, BWN_DMA2_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA3_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA4_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA5_INTR_MASK, 0x0000dc00); siba_write_4(sc->sc_dev, SIBA_TGSLOW, siba_read_4(sc->sc_dev, SIBA_TGSLOW) | 0x00100000); BWN_WRITE_2(mac, BWN_POWERUP_DELAY, siba_get_cc_powerdelay(sc->sc_dev)); return (error); } /* read hostflags */ static uint64_t bwn_hf_read(struct bwn_mac *mac) { uint64_t ret; ret = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFHI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFMI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO); return (ret); } static void bwn_hf_write(struct bwn_mac *mac, uint64_t value) { bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFLO, (value & 0x00000000ffffull)); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFMI, (value & 0x0000ffff0000ull) >> 16); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFHI, (value & 0xffff00000000ULL) >> 32); } static void bwn_set_txretry(struct bwn_mac *mac, int s, int l) { bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_SHORT_RETRY, MIN(s, 0xf)); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_LONG_RETRY, MIN(l, 0xf)); } static void bwn_rate_init(struct bwn_mac *mac) { switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: case BWN_PHYTYPE_N: bwn_rate_write(mac, BWN_OFDM_RATE_6MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_12MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_18MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_24MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_36MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_48MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_54MB, 1); if (mac->mac_phy.type == BWN_PHYTYPE_A) break; /* FALLTHROUGH */ case BWN_PHYTYPE_B: bwn_rate_write(mac, BWN_CCK_RATE_1MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_2MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_5MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_11MB, 0); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static void bwn_rate_write(struct bwn_mac *mac, uint16_t rate, int ofdm) { uint16_t offset; if (ofdm) { offset = 0x480; offset += (bwn_plcp_getofdm(rate) & 0x000f) * 2; } else { offset = 0x4c0; offset += (bwn_plcp_getcck(rate) & 0x000f) * 2; } bwn_shm_write_2(mac, BWN_SHARED, offset + 0x20, bwn_shm_read_2(mac, BWN_SHARED, offset)); } static uint8_t bwn_plcp_getcck(const uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (0x0a); case BWN_CCK_RATE_2MB: return (0x14); case BWN_CCK_RATE_5MB: return (0x37); case BWN_CCK_RATE_11MB: return (0x6e); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint8_t bwn_plcp_getofdm(const uint8_t bitrate) { switch (bitrate) { case BWN_OFDM_RATE_6MB: return (0xb); case BWN_OFDM_RATE_9MB: return (0xf); case BWN_OFDM_RATE_12MB: return (0xa); case BWN_OFDM_RATE_18MB: return (0xe); case BWN_OFDM_RATE_24MB: return (0x9); case BWN_OFDM_RATE_36MB: return (0xd); case BWN_OFDM_RATE_48MB: return (0x8); case BWN_OFDM_RATE_54MB: return (0xc); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_set_phytxctl(struct bwn_mac *mac) { uint16_t ctl; ctl = (BWN_TX_PHY_ENC_CCK | BWN_TX_PHY_ANT01AUTO | BWN_TX_PHY_TXPWR); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_BEACON_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, ctl); } static void bwn_pio_init(struct bwn_mac *mac) { struct bwn_pio *pio = &mac->mac_method.pio; BWN_WRITE_4(mac, BWN_MACCTL, 
BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_BIGENDIAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RX_PADOFFSET, 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BK], 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BE], 1); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VI], 2); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VO], 3); bwn_pio_set_txqueue(mac, &pio->mcast, 4); bwn_pio_setupqueue_rx(mac, &pio->rx, 0); } static void bwn_pio_set_txqueue(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, int index) { struct bwn_pio_txpkt *tp; struct bwn_softc *sc = mac->mac_sc; unsigned int i; tq->tq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_TXQOFFSET(mac); tq->tq_index = index; tq->tq_free = BWN_PIO_MAX_TXPACKETS; if (siba_get_revid(sc->sc_dev) >= 8) tq->tq_size = 1920; else { tq->tq_size = bwn_pio_read_2(mac, tq, BWN_PIO_TXQBUFSIZE); tq->tq_size -= 80; } TAILQ_INIT(&tq->tq_pktlist); for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); tp->tp_index = i; tp->tp_queue = tq; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); } } static uint16_t bwn_pio_idx2base(struct bwn_mac *mac, int index) { struct bwn_softc *sc = mac->mac_sc; static const uint16_t bases[] = { BWN_PIO_BASE0, BWN_PIO_BASE1, BWN_PIO_BASE2, BWN_PIO_BASE3, BWN_PIO_BASE4, BWN_PIO_BASE5, BWN_PIO_BASE6, BWN_PIO_BASE7, }; static const uint16_t bases_rev11[] = { BWN_PIO11_BASE0, BWN_PIO11_BASE1, BWN_PIO11_BASE2, BWN_PIO11_BASE3, BWN_PIO11_BASE4, BWN_PIO11_BASE5, }; if (siba_get_revid(sc->sc_dev) >= 11) { if (index >= N(bases_rev11)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases_rev11[index]); } if (index >= N(bases)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases[index]); } static void bwn_pio_setupqueue_rx(struct bwn_mac *mac, struct bwn_pio_rxqueue *prq, int index) { struct bwn_softc *sc = mac->mac_sc; prq->prq_mac = mac; prq->prq_rev = siba_get_revid(sc->sc_dev); prq->prq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_RXQOFFSET(mac); bwn_dma_rxdirectfifo(mac, index, 1); } static void bwn_destroy_pioqueue_tx(struct bwn_pio_txqueue *tq) { if (tq == NULL) return; bwn_pio_cancel_tx_packets(tq); } static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *pio) { bwn_destroy_pioqueue_tx(pio); } static uint16_t bwn_pio_read_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset) { return (BWN_READ_2(mac, tq->tq_base + offset)); } static void bwn_dma_rxdirectfifo(struct bwn_mac *mac, int idx, uint8_t enable) { uint32_t ctl; int type; uint16_t base; type = bwn_dma_mask2type(bwn_dma_mask(mac)); base = bwn_dma_base(type, idx); if (type == BWN_DMA_64BIT) { ctl = BWN_READ_4(mac, base + BWN_DMA64_RXCTL); ctl &= ~BWN_DMA64_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA64_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA64_RXCTL, ctl); } else { ctl = BWN_READ_4(mac, base + BWN_DMA32_RXCTL); ctl &= ~BWN_DMA32_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA32_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA32_RXCTL, ctl); } } static uint64_t bwn_dma_mask(struct bwn_mac *mac) { uint32_t tmp; uint16_t base; tmp = BWN_READ_4(mac, SIBA_TGSHIGH); if (tmp & SIBA_TGSHIGH_DMA64) return (BWN_DMA_BIT_MASK(64)); base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) return (BWN_DMA_BIT_MASK(32)); return (BWN_DMA_BIT_MASK(30)); } static int bwn_dma_mask2type(uint64_t dmamask) { if (dmamask == BWN_DMA_BIT_MASK(30)) return (BWN_DMA_30BIT); if (dmamask == BWN_DMA_BIT_MASK(32)) return (BWN_DMA_32BIT); if (dmamask == 
BWN_DMA_BIT_MASK(64)) return (BWN_DMA_64BIT); KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (BWN_DMA_30BIT); } static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *tq) { struct bwn_pio_txpkt *tp; unsigned int i; for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); if (tp->tp_m) { m_freem(tp->tp_m); tp->tp_m = NULL; } } } static uint16_t bwn_dma_base(int type, int controller_idx) { static const uint16_t map64[] = { BWN_DMA64_BASE0, BWN_DMA64_BASE1, BWN_DMA64_BASE2, BWN_DMA64_BASE3, BWN_DMA64_BASE4, BWN_DMA64_BASE5, }; static const uint16_t map32[] = { BWN_DMA32_BASE0, BWN_DMA32_BASE1, BWN_DMA32_BASE2, BWN_DMA32_BASE3, BWN_DMA32_BASE4, BWN_DMA32_BASE5, }; if (type == BWN_DMA_64BIT) { KASSERT(controller_idx >= 0 && controller_idx < N(map64), ("%s:%d: fail", __func__, __LINE__)); return (map64[controller_idx]); } KASSERT(controller_idx >= 0 && controller_idx < N(map32), ("%s:%d: fail", __func__, __LINE__)); return (map32[controller_idx]); } static void bwn_dma_init(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; /* setup TX DMA channels. */ bwn_dma_setup(dma->wme[WME_AC_BK]); bwn_dma_setup(dma->wme[WME_AC_BE]); bwn_dma_setup(dma->wme[WME_AC_VI]); bwn_dma_setup(dma->wme[WME_AC_VO]); bwn_dma_setup(dma->mcast); /* setup RX DMA channel. */ bwn_dma_setup(dma->rx); } static struct bwn_dma_ring * bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index, int for_tx, int type) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; int error, i; dr = malloc(sizeof(*dr), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr == NULL) goto out; dr->dr_numslots = BWN_RXRING_SLOTS; if (for_tx) dr->dr_numslots = BWN_TXRING_SLOTS; dr->dr_meta = malloc(dr->dr_numslots * sizeof(struct bwn_dmadesc_meta), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr->dr_meta == NULL) goto fail0; dr->dr_type = type; dr->dr_mac = mac; dr->dr_base = bwn_dma_base(type, controller_index); dr->dr_index = controller_index; if (type == BWN_DMA_64BIT) { dr->getdesc = bwn_dma_64_getdesc; dr->setdesc = bwn_dma_64_setdesc; dr->start_transfer = bwn_dma_64_start_transfer; dr->suspend = bwn_dma_64_suspend; dr->resume = bwn_dma_64_resume; dr->get_curslot = bwn_dma_64_get_curslot; dr->set_curslot = bwn_dma_64_set_curslot; } else { dr->getdesc = bwn_dma_32_getdesc; dr->setdesc = bwn_dma_32_setdesc; dr->start_transfer = bwn_dma_32_start_transfer; dr->suspend = bwn_dma_32_suspend; dr->resume = bwn_dma_32_resume; dr->get_curslot = bwn_dma_32_get_curslot; dr->set_curslot = bwn_dma_32_set_curslot; } if (for_tx) { dr->dr_tx = 1; dr->dr_curslot = -1; } else { if (dr->dr_index == 0) { dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET; } else KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } error = bwn_dma_allocringmemory(dr); if (error) goto fail2; if (for_tx) { /* * Assumption: BWN_TXRING_SLOTS is divisible by * BWN_TX_SLOTS_PER_FRAME */ KASSERT(BWN_TXRING_SLOTS % BWN_TX_SLOTS_PER_FRAME == 0, ("%s:%d: fail", __func__, __LINE__)); dr->dr_txhdr_cache = malloc((dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac), M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT(dr->dr_txhdr_cache != NULL, ("%s:%d: fail", __func__, __LINE__)); /* * Create TX ring DMA structures */ error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_HDRSIZE(mac), 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_txring_dtag); if (error) {
device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); goto fail1; } for (i = 0; i < dr->dr_numslots; i += 2) { dr->getdesc(dr, i, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_HEADER; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 0; error = bus_dmamap_create(dr->dr_txring_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create TX header DMA map\n"); goto fail1; } dr->getdesc(dr, i + 1, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_BODY; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 1; error = bus_dmamap_create(dma->txbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create TX buf DMA map\n"); goto fail1; } } } else { error = bus_dmamap_create(dma->rxbuf_dtag, 0, &dr->dr_spare_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &mt); error = bus_dmamap_create(dma->rxbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } error = bwn_dma_newbuf(dr, desc, mt, 1); if (error) { device_printf(sc->sc_dev, "failed to allocate RX buf\n"); goto out; /* XXX wrong! */ } } bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->dr_usedslot = dr->dr_numslots; } out: return (dr); fail2: free(dr->dr_txhdr_cache, M_DEVBUF); fail1: free(dr->dr_meta, M_DEVBUF); fail0: free(dr, M_DEVBUF); return (NULL); } static void bwn_dma_ringfree(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_free_descbufs(*dr); bwn_dma_free_ringmemory(*dr); free((*dr)->dr_txhdr_cache, M_DEVBUF); free((*dr)->dr_meta, M_DEVBUF); free(*dr, M_DEVBUF); *dr = NULL; } static void bwn_dma_32_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc32 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_32_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc32 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; uint32_t addr, addrext, ctl; int slot; slot = (int)(&(desc->dma.dma32) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addr = (uint32_t) (dmaaddr & ~SIBA_DMA_TRANSLATION_MASK); addrext = (uint32_t) (dmaaddr & SIBA_DMA_TRANSLATION_MASK) >> 30; addr |= siba_dma_translation(sc->sc_dev); ctl = bufsize & BWN_DMA32_DCTL_BYTECNT; if (slot == dr->dr_numslots - 1) ctl |= BWN_DMA32_DCTL_DTABLEEND; if (start) ctl |= BWN_DMA32_DCTL_FRAMESTART; if (end) ctl |= BWN_DMA32_DCTL_FRAMEEND; if (irq) ctl |= BWN_DMA32_DCTL_IRQ; ctl |= (addrext << BWN_DMA32_DCTL_ADDREXT_SHIFT) & BWN_DMA32_DCTL_ADDREXT_MASK; desc->dma.dma32.control = htole32(ctl); desc->dma.dma32.address = htole32(addr); } static void bwn_dma_32_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_32_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) | BWN_DMA32_TXSUSPEND); } static void bwn_dma_32_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) & ~BWN_DMA32_TXSUSPEND); } static int bwn_dma_32_get_curslot(struct bwn_dma_ring *dr) {
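/* The RX status register reports the hardware's current descriptor position as a byte offset; mask out the pointer field and divide by the descriptor size to get a slot index. */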
uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA32_RXSTATUS); val &= BWN_DMA32_RXDPTR; return (val / sizeof(struct bwn_dmadesc32)); } static void bwn_dma_32_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, (uint32_t) (slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_64_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc64 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_64_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc64 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; int slot; uint32_t ctl0 = 0, ctl1 = 0; uint32_t addrlo, addrhi; uint32_t addrext; slot = (int)(&(desc->dma.dma64) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addrlo = (uint32_t) (dmaaddr & 0xffffffff); addrhi = (((uint64_t) dmaaddr >> 32) & ~SIBA_DMA_TRANSLATION_MASK); addrext = (((uint64_t) dmaaddr >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; addrhi |= (siba_dma_translation(sc->sc_dev) << 1); if (slot == dr->dr_numslots - 1) ctl0 |= BWN_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= BWN_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= BWN_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= BWN_DMA64_DCTL0_IRQ; ctl1 |= bufsize & BWN_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << BWN_DMA64_DCTL1_ADDREXT_SHIFT) & BWN_DMA64_DCTL1_ADDREXT_MASK; desc->dma.dma64.control0 = htole32(ctl0); desc->dma.dma64.control1 = htole32(ctl1); desc->dma.dma64.address_low = htole32(addrlo); desc->dma.dma64.address_high = htole32(addrhi); } static void bwn_dma_64_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static void bwn_dma_64_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) | BWN_DMA64_TXSUSPEND); } static void bwn_dma_64_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) & ~BWN_DMA64_TXSUSPEND); } static int bwn_dma_64_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA64_RXSTATUS); val &= BWN_DMA64_RXSTATDPTR; return (val / sizeof(struct bwn_dmadesc64)); } static void bwn_dma_64_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static int bwn_dma_allocringmemory(struct bwn_dma_ring *dr) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int error; error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_DMA_RINGMEMSIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_ring_dtag); if (error) { device_printf(sc->sc_dev, "can't create DMA ring tag: TODO frees\n"); return (-1); } error = bus_dmamem_alloc(dr->dr_ring_dtag, &dr->dr_ring_descbase, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dr->dr_ring_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate DMA mem: TODO frees\n"); return (-1); } error = bus_dmamap_load(dr->dr_ring_dtag, dr->dr_ring_dmap, dr->dr_ring_descbase, BWN_DMA_RINGMEMSIZE, bwn_dma_ring_addr, &dr->dr_ring_dmabase, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load DMA mem: TODO free\n"); return
(-1); } return (0); } static void bwn_dma_setup(struct bwn_dma_ring *dr) { struct bwn_softc *sc = dr->dr_mac->mac_sc; uint64_t ring64; uint32_t addrext, ring32, value; uint32_t trans = siba_dma_translation(sc->sc_dev); if (dr->dr_tx) { dr->dr_curslot = -1; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA64_TXENABLE; value |= (addrext << BWN_DMA64_TXADDREXT_SHIFT) & BWN_DMA64_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA32_TXENABLE; value |= (addrext << BWN_DMA32_TXADDREXT_SHIFT) & BWN_DMA32_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); } return; } /* * set for RX */ dr->dr_usedslot = dr->dr_numslots; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA64_RXFROFF_SHIFT); value |= BWN_DMA64_RXENABLE; value |= (addrext << BWN_DMA64_RXADDREXT_SHIFT) & BWN_DMA64_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc64)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA32_RXFROFF_SHIFT); value |= BWN_DMA32_RXENABLE; value |= (addrext << BWN_DMA32_RXADDREXT_SHIFT) & BWN_DMA32_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc32)); } } static void bwn_dma_free_ringmemory(struct bwn_dma_ring *dr) { bus_dmamap_unload(dr->dr_ring_dtag, dr->dr_ring_dmap); bus_dmamem_free(dr->dr_ring_dtag, dr->dr_ring_descbase, dr->dr_ring_dmap); } static void bwn_dma_cleanup(struct bwn_dma_ring *dr) { if (dr->dr_tx) { bwn_dma_tx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, 0); } else { bwn_dma_rx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, 0); } } static void bwn_dma_free_descbufs(struct bwn_dma_ring *dr) { struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int i; if (!dr->dr_usedslot) return; for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &meta); if (meta->mt_m == NULL) { if (!dr->dr_tx) device_printf(sc->sc_dev, "%s: not TX?\n", __func__); continue; } if (dr->dr_tx) { if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) 
bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); } else bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); bwn_dma_free_descbuf(dr, meta); } } static int bwn_dma_tx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED || value == BWN_DMA64_TXSTAT_IDLEWAIT || value == BWN_DMA64_TXSTAT_STOPPED) break; } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED || value == BWN_DMA32_TXSTAT_IDLEWAIT || value == BWN_DMA32_TXSTAT_STOPPED) break; } DELAY(1000); } offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXCTL : BWN_DMA32_TXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } DELAY(1000); return (0); } static int bwn_dma_rx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXCTL : BWN_DMA32_RXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXSTATUS : BWN_DMA32_RXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_RXSTAT; if (value == BWN_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_RXSTATE; if (value == BWN_DMA32_RXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } return (0); } static void bwn_dma_free_descbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_meta *meta) { if (meta->mt_m != NULL) { m_freem(meta->mt_m); meta->mt_m = NULL; } if (meta->mt_ni != NULL) { ieee80211_free_node(meta->mt_ni); meta->mt_ni = NULL; } } static void bwn_dma_set_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { struct bwn_rxhdr4 *rxhdr; unsigned char *frame; rxhdr = mtod(m, struct bwn_rxhdr4 *); rxhdr->frame_len = 0; KASSERT(dr->dr_rx_bufsize >= dr->dr_frameoffset + sizeof(struct bwn_plcp6) + 2, ("%s:%d: fail", __func__, __LINE__)); frame = mtod(m, char *) + dr->dr_frameoffset; memset(frame, 0xff, sizeof(struct bwn_plcp6) + 2 /* padding */); } static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { unsigned char *f = mtod(m, char *) + dr->dr_frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xff); } static void bwn_wme_init(struct bwn_mac *mac) { bwn_wme_load(mac); /* enable WME support. */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_EDCF); BWN_WRITE_2(mac, BWN_IFSCTL, BWN_READ_2(mac, BWN_IFSCTL) | BWN_IFSCTL_USE_EDCF); } static void bwn_spu_setdelay(struct bwn_mac *mac, int idle) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t delay; /* microsec */ delay = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
3700 : 1050; if (ic->ic_opmode == IEEE80211_M_IBSS || idle) delay = 500; if ((mac->mac_phy.rf_ver == 0x2050) && (mac->mac_phy.rf_rev == 8)) delay = max(delay, (uint16_t)2400); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SPU_WAKEUP, delay); } static void bwn_bt_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; if (bwn_bluetooth == 0) return; if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCOEXIST) == 0) return; if (mac->mac_phy.type != BWN_PHYTYPE_B && !mac->mac_phy.gmode) return; hf = bwn_hf_read(mac); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCMOD) hf |= BWN_HF_BT_COEXISTALT; else hf |= BWN_HF_BT_COEXIST; bwn_hf_write(mac, hf); } static void bwn_set_macaddr(struct bwn_mac *mac) { bwn_mac_write_bssid(mac); bwn_mac_setfilter(mac, BWN_MACFILTER_SELF, mac->mac_sc->sc_ic.ic_macaddr); } static void bwn_clear_keys(struct bwn_mac *mac) { int i; for (i = 0; i < mac->mac_max_nr_keys; i++) { KASSERT(i >= 0 && i < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); bwn_key_dowrite(mac, i, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); if ((i <= 3) && !BWN_SEC_NEWAPI(mac)) { bwn_key_dowrite(mac, i + 4, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); } mac->mac_key[i].keyconf = NULL; } } static void bwn_crypt_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; mac->mac_max_nr_keys = (siba_get_revid(sc->sc_dev) >= 5) ? 58 : 20; KASSERT(mac->mac_max_nr_keys <= N(mac->mac_key), ("%s:%d: fail", __func__, __LINE__)); mac->mac_ktp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_KEY_TABLEP); mac->mac_ktp *= 2; if (siba_get_revid(sc->sc_dev) >= 5) BWN_WRITE_2(mac, BWN_RCMTA_COUNT, mac->mac_max_nr_keys - 8); bwn_clear_keys(mac); } static void bwn_chip_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; bwn_phy_exit(mac); siba_gpio_set(sc->sc_dev, 0); } static int bwn_fw_fillinfo(struct bwn_mac *mac) { int error; error = bwn_fw_gets(mac, BWN_FWTYPE_DEFAULT); if (error == 0) return (0); error = bwn_fw_gets(mac, BWN_FWTYPE_OPENSOURCE); if (error == 0) return (0); return (error); } static int bwn_gpio_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t mask = 0x1f, set = 0xf, value; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GPOUT_MASK); BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x000f); if (siba_get_chipid(sc->sc_dev) == 0x4301) { mask |= 0x0060; set |= 0x0060; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) { BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } if (siba_get_revid(sc->sc_dev) >= 2) mask |= 0x0010; value = siba_gpio_get(sc->sc_dev); if (value == -1) return (0); siba_gpio_set(sc->sc_dev, (value & mask) | set); return (0); } static int bwn_fw_loadinitvals(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const struct bwn_fwinitvals *)((const char *)fwp.fw->data + offset)) const size_t hdr_len = sizeof(struct bwn_fwhdr); const struct bwn_fwhdr *hdr; struct bwn_fw *fw = &mac->mac_fw; int error; hdr = (const struct bwn_fwhdr *)(fw->initvals.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals, hdr_len), be32toh(hdr->size), fw->initvals.fw->datasize - hdr_len); if (error) return (error); if (fw->initvals_band.fw) { hdr = (const struct bwn_fwhdr *)(fw->initvals_band.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals_band, hdr_len), be32toh(hdr->size), fw->initvals_band.fw->datasize - hdr_len); } return (error); #undef GETFWOFFSET } static int 
bwn_phy_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error; mac->mac_phy.chan = mac->mac_phy.get_default_chan(mac); mac->mac_phy.rf_onoff(mac, 1); error = mac->mac_phy.init(mac); if (error) { device_printf(sc->sc_dev, "PHY init failed\n"); goto fail0; } error = bwn_switch_channel(mac, mac->mac_phy.get_default_chan(mac)); if (error) { device_printf(sc->sc_dev, "failed to switch default channel\n"); goto fail1; } return (0); fail1: if (mac->mac_phy.exit) mac->mac_phy.exit(mac); fail0: mac->mac_phy.rf_onoff(mac, 0); return (error); } static void bwn_set_txantenna(struct bwn_mac *mac, int antenna) { uint16_t ant; uint16_t tmp; ant = bwn_ant2phy(antenna); /* For ACK/CTS */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, tmp); /* For Probe Responses */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, tmp); } static void bwn_set_opmode(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t ctl; uint16_t cfp_pretbtt; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl &= ~(BWN_MACCTL_HOSTAP | BWN_MACCTL_PASS_CTL | BWN_MACCTL_PASS_BADPLCP | BWN_MACCTL_PASS_BADFCS | BWN_MACCTL_PROMISC | BWN_MACCTL_BEACON_PROMISC); ctl |= BWN_MACCTL_STA; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) ctl |= BWN_MACCTL_HOSTAP; else if (ic->ic_opmode == IEEE80211_M_IBSS) ctl &= ~BWN_MACCTL_STA; ctl |= sc->sc_filters; if (siba_get_revid(sc->sc_dev) <= 4) ctl |= BWN_MACCTL_PROMISC; BWN_WRITE_4(mac, BWN_MACCTL, ctl); cfp_pretbtt = 2; if ((ctl & BWN_MACCTL_STA) && !(ctl & BWN_MACCTL_HOSTAP)) { if (siba_get_chipid(sc->sc_dev) == 0x4306 && siba_get_chiprev(sc->sc_dev) == 3) cfp_pretbtt = 100; else cfp_pretbtt = 50; } BWN_WRITE_2(mac, 0x612, cfp_pretbtt); } static int bwn_dma_gettype(struct bwn_mac *mac) { uint32_t tmp; uint16_t base; tmp = BWN_READ_4(mac, SIBA_TGSHIGH); if (tmp & SIBA_TGSHIGH_DMA64) return (BWN_DMA_64BIT); base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) return (BWN_DMA_32BIT); return (BWN_DMA_30BIT); } static void bwn_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static void bwn_phy_g_init_sub(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t i, tmp; if (phy->rev == 1) bwn_phy_init_b5(mac); else bwn_phy_init_b6(mac); if (phy->rev >= 2 || phy->gmode) bwn_phy_init_a(mac); if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, 0); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, 0); } if (phy->rev == 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xc0); } if (phy->rev > 5) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x400); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xc0); } if (phy->gmode || phy->rev >= 2) { tmp = BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM); tmp &= BWN_PHYVER_VERSION; if (tmp == 3 || tmp == 5) { BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc2), 0x1816); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc3), 0x8006); } if (tmp == 5) { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xcc), 0x00ff, 0x1f00); } } if ((phy->rev <= 2 && phy->gmode) || phy->rev
>= 2) BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x7e), 0x78); if (phy->rf_rev == 8) { BWN_PHY_SET(mac, BWN_PHY_EXTG(0x01), 0x80); BWN_PHY_SET(mac, BWN_PHY_OFDM(0x3e), 0x4); } if (BWN_HAS_LOOPBACK(phy)) bwn_loopback_calcgain(mac); if (phy->rf_rev != 8) { if (pg->pg_initval == 0xffff) pg->pg_initval = bwn_rf_init_bcm2050(mac); else BWN_RF_WRITE(mac, 0x0078, pg->pg_initval); } bwn_lo_g_init(mac); if (BWN_HAS_TXMAG(phy)) { BWN_RF_WRITE(mac, 0x52, (BWN_RF_READ(mac, 0x52) & 0xff00) | pg->pg_loctl.tx_bias | pg->pg_loctl.tx_magn); } else { BWN_RF_SETMASK(mac, 0x52, 0xfff0, pg->pg_loctl.tx_bias); } if (phy->rev >= 6) { BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x36), 0x0fff, (pg->pg_loctl.tx_bias << 12)); } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8075); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x807f); if (phy->rev < 2) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x101); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x202); if (phy->gmode || phy->rev >= 2) { bwn_lo_g_adjust(mac); BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8078); } if (!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI)) { for (i = 0; i < 64; i++) { BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_CTRL, i); BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_DATA, (uint16_t)MIN(MAX(bwn_nrssi_read(mac, i) - 0xffff, -32), 31)); } bwn_nrssi_threshold(mac); } else if (phy->gmode || phy->rev >= 2) { if (pg->pg_nrssi[0] == -1000) { KASSERT(pg->pg_nrssi[1] == -1000, ("%s:%d: fail", __func__, __LINE__)); bwn_nrssi_slope_11g(mac); } else bwn_nrssi_threshold(mac); } if (phy->rf_rev == 8) BWN_PHY_WRITE(mac, BWN_PHY_EXTG(0x05), 0x3230); bwn_phy_hwpctl_init(mac); if ((siba_get_chipid(sc->sc_dev) == 0x4306 && siba_get_chippkg(sc->sc_dev) == 2) || 0) { BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0xbfff); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xc3), 0x7fff); } } static uint8_t bwn_has_hwpctl(struct bwn_mac *mac) { if (mac->mac_phy.hwpctl == 0 || mac->mac_phy.use_hwpctl == NULL) return (0); return (mac->mac_phy.use_hwpctl(mac)); } static void bwn_phy_init_b5(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t offset, value; uint8_t old_channel; if (phy->analog == 1) BWN_RF_SET(mac, 0x007a, 0x0050); if ((siba_get_pci_subvendor(sc->sc_dev) != SIBA_BOARDVENDOR_BCM) && (siba_get_pci_subdevice(sc->sc_dev) != SIBA_BOARD_BU4306)) { value = 0x2120; for (offset = 0x00a8; offset < 0x00c7; offset++) { BWN_PHY_WRITE(mac, offset, value); value += 0x202; } } BWN_PHY_SETMASK(mac, 0x0035, 0xf0ff, 0x0700); if (phy->rf_ver == 0x2050) BWN_PHY_WRITE(mac, 0x0038, 0x0667); if (phy->gmode || phy->rev >= 2) { if (phy->rf_ver == 0x2050) { BWN_RF_SET(mac, 0x007a, 0x0020); BWN_RF_SET(mac, 0x0051, 0x0004); } BWN_WRITE_2(mac, BWN_PHY_RADIO, 0x0000); BWN_PHY_SET(mac, 0x0802, 0x0100); BWN_PHY_SET(mac, 0x042b, 0x2000); BWN_PHY_WRITE(mac, 0x001c, 0x186a); BWN_PHY_SETMASK(mac, 0x0013, 0x00ff, 0x1900); BWN_PHY_SETMASK(mac, 0x0035, 0xffc0, 0x0064); BWN_PHY_SETMASK(mac, 0x005d, 0xff80, 0x000a); } if (mac->mac_flags & BWN_MAC_FLAG_BADFRAME_PREEMP) BWN_PHY_SET(mac, BWN_PHY_RADIO_BITFIELD, (1 << 11)); if (phy->analog == 1) { BWN_PHY_WRITE(mac, 0x0026, 0xce00); BWN_PHY_WRITE(mac, 0x0021, 0x3763); BWN_PHY_WRITE(mac, 0x0022, 0x1bc3); BWN_PHY_WRITE(mac, 0x0023, 0x06f9); BWN_PHY_WRITE(mac, 0x0024, 0x037e); } else BWN_PHY_WRITE(mac, 0x0026, 0xcc00); BWN_PHY_WRITE(mac, 0x0030, 0x00c6); BWN_WRITE_2(mac, 0x03ec, 0x3f22); if (phy->analog == 1) BWN_PHY_WRITE(mac, 0x0020, 0x3e1c); else BWN_PHY_WRITE(mac, 0x0020, 0x301c); if (phy->analog == 0) 
BWN_WRITE_2(mac, 0x03e4, 0x3000); old_channel = phy->chan; bwn_phy_g_switch_chan(mac, 7, 0); if (phy->rf_ver != 0x2050) { BWN_RF_WRITE(mac, 0x0075, 0x0080); BWN_RF_WRITE(mac, 0x0079, 0x0081); } BWN_RF_WRITE(mac, 0x0050, 0x0020); BWN_RF_WRITE(mac, 0x0050, 0x0023); if (phy->rf_ver == 0x2050) { BWN_RF_WRITE(mac, 0x0050, 0x0020); BWN_RF_WRITE(mac, 0x005a, 0x0070); } BWN_RF_WRITE(mac, 0x005b, 0x007b); BWN_RF_WRITE(mac, 0x005c, 0x00b0); BWN_RF_SET(mac, 0x007a, 0x0007); bwn_phy_g_switch_chan(mac, old_channel, 0); BWN_PHY_WRITE(mac, 0x0014, 0x0080); BWN_PHY_WRITE(mac, 0x0032, 0x00ca); BWN_PHY_WRITE(mac, 0x002a, 0x88a3); bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt, pg->pg_txctl); if (phy->rf_ver == 0x2050) BWN_RF_WRITE(mac, 0x005d, 0x000d); BWN_WRITE_2(mac, 0x03e4, (BWN_READ_2(mac, 0x03e4) & 0xffc0) | 0x0004); } static void bwn_loopback_calcgain(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t backup_phy[16] = { 0 }; uint16_t backup_radio[3]; uint16_t backup_bband; uint16_t i, j, loop_i_max; uint16_t trsw_rx; uint16_t loop1_outer_done, loop1_inner_done; backup_phy[0] = BWN_PHY_READ(mac, BWN_PHY_CRS0); backup_phy[1] = BWN_PHY_READ(mac, BWN_PHY_CCKBBANDCFG); backup_phy[2] = BWN_PHY_READ(mac, BWN_PHY_RFOVER); backup_phy[3] = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); if (phy->rev != 1) { backup_phy[4] = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER); backup_phy[5] = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL); } backup_phy[6] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x5a)); backup_phy[7] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x59)); backup_phy[8] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x58)); backup_phy[9] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x0a)); backup_phy[10] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x03)); backup_phy[11] = BWN_PHY_READ(mac, BWN_PHY_LO_MASK); backup_phy[12] = BWN_PHY_READ(mac, BWN_PHY_LO_CTL); backup_phy[13] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x2b)); backup_phy[14] = BWN_PHY_READ(mac, BWN_PHY_PGACTL); backup_phy[15] = BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); backup_bband = pg->pg_bbatt.att; backup_radio[0] = BWN_RF_READ(mac, 0x52); backup_radio[1] = BWN_RF_READ(mac, 0x43); backup_radio[2] = BWN_RF_READ(mac, 0x7a); BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x3fff); BWN_PHY_SET(mac, BWN_PHY_CCKBBANDCFG, 0x8000); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0002); BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xfffd); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0001); BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xfffe); if (phy->rev != 1) { BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0001); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0002); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffd); } BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x000c); BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x000c); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0030); BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xffcf, 0x10); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0780); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d); BWN_PHY_SET(mac, BWN_PHY_CCK(0x0a), 0x2000); if (phy->rev != 1) { BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0004); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffb); } BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x03), 0xff9f, 0x40); if (phy->rf_rev == 8) BWN_RF_WRITE(mac, 0x43, 0x000f); else { BWN_RF_WRITE(mac, 0x52, 0); BWN_RF_SETMASK(mac, 0x43, 0xfff0, 0x9); } bwn_phy_g_set_bbatt(mac, 11); if (phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc020); else BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8020); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, 0); 
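/* With the LO primed, the loops below sweep the RF attenuator (register 0x43) and the PGA gain field until the measured LO leakage reaches 0xdfc; the loop indices at exit feed the maximum-loopback-gain computation. */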
BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x2b), 0xffc0, 0x01); BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x2b), 0xc0ff, 0x800); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0100); BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xcfff); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) { if (phy->rev >= 7) { BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0800); BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x8000); } } BWN_RF_MASK(mac, 0x7a, 0x00f7); j = 0; loop_i_max = (phy->rf_rev == 8) ? 15 : 9; for (i = 0; i < loop_i_max; i++) { for (j = 0; j < 16; j++) { BWN_RF_WRITE(mac, 0x43, i); BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xf0ff, (j << 8)); BWN_PHY_SETMASK(mac, BWN_PHY_PGACTL, 0x0fff, 0xa000); BWN_PHY_SET(mac, BWN_PHY_PGACTL, 0xf000); DELAY(20); if (BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE) >= 0xdfc) goto done0; } } done0: loop1_outer_done = i; loop1_inner_done = j; if (j >= 8) { BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x30); trsw_rx = 0x1b; for (j = j - 8; j < 16; j++) { BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xf0ff, j << 8); BWN_PHY_SETMASK(mac, BWN_PHY_PGACTL, 0x0fff, 0xa000); BWN_PHY_SET(mac, BWN_PHY_PGACTL, 0xf000); DELAY(20); trsw_rx -= 3; if (BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE) >= 0xdfc) goto done1; } } else trsw_rx = 0x18; done1: if (phy->rev != 1) { BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, backup_phy[4]); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, backup_phy[5]); } BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), backup_phy[6]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), backup_phy[7]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), backup_phy[8]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x0a), backup_phy[9]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x03), backup_phy[10]); BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, backup_phy[11]); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, backup_phy[12]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), backup_phy[13]); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, backup_phy[14]); bwn_phy_g_set_bbatt(mac, backup_bband); BWN_RF_WRITE(mac, 0x52, backup_radio[0]); BWN_RF_WRITE(mac, 0x43, backup_radio[1]); BWN_RF_WRITE(mac, 0x7a, backup_radio[2]); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, backup_phy[2] | 0x0003); DELAY(10); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, backup_phy[2]); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, backup_phy[3]); BWN_PHY_WRITE(mac, BWN_PHY_CRS0, backup_phy[0]); BWN_PHY_WRITE(mac, BWN_PHY_CCKBBANDCFG, backup_phy[1]); pg->pg_max_lb_gain = ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11; pg->pg_trsw_rx_gain = trsw_rx * 2; } static uint16_t bwn_rf_init_bcm2050(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; uint32_t tmp1 = 0, tmp2 = 0; uint16_t rcc, i, j, pgactl, cck0, cck1, cck2, cck3, rfover, rfoverval, analogover, analogoverval, crs0, classctl, lomask, loctl, syncctl, radio0, radio1, radio2, reg0, reg1, reg2, radio78, reg, index; static const uint8_t rcc_table[] = { 0x02, 0x03, 0x01, 0x0f, 0x06, 0x07, 0x05, 0x0f, 0x0a, 0x0b, 0x09, 0x0f, 0x0e, 0x0f, 0x0d, 0x0f, }; loctl = lomask = reg0 = classctl = crs0 = analogoverval = analogover = rfoverval = rfover = cck3 = 0; radio0 = BWN_RF_READ(mac, 0x43); radio1 = BWN_RF_READ(mac, 0x51); radio2 = BWN_RF_READ(mac, 0x52); pgactl = BWN_PHY_READ(mac, BWN_PHY_PGACTL); cck0 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x5a)); cck1 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x59)); cck2 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x58)); if (phy->type == BWN_PHYTYPE_B) { cck3 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x30)); reg0 = BWN_READ_2(mac, 0x3ec); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), 0xff); BWN_WRITE_2(mac, 0x3ec, 0x3f3f); } else if (phy->gmode || phy->rev >= 2) { rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER); rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); 
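/* The G-mode path also stashes the analog-override, CRS, and classify-control state here; everything saved is restored once the 2050 calibration loops below complete. */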
analogover = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER); analogoverval = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL); crs0 = BWN_PHY_READ(mac, BWN_PHY_CRS0); classctl = BWN_PHY_READ(mac, BWN_PHY_CLASSCTL); BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0003); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x7fff); BWN_PHY_MASK(mac, BWN_PHY_CLASSCTL, 0xfffc); if (BWN_HAS_LOOPBACK(phy)) { lomask = BWN_PHY_READ(mac, BWN_PHY_LO_MASK); loctl = BWN_PHY_READ(mac, BWN_PHY_LO_CTL); if (phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc020); else BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8020); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, 0); } BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(0, 1, 1))); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVER, 0)); } BWN_WRITE_2(mac, 0x3e2, BWN_READ_2(mac, 0x3e2) | 0x8000); syncctl = BWN_PHY_READ(mac, BWN_PHY_SYNCCTL); BWN_PHY_MASK(mac, BWN_PHY_SYNCCTL, 0xff7f); reg1 = BWN_READ_2(mac, 0x3e6); reg2 = BWN_READ_2(mac, 0x3f4); if (phy->analog == 0) BWN_WRITE_2(mac, 0x03e6, 0x0122); else { if (phy->analog >= 2) BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x03), 0xffbf, 0x40); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, (BWN_READ_2(mac, BWN_CHANNEL_EXT) | 0x2000)); } reg = BWN_RF_READ(mac, 0x60); index = (reg & 0x001e) >> 1; rcc = (((rcc_table[index] << 1) | (reg & 0x0001)) | 0x0020); if (phy->type == BWN_PHYTYPE_B) BWN_RF_WRITE(mac, 0x78, 0x26); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(0, 1, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xbfaf); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x1403); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(0, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xbfa0); BWN_RF_SET(mac, 0x51, 0x0004); if (phy->rf_rev == 8) BWN_RF_WRITE(mac, 0x43, 0x1f); else { BWN_RF_WRITE(mac, 0x52, 0); BWN_RF_SETMASK(mac, 0x43, 0xfff0, 0x0009); } BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); for (i = 0; i < 16; i++) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0480); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xefb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 0))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xfff0); DELAY(20); tmp1 += BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); } DELAY(10); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); tmp1++; tmp1 >>= 9; for (i = 0; i < 16; i++) { radio78 = (BWN_BITREV4(i) << 1) | 0x0020; BWN_RF_WRITE(mac, 0x78, radio78); DELAY(10); for (j = 0; j < 16; j++) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0d80); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, 
bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xefb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 0))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xfff0); DELAY(10); tmp2 += BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); } tmp2++; tmp2 >>= 8; if (tmp1 < tmp2) break; } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pgactl); BWN_RF_WRITE(mac, 0x51, radio1); BWN_RF_WRITE(mac, 0x52, radio2); BWN_RF_WRITE(mac, 0x43, radio0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), cck0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), cck1); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), cck2); BWN_WRITE_2(mac, 0x3e6, reg1); if (phy->analog != 0) BWN_WRITE_2(mac, 0x3f4, reg2); BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, syncctl); bwn_spu_workaround(mac, phy->chan); if (phy->type == BWN_PHYTYPE_B) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), cck3); BWN_WRITE_2(mac, 0x3ec, reg0); } else if (phy->gmode) { BWN_WRITE_2(mac, BWN_PHY_RADIO, BWN_READ_2(mac, BWN_PHY_RADIO) & 0x7fff); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, rfover); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfoverval); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, analogover); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, analogoverval); BWN_PHY_WRITE(mac, BWN_PHY_CRS0, crs0); BWN_PHY_WRITE(mac, BWN_PHY_CLASSCTL, classctl); if (BWN_HAS_LOOPBACK(phy)) { BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, lomask); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, loctl); } } return ((i > 15) ? 
radio78 : rcc); } static void bwn_phy_init_b6(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t offset, val; uint8_t old_channel; KASSERT(!(phy->rf_rev == 6 || phy->rf_rev == 7), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, 0x003e, 0x817a); BWN_RF_WRITE(mac, 0x007a, BWN_RF_READ(mac, 0x007a) | 0x0058); if (phy->rf_rev == 4 || phy->rf_rev == 5) { BWN_RF_WRITE(mac, 0x51, 0x37); BWN_RF_WRITE(mac, 0x52, 0x70); BWN_RF_WRITE(mac, 0x53, 0xb3); BWN_RF_WRITE(mac, 0x54, 0x9b); BWN_RF_WRITE(mac, 0x5a, 0x88); BWN_RF_WRITE(mac, 0x5b, 0x88); BWN_RF_WRITE(mac, 0x5d, 0x88); BWN_RF_WRITE(mac, 0x5e, 0x88); BWN_RF_WRITE(mac, 0x7d, 0x88); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_TSSI_RESET_PSM_WORKAROUN); } if (phy->rf_rev == 8) { BWN_RF_WRITE(mac, 0x51, 0); BWN_RF_WRITE(mac, 0x52, 0x40); BWN_RF_WRITE(mac, 0x53, 0xb7); BWN_RF_WRITE(mac, 0x54, 0x98); BWN_RF_WRITE(mac, 0x5a, 0x88); BWN_RF_WRITE(mac, 0x5b, 0x6b); BWN_RF_WRITE(mac, 0x5c, 0x0f); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_ALTIQ) { BWN_RF_WRITE(mac, 0x5d, 0xfa); BWN_RF_WRITE(mac, 0x5e, 0xd8); } else { BWN_RF_WRITE(mac, 0x5d, 0xf5); BWN_RF_WRITE(mac, 0x5e, 0xb8); } BWN_RF_WRITE(mac, 0x0073, 0x0003); BWN_RF_WRITE(mac, 0x007d, 0x00a8); BWN_RF_WRITE(mac, 0x007c, 0x0001); BWN_RF_WRITE(mac, 0x007e, 0x0008); } for (val = 0x1e1f, offset = 0x0088; offset < 0x0098; offset++) { BWN_PHY_WRITE(mac, offset, val); val -= 0x0202; } for (val = 0x3e3f, offset = 0x0098; offset < 0x00a8; offset++) { BWN_PHY_WRITE(mac, offset, val); val -= 0x0202; } for (val = 0x2120, offset = 0x00a8; offset < 0x00c8; offset++) { BWN_PHY_WRITE(mac, offset, (val & 0x3f3f)); val += 0x0202; } if (phy->type == BWN_PHYTYPE_G) { BWN_RF_SET(mac, 0x007a, 0x0020); BWN_RF_SET(mac, 0x0051, 0x0004); BWN_PHY_SET(mac, 0x0802, 0x0100); BWN_PHY_SET(mac, 0x042b, 0x2000); BWN_PHY_WRITE(mac, 0x5b, 0); BWN_PHY_WRITE(mac, 0x5c, 0); } old_channel = phy->chan; bwn_phy_g_switch_chan(mac, (old_channel >= 8) ? 
1 : 13, 0); BWN_RF_WRITE(mac, 0x0050, 0x0020); BWN_RF_WRITE(mac, 0x0050, 0x0023); DELAY(40); if (phy->rf_rev < 6 || phy->rf_rev == 8) { BWN_RF_WRITE(mac, 0x7c, BWN_RF_READ(mac, 0x7c) | 0x0002); BWN_RF_WRITE(mac, 0x50, 0x20); } if (phy->rf_rev <= 2) { BWN_RF_WRITE(mac, 0x7c, 0x20); BWN_RF_WRITE(mac, 0x5a, 0x70); BWN_RF_WRITE(mac, 0x5b, 0x7b); BWN_RF_WRITE(mac, 0x5c, 0xb0); } BWN_RF_SETMASK(mac, 0x007a, 0x00f8, 0x0007); bwn_phy_g_switch_chan(mac, old_channel, 0); BWN_PHY_WRITE(mac, 0x0014, 0x0200); if (phy->rf_rev >= 6) BWN_PHY_WRITE(mac, 0x2a, 0x88c2); else BWN_PHY_WRITE(mac, 0x2a, 0x8ac0); BWN_PHY_WRITE(mac, 0x0038, 0x0668); bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt, pg->pg_txctl); if (phy->rf_rev <= 5) BWN_PHY_SETMASK(mac, 0x5d, 0xff80, 0x0003); if (phy->rf_rev <= 2) BWN_RF_WRITE(mac, 0x005d, 0x000d); if (phy->analog == 4) { BWN_WRITE_2(mac, 0x3e4, 9); BWN_PHY_MASK(mac, 0x61, 0x0fff); } else BWN_PHY_SETMASK(mac, 0x0002, 0xffc0, 0x0004); if (phy->type == BWN_PHYTYPE_B) KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); else if (phy->type == BWN_PHYTYPE_G) BWN_WRITE_2(mac, 0x03e6, 0x0); } static void bwn_phy_init_a(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; KASSERT(phy->type == BWN_PHYTYPE_A || phy->type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); if (phy->rev >= 6) { if (phy->type == BWN_PHYTYPE_A) BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x1b), ~0x1000); if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN) BWN_PHY_SET(mac, BWN_PHY_ENCORE, 0x0010); else BWN_PHY_MASK(mac, BWN_PHY_ENCORE, ~0x1010); } bwn_wa_init(mac); if (phy->type == BWN_PHYTYPE_G && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL)) BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x6e), 0xe000, 0x3cf); } static void bwn_wa_write_noisescale(struct bwn_mac *mac, const uint16_t *nst) { int i; for (i = 0; i < BWN_TAB_NOISESCALE_SIZE; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_NOISESCALE, i, nst[i]); } static void bwn_wa_agc(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; if (phy->rev == 1) { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 0, 254); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 1, 13); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 2, 19); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 3, 25); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 0, 0x2710); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 1, 0x9b83); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 2, 0x9b83); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 3, 0x0f8d); BWN_PHY_WRITE(mac, BWN_PHY_LMS, 4); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 0, 254); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 1, 13); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 2, 19); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 3, 25); } BWN_PHY_SETMASK(mac, BWN_PHY_CCKSHIFTBITS_WA, (uint16_t)~0xff00, 0x5700); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1a), ~0x007f, 0x000f); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1a), ~0x3f80, 0x2b80); BWN_PHY_SETMASK(mac, BWN_PHY_ANTWRSETT, 0xf0ff, 0x0300); BWN_RF_SET(mac, 0x7a, 0x0008); BWN_PHY_SETMASK(mac, BWN_PHY_N1P1GAIN, ~0x000f, 0x0008); BWN_PHY_SETMASK(mac, BWN_PHY_P1P2GAIN, ~0x0f00, 0x0600); BWN_PHY_SETMASK(mac, BWN_PHY_N1N2GAIN, ~0x0f00, 0x0700); BWN_PHY_SETMASK(mac, BWN_PHY_N1P1GAIN, ~0x0f00, 0x0100); if (phy->rev == 1) BWN_PHY_SETMASK(mac, BWN_PHY_N1N2GAIN, ~0x000f, 0x0007); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x88), ~0x00ff, 0x001c); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x88), ~0x3f00, 0x0200); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x96), ~0x00ff, 0x001c); BWN_PHY_SETMASK(mac, 
BWN_PHY_OFDM(0x89), ~0x00ff, 0x0020); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x89), ~0x3f00, 0x0200); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x82), ~0x00ff, 0x002e); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x96), (uint16_t)~0xff00, 0x1a00); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x81), ~0x00ff, 0x0028); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x81), (uint16_t)~0xff00, 0x2c00); if (phy->rev == 1) { BWN_PHY_WRITE(mac, BWN_PHY_PEAK_COUNT, 0x092b); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1b), ~0x001e, 0x0002); } else { BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x1b), ~0x001e); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x1f), 0x287a); BWN_PHY_SETMASK(mac, BWN_PHY_LPFGAINCTL, ~0x000f, 0x0004); if (phy->rev >= 6) { BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x22), 0x287a); BWN_PHY_SETMASK(mac, BWN_PHY_LPFGAINCTL, (uint16_t)~0xf000, 0x3000); } } BWN_PHY_SETMASK(mac, BWN_PHY_DIVSRCHIDX, 0x8080, 0x7874); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8e), 0x1c00); if (phy->rev == 1) { BWN_PHY_SETMASK(mac, BWN_PHY_DIVP1P2GAIN, ~0x0f00, 0x0600); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8b), 0x005e); BWN_PHY_SETMASK(mac, BWN_PHY_ANTWRSETT, ~0x00ff, 0x001e); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8d), 0x0002); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 0, 0); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 1, 7); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 2, 16); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 3, 28); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 0, 0); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 1, 7); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 2, 16); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 3, 28); } if (phy->rev >= 6) { BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x26), ~0x0003); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x26), ~0x1000); } BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM); } static void bwn_wa_grev1(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; int i; static const uint16_t bwn_tab_finefreqg[] = BWN_TAB_FINEFREQ_G; static const uint32_t bwn_tab_retard[] = BWN_TAB_RETARD; static const uint32_t bwn_tab_rotor[] = BWN_TAB_ROTOR; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); /* init CRSTHRES and ANTDWELL */ if (phy->rev == 1) { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1_R1, 0x4f19); } else if (phy->rev == 2) { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x1861); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0271); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } else { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x0098); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0070); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc9), 0x0080); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } BWN_PHY_SETMASK(mac, BWN_PHY_CRS0, ~0x03c0, 0xd000); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x2c), 0x005a); BWN_PHY_WRITE(mac, BWN_PHY_CCKSHIFTBITS, 0x0026); /* XXX support PHY-A??? */ for (i = 0; i < N(bwn_tab_finefreqg); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DACRFPABB, i, bwn_tab_finefreqg[i]); /* XXX support PHY-A??? */ if (phy->rev == 1) for (i = 0; i < N(bwn_tab_noise_g1); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g1[i]); else for (i = 0; i < N(bwn_tab_noise_g2); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g2[i]); for (i = 0; i < N(bwn_tab_rotor); i++) bwn_ofdmtab_write_4(mac, BWN_OFDMTAB_ROTOR, i, bwn_tab_rotor[i]); /* XXX support PHY-A??? 
*/ if (phy->rev >= 6) { if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN) bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g3); else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g2); } else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g1); for (i = 0; i < N(bwn_tab_retard); i++) bwn_ofdmtab_write_4(mac, BWN_OFDMTAB_ADVRETARD, i, bwn_tab_retard[i]); if (phy->rev == 1) { for (i = 0; i < 16; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI_R1, i, 0x0020); } else { for (i = 0; i < 32; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI, i, 0x0820); } bwn_wa_agc(mac); } static void bwn_wa_grev26789(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; int i; static const uint16_t bwn_tab_sigmasqr2[] = BWN_TAB_SIGMASQR2; uint16_t ofdmrev; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); bwn_gtab_write(mac, BWN_GTAB_ORIGTR, 0, 0xc480); /* init CRSTHRES and ANTDWELL */ if (phy->rev == 1) BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1_R1, 0x4f19); else if (phy->rev == 2) { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x1861); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0271); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } else { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x0098); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0070); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc9), 0x0080); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } for (i = 0; i < 64; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_RSSI, i, i); /* XXX support PHY-A??? */ if (phy->rev == 1) for (i = 0; i < N(bwn_tab_noise_g1); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g1[i]); else for (i = 0; i < N(bwn_tab_noise_g2); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g2[i]); /* XXX support PHY-A??? */ if (phy->rev >= 6) { if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN) bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g3); else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g2); } else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g1); for (i = 0; i < N(bwn_tab_sigmasqr2); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_MINSIGSQ, i, bwn_tab_sigmasqr2[i]); if (phy->rev == 1) { for (i = 0; i < 16; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI_R1, i, 0x0020); } else { for (i = 0; i < 32; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI, i, 0x0820); } bwn_wa_agc(mac); ofdmrev = BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM) & BWN_PHYVER_VERSION; if (ofdmrev > 2) { if (phy->type == BWN_PHYTYPE_A) BWN_PHY_WRITE(mac, BWN_PHY_PWRDOWN, 0x1808); else BWN_PHY_WRITE(mac, BWN_PHY_PWRDOWN, 0x1000); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 3, 0x1044); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 4, 0x7201); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 6, 0x0040); } bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_0F, 2, 15); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_0F, 3, 20); } static void bwn_wa_init(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); switch (phy->rev) { case 1: bwn_wa_grev1(mac); break; case 2: case 6: case 7: case 8: case 9: bwn_wa_grev26789(mac); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } if (siba_get_pci_subvendor(sc->sc_dev) != SIBA_BOARDVENDOR_BCM || siba_get_pci_subdevice(sc->sc_dev) != SIBA_BOARD_BU4306 || siba_get_pci_revid(sc->sc_dev) != 0x17) { if (phy->rev < 2) { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX_R1, 1, 0x0002); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX_R1, 2, 0x0001); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 1, 0x0002); bwn_ofdmtab_write_2(mac, 
BWN_OFDMTAB_GAINX, 2, 0x0001); if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) && (phy->rev >= 7)) { BWN_PHY_MASK(mac, BWN_PHY_EXTG(0x11), 0xf7ff); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0020, 0x0001); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0021, 0x0001); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0022, 0x0001); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0023, 0x0000); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0000, 0x0000); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0003, 0x0002); } } } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) { BWN_PHY_WRITE(mac, BWN_PHY_GTABCTL, 0x3120); BWN_PHY_WRITE(mac, BWN_PHY_GTABDATA, 0xc480); } bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_11, 0, 0); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_11, 1, 0); } static void bwn_ofdmtab_write_2(struct bwn_mac *mac, uint16_t table, uint16_t offset, uint16_t value) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t addr; addr = table + offset; if ((pg->pg_ofdmtab_dir != BWN_OFDMTAB_DIR_WRITE) || (addr - 1 != pg->pg_ofdmtab_addr)) { BWN_PHY_WRITE(mac, BWN_PHY_OTABLECTL, addr); pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_WRITE; } pg->pg_ofdmtab_addr = addr; BWN_PHY_WRITE(mac, BWN_PHY_OTABLEI, value); } static void bwn_ofdmtab_write_4(struct bwn_mac *mac, uint16_t table, uint16_t offset, uint32_t value) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t addr; addr = table + offset; if ((pg->pg_ofdmtab_dir != BWN_OFDMTAB_DIR_WRITE) || (addr - 1 != pg->pg_ofdmtab_addr)) { BWN_PHY_WRITE(mac, BWN_PHY_OTABLECTL, addr); pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_WRITE; } pg->pg_ofdmtab_addr = addr; BWN_PHY_WRITE(mac, BWN_PHY_OTABLEI, value); BWN_PHY_WRITE(mac, BWN_PHY_OTABLEQ, (value >> 16)); } static void bwn_gtab_write(struct bwn_mac *mac, uint16_t table, uint16_t offset, uint16_t value) { BWN_PHY_WRITE(mac, BWN_PHY_GTABCTL, table + offset); BWN_PHY_WRITE(mac, BWN_PHY_GTABDATA, value); } static void bwn_dummy_transmission(struct bwn_mac *mac, int ofdm, int paon) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; unsigned int i, max_loop; uint16_t value; uint32_t buffer[5] = { 0x00000000, 0x00d40000, 0x00000000, 0x01000000, 0x00000000 }; if (ofdm) { max_loop = 0x1e; buffer[0] = 0x000201cc; } else { max_loop = 0xfa; buffer[0] = 0x000b846e; } BWN_ASSERT_LOCKED(mac->mac_sc); for (i = 0; i < 5; i++) bwn_ram_write(mac, i * 4, buffer[i]); BWN_WRITE_2(mac, 0x0568, 0x0000); BWN_WRITE_2(mac, 0x07c0, (siba_get_revid(sc->sc_dev) < 11) ? 0x0000 : 0x0100); value = ((phy->type == BWN_PHYTYPE_A) ? 
0x41 : 0x40); BWN_WRITE_2(mac, 0x050c, value); if (phy->type == BWN_PHYTYPE_LP) BWN_WRITE_2(mac, 0x0514, 0x1a02); BWN_WRITE_2(mac, 0x0508, 0x0000); BWN_WRITE_2(mac, 0x050a, 0x0000); BWN_WRITE_2(mac, 0x054c, 0x0000); BWN_WRITE_2(mac, 0x056a, 0x0014); BWN_WRITE_2(mac, 0x0568, 0x0826); BWN_WRITE_2(mac, 0x0500, 0x0000); if (phy->type == BWN_PHYTYPE_LP) BWN_WRITE_2(mac, 0x0502, 0x0050); else BWN_WRITE_2(mac, 0x0502, 0x0030); if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0017); for (i = 0x00; i < max_loop; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0080) break; DELAY(10); } for (i = 0x00; i < 0x0a; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0400) break; DELAY(10); } for (i = 0x00; i < 0x19; i++) { value = BWN_READ_2(mac, 0x0690); if (!(value & 0x0100)) break; DELAY(10); } if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0037); } static void bwn_ram_write(struct bwn_mac *mac, uint16_t offset, uint32_t val) { uint32_t macctl; KASSERT(offset % 4 == 0, ("%s:%d: fail", __func__, __LINE__)); macctl = BWN_READ_4(mac, BWN_MACCTL); if (macctl & BWN_MACCTL_BIGENDIAN) printf("TODO: need swap\n"); BWN_WRITE_4(mac, BWN_RAM_CONTROL, offset); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_WRITE_4(mac, BWN_RAM_DATA, val); } static void bwn_lo_write(struct bwn_mac *mac, struct bwn_loctl *ctl) { uint16_t value; KASSERT(mac->mac_phy.type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); value = (uint8_t) (ctl->q); value |= ((uint8_t) (ctl->i)) << 8; BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, value); } static uint16_t bwn_lo_calcfeed(struct bwn_mac *mac, uint16_t lna, uint16_t pga, uint16_t trsw_rx) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint16_t rfover; uint16_t feedthrough; if (phy->gmode) { lna <<= BWN_PHY_RFOVERVAL_LNA_SHIFT; pga <<= BWN_PHY_RFOVERVAL_PGA_SHIFT; KASSERT((lna & ~BWN_PHY_RFOVERVAL_LNA) == 0, ("%s:%d: fail", __func__, __LINE__)); KASSERT((pga & ~BWN_PHY_RFOVERVAL_PGA) == 0, ("%s:%d: fail", __func__, __LINE__)); trsw_rx &= (BWN_PHY_RFOVERVAL_TRSWRX | BWN_PHY_RFOVERVAL_BW); rfover = BWN_PHY_RFOVERVAL_UNK | pga | lna | trsw_rx; if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) && phy->rev > 6) rfover |= BWN_PHY_RFOVERVAL_EXTLNA; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xe300); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover); DELAY(10); rfover |= BWN_PHY_RFOVERVAL_BW_LBW; BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover); DELAY(10); rfover |= BWN_PHY_RFOVERVAL_BW_LPF; BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover); DELAY(10); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xf300); } else { pga |= BWN_PHY_PGACTL_UNKNOWN; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga); DELAY(10); pga |= BWN_PHY_PGACTL_LOWBANDW; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga); DELAY(10); pga |= BWN_PHY_PGACTL_LPF; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga); } DELAY(21); feedthrough = BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); return (feedthrough); } static uint16_t bwn_lo_txctl_regtable(struct bwn_mac *mac, uint16_t *value, uint16_t *pad_mix_gain) { struct bwn_phy *phy = &mac->mac_phy; uint16_t reg, v, padmix; if (phy->type == BWN_PHYTYPE_B) { v = 0x30; if (phy->rf_rev <= 5) { reg = 0x43; padmix = 0; } else { reg = 0x52; padmix = 5; } } else { if (phy->rev >= 2 && phy->rf_rev == 8) { reg = 0x43; v = 0x10; padmix = 2; } else { reg = 0x52; v = 0x30; padmix = 5; } } if (value) *value = v; if (pad_mix_gain) *pad_mix_gain = padmix; return (reg); } static void bwn_lo_measure_txctl_values(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct 
bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; uint16_t reg, mask; uint16_t trsw_rx, pga; uint16_t rf_pctl_reg; static const uint8_t tx_bias_values[] = { 0x09, 0x08, 0x0a, 0x01, 0x00, 0x02, 0x05, 0x04, 0x06, }; static const uint8_t tx_magn_values[] = { 0x70, 0x40, }; if (!BWN_HAS_LOOPBACK(phy)) { rf_pctl_reg = 6; trsw_rx = 2; pga = 0; } else { int lb_gain; trsw_rx = 0; lb_gain = pg->pg_max_lb_gain / 2; if (lb_gain > 10) { rf_pctl_reg = 0; pga = abs(10 - lb_gain) / 6; pga = MIN(MAX(pga, 0), 15); } else { int cmp_val; int tmp; pga = 0; cmp_val = 0x24; if ((phy->rev >= 2) && (phy->rf_ver == 0x2050) && (phy->rf_rev == 8)) cmp_val = 0x3c; tmp = lb_gain; if ((10 - lb_gain) < cmp_val) tmp = (10 - lb_gain); if (tmp < 0) tmp += 6; else tmp += 3; cmp_val /= 4; tmp /= 4; if (tmp >= cmp_val) rf_pctl_reg = cmp_val; else rf_pctl_reg = tmp; } } BWN_RF_SETMASK(mac, 0x43, 0xfff0, rf_pctl_reg); bwn_phy_g_set_bbatt(mac, 2); reg = bwn_lo_txctl_regtable(mac, &mask, NULL); mask = ~mask; BWN_RF_MASK(mac, reg, mask); if (BWN_HAS_TXMAG(phy)) { int i, j; int feedthrough; int min_feedth = 0xffff; uint8_t tx_magn, tx_bias; for (i = 0; i < N(tx_magn_values); i++) { tx_magn = tx_magn_values[i]; BWN_RF_SETMASK(mac, 0x52, 0xff0f, tx_magn); for (j = 0; j < N(tx_bias_values); j++) { tx_bias = tx_bias_values[j]; BWN_RF_SETMASK(mac, 0x52, 0xfff0, tx_bias); feedthrough = bwn_lo_calcfeed(mac, 0, pga, trsw_rx); if (feedthrough < min_feedth) { lo->tx_bias = tx_bias; lo->tx_magn = tx_magn; min_feedth = feedthrough; } if (lo->tx_bias == 0) break; } BWN_RF_WRITE(mac, 0x52, (BWN_RF_READ(mac, 0x52) & 0xff00) | lo->tx_bias | lo->tx_magn); } } else { lo->tx_magn = 0; lo->tx_bias = 0; BWN_RF_MASK(mac, 0x52, 0xfff0); } BWN_GETTIME(lo->txctl_measured_time); } static void bwn_lo_get_powervector(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; int i; uint64_t tmp; uint64_t power_vector = 0; for (i = 0; i < 8; i += 2) { tmp = bwn_shm_read_2(mac, BWN_SHARED, 0x310 + i); power_vector |= (tmp << (i * 8)); bwn_shm_write_2(mac, BWN_SHARED, 0x310 + i, 0); } if (power_vector) lo->power_vector = power_vector; BWN_GETTIME(lo->pwr_vec_read_time); } static void bwn_lo_measure_gain_values(struct bwn_mac *mac, int16_t max_rx_gain, int use_trsw_rx) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; uint16_t tmp; if (max_rx_gain < 0) max_rx_gain = 0; if (BWN_HAS_LOOPBACK(phy)) { int trsw_rx = 0; int trsw_rx_gain; if (use_trsw_rx) { trsw_rx_gain = pg->pg_trsw_rx_gain / 2; if (max_rx_gain >= trsw_rx_gain) { trsw_rx_gain = max_rx_gain - trsw_rx_gain; trsw_rx = 0x20; } } else trsw_rx_gain = max_rx_gain; if (trsw_rx_gain < 9) { pg->pg_lna_lod_gain = 0; } else { pg->pg_lna_lod_gain = 1; trsw_rx_gain -= 8; } trsw_rx_gain = MIN(MAX(trsw_rx_gain, 0), 0x2d); pg->pg_pga_gain = trsw_rx_gain / 3; if (pg->pg_pga_gain >= 5) { pg->pg_pga_gain -= 5; pg->pg_lna_gain = 2; } else pg->pg_lna_gain = 0; } else { pg->pg_lna_gain = 0; pg->pg_trsw_rx_gain = 0x20; if (max_rx_gain >= 0x14) { pg->pg_lna_lod_gain = 1; pg->pg_pga_gain = 2; } else if (max_rx_gain >= 0x12) { pg->pg_lna_lod_gain = 1; pg->pg_pga_gain = 1; } else if (max_rx_gain >= 0xf) { pg->pg_lna_lod_gain = 1; pg->pg_pga_gain = 0; } else { pg->pg_lna_lod_gain = 0; pg->pg_pga_gain = 0; } } tmp = BWN_RF_READ(mac, 0x7a); if (pg->pg_lna_lod_gain == 0) tmp &= ~0x0008; else tmp |= 0x0008; BWN_RF_WRITE(mac, 0x7a, tmp); } static void bwn_lo_save(struct bwn_mac *mac, struct bwn_lo_g_value
*sav) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; struct timespec ts; uint16_t tmp; if (bwn_has_hwpctl(mac)) { sav->phy_lomask = BWN_PHY_READ(mac, BWN_PHY_LO_MASK); sav->phy_extg = BWN_PHY_READ(mac, BWN_PHY_EXTG(0x01)); sav->phy_dacctl_hwpctl = BWN_PHY_READ(mac, BWN_PHY_DACCTL); sav->phy_cck4 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x14)); sav->phy_hpwr_tssictl = BWN_PHY_READ(mac, BWN_PHY_HPWR_TSSICTL); BWN_PHY_SET(mac, BWN_PHY_HPWR_TSSICTL, 0x100); BWN_PHY_SET(mac, BWN_PHY_EXTG(0x01), 0x40); BWN_PHY_SET(mac, BWN_PHY_DACCTL, 0x40); BWN_PHY_SET(mac, BWN_PHY_CCK(0x14), 0x200); } if (phy->type == BWN_PHYTYPE_B && phy->rf_ver == 0x2050 && phy->rf_rev < 6) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x16), 0x410); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x17), 0x820); } if (phy->rev >= 2) { sav->phy_analogover = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER); sav->phy_analogoverval = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL); sav->phy_rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER); sav->phy_rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); sav->phy_classctl = BWN_PHY_READ(mac, BWN_PHY_CLASSCTL); sav->phy_cck3 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x3e)); sav->phy_crs0 = BWN_PHY_READ(mac, BWN_PHY_CRS0); BWN_PHY_MASK(mac, BWN_PHY_CLASSCTL, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x7fff); BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0003); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffc); if (phy->type == BWN_PHYTYPE_G) { if ((phy->rev >= 7) && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x933); } else { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x133); } } else { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0); } BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x3e), 0); } sav->reg0 = BWN_READ_2(mac, 0x3f4); sav->reg1 = BWN_READ_2(mac, 0x3e2); sav->rf0 = BWN_RF_READ(mac, 0x43); sav->rf1 = BWN_RF_READ(mac, 0x7a); sav->phy_pgactl = BWN_PHY_READ(mac, BWN_PHY_PGACTL); sav->phy_cck2 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x2a)); sav->phy_syncctl = BWN_PHY_READ(mac, BWN_PHY_SYNCCTL); sav->phy_dacctl = BWN_PHY_READ(mac, BWN_PHY_DACCTL); if (!BWN_HAS_TXMAG(phy)) { sav->rf2 = BWN_RF_READ(mac, 0x52); sav->rf2 &= 0x00f0; } if (phy->type == BWN_PHYTYPE_B) { sav->phy_cck0 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x30)); sav->phy_cck1 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x06)); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), 0x00ff); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x06), 0x3f3f); } else { BWN_WRITE_2(mac, 0x3e2, BWN_READ_2(mac, 0x3e2) | 0x8000); } BWN_WRITE_2(mac, 0x3f4, BWN_READ_2(mac, 0x3f4) & 0xf000); tmp = (phy->type == BWN_PHYTYPE_G) ? 
BWN_PHY_LO_MASK : BWN_PHY_CCK(0x2e); BWN_PHY_WRITE(mac, tmp, 0x007f); tmp = sav->phy_syncctl; BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, tmp & 0xff7f); tmp = sav->rf1; BWN_RF_WRITE(mac, 0x007a, tmp & 0xfff0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2a), 0x8a3); if (phy->type == BWN_PHYTYPE_G || (phy->type == BWN_PHYTYPE_B && phy->rf_ver == 0x2050 && phy->rf_rev >= 6)) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x1003); } else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x0802); if (phy->rev >= 2) bwn_dummy_transmission(mac, 0, 1); bwn_phy_g_switch_chan(mac, 6, 0); BWN_RF_READ(mac, 0x51); if (phy->type == BWN_PHYTYPE_G) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0); nanouptime(&ts); if (ieee80211_time_before(lo->txctl_measured_time, (ts.tv_nsec / 1000000 + ts.tv_sec * 1000) - BWN_LO_TXCTL_EXPIRE)) bwn_lo_measure_txctl_values(mac); if (phy->type == BWN_PHYTYPE_G && phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc078); else { if (phy->type == BWN_PHYTYPE_B) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8078); else BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8078); } } static void bwn_lo_restore(struct bwn_mac *mac, struct bwn_lo_g_value *sav) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; uint16_t tmp; if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xe300); tmp = (pg->pg_pga_gain << 8); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa0); DELAY(5); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa2); DELAY(2); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa3); } else { tmp = (pg->pg_pga_gain | 0xefa0); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, tmp); } if (phy->type == BWN_PHYTYPE_G) { if (phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0xc078); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8078); if (phy->rev >= 2) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x0202); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x0101); } BWN_WRITE_2(mac, 0x3f4, sav->reg0); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, sav->phy_pgactl); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2a), sav->phy_cck2); BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, sav->phy_syncctl); BWN_PHY_WRITE(mac, BWN_PHY_DACCTL, sav->phy_dacctl); BWN_RF_WRITE(mac, 0x43, sav->rf0); BWN_RF_WRITE(mac, 0x7a, sav->rf1); if (!BWN_HAS_TXMAG(phy)) { tmp = sav->rf2; BWN_RF_SETMASK(mac, 0x52, 0xff0f, tmp); } BWN_WRITE_2(mac, 0x3e2, sav->reg1); if (phy->type == BWN_PHYTYPE_B && phy->rf_ver == 0x2050 && phy->rf_rev <= 5) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), sav->phy_cck0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x06), sav->phy_cck1); } if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, sav->phy_analogover); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, sav->phy_analogoverval); BWN_PHY_WRITE(mac, BWN_PHY_CLASSCTL, sav->phy_classctl); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, sav->phy_rfover); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, sav->phy_rfoverval); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x3e), sav->phy_cck3); BWN_PHY_WRITE(mac, BWN_PHY_CRS0, sav->phy_crs0); } if (bwn_has_hwpctl(mac)) { tmp = (sav->phy_lomask & 0xbfff); BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, tmp); BWN_PHY_WRITE(mac, BWN_PHY_EXTG(0x01), sav->phy_extg); BWN_PHY_WRITE(mac, BWN_PHY_DACCTL, sav->phy_dacctl_hwpctl); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x14), sav->phy_cck4); BWN_PHY_WRITE(mac, BWN_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl); } bwn_phy_g_switch_chan(mac, sav->old_channel, 1); } static int bwn_lo_probe_loctl(struct bwn_mac *mac, struct bwn_loctl *probe, struct bwn_lo_g_sm *d) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_loctl orig, test; struct bwn_loctl prev = { -100, -100 }; static const struct 
bwn_loctl modifiers[] = { { 1, 1,}, { 1, 0,}, { 1, -1,}, { 0, -1,}, { -1, -1,}, { -1, 0,}, { -1, 1,}, { 0, 1,} }; int begin, end, lower = 0, i; uint16_t feedth; if (d->curstate == 0) { begin = 1; end = 8; } else if (d->curstate % 2 == 0) { begin = d->curstate - 1; end = d->curstate + 1; } else { begin = d->curstate - 2; end = d->curstate + 2; } if (begin < 1) begin += 8; if (end > 8) end -= 8; memcpy(&orig, probe, sizeof(struct bwn_loctl)); i = begin; d->curstate = i; while (1) { KASSERT(i >= 1 && i <= 8, ("%s:%d: fail", __func__, __LINE__)); memcpy(&test, &orig, sizeof(struct bwn_loctl)); test.i += modifiers[i - 1].i * d->multipler; test.q += modifiers[i - 1].q * d->multipler; if ((test.i != prev.i || test.q != prev.q) && (abs(test.i) <= 16 && abs(test.q) <= 16)) { bwn_lo_write(mac, &test); feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain, pg->pg_pga_gain, pg->pg_trsw_rx_gain); if (feedth < d->feedth) { memcpy(probe, &test, sizeof(struct bwn_loctl)); lower = 1; d->feedth = feedth; if (d->nmeasure < 2 && !BWN_HAS_LOOPBACK(phy)) break; } } memcpy(&prev, &test, sizeof(prev)); if (i == end) break; if (i == 8) i = 1; else i++; d->curstate = i; } return (lower); } static void bwn_lo_probe_sm(struct bwn_mac *mac, struct bwn_loctl *loctl, int *rxgain) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_lo_g_sm d; struct bwn_loctl probe; int lower, repeat, cnt = 0; uint16_t feedth; d.nmeasure = 0; d.multipler = 1; if (BWN_HAS_LOOPBACK(phy)) d.multipler = 3; memcpy(&d.loctl, loctl, sizeof(struct bwn_loctl)); repeat = (BWN_HAS_LOOPBACK(phy)) ? 4 : 1; do { bwn_lo_write(mac, &d.loctl); feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain, pg->pg_pga_gain, pg->pg_trsw_rx_gain); if (feedth < 0x258) { if (feedth >= 0x12c) *rxgain += 6; else *rxgain += 3; feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain, pg->pg_pga_gain, pg->pg_trsw_rx_gain); } d.feedth = feedth; d.curstate = 0; do { KASSERT(d.curstate >= 0 && d.curstate <= 8, ("%s:%d: fail", __func__, __LINE__)); memcpy(&probe, &d.loctl, sizeof(struct bwn_loctl)); lower = bwn_lo_probe_loctl(mac, &probe, &d); if (!lower) break; if ((probe.i == d.loctl.i) && (probe.q == d.loctl.q)) break; memcpy(&d.loctl, &probe, sizeof(struct bwn_loctl)); d.nmeasure++; } while (d.nmeasure < 24); memcpy(loctl, &d.loctl, sizeof(struct bwn_loctl)); if (BWN_HAS_LOOPBACK(phy)) { if (d.feedth > 0x1194) *rxgain -= 6; else if (d.feedth < 0x5dc) *rxgain += 3; if (cnt == 0) { if (d.feedth <= 0x5dc) { d.multipler = 1; cnt++; } else d.multipler = 2; } else if (cnt == 2) d.multipler = 1; } bwn_lo_measure_gain_values(mac, *rxgain, BWN_HAS_LOOPBACK(phy)); } while (++cnt < repeat); } static struct bwn_lo_calib * bwn_lo_calibset(struct bwn_mac *mac, const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_loctl loctl = { 0, 0 }; struct bwn_lo_calib *cal; struct bwn_lo_g_value sval = { 0 }; int rxgain; uint16_t pad, reg, value; sval.old_channel = phy->chan; bwn_mac_suspend(mac); bwn_lo_save(mac, &sval); reg = bwn_lo_txctl_regtable(mac, &value, &pad); BWN_RF_SETMASK(mac, 0x43, 0xfff0, rfatt->att); BWN_RF_SETMASK(mac, reg, ~value, (rfatt->padmix ? 
value : 0)); rxgain = (rfatt->att * 2) + (bbatt->att / 2); if (rfatt->padmix) rxgain -= pad; if (BWN_HAS_LOOPBACK(phy)) rxgain += pg->pg_max_lb_gain; bwn_lo_measure_gain_values(mac, rxgain, BWN_HAS_LOOPBACK(phy)); bwn_phy_g_set_bbatt(mac, bbatt->att); bwn_lo_probe_sm(mac, &loctl, &rxgain); bwn_lo_restore(mac, &sval); bwn_mac_enable(mac); cal = malloc(sizeof(*cal), M_DEVBUF, M_NOWAIT | M_ZERO); if (!cal) { device_printf(mac->mac_sc->sc_dev, "out of memory\n"); return (NULL); } memcpy(&cal->bbatt, bbatt, sizeof(*bbatt)); memcpy(&cal->rfatt, rfatt, sizeof(*rfatt)); memcpy(&cal->ctl, &loctl, sizeof(loctl)); BWN_GETTIME(cal->calib_time); return (cal); } static struct bwn_lo_calib * bwn_lo_get_calib(struct bwn_mac *mac, const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt) { struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl; struct bwn_lo_calib *c; TAILQ_FOREACH(c, &lo->calib_list, list) { if (!BWN_BBATTCMP(&c->bbatt, bbatt)) continue; if (!BWN_RFATTCMP(&c->rfatt, rfatt)) continue; return (c); } c = bwn_lo_calibset(mac, bbatt, rfatt); if (!c) return (NULL); TAILQ_INSERT_TAIL(&lo->calib_list, c, list); return (c); } static void bwn_phy_g_dc_lookup_init(struct bwn_mac *mac, uint8_t update) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; const struct bwn_rfatt *rfatt; const struct bwn_bbatt *bbatt; uint64_t pvector; int i; int rf_offset, bb_offset; uint8_t changed = 0; KASSERT(BWN_DC_LT_SIZE == 32, ("%s:%d: fail", __func__, __LINE__)); KASSERT(lo->rfatt.len * lo->bbatt.len <= 64, ("%s:%d: fail", __func__, __LINE__)); pvector = lo->power_vector; if (!update && !pvector) return; bwn_mac_suspend(mac); for (i = 0; i < BWN_DC_LT_SIZE * 2; i++) { struct bwn_lo_calib *cal; int idx; uint16_t val; if (!update && !(pvector & (((uint64_t)1ULL) << i))) continue; bb_offset = i / lo->rfatt.len; rf_offset = i % lo->rfatt.len; bbatt = &(lo->bbatt.array[bb_offset]); rfatt = &(lo->rfatt.array[rf_offset]); cal = bwn_lo_calibset(mac, bbatt, rfatt); if (!cal) { device_printf(sc->sc_dev, "LO: Could not " "calibrate DC table entry\n"); continue; } val = (uint8_t)(cal->ctl.q); val |= ((uint8_t)(cal->ctl.i)) << 4; free(cal, M_DEVBUF); idx = i / 2; if (i % 2) lo->dc_lt[idx] = (lo->dc_lt[idx] & 0x00ff) | ((val & 0x00ff) << 8); else lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xff00) | (val & 0x00ff); changed = 1; } if (changed) { for (i = 0; i < BWN_DC_LT_SIZE; i++) BWN_PHY_WRITE(mac, 0x3a0 + i, lo->dc_lt[i]); } bwn_mac_enable(mac); } static void bwn_lo_fixup_rfatt(struct bwn_rfatt *rf) { if (!rf->padmix) return; if ((rf->att != 1) && (rf->att != 2) && (rf->att != 3)) rf->att = 4; } static void bwn_lo_g_adjust(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; struct bwn_lo_calib *cal; struct bwn_rfatt rf; memcpy(&rf, &pg->pg_rfatt, sizeof(rf)); bwn_lo_fixup_rfatt(&rf); cal = bwn_lo_get_calib(mac, &pg->pg_bbatt, &rf); if (!cal) return; bwn_lo_write(mac, &cal->ctl); } static void bwn_lo_g_init(struct bwn_mac *mac) { if (!bwn_has_hwpctl(mac)) return; bwn_lo_get_powervector(mac); bwn_phy_g_dc_lookup_init(mac, 1); } static void bwn_mac_suspend(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; uint32_t tmp; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { bwn_psctl(mac, BWN_PS_AWAKE); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_ON); BWN_READ_4(mac, BWN_MACCTL); for (i = 35; i; i--) { tmp = BWN_READ_4(mac,
BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(10); } for (i = 40; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(1000); } device_printf(sc->sc_dev, "MAC suspend failed\n"); } out: mac->mac_suspended++; } static void bwn_mac_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t state; state = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (state != BWN_SHARED_UCODESTAT_SUSPEND && state != BWN_SHARED_UCODESTAT_SLEEP) device_printf(sc->sc_dev, "warn: firmware state (%d)\n", state); mac->mac_suspended--; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_ON); BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_MAC_SUSPENDED); BWN_READ_4(mac, BWN_MACCTL); BWN_READ_4(mac, BWN_INTR_REASON); bwn_psctl(mac, 0); } } static void bwn_psctl(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; int i; uint16_t ucstat; KASSERT(!((flags & BWN_PS_ON) && (flags & BWN_PS_OFF)), ("%s:%d: fail", __func__, __LINE__)); KASSERT(!((flags & BWN_PS_AWAKE) && (flags & BWN_PS_ASLEEP)), ("%s:%d: fail", __func__, __LINE__)); /* XXX forcibly awake and hwps-off */ BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_AWAKE) & ~BWN_MACCTL_HWPS); BWN_READ_4(mac, BWN_MACCTL); if (siba_get_revid(sc->sc_dev) >= 5) { for (i = 0; i < 100; i++) { ucstat = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (ucstat != BWN_SHARED_UCODESTAT_SLEEP) break; DELAY(10); } } } static int16_t bwn_nrssi_read(struct bwn_mac *mac, uint16_t offset) { BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_CTRL, offset); return ((int16_t)BWN_PHY_READ(mac, BWN_PHY_NRSSI_DATA)); } static void bwn_nrssi_threshold(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; int32_t a, b; int16_t tmp16; uint16_t tmpu16; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__)); if (phy->gmode && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI)) { if (!pg->pg_aci_wlan_automatic && pg->pg_aci_enable) { a = 0x13; b = 0x12; } else { a = 0xe; b = 0x11; } a = a * (pg->pg_nrssi[1] - pg->pg_nrssi[0]); a += (pg->pg_nrssi[0] << 6); a += (a < 32) ? 31 : 32; a = a >> 6; a = MIN(MAX(a, -31), 31); b = b * (pg->pg_nrssi[1] - pg->pg_nrssi[0]); b += (pg->pg_nrssi[0] << 6); if (b < 32) b += 31; else b += 32; b = b >> 6; b = MIN(MAX(b, -31), 31); tmpu16 = BWN_PHY_READ(mac, 0x048a) & 0xf000; tmpu16 |= ((uint32_t)b & 0x0000003f); tmpu16 |= (((uint32_t)a & 0x0000003f) << 6); BWN_PHY_WRITE(mac, 0x048a, tmpu16); return; } tmp16 = bwn_nrssi_read(mac, 0x20); if (tmp16 >= 0x20) tmp16 -= 0x40; BWN_PHY_SETMASK(mac, 0x048a, 0xf000, (tmp16 < 3) ? 
0x09eb : 0x0aed); } static void bwn_nrssi_slope_11g(struct bwn_mac *mac) { #define SAVE_RF_MAX 3 #define SAVE_PHY_COMM_MAX 4 #define SAVE_PHY3_MAX 8 static const uint16_t save_rf_regs[SAVE_RF_MAX] = { 0x7a, 0x52, 0x43 }; static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] = { 0x15, 0x5a, 0x59, 0x58 }; static const uint16_t save_phy3_regs[SAVE_PHY3_MAX] = { 0x002e, 0x002f, 0x080f, BWN_PHY_G_LOCTL, 0x0801, 0x0060, 0x0014, 0x0478 }; struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; int32_t i, tmp32, phy3_idx = 0; uint16_t delta, tmp; uint16_t save_rf[SAVE_RF_MAX]; uint16_t save_phy_comm[SAVE_PHY_COMM_MAX]; uint16_t save_phy3[SAVE_PHY3_MAX]; uint16_t ant_div, phy0, chan_ex; int16_t nrssi0, nrssi1; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); if (phy->rf_rev >= 9) return; if (phy->rf_rev == 8) bwn_nrssi_offset(mac); BWN_PHY_MASK(mac, BWN_PHY_G_CRS, 0x7fff); BWN_PHY_MASK(mac, 0x0802, 0xfffc); /* * Save RF/PHY registers for later restoration */ ant_div = BWN_READ_2(mac, 0x03e2); BWN_WRITE_2(mac, 0x03e2, BWN_READ_2(mac, 0x03e2) | 0x8000); for (i = 0; i < SAVE_RF_MAX; ++i) save_rf[i] = BWN_RF_READ(mac, save_rf_regs[i]); for (i = 0; i < SAVE_PHY_COMM_MAX; ++i) save_phy_comm[i] = BWN_PHY_READ(mac, save_phy_comm_regs[i]); phy0 = BWN_READ_2(mac, BWN_PHY0); chan_ex = BWN_READ_2(mac, BWN_CHANNEL_EXT); if (phy->rev >= 3) { for (i = 0; i < SAVE_PHY3_MAX; ++i) save_phy3[i] = BWN_PHY_READ(mac, save_phy3_regs[i]); BWN_PHY_WRITE(mac, 0x002e, 0); BWN_PHY_WRITE(mac, BWN_PHY_G_LOCTL, 0); switch (phy->rev) { case 4: case 6: case 7: BWN_PHY_SET(mac, 0x0478, 0x0100); BWN_PHY_SET(mac, 0x0801, 0x0040); break; case 3: case 5: BWN_PHY_MASK(mac, 0x0801, 0xffbf); break; } BWN_PHY_SET(mac, 0x0060, 0x0040); BWN_PHY_SET(mac, 0x0014, 0x0200); } /* * Calculate nrssi0 */ BWN_RF_SET(mac, 0x007a, 0x0070); bwn_set_all_gains(mac, 0, 8, 0); BWN_RF_MASK(mac, 0x007a, 0x00f7); if (phy->rev >= 2) { BWN_PHY_SETMASK(mac, 0x0811, 0xffcf, 0x0030); BWN_PHY_SETMASK(mac, 0x0812, 0xffcf, 0x0010); } BWN_RF_SET(mac, 0x007a, 0x0080); DELAY(20); nrssi0 = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi0 >= 0x0020) nrssi0 -= 0x0040; /* * Calculate nrssi1 */ BWN_RF_MASK(mac, 0x007a, 0x007f); if (phy->rev >= 2) BWN_PHY_SETMASK(mac, 0x0003, 0xff9f, 0x0040); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, BWN_READ_2(mac, BWN_CHANNEL_EXT) | 0x2000); BWN_RF_SET(mac, 0x007a, 0x000f); BWN_PHY_WRITE(mac, 0x0015, 0xf330); if (phy->rev >= 2) { BWN_PHY_SETMASK(mac, 0x0812, 0xffcf, 0x0020); BWN_PHY_SETMASK(mac, 0x0811, 0xffcf, 0x0020); } bwn_set_all_gains(mac, 3, 0, 1); if (phy->rf_rev == 8) { BWN_RF_WRITE(mac, 0x0043, 0x001f); } else { tmp = BWN_RF_READ(mac, 0x0052) & 0xff0f; BWN_RF_WRITE(mac, 0x0052, tmp | 0x0060); tmp = BWN_RF_READ(mac, 0x0043) & 0xfff0; BWN_RF_WRITE(mac, 0x0043, tmp | 0x0009); } BWN_PHY_WRITE(mac, 0x005a, 0x0480); BWN_PHY_WRITE(mac, 0x0059, 0x0810); BWN_PHY_WRITE(mac, 0x0058, 0x000d); DELAY(20); nrssi1 = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); /* * Install calculated narrow RSSI values */ if (nrssi1 >= 0x0020) nrssi1 -= 0x0040; if (nrssi0 == nrssi1) pg->pg_nrssi_slope = 0x00010000; else pg->pg_nrssi_slope = 0x00400000 / (nrssi0 - nrssi1); if (nrssi0 >= -4) { pg->pg_nrssi[0] = nrssi1; pg->pg_nrssi[1] = nrssi0; } /* * Restore saved RF/PHY registers */ if (phy->rev >= 3) { for (phy3_idx = 0; phy3_idx < 4; ++phy3_idx) { BWN_PHY_WRITE(mac, save_phy3_regs[phy3_idx], save_phy3[phy3_idx]); } } if (phy->rev >= 2) { BWN_PHY_MASK(mac, 0x0812, 0xffcf); BWN_PHY_MASK(mac, 0x0811, 
0xffcf); } for (i = 0; i < SAVE_RF_MAX; ++i) BWN_RF_WRITE(mac, save_rf_regs[i], save_rf[i]); BWN_WRITE_2(mac, 0x03e2, ant_div); BWN_WRITE_2(mac, 0x03e6, phy0); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, chan_ex); for (i = 0; i < SAVE_PHY_COMM_MAX; ++i) BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]); bwn_spu_workaround(mac, phy->chan); BWN_PHY_SET(mac, 0x0802, (0x0001 | 0x0002)); bwn_set_original_gains(mac); BWN_PHY_SET(mac, BWN_PHY_G_CRS, 0x8000); if (phy->rev >= 3) { for (; phy3_idx < SAVE_PHY3_MAX; ++phy3_idx) { BWN_PHY_WRITE(mac, save_phy3_regs[phy3_idx], save_phy3[phy3_idx]); } } delta = 0x1f - pg->pg_nrssi[0]; for (i = 0; i < 64; i++) { tmp32 = (((i - delta) * pg->pg_nrssi_slope) / 0x10000) + 0x3a; tmp32 = MIN(MAX(tmp32, 0), 0x3f); pg->pg_nrssi_lt[i] = tmp32; } bwn_nrssi_threshold(mac); #undef SAVE_RF_MAX #undef SAVE_PHY_COMM_MAX #undef SAVE_PHY3_MAX } static void bwn_nrssi_offset(struct bwn_mac *mac) { #define SAVE_RF_MAX 2 #define SAVE_PHY_COMM_MAX 10 #define SAVE_PHY6_MAX 8 static const uint16_t save_rf_regs[SAVE_RF_MAX] = { 0x7a, 0x43 }; static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] = { 0x0001, 0x0811, 0x0812, 0x0814, 0x0815, 0x005a, 0x0059, 0x0058, 0x000a, 0x0003 }; static const uint16_t save_phy6_regs[SAVE_PHY6_MAX] = { 0x002e, 0x002f, 0x080f, 0x0810, 0x0801, 0x0060, 0x0014, 0x0478 }; struct bwn_phy *phy = &mac->mac_phy; int i, phy6_idx = 0; uint16_t save_rf[SAVE_RF_MAX]; uint16_t save_phy_comm[SAVE_PHY_COMM_MAX]; uint16_t save_phy6[SAVE_PHY6_MAX]; int16_t nrssi; uint16_t saved = 0xffff; for (i = 0; i < SAVE_PHY_COMM_MAX; ++i) save_phy_comm[i] = BWN_PHY_READ(mac, save_phy_comm_regs[i]); for (i = 0; i < SAVE_RF_MAX; ++i) save_rf[i] = BWN_RF_READ(mac, save_rf_regs[i]); BWN_PHY_MASK(mac, 0x0429, 0x7fff); BWN_PHY_SETMASK(mac, 0x0001, 0x3fff, 0x4000); BWN_PHY_SET(mac, 0x0811, 0x000c); BWN_PHY_SETMASK(mac, 0x0812, 0xfff3, 0x0004); BWN_PHY_MASK(mac, 0x0802, ~(0x1 | 0x2)); if (phy->rev >= 6) { for (i = 0; i < SAVE_PHY6_MAX; ++i) save_phy6[i] = BWN_PHY_READ(mac, save_phy6_regs[i]); BWN_PHY_WRITE(mac, 0x002e, 0); BWN_PHY_WRITE(mac, 0x002f, 0); BWN_PHY_WRITE(mac, 0x080f, 0); BWN_PHY_WRITE(mac, 0x0810, 0); BWN_PHY_SET(mac, 0x0478, 0x0100); BWN_PHY_SET(mac, 0x0801, 0x0040); BWN_PHY_SET(mac, 0x0060, 0x0040); BWN_PHY_SET(mac, 0x0014, 0x0200); } BWN_RF_SET(mac, 0x007a, 0x0070); BWN_RF_SET(mac, 0x007a, 0x0080); DELAY(30); nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) nrssi -= 0x40; if (nrssi == 31) { for (i = 7; i >= 4; i--) { BWN_RF_WRITE(mac, 0x007b, i); DELAY(20); nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) nrssi -= 0x40; if (nrssi < 31 && saved == 0xffff) saved = i; } if (saved == 0xffff) saved = 4; } else { BWN_RF_MASK(mac, 0x007a, 0x007f); if (phy->rev != 1) { BWN_PHY_SET(mac, 0x0814, 0x0001); BWN_PHY_MASK(mac, 0x0815, 0xfffe); } BWN_PHY_SET(mac, 0x0811, 0x000c); BWN_PHY_SET(mac, 0x0812, 0x000c); BWN_PHY_SET(mac, 0x0811, 0x0030); BWN_PHY_SET(mac, 0x0812, 0x0030); BWN_PHY_WRITE(mac, 0x005a, 0x0480); BWN_PHY_WRITE(mac, 0x0059, 0x0810); BWN_PHY_WRITE(mac, 0x0058, 0x000d); if (phy->rev == 0) BWN_PHY_WRITE(mac, 0x0003, 0x0122); else BWN_PHY_SET(mac, 0x000a, 0x2000); if (phy->rev != 1) { BWN_PHY_SET(mac, 0x0814, 0x0004); BWN_PHY_MASK(mac, 0x0815, 0xfffb); } BWN_PHY_SETMASK(mac, 0x0003, 0xff9f, 0x0040); BWN_RF_SET(mac, 0x007a, 0x000f); bwn_set_all_gains(mac, 3, 0, 1); BWN_RF_SETMASK(mac, 0x0043, 0x00f0, 0x000f); DELAY(30); nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) 
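/* sign-extend the 6-bit NRSSI sample read from PHY register 0x47f */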
nrssi -= 0x40; if (nrssi == -32) { for (i = 0; i < 4; i++) { BWN_RF_WRITE(mac, 0x007b, i); DELAY(20); nrssi = (int16_t)((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) nrssi -= 0x40; if (nrssi > -31 && saved == 0xffff) saved = i; } if (saved == 0xffff) saved = 3; } else saved = 0; } BWN_RF_WRITE(mac, 0x007b, saved); /* * Restore saved RF/PHY registers */ if (phy->rev >= 6) { for (phy6_idx = 0; phy6_idx < 4; ++phy6_idx) { BWN_PHY_WRITE(mac, save_phy6_regs[phy6_idx], save_phy6[phy6_idx]); } } if (phy->rev != 1) { for (i = 3; i < 5; i++) BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]); } for (i = 5; i < SAVE_PHY_COMM_MAX; i++) BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]); for (i = SAVE_RF_MAX - 1; i >= 0; --i) BWN_RF_WRITE(mac, save_rf_regs[i], save_rf[i]); BWN_PHY_WRITE(mac, 0x0802, BWN_PHY_READ(mac, 0x0802) | 0x1 | 0x2); BWN_PHY_SET(mac, 0x0429, 0x8000); bwn_set_original_gains(mac); if (phy->rev >= 6) { for (; phy6_idx < SAVE_PHY6_MAX; ++phy6_idx) { BWN_PHY_WRITE(mac, save_phy6_regs[phy6_idx], save_phy6[phy6_idx]); } } BWN_PHY_WRITE(mac, save_phy_comm_regs[0], save_phy_comm[0]); BWN_PHY_WRITE(mac, save_phy_comm_regs[2], save_phy_comm[2]); BWN_PHY_WRITE(mac, save_phy_comm_regs[1], save_phy_comm[1]); } static void bwn_set_all_gains(struct bwn_mac *mac, int16_t first, int16_t second, int16_t third) { struct bwn_phy *phy = &mac->mac_phy; uint16_t i; uint16_t start = 0x08, end = 0x18; uint16_t tmp; uint16_t table; if (phy->rev <= 1) { start = 0x10; end = 0x20; } table = BWN_OFDMTAB_GAINX; if (phy->rev <= 1) table = BWN_OFDMTAB_GAINX_R1; for (i = 0; i < 4; i++) bwn_ofdmtab_write_2(mac, table, i, first); for (i = start; i < end; i++) bwn_ofdmtab_write_2(mac, table, i, second); if (third != -1) { tmp = ((uint16_t) third << 14) | ((uint16_t) third << 6); BWN_PHY_SETMASK(mac, 0x04a0, 0xbfbf, tmp); BWN_PHY_SETMASK(mac, 0x04a1, 0xbfbf, tmp); BWN_PHY_SETMASK(mac, 0x04a2, 0xbfbf, tmp); } bwn_dummy_transmission(mac, 0, 1); } static void bwn_set_original_gains(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; uint16_t i, tmp; uint16_t table; uint16_t start = 0x0008, end = 0x0018; if (phy->rev <= 1) { start = 0x0010; end = 0x0020; } table = BWN_OFDMTAB_GAINX; if (phy->rev <= 1) table = BWN_OFDMTAB_GAINX_R1; for (i = 0; i < 4; i++) { tmp = (i & 0xfffc); tmp |= (i & 0x0001) << 1; tmp |= (i & 0x0002) >> 1; bwn_ofdmtab_write_2(mac, table, i, tmp); } for (i = start; i < end; i++) bwn_ofdmtab_write_2(mac, table, i, i - start); BWN_PHY_SETMASK(mac, 0x04a0, 0xbfbf, 0x4040); BWN_PHY_SETMASK(mac, 0x04a1, 0xbfbf, 0x4040); BWN_PHY_SETMASK(mac, 0x04a2, 0xbfbf, 0x4000); bwn_dummy_transmission(mac, 0, 1); } static void bwn_phy_hwpctl_init(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_rfatt old_rfatt, rfatt; struct bwn_bbatt old_bbatt, bbatt; struct bwn_softc *sc = mac->mac_sc; uint8_t old_txctl = 0; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); if ((siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM) && (siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306)) return; BWN_PHY_WRITE(mac, 0x0028, 0x8018); BWN_WRITE_2(mac, BWN_PHY0, BWN_READ_2(mac, BWN_PHY0) & 0xffdf); if (!phy->gmode) return; bwn_hwpctl_early_init(mac); if (pg->pg_curtssi == 0) { if (phy->rf_ver == 0x2050 && phy->analog == 0) { BWN_RF_SETMASK(mac, 0x0076, 0x00f7, 0x0084); } else { memcpy(&old_rfatt, &pg->pg_rfatt, sizeof(old_rfatt)); memcpy(&old_bbatt, &pg->pg_bbatt, sizeof(old_bbatt)); old_txctl = 
pg->pg_txctl; bbatt.att = 11; if (phy->rf_rev == 8) { rfatt.att = 15; rfatt.padmix = 1; } else { rfatt.att = 9; rfatt.padmix = 0; } bwn_phy_g_set_txpwr_sub(mac, &bbatt, &rfatt, 0); } bwn_dummy_transmission(mac, 0, 1); pg->pg_curtssi = BWN_PHY_READ(mac, BWN_PHY_TSSI); if (phy->rf_ver == 0x2050 && phy->analog == 0) BWN_RF_MASK(mac, 0x0076, 0xff7b); else bwn_phy_g_set_txpwr_sub(mac, &old_bbatt, &old_rfatt, old_txctl); } bwn_hwpctl_init_gphy(mac); /* clear TSSI */ bwn_shm_write_2(mac, BWN_SHARED, 0x0058, 0x7f7f); bwn_shm_write_2(mac, BWN_SHARED, 0x005a, 0x7f7f); bwn_shm_write_2(mac, BWN_SHARED, 0x0070, 0x7f7f); bwn_shm_write_2(mac, BWN_SHARED, 0x0072, 0x7f7f); } static void bwn_hwpctl_early_init(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; if (!bwn_has_hwpctl(mac)) { BWN_PHY_WRITE(mac, 0x047a, 0xc111); return; } BWN_PHY_MASK(mac, 0x0036, 0xfeff); BWN_PHY_WRITE(mac, 0x002f, 0x0202); BWN_PHY_SET(mac, 0x047c, 0x0002); BWN_PHY_SET(mac, 0x047a, 0xf000); if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) { BWN_PHY_SETMASK(mac, 0x047a, 0xff0f, 0x0010); BWN_PHY_SET(mac, 0x005d, 0x8000); BWN_PHY_SETMASK(mac, 0x004e, 0xffc0, 0x0010); BWN_PHY_WRITE(mac, 0x002e, 0xc07f); BWN_PHY_SET(mac, 0x0036, 0x0400); } else { BWN_PHY_SET(mac, 0x0036, 0x0200); BWN_PHY_SET(mac, 0x0036, 0x0400); BWN_PHY_MASK(mac, 0x005d, 0x7fff); BWN_PHY_MASK(mac, 0x004f, 0xfffe); BWN_PHY_SETMASK(mac, 0x004e, 0xffc0, 0x0010); BWN_PHY_WRITE(mac, 0x002e, 0xc07f); BWN_PHY_SETMASK(mac, 0x047a, 0xff0f, 0x0010); } } static void bwn_hwpctl_init_gphy(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; int i; uint16_t nr_written = 0, tmp, value; uint8_t rf, bb; if (!bwn_has_hwpctl(mac)) { bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_HW_POWERCTL); return; } BWN_PHY_SETMASK(mac, 0x0036, 0xffc0, (pg->pg_idletssi - pg->pg_curtssi)); BWN_PHY_SETMASK(mac, 0x0478, 0xff00, (pg->pg_idletssi - pg->pg_curtssi)); for (i = 0; i < 32; i++) bwn_ofdmtab_write_2(mac, 0x3c20, i, pg->pg_tssi2dbm[i]); for (i = 32; i < 64; i++) bwn_ofdmtab_write_2(mac, 0x3c00, i - 32, pg->pg_tssi2dbm[i]); for (i = 0; i < 64; i += 2) { value = (uint16_t) pg->pg_tssi2dbm[i]; value |= ((uint16_t) pg->pg_tssi2dbm[i + 1]) << 8; BWN_PHY_WRITE(mac, 0x380 + (i / 2), value); } for (rf = 0; rf < lo->rfatt.len; rf++) { for (bb = 0; bb < lo->bbatt.len; bb++) { if (nr_written >= 0x40) return; tmp = lo->bbatt.array[bb].att; tmp <<= 8; if (phy->rf_rev == 8) tmp |= 0x50; else tmp |= 0x40; tmp |= lo->rfatt.array[rf].att; BWN_PHY_WRITE(mac, 0x3c0 + nr_written, tmp); nr_written++; } } BWN_PHY_MASK(mac, 0x0060, 0xffbf); BWN_PHY_WRITE(mac, 0x0014, 0x0000); KASSERT(phy->rev >= 6, ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_SET(mac, 0x0478, 0x0800); BWN_PHY_MASK(mac, 0x0478, 0xfeff); BWN_PHY_MASK(mac, 0x0801, 0xffbf); bwn_phy_g_dc_lookup_init(mac, 1); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_HW_POWERCTL); } static void bwn_phy_g_switch_chan(struct bwn_mac *mac, int channel, uint8_t spu) { struct bwn_softc *sc = mac->mac_sc; if (spu != 0) bwn_spu_workaround(mac, channel); BWN_WRITE_2(mac, BWN_CHANNEL, bwn_phy_g_chan2freq(channel)); if (channel == 14) { if (siba_sprom_get_ccode(sc->sc_dev) == SIBA_CCODE_JAPAN) bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_JAPAN_CHAN14_OFF); else bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_JAPAN_CHAN14_OFF); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, BWN_READ_2(mac, BWN_CHANNEL_EXT) | (1 << 11)); return; } BWN_WRITE_2(mac, BWN_CHANNEL_EXT, BWN_READ_2(mac, BWN_CHANNEL_EXT) & 0xf7bf); 
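/*
 * Note: masking with 0xf7bf clears bits 0x0840 of BWN_CHANNEL_EXT,
 * including the (1 << 11) channel 14 (Japan) flag set in the early
 * return above, when tuning to channels 1-13.
 */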
} static uint16_t bwn_phy_g_chan2freq(uint8_t channel) { static const uint8_t bwn_phy_g_rf_channels[] = BWN_PHY_G_RF_CHANNELS; KASSERT(channel >= 1 && channel <= 14, ("%s:%d: fail", __func__, __LINE__)); return (bwn_phy_g_rf_channels[channel - 1]); } static void bwn_phy_g_set_txpwr_sub(struct bwn_mac *mac, const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt, uint8_t txctl) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; uint16_t bb, rf; uint16_t tx_bias, tx_magn; bb = bbatt->att; rf = rfatt->att; tx_bias = lo->tx_bias; tx_magn = lo->tx_magn; if (tx_bias == 0xff) tx_bias = 0; pg->pg_txctl = txctl; memmove(&pg->pg_rfatt, rfatt, sizeof(*rfatt)); pg->pg_rfatt.padmix = (txctl & BWN_TXCTL_TXMIX) ? 1 : 0; memmove(&pg->pg_bbatt, bbatt, sizeof(*bbatt)); bwn_phy_g_set_bbatt(mac, bb); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RADIO_ATT, rf); if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) BWN_RF_WRITE(mac, 0x43, (rf & 0x000f) | (txctl & 0x0070)); else { BWN_RF_SETMASK(mac, 0x43, 0xfff0, (rf & 0x000f)); BWN_RF_SETMASK(mac, 0x52, ~0x0070, (txctl & 0x0070)); } if (BWN_HAS_TXMAG(phy)) BWN_RF_WRITE(mac, 0x52, tx_magn | tx_bias); else BWN_RF_SETMASK(mac, 0x52, 0xfff0, (tx_bias & 0x000f)); bwn_lo_g_adjust(mac); } static void bwn_phy_g_set_bbatt(struct bwn_mac *mac, uint16_t bbatt) { struct bwn_phy *phy = &mac->mac_phy; if (phy->analog == 0) { BWN_WRITE_2(mac, BWN_PHY0, (BWN_READ_2(mac, BWN_PHY0) & 0xfff0) | bbatt); return; } if (phy->analog > 1) { BWN_PHY_SETMASK(mac, BWN_PHY_DACCTL, 0xffc3, bbatt << 2); return; } BWN_PHY_SETMASK(mac, BWN_PHY_DACCTL, 0xff87, bbatt << 3); } static uint16_t bwn_rf_2050_rfoverval(struct bwn_mac *mac, uint16_t reg, uint32_t lpd) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; int max_lb_gain; uint16_t extlna; uint16_t i; if (phy->gmode == 0) return (0); if (BWN_HAS_LOOPBACK(phy)) { max_lb_gain = pg->pg_max_lb_gain; max_lb_gain += (phy->rf_rev == 8) ? 
0x3e : 0x26; if (max_lb_gain >= 0x46) { extlna = 0x3000; max_lb_gain -= 0x46; } else if (max_lb_gain >= 0x3a) { extlna = 0x1000; max_lb_gain -= 0x3a; } else if (max_lb_gain >= 0x2e) { extlna = 0x2000; max_lb_gain -= 0x2e; } else { extlna = 0; max_lb_gain -= 0x10; } for (i = 0; i < 16; i++) { max_lb_gain -= (i * 6); if (max_lb_gain < 6) break; } if ((phy->rev < 7) || !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) { if (reg == BWN_PHY_RFOVER) { return (0x1b3); } else if (reg == BWN_PHY_RFOVERVAL) { extlna |= (i << 8); switch (lpd) { case BWN_LPD(0, 1, 1): return (0x0f92); case BWN_LPD(0, 0, 1): case BWN_LPD(1, 0, 1): return (0x0092 | extlna); case BWN_LPD(1, 0, 0): return (0x0093 | extlna); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } else { if (reg == BWN_PHY_RFOVER) return (0x9b3); if (reg == BWN_PHY_RFOVERVAL) { if (extlna) extlna |= 0x8000; extlna |= (i << 8); switch (lpd) { case BWN_LPD(0, 1, 1): return (0x8f92); case BWN_LPD(0, 0, 1): return (0x8092 | extlna); case BWN_LPD(1, 0, 1): return (0x2092 | extlna); case BWN_LPD(1, 0, 0): return (0x2093 | extlna); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } return (0); } if ((phy->rev < 7) || !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) { if (reg == BWN_PHY_RFOVER) { return (0x1b3); } else if (reg == BWN_PHY_RFOVERVAL) { switch (lpd) { case BWN_LPD(0, 1, 1): return (0x0fb2); case BWN_LPD(0, 0, 1): return (0x00b2); case BWN_LPD(1, 0, 1): return (0x30b2); case BWN_LPD(1, 0, 0): return (0x30b3); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } else { if (reg == BWN_PHY_RFOVER) { return (0x9b3); } else if (reg == BWN_PHY_RFOVERVAL) { switch (lpd) { case BWN_LPD(0, 1, 1): return (0x8fb2); case BWN_LPD(0, 0, 1): return (0x80b2); case BWN_LPD(1, 0, 1): return (0x20b2); case BWN_LPD(1, 0, 0): return (0x20b3); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } return (0); } static void bwn_spu_workaround(struct bwn_mac *mac, uint8_t channel) { if (mac->mac_phy.rf_ver != 0x2050 || mac->mac_phy.rf_rev >= 6) return; BWN_WRITE_2(mac, BWN_CHANNEL, (channel <= 10) ? 
bwn_phy_g_chan2freq(channel + 4) : bwn_phy_g_chan2freq(1)); DELAY(1000); BWN_WRITE_2(mac, BWN_CHANNEL, bwn_phy_g_chan2freq(channel)); } static int bwn_fw_gets(struct bwn_mac *mac, enum bwn_fwtype type) { struct bwn_softc *sc = mac->mac_sc; struct bwn_fw *fw = &mac->mac_fw; const uint8_t rev = siba_get_revid(sc->sc_dev); const char *filename; uint32_t high; int error; /* microcode */ if (rev >= 5 && rev <= 10) filename = "ucode5"; else if (rev >= 11 && rev <= 12) filename = "ucode11"; else if (rev == 13) filename = "ucode13"; else if (rev == 14) filename = "ucode14"; else if (rev >= 15) filename = "ucode15"; else { device_printf(sc->sc_dev, "no ucode for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } error = bwn_fw_get(mac, type, filename, &fw->ucode); if (error) { bwn_release_firmware(mac); return (error); } /* PCM */ KASSERT(fw->no_pcmfile == 0, ("%s:%d fail", __func__, __LINE__)); if (rev >= 5 && rev <= 10) { error = bwn_fw_get(mac, type, "pcm5", &fw->pcm); if (error == ENOENT) fw->no_pcmfile = 1; else if (error) { bwn_release_firmware(mac); return (error); } } else if (rev < 11) { device_printf(sc->sc_dev, "no PCM for rev %d\n", rev); return (EOPNOTSUPP); } /* initvals */ high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev < 5 || rev > 10) goto fail1; if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1initvals5"; else filename = "a0g0initvals5"; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0initvals5"; else if (rev >= 13) filename = "b0g0initvals13"; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0initvals13"; else if (rev == 14) filename = "lp0initvals14"; else if (rev >= 15) filename = "lp0initvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev >= 11 && rev <= 12) filename = "n0initvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals); if (error) { bwn_release_firmware(mac); return (error); } /* bandswitch initvals */ switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev >= 5 && rev <= 10) { if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1bsinitvals5"; else filename = "a0g0bsinitvals5"; } else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0bsinitvals5"; else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0bsinitvals13"; else if (rev == 14) filename = "lp0bsinitvals14"; else if (rev >= 15) filename = "lp0bsinitvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev >= 11 && rev <= 12) filename = "n0bsinitvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals_band); if (error) { bwn_release_firmware(mac); return (error); } return (0); fail1: device_printf(sc->sc_dev, "no INITVALS for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } static int bwn_fw_get(struct bwn_mac *mac, enum bwn_fwtype type, const char *name, struct bwn_fwfile *bfw) { const struct bwn_fwhdr *hdr; struct bwn_softc *sc = mac->mac_sc; const struct firmware *fw; char namebuf[64]; if (name == NULL) { bwn_do_release_fw(bfw); return (0); } if (bfw->filename != NULL) { if (bfw->type == type && (strcmp(bfw->filename, name) == 0)) return (0); bwn_do_release_fw(bfw); } snprintf(namebuf, sizeof(namebuf), "bwn%s_v4_%s%s", (type == BWN_FWTYPE_OPENSOURCE) ? "-open" : "", (mac->mac_phy.type == BWN_PHYTYPE_LP) ? 
"lp_" : "", name); /* XXX Sleeping on "fwload" with the non-sleepable locks held */ fw = firmware_get(namebuf); if (fw == NULL) { device_printf(sc->sc_dev, "the fw file(%s) not found\n", namebuf); return (ENOENT); } if (fw->datasize < sizeof(struct bwn_fwhdr)) goto fail; hdr = (const struct bwn_fwhdr *)(fw->data); switch (hdr->type) { case BWN_FWTYPE_UCODE: case BWN_FWTYPE_PCM: if (be32toh(hdr->size) != (fw->datasize - sizeof(struct bwn_fwhdr))) goto fail; /* FALLTHROUGH */ case BWN_FWTYPE_IV: if (hdr->ver != 1) goto fail; break; default: goto fail; } bfw->filename = name; bfw->fw = fw; bfw->type = type; return (0); fail: device_printf(sc->sc_dev, "the fw file(%s) format error\n", namebuf); if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); return (EPROTO); } static void bwn_release_firmware(struct bwn_mac *mac) { bwn_do_release_fw(&mac->mac_fw.ucode); bwn_do_release_fw(&mac->mac_fw.pcm); bwn_do_release_fw(&mac->mac_fw.initvals); bwn_do_release_fw(&mac->mac_fw.initvals_band); } static void bwn_do_release_fw(struct bwn_fwfile *bfw) { if (bfw->fw != NULL) firmware_put(bfw->fw, FIRMWARE_UNLOAD); bfw->fw = NULL; bfw->filename = NULL; } static int bwn_fw_loaducode(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const uint32_t *)((const char *)fwp.fw->data + offset)) #define GETFWSIZE(fwp, offset) \ ((fwp.fw->datasize - offset) / sizeof(uint32_t)) struct bwn_softc *sc = mac->mac_sc; const uint32_t *data; unsigned int i; uint32_t ctl; uint16_t date, fwcaps, time; int error = 0; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl |= BWN_MACCTL_MCODE_JMP0; KASSERT(!(ctl & BWN_MACCTL_MCODE_RUN), ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_4(mac, BWN_MACCTL, ctl); for (i = 0; i < 64; i++) bwn_shm_write_2(mac, BWN_SCRATCH, i, 0); for (i = 0; i < 4096; i += 2) bwn_shm_write_2(mac, BWN_SHARED, i, 0); data = GETFWOFFSET(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_UCODE | BWN_SHARED_AUTOINC, 0x0000); for (i = 0; i < GETFWSIZE(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } if (mac->mac_fw.pcm.fw) { data = GETFWOFFSET(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_HW, 0x01ea); BWN_WRITE_4(mac, BWN_SHM_DATA, 0x00004000); bwn_shm_ctlword(mac, BWN_HW, 0x01eb); for (i = 0; i < GETFWSIZE(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } } BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_ALL); BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_JMP0) | BWN_MACCTL_MCODE_RUN); for (i = 0; i < 21; i++) { if (BWN_READ_4(mac, BWN_INTR_REASON) == BWN_INTR_MAC_SUSPENDED) break; if (i >= 20) { device_printf(sc->sc_dev, "ucode timeout\n"); error = ENXIO; goto error; } DELAY(50000); } BWN_READ_4(mac, BWN_INTR_REASON); mac->mac_fw.rev = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_REV); if (mac->mac_fw.rev <= 0x128) { device_printf(sc->sc_dev, "the firmware is too old\n"); error = EOPNOTSUPP; goto error; } mac->mac_fw.patch = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_PATCH); date = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_DATE); mac->mac_fw.opensource = (date == 0xffff); if (bwn_wme != 0) mac->mac_flags |= BWN_MAC_FLAG_WME; mac->mac_flags |= BWN_MAC_FLAG_HWCRYPTO; time = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_TIME); if (mac->mac_fw.opensource == 0) { device_printf(sc->sc_dev, "firmware version (rev %u patch %u date %#x time %#x)\n", mac->mac_fw.rev, mac->mac_fw.patch, date, time); if 
(mac->mac_fw.no_pcmfile) device_printf(sc->sc_dev, "no HW crypto acceleration due to pcm5\n"); } else { mac->mac_fw.patch = time; fwcaps = bwn_fwcaps_read(mac); if (!(fwcaps & BWN_FWCAPS_HWCRYPTO) || mac->mac_fw.no_pcmfile) { device_printf(sc->sc_dev, "disabling HW crypto acceleration\n"); mac->mac_flags &= ~BWN_MAC_FLAG_HWCRYPTO; } if (!(fwcaps & BWN_FWCAPS_WME)) { device_printf(sc->sc_dev, "disabling WME support\n"); mac->mac_flags &= ~BWN_MAC_FLAG_WME; } } if (BWN_ISOLDFMT(mac)) device_printf(sc->sc_dev, "using old firmware image\n"); return (0); error: BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_RUN) | BWN_MACCTL_MCODE_JMP0); return (error); #undef GETFWSIZE #undef GETFWOFFSET } /* OpenFirmware only */ static uint16_t bwn_fwcaps_read(struct bwn_mac *mac) { KASSERT(mac->mac_fw.opensource == 1, ("%s:%d: fail", __func__, __LINE__)); return (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_FWCAPS)); } static int bwn_fwinitvals_write(struct bwn_mac *mac, const struct bwn_fwinitvals *ivals, size_t count, size_t array_size) { #define GET_NEXTIV16(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint16_t))) #define GET_NEXTIV32(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint32_t))) struct bwn_softc *sc = mac->mac_sc; const struct bwn_fwinitvals *iv; uint16_t offset; size_t i; uint8_t bit32; KASSERT(sizeof(struct bwn_fwinitvals) == 6, ("%s:%d: fail", __func__, __LINE__)); iv = ivals; for (i = 0; i < count; i++) { if (array_size < sizeof(iv->offset_size)) goto fail; array_size -= sizeof(iv->offset_size); offset = be16toh(iv->offset_size); bit32 = (offset & BWN_FWINITVALS_32BIT) ? 1 : 0; offset &= BWN_FWINITVALS_OFFSET_MASK; if (offset >= 0x1000) goto fail; if (bit32) { if (array_size < sizeof(iv->data.d32)) goto fail; array_size -= sizeof(iv->data.d32); BWN_WRITE_4(mac, offset, be32toh(iv->data.d32)); iv = GET_NEXTIV32(iv); } else { if (array_size < sizeof(iv->data.d16)) goto fail; array_size -= sizeof(iv->data.d16); BWN_WRITE_2(mac, offset, be16toh(iv->data.d16)); iv = GET_NEXTIV16(iv); } } if (array_size != 0) goto fail; return (0); fail: device_printf(sc->sc_dev, "initvals: invalid format\n"); return (EPROTO); #undef GET_NEXTIV16 #undef GET_NEXTIV32 } static int bwn_switch_channel(struct bwn_mac *mac, int chan) { struct bwn_phy *phy = &(mac->mac_phy); struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t channelcookie, savedcookie; int error; if (chan == 0xffff) chan = phy->get_default_chan(mac); channelcookie = chan; if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) channelcookie |= 0x100; savedcookie = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_CHAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, channelcookie); error = phy->switch_channel(mac, chan); if (error) goto fail; mac->mac_phy.chan = chan; DELAY(8000); return (0); fail: device_printf(sc->sc_dev, "failed to switch channel\n"); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, savedcookie); return (error); } static uint16_t bwn_ant2phy(int antenna) { switch (antenna) { case BWN_ANT0: return (BWN_TX_PHY_ANT0); case BWN_ANT1: return (BWN_TX_PHY_ANT1); case BWN_ANT2: return (BWN_TX_PHY_ANT2); case BWN_ANT3: return (BWN_TX_PHY_ANT3); case BWN_ANTAUTO: return (BWN_TX_PHY_ANT01AUTO); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_wme_load(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), 
("%s:%d: fail", __func__, __LINE__)); bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) bwn_wme_loadparams(mac, &(sc->sc_wmeParams[i]), bwn_wme_shm_offsets[i]); bwn_mac_enable(mac); } static void bwn_wme_loadparams(struct bwn_mac *mac, const struct wmeParams *p, uint16_t shm_offset) { #define SM(_v, _f) (((_v) << _f##_S) & _f) struct bwn_softc *sc = mac->mac_sc; uint16_t params[BWN_NR_WMEPARAMS]; int slot, tmp; unsigned int i; slot = BWN_READ_2(mac, BWN_RNG) & SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); memset(¶ms, 0, sizeof(params)); DPRINTF(sc, BWN_DEBUG_WME, "wmep_txopLimit %d wmep_logcwmin %d " "wmep_logcwmax %d wmep_aifsn %d\n", p->wmep_txopLimit, p->wmep_logcwmin, p->wmep_logcwmax, p->wmep_aifsn); params[BWN_WMEPARAM_TXOP] = p->wmep_txopLimit * 32; params[BWN_WMEPARAM_CWMIN] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_CWMAX] = SM(p->wmep_logcwmax, WME_PARAM_LOGCWMAX); params[BWN_WMEPARAM_CWCUR] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_AIFS] = p->wmep_aifsn; params[BWN_WMEPARAM_BSLOTS] = slot; params[BWN_WMEPARAM_REGGAP] = slot + p->wmep_aifsn; for (i = 0; i < N(params); i++) { if (i == BWN_WMEPARAM_STATUS) { tmp = bwn_shm_read_2(mac, BWN_SHARED, shm_offset + (i * 2)); tmp |= 0x100; bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), tmp); } else { bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), params[i]); } } } static void bwn_mac_write_bssid(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; int i; uint8_t mac_bssid[IEEE80211_ADDR_LEN * 2]; bwn_mac_setfilter(mac, BWN_MACFILTER_BSSID, sc->sc_bssid); memcpy(mac_bssid, sc->sc_ic.ic_macaddr, IEEE80211_ADDR_LEN); memcpy(mac_bssid + IEEE80211_ADDR_LEN, sc->sc_bssid, IEEE80211_ADDR_LEN); for (i = 0; i < N(mac_bssid); i += sizeof(uint32_t)) { tmp = (uint32_t) (mac_bssid[i + 0]); tmp |= (uint32_t) (mac_bssid[i + 1]) << 8; tmp |= (uint32_t) (mac_bssid[i + 2]) << 16; tmp |= (uint32_t) (mac_bssid[i + 3]) << 24; bwn_ram_write(mac, 0x20 + i, tmp); } } static void bwn_mac_setfilter(struct bwn_mac *mac, uint16_t offset, const uint8_t *macaddr) { static const uint8_t zero[IEEE80211_ADDR_LEN] = { 0 }; uint16_t data; if (!mac) macaddr = zero; offset |= 0x0020; BWN_WRITE_2(mac, BWN_MACFILTER_CONTROL, offset); data = macaddr[0]; data |= macaddr[1] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[2]; data |= macaddr[3] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[4]; data |= macaddr[5] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); } static void bwn_key_dowrite(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key, size_t key_len, const uint8_t *mac_addr) { uint8_t buf[BWN_SEC_KEYSIZE] = { 0, }; uint8_t per_sta_keys_start = 8; if (BWN_SEC_NEWAPI(mac)) per_sta_keys_start = 4; KASSERT(index < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); KASSERT(key_len <= BWN_SEC_KEYSIZE, ("%s:%d: fail", __func__, __LINE__)); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, NULL); if (key) memcpy(buf, key, key_len); bwn_key_write(mac, index, algorithm, buf); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, mac_addr); mac->mac_key[index].algorithm = algorithm; } static void bwn_key_macwrite(struct bwn_mac *mac, uint8_t index, const uint8_t *addr) { struct bwn_softc *sc = mac->mac_sc; uint32_t addrtmp[2] = { 0, 0 }; uint8_t start = 8; if (BWN_SEC_NEWAPI(mac)) start = 4; KASSERT(index >= start, ("%s:%d: fail", __func__, __LINE__)); index -= start; if (addr) { addrtmp[0] = addr[0]; addrtmp[0] 
|= ((uint32_t) (addr[1]) << 8); addrtmp[0] |= ((uint32_t) (addr[2]) << 16); addrtmp[0] |= ((uint32_t) (addr[3]) << 24); addrtmp[1] = addr[4]; addrtmp[1] |= ((uint32_t) (addr[5]) << 8); } if (siba_get_revid(sc->sc_dev) >= 5) { bwn_shm_write_4(mac, BWN_RCMTA, (index * 2) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_RCMTA, (index * 2) + 1, addrtmp[1]); } else { if (index >= 8) { bwn_shm_write_4(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 4, addrtmp[1]); } } } static void bwn_key_write(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key) { unsigned int i; uint32_t offset; uint16_t kidx, value; kidx = BWN_SEC_KEY2FW(mac, index); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_KEYIDX_BLOCK + (kidx * 2), (kidx << 4) | algorithm); offset = mac->mac_ktp + (index * BWN_SEC_KEYSIZE); for (i = 0; i < BWN_SEC_KEYSIZE; i += 2) { value = key[i]; value |= (uint16_t)(key[i + 1]) << 8; bwn_shm_write_2(mac, BWN_SHARED, offset + i, value); } } static void bwn_phy_exit(struct bwn_mac *mac) { mac->mac_phy.rf_onoff(mac, 0); if (mac->mac_phy.exit != NULL) mac->mac_phy.exit(mac); } static void bwn_dma_free(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringfree(&dma->rx); bwn_dma_ringfree(&dma->wme[WME_AC_BK]); bwn_dma_ringfree(&dma->wme[WME_AC_BE]); bwn_dma_ringfree(&dma->wme[WME_AC_VI]); bwn_dma_ringfree(&dma->wme[WME_AC_VO]); bwn_dma_ringfree(&dma->mcast); } static void bwn_core_stop(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return; callout_stop(&sc->sc_rfswitch_ch); callout_stop(&sc->sc_task_ch); callout_stop(&sc->sc_watchdog_ch); sc->sc_watchdog_timer = 0; BWN_WRITE_4(mac, BWN_INTR_MASK, 0); BWN_READ_4(mac, BWN_INTR_MASK); bwn_mac_suspend(mac); mac->mac_status = BWN_MAC_STATUS_INITED; } static int bwn_switch_band(struct bwn_softc *sc, struct ieee80211_channel *chan) { struct bwn_mac *up_dev = NULL; struct bwn_mac *down_dev; struct bwn_mac *mac; int err, status; uint8_t gmode; BWN_ASSERT_LOCKED(sc); TAILQ_FOREACH(mac, &sc->sc_maclist, mac_list) { if (IEEE80211_IS_CHAN_2GHZ(chan) && mac->mac_phy.supports_2ghz) { up_dev = mac; gmode = 1; } else if (IEEE80211_IS_CHAN_5GHZ(chan) && mac->mac_phy.supports_5ghz) { up_dev = mac; gmode = 0; } else { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (EINVAL); } if (up_dev != NULL) break; } if (up_dev == NULL) { device_printf(sc->sc_dev, "Could not find a device\n"); return (ENODEV); } if (up_dev == sc->sc_curmac && sc->sc_curmac->mac_phy.gmode == gmode) return (0); device_printf(sc->sc_dev, "switching to %s-GHz band\n", IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5"); down_dev = sc->sc_curmac; status = down_dev->mac_status; if (status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(down_dev); if (status >= BWN_MAC_STATUS_INITED) bwn_core_exit(down_dev); if (down_dev != up_dev) bwn_phy_reset(down_dev); up_dev->mac_phy.gmode = gmode; if (status >= BWN_MAC_STATUS_INITED) { err = bwn_core_init(up_dev); if (err) { device_printf(sc->sc_dev, "fatal: failed to initialize for %s-GHz\n", IEEE80211_IS_CHAN_2GHZ(chan) ? 
"2" : "5"); goto fail; } } if (status >= BWN_MAC_STATUS_STARTED) bwn_core_start(up_dev); KASSERT(up_dev->mac_status == status, ("%s: fail", __func__)); sc->sc_curmac = up_dev; return (0); fail: sc->sc_curmac = NULL; return (err); } static void bwn_rf_turnon(struct bwn_mac *mac) { bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 1); mac->mac_phy.rf_on = 1; bwn_mac_enable(mac); } static void bwn_rf_turnoff(struct bwn_mac *mac) { bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 0); mac->mac_phy.rf_on = 0; bwn_mac_enable(mac); } static void bwn_phy_reset(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; siba_write_4(sc->sc_dev, SIBA_TGSLOW, ((siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~BWN_TGSLOW_SUPPORT_G) | BWN_TGSLOW_PHYRESET) | SIBA_TGSLOW_FGC); DELAY(1000); siba_write_4(sc->sc_dev, SIBA_TGSLOW, (siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~SIBA_TGSLOW_FGC) | BWN_TGSLOW_PHYRESET); DELAY(1000); } static int bwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct bwn_vap *bvp = BWN_VAP(vap); struct ieee80211com *ic= vap->iv_ic; enum ieee80211_state ostate = vap->iv_state; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; DPRINTF(sc, BWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); error = bvp->bv_newstate(vap, nstate, arg); if (error != 0) return (error); BWN_LOCK(sc); bwn_led_newstate(mac, nstate); /* * Clear the BSSID when we stop a STA */ if (vap->iv_opmode == IEEE80211_M_STA) { if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { /* * Clear out the BSSID. If we reassociate to * the same AP, this will reinialize things * correctly... */ if (ic->ic_opmode == IEEE80211_M_STA && (sc->sc_flags & BWN_FLAG_INVALID) == 0) { memset(sc->sc_bssid, 0, IEEE80211_ADDR_LEN); bwn_set_macaddr(mac); } } } if (vap->iv_opmode == IEEE80211_M_MONITOR || vap->iv_opmode == IEEE80211_M_AHDEMO) { /* XXX nothing to do? */ } else if (nstate == IEEE80211_S_RUN) { memcpy(sc->sc_bssid, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); } BWN_UNLOCK(sc); return (error); } static void bwn_set_pretbtt(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t pretbtt; if (ic->ic_opmode == IEEE80211_M_IBSS) pretbtt = 2; else pretbtt = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
120 : 250; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PRETBTT, pretbtt); BWN_WRITE_2(mac, BWN_TSF_CFP_PRETBTT, pretbtt); } static int bwn_intr(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t reason; if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) return (FILTER_STRAY); reason = BWN_READ_4(mac, BWN_INTR_REASON); if (reason == 0xffffffff) /* shared IRQ */ return (FILTER_STRAY); reason &= mac->mac_intr_mask; if (reason == 0) return (FILTER_HANDLED); mac->mac_reason[0] = BWN_READ_4(mac, BWN_DMA0_REASON) & 0x0001dc00; mac->mac_reason[1] = BWN_READ_4(mac, BWN_DMA1_REASON) & 0x0000dc00; mac->mac_reason[2] = BWN_READ_4(mac, BWN_DMA2_REASON) & 0x0000dc00; mac->mac_reason[3] = BWN_READ_4(mac, BWN_DMA3_REASON) & 0x0001dc00; mac->mac_reason[4] = BWN_READ_4(mac, BWN_DMA4_REASON) & 0x0000dc00; BWN_WRITE_4(mac, BWN_INTR_REASON, reason); BWN_WRITE_4(mac, BWN_DMA0_REASON, mac->mac_reason[0]); BWN_WRITE_4(mac, BWN_DMA1_REASON, mac->mac_reason[1]); BWN_WRITE_4(mac, BWN_DMA2_REASON, mac->mac_reason[2]); BWN_WRITE_4(mac, BWN_DMA3_REASON, mac->mac_reason[3]); BWN_WRITE_4(mac, BWN_DMA4_REASON, mac->mac_reason[4]); /* Disable interrupts. */ BWN_WRITE_4(mac, BWN_INTR_MASK, 0); mac->mac_reason_intr = reason; BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); taskqueue_enqueue(sc->sc_tq, &mac->mac_intrtask); return (FILTER_HANDLED); } static void bwn_intrtask(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t merged = 0; int i, tx = 0, rx = 0; BWN_LOCK(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) { BWN_UNLOCK(sc); return; } for (i = 0; i < N(mac->mac_reason); i++) merged |= mac->mac_reason[i]; if (mac->mac_reason_intr & BWN_INTR_MAC_TXERR) device_printf(sc->sc_dev, "MAC trans error\n"); if (mac->mac_reason_intr & BWN_INTR_PHY_TXERR) { DPRINTF(sc, BWN_DEBUG_INTR, "%s: PHY trans error\n", __func__); mac->mac_phy.txerrors--; if (mac->mac_phy.txerrors == 0) { mac->mac_phy.txerrors = BWN_TXERROR_MAX; bwn_restart(mac, "PHY TX errors"); } } if (merged & (BWN_DMAINTR_FATALMASK | BWN_DMAINTR_NONFATALMASK)) { if (merged & BWN_DMAINTR_FATALMASK) { device_printf(sc->sc_dev, "Fatal DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); bwn_restart(mac, "DMA error"); BWN_UNLOCK(sc); return; } if (merged & BWN_DMAINTR_NONFATALMASK) { device_printf(sc->sc_dev, "DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); } } if (mac->mac_reason_intr & BWN_INTR_UCODE_DEBUG) bwn_intr_ucode_debug(mac); if (mac->mac_reason_intr & BWN_INTR_TBTT_INDI) bwn_intr_tbtt_indication(mac); if (mac->mac_reason_intr & BWN_INTR_ATIM_END) bwn_intr_atim_end(mac); if (mac->mac_reason_intr & BWN_INTR_BEACON) bwn_intr_beacon(mac); if (mac->mac_reason_intr & BWN_INTR_PMQ) bwn_intr_pmq(mac); if (mac->mac_reason_intr & BWN_INTR_NOISESAMPLE_OK) bwn_intr_noise(mac); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) { bwn_dma_rx(mac->mac_method.dma.rx); rx = 1; } } else rx = bwn_pio_rx(&mac->mac_method.pio.rx); KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[3] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); 
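	/*
	 * Only the first DMA engine (mac_reason[0]) is expected to
	 * deliver RX-done interrupts; the remaining engines are
	 * TX-only on this hardware, which is what these assertions
	 * encode.
	 */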
KASSERT(!(mac->mac_reason[4] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[5] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); if (mac->mac_reason_intr & BWN_INTR_TX_OK) { bwn_intr_txeof(mac); tx = 1; } BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); if (sc->sc_blink_led != NULL && sc->sc_led_blink) { int evt = BWN_LED_EVENT_NONE; if (tx && rx) { if (sc->sc_rx_rate > sc->sc_tx_rate) evt = BWN_LED_EVENT_RX; else evt = BWN_LED_EVENT_TX; } else if (tx) { evt = BWN_LED_EVENT_TX; } else if (rx) { evt = BWN_LED_EVENT_RX; } else if (rx == 0) { evt = BWN_LED_EVENT_POLL; } if (evt != BWN_LED_EVENT_NONE) bwn_led_event(mac, evt); } if (mbufq_first(&sc->sc_snd) != NULL) bwn_start(sc); BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_UNLOCK(sc); } static void bwn_restart(struct bwn_mac *mac, const char *msg) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (mac->mac_status < BWN_MAC_STATUS_INITED) return; device_printf(sc->sc_dev, "HW reset: %s\n", msg); ieee80211_runtask(ic, &mac->mac_hwreset); } static void bwn_intr_ucode_debug(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; if (mac->mac_fw.opensource == 0) return; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG); switch (reason) { case BWN_DEBUGINTR_PANIC: bwn_handle_fwpanic(mac); break; case BWN_DEBUGINTR_DUMP_SHM: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_SHM\n"); break; case BWN_DEBUGINTR_DUMP_REGS: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_REGS\n"); break; case BWN_DEBUGINTR_MARKER: device_printf(sc->sc_dev, "BWN_DEBUGINTR_MARKER\n"); break; default: device_printf(sc->sc_dev, "ucode debug unknown reason: %#x\n", reason); } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG, BWN_DEBUGINTR_ACK); } static void bwn_intr_tbtt_indication(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); if (ic->ic_opmode == IEEE80211_M_IBSS) mac->mac_flags |= BWN_MAC_FLAG_DFQVALID; } static void bwn_intr_atim_end(struct bwn_mac *mac) { if (mac->mac_flags & BWN_MAC_FLAG_DFQVALID) { BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_DFQ_VALID); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; } } static void bwn_intr_beacon(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t cmd, beacon0, beacon1; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) return; mac->mac_intr_mask &= ~BWN_INTR_BEACON; cmd = BWN_READ_4(mac, BWN_MACCMD); beacon0 = (cmd & BWN_MACCMD_BEACON0_VALID); beacon1 = (cmd & BWN_MACCMD_BEACON1_VALID); if (beacon0 && beacon1) { BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_BEACON); mac->mac_intr_mask |= BWN_INTR_BEACON; return; } if (sc->sc_flags & BWN_FLAG_NEED_BEACON_TP) { sc->sc_flags &= ~BWN_FLAG_NEED_BEACON_TP; bwn_load_beacon0(mac); bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else { if (!beacon0) { bwn_load_beacon0(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else if (!beacon1) { bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON1_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } } } static void bwn_intr_pmq(struct bwn_mac *mac) { uint32_t tmp; while (1) { tmp = BWN_READ_4(mac, BWN_PS_STATUS); if (!(tmp & 0x00000008)) break; } 
BWN_WRITE_2(mac, BWN_PS_STATUS, 0x0002); } static void bwn_intr_noise(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t tmp; uint8_t noise[4]; uint8_t i, j; int32_t average; if (mac->mac_phy.type != BWN_PHYTYPE_G) return; KASSERT(mac->mac_noise.noi_running, ("%s: fail", __func__)); *((uint32_t *)noise) = htole32(bwn_jssi_read(mac)); if (noise[0] == 0x7f || noise[1] == 0x7f || noise[2] == 0x7f || noise[3] == 0x7f) goto new; KASSERT(mac->mac_noise.noi_nsamples < 8, ("%s:%d: fail", __func__, __LINE__)); i = mac->mac_noise.noi_nsamples; noise[0] = MIN(MAX(noise[0], 0), N(pg->pg_nrssi_lt) - 1); noise[1] = MIN(MAX(noise[1], 0), N(pg->pg_nrssi_lt) - 1); noise[2] = MIN(MAX(noise[2], 0), N(pg->pg_nrssi_lt) - 1); noise[3] = MIN(MAX(noise[3], 0), N(pg->pg_nrssi_lt) - 1); mac->mac_noise.noi_samples[i][0] = pg->pg_nrssi_lt[noise[0]]; mac->mac_noise.noi_samples[i][1] = pg->pg_nrssi_lt[noise[1]]; mac->mac_noise.noi_samples[i][2] = pg->pg_nrssi_lt[noise[2]]; mac->mac_noise.noi_samples[i][3] = pg->pg_nrssi_lt[noise[3]]; mac->mac_noise.noi_nsamples++; if (mac->mac_noise.noi_nsamples == 8) { average = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 4; j++) average += mac->mac_noise.noi_samples[i][j]; } average = (((average / 32) * 125) + 64) / 128; tmp = (bwn_shm_read_2(mac, BWN_SHARED, 0x40c) / 128) & 0x1f; if (tmp >= 8) average += 2; else average -= 25; average -= (tmp == 8) ? 72 : 48; mac->mac_stats.link_noise = average; mac->mac_noise.noi_running = 0; return; } new: bwn_noise_gensample(mac); } static int bwn_pio_rx(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; unsigned int i; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return (0); for (i = 0; i < 5000; i++) { if (bwn_pio_rxeof(prq) == 0) break; } if (i >= 5000) device_printf(sc->sc_dev, "too many RX frames in PIO mode\n"); return ((i > 0) ? 1 : 0); } static void bwn_dma_rx(struct bwn_dma_ring *dr) { int slot, curslot; KASSERT(!dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); curslot = dr->get_curslot(dr); KASSERT(curslot >= 0 && curslot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); slot = dr->dr_curslot; for (; slot != curslot; slot = bwn_dma_nextslot(dr, slot)) bwn_dma_rxeof(dr, &slot); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->set_curslot(dr, slot); dr->dr_curslot = slot; } static void bwn_intr_txeof(struct bwn_mac *mac) { struct bwn_txstatus stat; uint32_t stat0, stat1; uint16_t tmp; BWN_ASSERT_LOCKED(mac->mac_sc); while (1) { stat0 = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(stat0 & 0x00000001)) break; stat1 = BWN_READ_4(mac, BWN_XMITSTAT_1); stat.cookie = (stat0 >> 16); stat.seq = (stat1 & 0x0000ffff); stat.phy_stat = ((stat1 & 0x00ff0000) >> 16); tmp = (stat0 & 0x0000ffff); stat.framecnt = ((tmp & 0xf000) >> 12); stat.rtscnt = ((tmp & 0x0f00) >> 8); stat.sreason = ((tmp & 0x001c) >> 2); stat.pm = (tmp & 0x0080) ? 1 : 0; stat.im = (tmp & 0x0040) ? 1 : 0; stat.ampdu = (tmp & 0x0020) ? 1 : 0; stat.ack = (tmp & 0x0002) ? 
1 : 0; bwn_handle_txeof(mac, &stat); } } static void bwn_hwreset(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; int error = 0; int prev_status; BWN_LOCK(sc); prev_status = mac->mac_status; if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); if (prev_status >= BWN_MAC_STATUS_INITED) bwn_core_exit(mac); if (prev_status >= BWN_MAC_STATUS_INITED) { error = bwn_core_init(mac); if (error) goto out; } if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_start(mac); out: if (error) { device_printf(sc->sc_dev, "%s: failed (%d)\n", __func__, error); sc->sc_curmac = NULL; } BWN_UNLOCK(sc); } static void bwn_handle_fwpanic(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_FWPANIC_REASON_REG); device_printf(sc->sc_dev,"fw panic (%u)\n", reason); if (reason == BWN_FWPANIC_RESTART) bwn_restart(mac, "ucode panic"); } static void bwn_load_beacon0(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static void bwn_load_beacon1(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static uint32_t bwn_jssi_read(struct bwn_mac *mac) { uint32_t val = 0; val = bwn_shm_read_2(mac, BWN_SHARED, 0x08a); val <<= 16; val |= bwn_shm_read_2(mac, BWN_SHARED, 0x088); return (val); } static void bwn_noise_gensample(struct bwn_mac *mac) { uint32_t jssi = 0x7f7f7f7f; bwn_shm_write_2(mac, BWN_SHARED, 0x088, (jssi & 0x0000ffff)); bwn_shm_write_2(mac, BWN_SHARED, 0x08a, (jssi & 0xffff0000) >> 16); BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_BGNOISE); } static int bwn_dma_freeslot(struct bwn_dma_ring *dr) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); return (dr->dr_numslots - dr->dr_usedslot); } static int bwn_dma_nextslot(struct bwn_dma_ring *dr, int slot) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(slot >= -1 && slot <= dr->dr_numslots - 1, ("%s:%d: fail", __func__, __LINE__)); if (slot == dr->dr_numslots - 1) return (0); return (slot + 1); } static void bwn_dma_rxeof(struct bwn_dma_ring *dr, int *slot) { struct bwn_mac *mac = dr->dr_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_rxhdr4 *rxhdr; struct mbuf *m; uint32_t macstat; int32_t tmp; int cnt = 0; uint16_t len; dr->getdesc(dr, *slot, &desc, &meta); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_POSTREAD); m = meta->mt_m; if (bwn_dma_newbuf(dr, desc, meta, 0)) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } rxhdr = mtod(m, struct bwn_rxhdr4 *); len = le16toh(rxhdr->frame_len); if (len <= 0) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } if (bwn_dma_check_redzone(dr, m)) { device_printf(sc->sc_dev, "redzone error.\n"); bwn_dma_set_redzone(dr, m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); return; } if (len > dr->dr_rx_bufsize) { tmp = len; while (1) { dr->getdesc(dr, *slot, &desc, &meta); bwn_dma_set_redzone(dr, meta->mt_m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); *slot = bwn_dma_nextslot(dr, *slot); cnt++; tmp -= dr->dr_rx_bufsize; if (tmp <= 0) break; } device_printf(sc->sc_dev, "too small buffer " "(len %u buffer %u dropped %d)\n", len, dr->dr_rx_bufsize, cnt); return; } macstat = le32toh(rxhdr->mac_status); if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "RX drop\n"); return; } } m->m_len = 
m->m_pkthdr.len = len + dr->dr_frameoffset; m_adj(m, dr->dr_frameoffset); bwn_rxeof(dr->dr_mac, m, rxhdr); } static void bwn_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; struct bwn_stats *stats = &mac->mac_stats; struct ieee80211_node *ni; struct ieee80211vap *vap; int retrycnt = 0, slot; BWN_ASSERT_LOCKED(mac->mac_sc); if (status->im) device_printf(sc->sc_dev, "TODO: STATUS IM\n"); if (status->ampdu) device_printf(sc->sc_dev, "TODO: STATUS AMPDU\n"); if (status->rtscnt) { if (status->rtscnt == 0xf) stats->rtsfail++; else stats->rts++; } if (mac->mac_flags & BWN_MAC_FLAG_DMA) { if (status->ack) { dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } while (1) { dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_islast) { ni = meta->mt_ni; vap = ni->ni_vap; ieee80211_ratectl_tx_complete(vap, ni, status->ack ? IEEE80211_RATECTL_TX_SUCCESS : IEEE80211_RATECTL_TX_FAILURE, &retrycnt, 0); break; } slot = bwn_dma_nextslot(dr, slot); } } bwn_dma_handle_txeof(mac, status); } else { if (status->ack) { tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } ni = tp->tp_ni; vap = ni->ni_vap; ieee80211_ratectl_tx_complete(vap, ni, status->ack ? IEEE80211_RATECTL_TX_SUCCESS : IEEE80211_RATECTL_TX_FAILURE, &retrycnt, 0); } bwn_pio_handle_txeof(mac, status); } bwn_phy_txpower_check(mac, 0); } static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_rxhdr4 rxhdr; struct mbuf *m; uint32_t ctl32, macstat, v32; unsigned int i, padding; uint16_t ctl16, len, totlen, v16; unsigned char *mp; char *data; memset(&rxhdr, 0, sizeof(rxhdr)); if (prq->prq_rev >= 8) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (!(ctl32 & BWN_PIO8_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (ctl32 & BWN_PIO8_RXCTL_DATAREADY) goto ready; DELAY(10); } } else { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (!(ctl16 & BWN_PIO_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (ctl16 & BWN_PIO_RXCTL_DATAREADY) goto ready; DELAY(10); } } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (1); ready: if (prq->prq_rev >= 8) siba_read_multi_4(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO8_RXDATA); else siba_read_multi_2(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO_RXDATA); len = le16toh(rxhdr.frame_len); if (len > 0x700) { device_printf(sc->sc_dev, "%s: len is too big\n", __func__); goto error; } if (len == 0) { device_printf(sc->sc_dev, "%s: len is 0\n", __func__); goto error; } macstat = le32toh(rxhdr.mac_status); if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "%s: FCS error", __func__); goto error; } } padding = (macstat & BWN_RX_MAC_PADDING) ? 
2 : 0; totlen = len + padding; KASSERT(totlen <= MCLBYTES, ("too big..\n")); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->sc_dev, "%s: out of memory", __func__); goto error; } mp = mtod(m, unsigned char *); if (prq->prq_rev >= 8) { siba_read_multi_4(sc->sc_dev, mp, (totlen & ~3), prq->prq_base + BWN_PIO8_RXDATA); if (totlen & 3) { v32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXDATA); data = &(mp[totlen - 1]); switch (totlen & 3) { case 3: *data = (v32 >> 16); data--; case 2: *data = (v32 >> 8); data--; case 1: *data = v32; } } } else { siba_read_multi_2(sc->sc_dev, mp, (totlen & ~1), prq->prq_base + BWN_PIO_RXDATA); if (totlen & 1) { v16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXDATA); mp[totlen - 1] = v16; } } m->m_len = m->m_pkthdr.len = totlen; bwn_rxeof(prq->prq_mac, m, &rxhdr); return (1); error: if (prq->prq_rev >= 8) bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_DATAREADY); else bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_DATAREADY); return (1); } static int bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, struct bwn_dmadesc_meta *meta, int init) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_rxhdr4 *hdr; bus_dmamap_t map; bus_addr_t paddr; struct mbuf *m; int error; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { error = ENOBUFS; /* * If the NIC is up and running, we need to: * - Clear RX buffer's header. * - Restore RX descriptor settings. */ if (init) return (error); else goto back; } m->m_len = m->m_pkthdr.len = MCLBYTES; bwn_dma_set_redzone(dr, m); /* * Try to load RX buf into temporary DMA map */ error = bus_dmamap_load_mbuf(dma->rxbuf_dtag, dr->dr_spare_dmap, m, bwn_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { m_freem(m); /* * See the comment above */ if (init) return (error); else goto back; } if (!init) bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); meta->mt_m = m; meta->mt_paddr = paddr; /* * Swap RX buf's DMA map with the loaded temporary one */ map = meta->mt_dmap; meta->mt_dmap = dr->dr_spare_dmap; dr->dr_spare_dmap = map; back: /* * Clear RX buf header */ hdr = mtod(meta->mt_m, struct bwn_rxhdr4 *); bzero(hdr, sizeof(*hdr)); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); /* * Setup RX buf descriptor */ dr->setdesc(dr, desc, meta->mt_paddr, meta->mt_m->m_len - sizeof(*hdr), 0, 0, 0); return (error); } static void bwn_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsz __unused, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static int bwn_hwrate2ieeerate(int rate) { switch (rate) { case BWN_CCK_RATE_1MB: return (2); case BWN_CCK_RATE_2MB: return (4); case BWN_CCK_RATE_5MB: return (11); case BWN_CCK_RATE_11MB: return (22); case BWN_OFDM_RATE_6MB: return (12); case BWN_OFDM_RATE_9MB: return (18); case BWN_OFDM_RATE_12MB: return (24); case BWN_OFDM_RATE_18MB: return (36); case BWN_OFDM_RATE_24MB: return (48); case BWN_OFDM_RATE_36MB: return (72); case BWN_OFDM_RATE_48MB: return (96); case BWN_OFDM_RATE_54MB: return (108); default: printf("Ooops\n"); return (0); } } static void bwn_rxeof(struct bwn_mac *mac, struct mbuf *m, const void *_rxhdr) { const struct bwn_rxhdr4 *rxhdr = _rxhdr; struct bwn_plcp6 *plcp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct ieee80211com *ic = &sc->sc_ic; uint32_t macstat; int padding, rate, rssi = 0, noise = 0, type; uint16_t phytype, 
phystat0, phystat3, chanstat; unsigned char *mp = mtod(m, unsigned char *); static int rx_mac_dec_rpt = 0; BWN_ASSERT_LOCKED(sc); phystat0 = le16toh(rxhdr->phy_status0); phystat3 = le16toh(rxhdr->phy_status3); macstat = le32toh(rxhdr->mac_status); chanstat = le16toh(rxhdr->channel); phytype = chanstat & BWN_RX_CHAN_PHYTYPE; if (macstat & BWN_RX_MAC_FCSERR) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_FCS_CRC\n"); if (phystat0 & (BWN_RX_PHYST0_PLCPHCF | BWN_RX_PHYST0_PLCPFV)) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_PLCP_CRC\n"); if (macstat & BWN_RX_MAC_DECERR) goto drop; padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0; if (m->m_pkthdr.len < (sizeof(struct bwn_plcp6) + padding)) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } plcp = (struct bwn_plcp6 *)(mp + padding); m_adj(m, sizeof(struct bwn_plcp6) + padding); if (m->m_pkthdr.len < IEEE80211_MIN_LEN) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } wh = mtod(m, struct ieee80211_frame_min *); if (macstat & BWN_RX_MAC_DEC && rx_mac_dec_rpt++ < 50) device_printf(sc->sc_dev, "RX decryption attempted (old %d keyidx %#x)\n", BWN_ISOLDFMT(mac), (macstat & BWN_RX_MAC_KEYIDX) >> BWN_RX_MAC_KEYIDX_SHIFT); /* XXX calculating RSSI & noise & antenna */ if (phystat0 & BWN_RX_PHYST0_OFDM) rate = bwn_plcp_get_ofdmrate(mac, plcp, phytype == BWN_PHYTYPE_A); else rate = bwn_plcp_get_cckrate(mac, plcp); if (rate == -1) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADPLCP)) goto drop; } sc->sc_rx_rate = bwn_hwrate2ieeerate(rate); /* RX radio tap */ if (ieee80211_radiotap_active(ic)) bwn_rx_radiotap(mac, m, rxhdr, plcp, rate, rssi, noise); m_adj(m, -IEEE80211_CRC_LEN); rssi = rxhdr->phy.abg.rssi; /* XXX incorrect RSSI calculation? 
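	 * The raw phy.abg.rssi byte is passed up unconverted; a full
	 * implementation would scale it per PHY type and LNA state
	 * before reporting, so the RSSI seen by net80211 here is only
	 * approximate.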
*/ noise = mac->mac_stats.link_noise; BWN_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, wh); if (ni != NULL) { type = ieee80211_input(ni, m, rssi, noise); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi, noise); BWN_LOCK(sc); return; drop: device_printf(sc->sc_dev, "%s: dropped\n", __func__); } static void bwn_dma_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_softc *sc = mac->mac_sc; int slot; BWN_ASSERT_LOCKED(sc); dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); while (1) { KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); if (meta->mt_islast) { KASSERT(meta->mt_m != NULL, ("%s:%d: fail", __func__, __LINE__)); ieee80211_tx_complete(meta->mt_ni, meta->mt_m, 0); meta->mt_ni = NULL; meta->mt_m = NULL; } else KASSERT(meta->mt_m == NULL, ("%s:%d: fail", __func__, __LINE__)); dr->dr_usedslot--; if (meta->mt_islast) break; slot = bwn_dma_nextslot(dr, slot); } sc->sc_watchdog_timer = 0; if (dr->dr_stop) { KASSERT(bwn_dma_freeslot(dr) >= BWN_TX_SLOTS_PER_FRAME, ("%s:%d: fail", __func__, __LINE__)); dr->dr_stop = 0; } } static void bwn_pio_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) return; tq->tq_used -= roundup(tp->tp_m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free++; if (tp->tp_ni != NULL) { /* * Do any tx complete callback. Note this must * be done before releasing the node reference. */ if (tp->tp_m->m_flags & M_TXCB) ieee80211_process_callback(tp->tp_ni, tp->tp_m, 0); ieee80211_free_node(tp->tp_ni); tp->tp_ni = NULL; } m_freem(tp->tp_m); tp->tp_m = NULL; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); sc->sc_watchdog_timer = 0; } static void bwn_phy_txpower_check(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct ieee80211com *ic = &sc->sc_ic; unsigned long now; int result; BWN_GETTIME(now); if (!(flags & BWN_TXPWR_IGNORE_TIME) && ieee80211_time_before(now, phy->nexttime)) return; phy->nexttime = now + 2 * 1000; if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) return; if (phy->recalc_txpwr != NULL) { result = phy->recalc_txpwr(mac, (flags & BWN_TXPWR_IGNORE_TSSI) ? 
1 : 0); if (result == BWN_TXPWR_RES_DONE) return; KASSERT(result == BWN_TXPWR_RES_NEED_ADJUST, ("%s: fail", __func__)); KASSERT(phy->set_txpwr != NULL, ("%s: fail", __func__)); ieee80211_runtask(ic, &mac->mac_txpower); } } static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_2(prq->prq_mac, prq->prq_base + offset)); } static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_4(prq->prq_mac, prq->prq_base + offset)); } static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *prq, uint16_t offset, uint16_t value) { BWN_WRITE_2(prq->prq_mac, prq->prq_base + offset, value); } static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *prq, uint16_t offset, uint32_t value) { BWN_WRITE_4(prq->prq_mac, prq->prq_base + offset, value); } static int bwn_ieeerate2hwrate(struct bwn_softc *sc, int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return (BWN_OFDM_RATE_6MB); case 18: return (BWN_OFDM_RATE_9MB); case 24: return (BWN_OFDM_RATE_12MB); case 36: return (BWN_OFDM_RATE_18MB); case 48: return (BWN_OFDM_RATE_24MB); case 72: return (BWN_OFDM_RATE_36MB); case 96: return (BWN_OFDM_RATE_48MB); case 108: return (BWN_OFDM_RATE_54MB); /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return (BWN_CCK_RATE_1MB); case 4: return (BWN_CCK_RATE_2MB); case 11: return (BWN_CCK_RATE_5MB); case 22: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "unsupported rate %d\n", rate); return (BWN_CCK_RATE_1MB); } static int bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m, struct bwn_txhdr *txhdr, uint16_t cookie) { const struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame *wh; struct ieee80211_frame *protwh; struct ieee80211_frame_cts *cts; struct ieee80211_frame_rts *rts; const struct ieee80211_txparam *tp; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct mbuf *mprot; unsigned int len; uint32_t macctl = 0; int protdur, rts_rate, rts_rate_fb, ismcast, isshort, rix, type; uint16_t phyctl = 0; uint8_t rate, rate_fb; wh = mtod(m, struct ieee80211_frame *); memset(txhdr, 0, sizeof(*txhdr)); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; /* * Find TX rate */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) rate = rate_fb = tp->mgmtrate; else if (ismcast) rate = rate_fb = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = rate_fb = tp->ucastrate; else { rix = ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; if (rix > 0) rate_fb = ni->ni_rates.rs_rates[rix - 1] & IEEE80211_RATE_VAL; else rate_fb = rate; } sc->sc_tx_rate = rate; rate = bwn_ieeerate2hwrate(sc, rate); rate_fb = bwn_ieeerate2hwrate(sc, rate_fb); txhdr->phyrate = (BWN_ISOFDMRATE(rate)) ? bwn_plcp_getofdm(rate) : bwn_plcp_getcck(rate); bcopy(wh->i_fc, txhdr->macfc, sizeof(txhdr->macfc)); bcopy(wh->i_addr1, txhdr->addr1, IEEE80211_ADDR_LEN); if ((rate_fb == rate) || (*(u_int16_t *)wh->i_dur & htole16(0x8000)) || (*(u_int16_t *)wh->i_dur == htole16(0))) txhdr->dur_fb = *(u_int16_t *)wh->i_dur; else txhdr->dur_fb = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort); /* XXX TX encryption */ bwn_plcp_genhdr(BWN_ISOLDFMT(mac) ? 
	    (struct bwn_plcp4 *)(&txhdr->body.old.plcp) :
	    (struct bwn_plcp4 *)(&txhdr->body.new.plcp),
	    m->m_pkthdr.len + IEEE80211_CRC_LEN, rate);
	bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->plcp_fb),
	    m->m_pkthdr.len + IEEE80211_CRC_LEN, rate_fb);
	txhdr->eftypes |= (BWN_ISOFDMRATE(rate_fb)) ? BWN_TX_EFT_FB_OFDM :
	    BWN_TX_EFT_FB_CCK;
	txhdr->chan = phy->chan;
	phyctl |= (BWN_ISOFDMRATE(rate)) ? BWN_TX_PHY_ENC_OFDM :
	    BWN_TX_PHY_ENC_CCK;
	if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB ||
	    rate == BWN_CCK_RATE_11MB))
		phyctl |= BWN_TX_PHY_SHORTPRMBL;

	/* XXX TX antenna selection */
	switch (bwn_antenna_sanitize(mac, 0)) {
	case 0:
		phyctl |= BWN_TX_PHY_ANT01AUTO;
		break;
	case 1:
		phyctl |= BWN_TX_PHY_ANT0;
		break;
	case 2:
		phyctl |= BWN_TX_PHY_ANT1;
		break;
	case 3:
		phyctl |= BWN_TX_PHY_ANT2;
		break;
	case 4:
		phyctl |= BWN_TX_PHY_ANT3;
		break;
	default:
		KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
	}

	if (!ismcast)
		macctl |= BWN_TX_MAC_ACK;

	macctl |= (BWN_TX_MAC_HWSEQ | BWN_TX_MAC_START_MSDU);
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
		macctl |= BWN_TX_MAC_LONGFRAME;

	if (ic->ic_flags & IEEE80211_F_USEPROT) {
		/* XXX RTS rate is always 1MB??? */
		rts_rate = BWN_CCK_RATE_1MB;
		rts_rate_fb = bwn_get_fbrate(rts_rate);

		protdur = ieee80211_compute_duration(ic->ic_rt,
		    m->m_pkthdr.len, rate, isshort) +
		    ieee80211_ack_duration(ic->ic_rt, rate, isshort);

		if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			cts = (struct ieee80211_frame_cts *)(BWN_ISOLDFMT(mac) ?
			    (txhdr->body.old.rts_frame) :
			    (txhdr->body.new.rts_frame));
			mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr,
			    protdur);
			KASSERT(mprot != NULL, ("failed to alloc mbuf\n"));
			bcopy(mtod(mprot, uint8_t *), (uint8_t *)cts,
			    mprot->m_pkthdr.len);
			m_freem(mprot);
			macctl |= BWN_TX_MAC_SEND_CTSTOSELF;
			len = sizeof(struct ieee80211_frame_cts);
		} else {
			rts = (struct ieee80211_frame_rts *)(BWN_ISOLDFMT(mac) ?
			    (txhdr->body.old.rts_frame) :
			    (txhdr->body.new.rts_frame));
			protdur += ieee80211_ack_duration(ic->ic_rt, rate,
			    isshort);
			mprot = ieee80211_alloc_rts(ic, wh->i_addr1,
			    wh->i_addr2, protdur);
			KASSERT(mprot != NULL, ("failed to alloc mbuf\n"));
			bcopy(mtod(mprot, uint8_t *), (uint8_t *)rts,
			    mprot->m_pkthdr.len);
			m_freem(mprot);
			macctl |= BWN_TX_MAC_SEND_RTSCTS;
			len = sizeof(struct ieee80211_frame_rts);
		}
		len += IEEE80211_CRC_LEN;
		bwn_plcp_genhdr((struct bwn_plcp4 *)((BWN_ISOLDFMT(mac)) ?
		    &txhdr->body.old.rts_plcp :
		    &txhdr->body.new.rts_plcp), len, rts_rate);
		bwn_plcp_genhdr((struct bwn_plcp4 *)&txhdr->rts_plcp_fb, len,
		    rts_rate_fb);
		protwh = (struct ieee80211_frame *)(BWN_ISOLDFMT(mac) ?
		    (&txhdr->body.old.rts_frame) :
		    (&txhdr->body.new.rts_frame));
		txhdr->rts_dur_fb = *(u_int16_t *)protwh->i_dur;

		if (BWN_ISOFDMRATE(rts_rate)) {
			txhdr->eftypes |= BWN_TX_EFT_RTS_OFDM;
			txhdr->phyrate_rts = bwn_plcp_getofdm(rts_rate);
		} else {
			txhdr->eftypes |= BWN_TX_EFT_RTS_CCK;
			txhdr->phyrate_rts = bwn_plcp_getcck(rts_rate);
		}
		txhdr->eftypes |= (BWN_ISOFDMRATE(rts_rate_fb)) ?
BWN_TX_EFT_RTS_FBOFDM : BWN_TX_EFT_RTS_FBCCK; } if (BWN_ISOLDFMT(mac)) txhdr->body.old.cookie = htole16(cookie); else txhdr->body.new.cookie = htole16(cookie); txhdr->macctl = htole32(macctl); txhdr->phyctl = htole16(phyctl); /* * TX radio tap */ if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_tx_th.wt_rate = rate; ieee80211_radiotap_tx(vap, m); } return (0); } static void bwn_plcp_genhdr(struct bwn_plcp4 *plcp, const uint16_t octets, const uint8_t rate) { uint32_t d, plen; uint8_t *raw = plcp->o.raw; if (BWN_ISOFDMRATE(rate)) { d = bwn_plcp_getofdm(rate); KASSERT(!(octets & 0xf000), ("%s:%d: fail", __func__, __LINE__)); d |= (octets << 5); plcp->o.data = htole32(d); } else { plen = octets * 16 / rate; if ((octets * 16 % rate) > 0) { plen++; if ((rate == BWN_CCK_RATE_11MB) && ((octets * 8 % 11) < 4)) { raw[1] = 0x84; } else raw[1] = 0x04; } else raw[1] = 0x04; plcp->o.data |= htole32(plen << 16); raw[0] = bwn_plcp_getcck(rate); } } static uint8_t bwn_antenna_sanitize(struct bwn_mac *mac, uint8_t n) { struct bwn_softc *sc = mac->mac_sc; uint8_t mask; if (n == 0) return (0); if (mac->mac_phy.gmode) mask = siba_sprom_get_ant_bg(sc->sc_dev); else mask = siba_sprom_get_ant_a(sc->sc_dev); if (!(mask & (1 << (n - 1)))) return (0); return (n); } static uint8_t bwn_get_fbrate(uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_2MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_5MB: return (BWN_CCK_RATE_2MB); case BWN_CCK_RATE_11MB: return (BWN_CCK_RATE_5MB); case BWN_OFDM_RATE_6MB: return (BWN_CCK_RATE_5MB); case BWN_OFDM_RATE_9MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_12MB: return (BWN_OFDM_RATE_9MB); case BWN_OFDM_RATE_18MB: return (BWN_OFDM_RATE_12MB); case BWN_OFDM_RATE_24MB: return (BWN_OFDM_RATE_18MB); case BWN_OFDM_RATE_36MB: return (BWN_OFDM_RATE_24MB); case BWN_OFDM_RATE_48MB: return (BWN_OFDM_RATE_36MB); case BWN_OFDM_RATE_54MB: return (BWN_OFDM_RATE_48MB); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint32_t bwn_pio_write_multi_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint32_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; uint32_t value = 0; const uint8_t *data = _data; ctl |= BWN_PIO8_TXCTL_0_7 | BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31; bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); siba_write_multi_4(sc->sc_dev, data, (len & ~3), tq->tq_base + BWN_PIO8_TXDATA); if (len & 3) { ctl &= ~(BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31); data = &(data[len - 1]); switch (len & 3) { case 3: ctl |= BWN_PIO8_TXCTL_16_23; value |= (uint32_t)(*data) << 16; data--; case 2: ctl |= BWN_PIO8_TXCTL_8_15; value |= (uint32_t)(*data) << 8; data--; case 1: value |= (uint32_t)(*data); } bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); bwn_pio_write_4(mac, tq, BWN_PIO8_TXDATA, value); } return (ctl); } static void bwn_pio_write_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset, uint32_t value) { BWN_WRITE_4(mac, tq->tq_base + offset, value); } static uint16_t bwn_pio_write_multi_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *data = _data; ctl |= 
BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); siba_write_multi_2(sc->sc_dev, data, (len & ~1), tq->tq_base + BWN_PIO_TXDATA); if (len & 1) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data[len - 1]); } return (ctl); } static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, struct mbuf *m0) { int i, j = 0; uint16_t data = 0; const uint8_t *buf; struct mbuf *m = m0; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); for (; m != NULL; m = m->m_next) { buf = mtod(m, const uint8_t *); for (i = 0; i < m->m_len; i++) { if (!((j++) % 2)) data |= buf[i]; else { data |= (buf[i] << 8); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); data = 0; } } } if (m0->m_pkthdr.len % 2) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); } return (ctl); } static void bwn_set_slot_time(struct bwn_mac *mac, uint16_t time) { if (mac->mac_phy.type != BWN_PHYTYPE_G) return; BWN_WRITE_2(mac, 0x684, 510 + time); bwn_shm_write_2(mac, BWN_SHARED, 0x0010, time); } static struct bwn_dma_ring * bwn_dma_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (mac->mac_method.dma.wme[WME_AC_BE]); switch (prio) { case 3: return (mac->mac_method.dma.wme[WME_AC_VO]); case 2: return (mac->mac_method.dma.wme[WME_AC_VI]); case 0: return (mac->mac_method.dma.wme[WME_AC_BE]); case 1: return (mac->mac_method.dma.wme[WME_AC_BK]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_getslot(struct bwn_dma_ring *dr) { int slot; BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); KASSERT(!(dr->dr_stop), ("%s:%d: fail", __func__, __LINE__)); KASSERT(bwn_dma_freeslot(dr) != 0, ("%s:%d: fail", __func__, __LINE__)); slot = bwn_dma_nextslot(dr, dr->dr_curslot); KASSERT(!(slot & ~0x0fff), ("%s:%d: fail", __func__, __LINE__)); dr->dr_curslot = slot; dr->dr_usedslot++; return (slot); } static int bwn_phy_shm_tssi_read(struct bwn_mac *mac, uint16_t shm_offset) { const uint8_t ofdm = (shm_offset != BWN_SHARED_TSSI_CCK); unsigned int a, b, c, d; unsigned int avg; uint32_t tmp; tmp = bwn_shm_read_4(mac, BWN_SHARED, shm_offset); a = tmp & 0xff; b = (tmp >> 8) & 0xff; c = (tmp >> 16) & 0xff; d = (tmp >> 24) & 0xff; if (a == 0 || a == BWN_TSSI_MAX || b == 0 || b == BWN_TSSI_MAX || c == 0 || c == BWN_TSSI_MAX || d == 0 || d == BWN_TSSI_MAX) return (ENOENT); bwn_shm_write_4(mac, BWN_SHARED, shm_offset, BWN_TSSI_MAX | (BWN_TSSI_MAX << 8) | (BWN_TSSI_MAX << 16) | (BWN_TSSI_MAX << 24)); if (ofdm) { a = (a + 32) & 0x3f; b = (b + 32) & 0x3f; c = (c + 32) & 0x3f; d = (d + 32) & 0x3f; } avg = (a + b + c + d + 2) / 4; if (ofdm) { if (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO) & BWN_HF_4DB_CCK_POWERBOOST) avg = (avg >= 13) ? 
(avg - 13) : 0; } return (avg); } static void bwn_phy_g_setatt(struct bwn_mac *mac, int *bbattp, int *rfattp) { struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl; int rfatt = *rfattp; int bbatt = *bbattp; while (1) { if (rfatt > lo->rfatt.max && bbatt > lo->bbatt.max - 4) break; if (rfatt < lo->rfatt.min && bbatt < lo->bbatt.min + 4) break; if (bbatt > lo->bbatt.max && rfatt > lo->rfatt.max - 1) break; if (bbatt < lo->bbatt.min && rfatt < lo->rfatt.min + 1) break; if (bbatt > lo->bbatt.max) { bbatt -= 4; rfatt += 1; continue; } if (bbatt < lo->bbatt.min) { bbatt += 4; rfatt -= 1; continue; } if (rfatt > lo->rfatt.max) { rfatt -= 1; bbatt += 4; continue; } if (rfatt < lo->rfatt.min) { rfatt += 1; bbatt -= 4; continue; } break; } *rfattp = MIN(MAX(rfatt, lo->rfatt.min), lo->rfatt.max); *bbattp = MIN(MAX(bbatt, lo->bbatt.min), lo->bbatt.max); } static void bwn_phy_lock(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; KASSERT(siba_get_revid(sc->sc_dev) >= 3, ("%s: unsupported rev %d", __func__, siba_get_revid(sc->sc_dev))); if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, BWN_PS_AWAKE); } static void bwn_phy_unlock(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; KASSERT(siba_get_revid(sc->sc_dev) >= 3, ("%s: unsupported rev %d", __func__, siba_get_revid(sc->sc_dev))); if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); } static void bwn_rf_lock(struct bwn_mac *mac) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_RADIO_LOCK); BWN_READ_4(mac, BWN_MACCTL); DELAY(10); } static void bwn_rf_unlock(struct bwn_mac *mac) { BWN_READ_2(mac, BWN_PHYVER); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_RADIO_LOCK); } static struct bwn_pio_txqueue * bwn_pio_parse_cookie(struct bwn_mac *mac, uint16_t cookie, struct bwn_pio_txpkt **pack) { struct bwn_pio *pio = &mac->mac_method.pio; struct bwn_pio_txqueue *tq = NULL; unsigned int index; switch (cookie & 0xf000) { case 0x1000: tq = &pio->wme[WME_AC_BK]; break; case 0x2000: tq = &pio->wme[WME_AC_BE]; break; case 0x3000: tq = &pio->wme[WME_AC_VI]; break; case 0x4000: tq = &pio->wme[WME_AC_VO]; break; case 0x5000: tq = &pio->mcast; break; } KASSERT(tq != NULL, ("%s:%d: fail", __func__, __LINE__)); if (tq == NULL) return (NULL); index = (cookie & 0x0fff); KASSERT(index < N(tq->tq_pkts), ("%s:%d: fail", __func__, __LINE__)); if (index >= N(tq->tq_pkts)) return (NULL); *pack = &tq->tq_pkts[index]; KASSERT(*pack != NULL, ("%s:%d: fail", __func__, __LINE__)); return (tq); } static void bwn_txpwr(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_LOCK(sc); if (mac && mac->mac_status >= BWN_MAC_STATUS_STARTED && mac->mac_phy.set_txpwr != NULL) mac->mac_phy.set_txpwr(mac); BWN_UNLOCK(sc); } static void bwn_task_15s(struct bwn_mac *mac) { uint16_t reg; if (mac->mac_fw.opensource) { reg = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG); if (reg) { bwn_restart(mac, "fw watchdog"); return; } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG, 1); } if (mac->mac_phy.task_15s) mac->mac_phy.task_15s(mac); mac->mac_phy.txerrors = BWN_TXERROR_MAX; } static void bwn_task_30s(struct bwn_mac *mac) { if (mac->mac_phy.type != BWN_PHYTYPE_G || mac->mac_noise.noi_running) return; mac->mac_noise.noi_running = 1; mac->mac_noise.noi_nsamples = 0; bwn_noise_gensample(mac); } static void bwn_task_60s(struct bwn_mac *mac) { if (mac->mac_phy.task_60s) mac->mac_phy.task_60s(mac); 
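/* Once a minute, also force a TX power re-check; BWN_TXPWR_IGNORE_TIME presumably makes bwn_phy_txpower_check() run even if the last adjustment was recent. */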
bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME); } static void bwn_tasks(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status != BWN_MAC_STATUS_STARTED) return; if (mac->mac_task_state % 4 == 0) bwn_task_60s(mac); if (mac->mac_task_state % 2 == 0) bwn_task_30s(mac); bwn_task_15s(mac); mac->mac_task_state++; callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); } static int bwn_plcp_get_ofdmrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp, uint8_t a) { struct bwn_softc *sc = mac->mac_sc; KASSERT(a == 0, ("not support APHY\n")); switch (plcp->o.raw[0] & 0xf) { case 0xb: return (BWN_OFDM_RATE_6MB); case 0xf: return (BWN_OFDM_RATE_9MB); case 0xa: return (BWN_OFDM_RATE_12MB); case 0xe: return (BWN_OFDM_RATE_18MB); case 0x9: return (BWN_OFDM_RATE_24MB); case 0xd: return (BWN_OFDM_RATE_36MB); case 0x8: return (BWN_OFDM_RATE_48MB); case 0xc: return (BWN_OFDM_RATE_54MB); } device_printf(sc->sc_dev, "incorrect OFDM rate %d\n", plcp->o.raw[0] & 0xf); return (-1); } static int bwn_plcp_get_cckrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp) { struct bwn_softc *sc = mac->mac_sc; switch (plcp->o.raw[0]) { case 0x0a: return (BWN_CCK_RATE_1MB); case 0x14: return (BWN_CCK_RATE_2MB); case 0x37: return (BWN_CCK_RATE_5MB); case 0x6e: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "incorrect CCK rate %d\n", plcp->o.raw[0]); return (-1); } static void bwn_rx_radiotap(struct bwn_mac *mac, struct mbuf *m, const struct bwn_rxhdr4 *rxhdr, struct bwn_plcp6 *plcp, int rate, int rssi, int noise) { struct bwn_softc *sc = mac->mac_sc; const struct ieee80211_frame_min *wh; uint64_t tsf; uint16_t low_mactime_now; if (htole16(rxhdr->phy_status0) & BWN_RX_PHYST0_SHORTPRMBL) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; wh = mtod(m, const struct ieee80211_frame_min *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP; bwn_tsf_read(mac, &tsf); low_mactime_now = tsf; tsf = tsf & ~0xffffULL; tsf += le16toh(rxhdr->mac_time); if (low_mactime_now < le16toh(rxhdr->mac_time)) tsf -= 0x10000; sc->sc_rx_th.wr_tsf = tsf; sc->sc_rx_th.wr_rate = rate; sc->sc_rx_th.wr_antsignal = rssi; sc->sc_rx_th.wr_antnoise = noise; } static void bwn_tsf_read(struct bwn_mac *mac, uint64_t *tsf) { uint32_t low, high; KASSERT(siba_get_revid(mac->mac_sc->sc_dev) >= 3, ("%s:%d: fail", __func__, __LINE__)); low = BWN_READ_4(mac, BWN_REV3PLUS_TSF_LOW); high = BWN_READ_4(mac, BWN_REV3PLUS_TSF_HIGH); *tsf = high; *tsf <<= 32; *tsf |= low; } static int bwn_dma_attach(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; bus_addr_t lowaddr = 0; int error; if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0) return (0); KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("%s: fail", __func__)); mac->mac_flags |= BWN_MAC_FLAG_DMA; dma->dmatype = bwn_dma_gettype(mac); if (dma->dmatype == BWN_DMA_30BIT) lowaddr = BWN_BUS_SPACE_MAXADDR_30BIT; else if (dma->dmatype == BWN_DMA_32BIT) lowaddr = BUS_SPACE_MAXADDR_32BIT; else lowaddr = BUS_SPACE_MAXADDR; /* * Create top level DMA tag */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ BWN_ALIGN, 0, /* alignment, bounds */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma->parent_dtag); if (error) { 
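/* The parent tag is the first allocation, so there is nothing to unwind yet; just report the error. */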
device_printf(sc->sc_dev, "can't create parent DMA tag\n"); return (error); } /* * Create TX/RX mbuf DMA tag */ error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->rxbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail0; } error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->txbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail1; } dma->wme[WME_AC_BK] = bwn_dma_ringsetup(mac, 0, 1, dma->dmatype); if (!dma->wme[WME_AC_BK]) goto fail2; dma->wme[WME_AC_BE] = bwn_dma_ringsetup(mac, 1, 1, dma->dmatype); if (!dma->wme[WME_AC_BE]) goto fail3; dma->wme[WME_AC_VI] = bwn_dma_ringsetup(mac, 2, 1, dma->dmatype); if (!dma->wme[WME_AC_VI]) goto fail4; dma->wme[WME_AC_VO] = bwn_dma_ringsetup(mac, 3, 1, dma->dmatype); if (!dma->wme[WME_AC_VO]) goto fail5; dma->mcast = bwn_dma_ringsetup(mac, 4, 1, dma->dmatype); if (!dma->mcast) goto fail6; dma->rx = bwn_dma_ringsetup(mac, 0, 0, dma->dmatype); if (!dma->rx) goto fail7; return (error); fail7: bwn_dma_ringfree(&dma->mcast); fail6: bwn_dma_ringfree(&dma->wme[WME_AC_VO]); fail5: bwn_dma_ringfree(&dma->wme[WME_AC_VI]); fail4: bwn_dma_ringfree(&dma->wme[WME_AC_BE]); fail3: bwn_dma_ringfree(&dma->wme[WME_AC_BK]); fail2: bus_dma_tag_destroy(dma->txbuf_dtag); fail1: bus_dma_tag_destroy(dma->rxbuf_dtag); fail0: bus_dma_tag_destroy(dma->parent_dtag); return (error); } static struct bwn_dma_ring * bwn_dma_parse_cookie(struct bwn_mac *mac, const struct bwn_txstatus *status, uint16_t cookie, int *slot) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(mac->mac_sc); switch (cookie & 0xf000) { case 0x1000: dr = dma->wme[WME_AC_BK]; break; case 0x2000: dr = dma->wme[WME_AC_BE]; break; case 0x3000: dr = dma->wme[WME_AC_VI]; break; case 0x4000: dr = dma->wme[WME_AC_VO]; break; case 0x5000: dr = dma->mcast; break; default: dr = NULL; KASSERT(0 == 1, ("invalid cookie value %d", cookie & 0xf000)); } *slot = (cookie & 0x0fff); if (*slot < 0 || *slot >= dr->dr_numslots) { /* * XXX FIXME: sometimes H/W returns TX DONE events duplicately * that it occurs events which have same H/W sequence numbers. * When it's occurred just prints a WARNING msgs and ignores. 
*/ KASSERT(status->seq == dma->lastseq, ("%s:%d: fail", __func__, __LINE__)); device_printf(sc->sc_dev, "slot %d out of range (0 <= slot < %d)\n", *slot, dr->dr_numslots); return (NULL); } dma->lastseq = status->seq; return (dr); } static void bwn_dma_stop(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringstop(&dma->rx); bwn_dma_ringstop(&dma->wme[WME_AC_BK]); bwn_dma_ringstop(&dma->wme[WME_AC_BE]); bwn_dma_ringstop(&dma->wme[WME_AC_VI]); bwn_dma_ringstop(&dma->wme[WME_AC_VO]); bwn_dma_ringstop(&dma->mcast); } static void bwn_dma_ringstop(struct bwn_dma_ring **dr) { if (dr == NULL || *dr == NULL) return; bwn_dma_cleanup(*dr); } static void bwn_pio_stop(struct bwn_mac *mac) { struct bwn_pio *pio; if (mac->mac_flags & BWN_MAC_FLAG_DMA) return; pio = &mac->mac_method.pio; bwn_destroy_queue_tx(&pio->mcast); bwn_destroy_queue_tx(&pio->wme[WME_AC_VO]); bwn_destroy_queue_tx(&pio->wme[WME_AC_VI]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BE]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BK]); } static void bwn_led_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *led_act = NULL; uint16_t val[BWN_LED_MAX]; int i; sc->sc_led_idle = (2350 * hz) / 1000; sc->sc_led_blink = 1; for (i = 0; i < N(bwn_vendor_led_act); ++i) { if (siba_get_pci_subvendor(sc->sc_dev) == bwn_vendor_led_act[i].vid) { led_act = bwn_vendor_led_act[i].led_act; break; } } if (led_act == NULL) led_act = bwn_default_led_act; val[0] = siba_sprom_get_gpio0(sc->sc_dev); val[1] = siba_sprom_get_gpio1(sc->sc_dev); val[2] = siba_sprom_get_gpio2(sc->sc_dev); val[3] = siba_sprom_get_gpio3(sc->sc_dev); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; if (val[i] == 0xff) { led->led_act = led_act[i]; } else { if (val[i] & BWN_LED_ACT_LOW) led->led_flags |= BWN_LED_F_ACTLOW; led->led_act = val[i] & BWN_LED_ACT_MASK; } led->led_mask = (1 << i); if (led->led_act == BWN_LED_ACT_BLINK_SLOW || led->led_act == BWN_LED_ACT_BLINK_POLL || led->led_act == BWN_LED_ACT_BLINK) { led->led_flags |= BWN_LED_F_BLINK; if (led->led_act == BWN_LED_ACT_BLINK_POLL) led->led_flags |= BWN_LED_F_POLLABLE; else if (led->led_act == BWN_LED_ACT_BLINK_SLOW) led->led_flags |= BWN_LED_F_SLOW; if (sc->sc_blink_led == NULL) { sc->sc_blink_led = led; if (led->led_flags & BWN_LED_F_SLOW) BWN_LED_SLOWDOWN(sc->sc_led_idle); } } DPRINTF(sc, BWN_DEBUG_LED, "led %d, act %d, lowact %d\n", i, led->led_act, led->led_flags & BWN_LED_F_ACTLOW); } callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0); } static __inline uint16_t bwn_led_onoff(const struct bwn_led *led, uint16_t val, int on) { if (led->led_flags & BWN_LED_F_ACTLOW) on = !on; if (on) val |= led->led_mask; else val &= ~led->led_mask; return (val); } static void bwn_led_newstate(struct bwn_mac *mac, enum ieee80211_state nstate) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t val; int i; if (nstate == IEEE80211_S_INIT) { callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; } if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) return; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; int on; if (led->led_act == BWN_LED_ACT_UNKN || led->led_act == BWN_LED_ACT_NULL) continue; if ((led->led_flags & BWN_LED_F_BLINK) && nstate != IEEE80211_S_INIT) continue; switch (led->led_act) { case BWN_LED_ACT_ON: /* Always on */ on = 1; break; case BWN_LED_ACT_OFF: /* Always off */ case BWN_LED_ACT_5GHZ: /* TODO: 11A */ on = 0;
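/* 11a operation is not implemented, so the 5 GHz activity LED is treated as always-off here. */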
break; default: on = 1; switch (nstate) { case IEEE80211_S_INIT: on = 0; break; case IEEE80211_S_RUN: if (led->led_act == BWN_LED_ACT_11G && ic->ic_curmode != IEEE80211_MODE_11G) on = 0; break; default: if (led->led_act == BWN_LED_ACT_ASSOC) on = 0; break; } break; } val = bwn_led_onoff(led, val, on); } BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); } static void bwn_led_event(struct bwn_mac *mac, int event) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; int rate; if (event == BWN_LED_EVENT_POLL) { if ((led->led_flags & BWN_LED_F_POLLABLE) == 0) return; if (ticks - sc->sc_led_ticks < sc->sc_led_idle) return; } sc->sc_led_ticks = ticks; if (sc->sc_led_blinking) return; switch (event) { case BWN_LED_EVENT_RX: rate = sc->sc_rx_rate; break; case BWN_LED_EVENT_TX: rate = sc->sc_tx_rate; break; case BWN_LED_EVENT_POLL: rate = 0; break; default: panic("unknown LED event %d\n", event); break; } bwn_led_blink_start(mac, bwn_led_duration[rate].on_dur, bwn_led_duration[rate].off_dur); } static void bwn_led_blink_start(struct bwn_mac *mac, int on_dur, int off_dur) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(led, val, 1); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); if (led->led_flags & BWN_LED_F_SLOW) { BWN_LED_SLOWDOWN(on_dur); BWN_LED_SLOWDOWN(off_dur); } sc->sc_led_blinking = 1; sc->sc_led_blink_offdur = off_dur; callout_reset(&sc->sc_led_blink_ch, on_dur, bwn_led_blink_next, mac); } static void bwn_led_blink_next(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(sc->sc_blink_led, val, 0); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur, bwn_led_blink_end, mac); } static void bwn_led_blink_end(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; sc->sc_led_blinking = 0; } static int bwn_suspend(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); return (0); } static int bwn_resume(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); int error = EDOOFUS; BWN_LOCK(sc); if (sc->sc_ic.ic_nrunning > 0) error = bwn_init(sc); BWN_UNLOCK(sc); if (error == 0) ieee80211_start_all(&sc->sc_ic); return (0); } static void bwn_rfswitch(void *arg) { struct bwn_softc *sc = arg; struct bwn_mac *mac = sc->sc_curmac; int cur = 0, prev = 0; KASSERT(mac->mac_status >= BWN_MAC_STATUS_STARTED, ("%s: invalid MAC status %d", __func__, mac->mac_status)); if (mac->mac_phy.rev >= 3 || mac->mac_phy.type == BWN_PHYTYPE_LP) { if (!(BWN_READ_4(mac, BWN_RF_HWENABLED_HI) & BWN_RF_HWENABLED_HI_MASK)) cur = 1; } else { if (BWN_READ_2(mac, BWN_RF_HWENABLED_LO) & BWN_RF_HWENABLED_LO_MASK) cur = 1; } if (mac->mac_flags & BWN_MAC_FLAG_RADIO_ON) prev = 1; if (cur != prev) { if (cur) mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; else mac->mac_flags &= ~BWN_MAC_FLAG_RADIO_ON; device_printf(sc->sc_dev, "status of RF switch is changed to %s\n", cur ? 
"ON" : "OFF"); if (cur != mac->mac_phy.rf_on) { if (cur) bwn_rf_turnon(mac); else bwn_rf_turnoff(mac); } } callout_schedule(&sc->sc_rfswitch_ch, hz); } static void bwn_phy_lp_init_pre(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; plp->plp_antenna = BWN_ANT_DEFAULT; } static int bwn_phy_lp_init(struct bwn_mac *mac) { static const struct bwn_stxtable tables[] = { { 2, 6, 0x3d, 3, 0x01 }, { 1, 12, 0x4c, 1, 0x01 }, { 1, 8, 0x50, 0, 0x7f }, { 0, 8, 0x44, 0, 0xff }, { 1, 0, 0x4a, 0, 0xff }, { 0, 4, 0x4d, 0, 0xff }, { 1, 4, 0x4e, 0, 0xff }, { 0, 12, 0x4f, 0, 0x0f }, { 1, 0, 0x4f, 4, 0x0f }, { 3, 0, 0x49, 0, 0x0f }, { 4, 3, 0x46, 4, 0x07 }, { 3, 15, 0x46, 0, 0x01 }, { 4, 0, 0x46, 1, 0x07 }, { 3, 8, 0x48, 4, 0x07 }, { 3, 11, 0x48, 0, 0x0f }, { 3, 4, 0x49, 4, 0x0f }, { 2, 15, 0x45, 0, 0x01 }, { 5, 13, 0x52, 4, 0x07 }, { 6, 0, 0x52, 7, 0x01 }, { 5, 3, 0x41, 5, 0x07 }, { 5, 6, 0x41, 0, 0x0f }, { 5, 10, 0x42, 5, 0x07 }, { 4, 15, 0x42, 0, 0x01 }, { 5, 0, 0x42, 1, 0x07 }, { 4, 11, 0x43, 4, 0x0f }, { 4, 7, 0x43, 0, 0x0f }, { 4, 6, 0x45, 1, 0x01 }, { 2, 7, 0x40, 4, 0x0f }, { 2, 11, 0x40, 0, 0x0f } }; struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; const struct bwn_stxtable *st; struct ieee80211com *ic = &sc->sc_ic; int i, error; uint16_t tmp; bwn_phy_lp_readsprom(mac); /* XXX bad place */ bwn_phy_lp_bbinit(mac); /* initialize RF */ BWN_PHY_SET(mac, BWN_PHY_4WIRECTL, 0x2); DELAY(1); BWN_PHY_MASK(mac, BWN_PHY_4WIRECTL, 0xfffd); DELAY(1); if (mac->mac_phy.rf_ver == 0x2062) bwn_phy_lp_b2062_init(mac); else { bwn_phy_lp_b2063_init(mac); /* synchronize stx table. */ for (i = 0; i < N(tables); i++) { st = &tables[i]; tmp = BWN_RF_READ(mac, st->st_rfaddr); tmp >>= st->st_rfshift; tmp <<= st->st_physhift; BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xf2 + st->st_phyoffset), ~(st->st_mask << st->st_physhift), tmp); } BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf0), 0x5f80); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf1), 0); } /* calibrate RC */ if (mac->mac_phy.rev >= 2) bwn_phy_lp_rxcal_r2(mac); else if (!plp->plp_rccap) { if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_rccal_r12(mac); } else bwn_phy_lp_set_rccap(mac); error = bwn_phy_lp_switch_channel(mac, 7); if (error) device_printf(sc->sc_dev, "failed to change channel 7 (%d)\n", error); bwn_phy_lp_txpctl_init(mac); bwn_phy_lp_calib(mac); return (0); } static uint16_t bwn_phy_lp_read(struct bwn_mac *mac, uint16_t reg) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); return (BWN_READ_2(mac, BWN_PHYDATA)); } static void bwn_phy_lp_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, value); } static void bwn_phy_lp_maskset(struct bwn_mac *mac, uint16_t reg, uint16_t mask, uint16_t set) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, (BWN_READ_2(mac, BWN_PHYDATA) & mask) | set); } static uint16_t bwn_phy_lp_rf_read(struct bwn_mac *mac, uint16_t reg) { KASSERT(reg != 1, ("unaccessible register %d", reg)); if (mac->mac_phy.rev < 2 && reg != 0x4001) reg |= 0x100; if (mac->mac_phy.rev >= 2) reg |= 0x200; BWN_WRITE_2(mac, BWN_RFCTL, reg); return BWN_READ_2(mac, BWN_RFDATALO); } static void bwn_phy_lp_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { KASSERT(reg != 1, ("unaccessible register %d", reg)); BWN_WRITE_2(mac, BWN_RFCTL, reg); BWN_WRITE_2(mac, BWN_RFDATALO, value); } static void bwn_phy_lp_rf_onoff(struct bwn_mac *mac, int on) { if (on) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xe0ff); BWN_PHY_MASK(mac, 
BWN_PHY_RF_OVERRIDE_2, (mac->mac_phy.rev >= 2) ? 0xf7f7 : 0xffe7); return; } if (mac->mac_phy.rev >= 2) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x83ff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0x80ff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xdfff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0808); return; } BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xe0ff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfcff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0018); } static int bwn_phy_lp_switch_channel(struct bwn_mac *mac, uint32_t chan) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; int error; if (phy->rf_ver == 0x2063) { error = bwn_phy_lp_b2063_switch_channel(mac, chan); if (error) return (error); } else { error = bwn_phy_lp_b2062_switch_channel(mac, chan); if (error) return (error); bwn_phy_lp_set_anafilter(mac, chan); bwn_phy_lp_set_gaintbl(mac, ieee80211_ieee2mhz(chan, 0)); } plp->plp_chan = chan; BWN_WRITE_2(mac, BWN_CHANNEL, chan); return (0); } static uint32_t bwn_phy_lp_get_default_chan(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; return (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 1 : 36); } static void bwn_phy_lp_set_antenna(struct bwn_mac *mac, int antenna) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; if (phy->rev >= 2 || antenna > BWN_ANTAUTO1) return; bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffd, antenna & 0x2); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffe, antenna & 0x1); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_UCODE_ANTDIV_HELPER); plp->plp_antenna = antenna; } static void bwn_phy_lp_task_60s(struct bwn_mac *mac) { bwn_phy_lp_calib(mac); } static void bwn_phy_lp_readsprom(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { plp->plp_txisoband_m = siba_sprom_get_tri2g(sc->sc_dev); plp->plp_bxarch = siba_sprom_get_bxa2g(sc->sc_dev); plp->plp_rxpwroffset = siba_sprom_get_rxpo2g(sc->sc_dev); plp->plp_rssivf = siba_sprom_get_rssismf2g(sc->sc_dev); plp->plp_rssivc = siba_sprom_get_rssismc2g(sc->sc_dev); plp->plp_rssigs = siba_sprom_get_rssisav2g(sc->sc_dev); return; } plp->plp_txisoband_l = siba_sprom_get_tri5gl(sc->sc_dev); plp->plp_txisoband_m = siba_sprom_get_tri5g(sc->sc_dev); plp->plp_txisoband_h = siba_sprom_get_tri5gh(sc->sc_dev); plp->plp_bxarch = siba_sprom_get_bxa5g(sc->sc_dev); plp->plp_rxpwroffset = siba_sprom_get_rxpo5g(sc->sc_dev); plp->plp_rssivf = siba_sprom_get_rssismf5g(sc->sc_dev); plp->plp_rssivc = siba_sprom_get_rssismc5g(sc->sc_dev); plp->plp_rssigs = siba_sprom_get_rssisav5g(sc->sc_dev); } static void bwn_phy_lp_bbinit(struct bwn_mac *mac) { bwn_phy_lp_tblinit(mac); if (mac->mac_phy.rev >= 2) bwn_phy_lp_bbinit_r2(mac); else bwn_phy_lp_bbinit_r01(mac); } static void bwn_phy_lp_txpctl_init(struct bwn_mac *mac) { struct bwn_txgain gain_2ghz = { 4, 12, 12, 0 }; struct bwn_txgain gain_5ghz = { 7, 15, 14, 0 }; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; bwn_phy_lp_set_txgain(mac, IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 
&gain_2ghz : &gain_5ghz); bwn_phy_lp_set_bbmult(mac, 150); } static void bwn_phy_lp_calib(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; const struct bwn_rxcompco *rc = NULL; struct bwn_txgain ogain; int i, omode, oafeovr, orf, obbmult; uint8_t mode, fc = 0; if (plp->plp_chanfullcal != plp->plp_chan) { plp->plp_chanfullcal = plp->plp_chan; fc = 1; } bwn_mac_suspend(mac); /* Bluetooth coexistence override */ BWN_WRITE_2(mac, BWN_BTCOEX_CTL, 0x3); BWN_WRITE_2(mac, BWN_BTCOEX_TXCTL, 0xff); if (mac->mac_phy.rev >= 2) bwn_phy_lp_digflt_save(mac); bwn_phy_lp_get_txpctlmode(mac); mode = plp->plp_txpctlmode; bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); if (mac->mac_phy.rev == 0 && mode != BWN_PHYLP_TXPCTL_OFF) bwn_phy_lp_bugfix(mac); if (mac->mac_phy.rev >= 2 && fc == 1) { bwn_phy_lp_get_txpctlmode(mac); omode = plp->plp_txpctlmode; oafeovr = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40; if (oafeovr) ogain = bwn_phy_lp_get_txgain(mac); orf = BWN_PHY_READ(mac, BWN_PHY_RF_PWR_OVERRIDE) & 0xff; obbmult = bwn_phy_lp_get_bbmult(mac); bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); if (oafeovr) bwn_phy_lp_set_txgain(mac, &ogain); bwn_phy_lp_set_bbmult(mac, obbmult); bwn_phy_lp_set_txpctlmode(mac, omode); BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, orf); } bwn_phy_lp_set_txpctlmode(mac, mode); if (mac->mac_phy.rev >= 2) bwn_phy_lp_digflt_restore(mac); /* do the RX IQ calculation; assumes that noise is present. */ if (siba_get_chipid(sc->sc_dev) == 0x5354) { for (i = 0; i < N(bwn_rxcompco_5354); i++) { if (bwn_rxcompco_5354[i].rc_chan == plp->plp_chan) rc = &bwn_rxcompco_5354[i]; } } else if (mac->mac_phy.rev >= 2) rc = &bwn_rxcompco_r2; else { for (i = 0; i < N(bwn_rxcompco_r12); i++) { if (bwn_rxcompco_r12[i].rc_chan == plp->plp_chan) rc = &bwn_rxcompco_r12[i]; } } if (rc == NULL) goto fail; BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, rc->rc_c1); BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, rc->rc_c0 << 8); bwn_phy_lp_set_trsw_over(mac, 1 /* TX */, 0 /* RX */); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7, 0); } else { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf, 0); } bwn_phy_lp_set_rxgain(mac, 0x2d5d); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800); bwn_phy_lp_set_deaf(mac, 0); /* XXX the return value is not checked. */ (void)bwn_phy_lp_calc_rx_iq_comp(mac, 0xfff0); bwn_phy_lp_clear_deaf(mac, 0); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfff7); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffdf); /* disable RX GAIN override.
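(each BWN_PHY_MASK that follows clears one override-enable bit in BWN_PHY_RF_OVERRIDE_0: 0x0001, 0x0010 and 0x0040, presumably handing RX gain control back to the hardware.)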
*/ BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffef); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffbf); if (mac->mac_phy.rev >= 2) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfbff); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xe5), 0xfff7); } } else { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfdff); } BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xf7ff); fail: bwn_mac_enable(mac); } static void bwn_phy_lp_switch_analog(struct bwn_mac *mac, int on) { if (on) { BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfff8); return; } BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVRVAL, 0x0007); BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x0007); } static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *mac, uint8_t chan) { static const struct bwn_b206x_chan *bc = NULL; struct bwn_softc *sc = mac->mac_sc; uint32_t count, freqref, freqvco, freqxtal, val[3], timeout, timeoutref, tmp[6]; uint16_t old, scale, tmp16; int i, div; for (i = 0; i < N(bwn_b2063_chantable); i++) { if (bwn_b2063_chantable[i].bc_chan == chan) { bc = &bwn_b2063_chantable[i]; break; } } if (bc == NULL) return (EINVAL); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_VCOBUF1, bc->bc_data[0]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_MIXER2, bc->bc_data[1]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_BUF2, bc->bc_data[2]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_RCCR1, bc->bc_data[3]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_1ST3, bc->bc_data[4]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND1, bc->bc_data[5]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND4, bc->bc_data[6]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND7, bc->bc_data[7]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_PS6, bc->bc_data[8]); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL2, bc->bc_data[9]); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL5, bc->bc_data[10]); BWN_RF_WRITE(mac, BWN_B2063_PA_CTL11, bc->bc_data[11]); old = BWN_RF_READ(mac, BWN_B2063_COM15); BWN_RF_SET(mac, BWN_B2063_COM15, 0x1e); freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; freqvco = bc->bc_freq << ((bc->bc_freq > 4000) ? 1 : 2); freqref = freqxtal * 3; div = (freqxtal <= 26000000 ? 
1 : 2); timeout = ((((8 * freqxtal) / (div * 5000000)) + 1) >> 1) - 1; timeoutref = ((((8 * freqxtal) / (div * (timeout + 1))) + 999999) / 1000000) + 1; BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB3, 0x2); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB6, 0xfff8, timeout >> 2); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7, 0xff9f,timeout << 5); BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB5, timeoutref); val[0] = bwn_phy_lp_roundup(freqxtal, 1000000, 16); val[1] = bwn_phy_lp_roundup(freqxtal, 1000000 * div, 16); val[2] = bwn_phy_lp_roundup(freqvco, 3, 16); count = (bwn_phy_lp_roundup(val[2], val[1] + 16, 16) * (timeout + 1) * (timeoutref + 1)) - 1; BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7, 0xf0, count >> 8); BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB8, count & 0xff); tmp[0] = ((val[2] * 62500) / freqref) << 4; tmp[1] = ((val[2] * 62500) % freqref) << 4; while (tmp[1] >= freqref) { tmp[0]++; tmp[1] -= freqref; } BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG1, 0xffe0, tmp[0] >> 4); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfe0f, tmp[0] << 4); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfff0, tmp[0] >> 16); BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG3, (tmp[1] >> 8) & 0xff); BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG4, tmp[1] & 0xff); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF1, 0xb9); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF2, 0x88); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF3, 0x28); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF4, 0x63); tmp[2] = ((41 * (val[2] - 3000)) /1200) + 27; tmp[3] = bwn_phy_lp_roundup(132000 * tmp[0], 8451, 16); - if ((tmp[3] + tmp[2] - 1) / tmp[2] > 60) { + if (howmany(tmp[3], tmp[2]) > 60) { scale = 1; tmp[4] = ((tmp[3] + tmp[2]) / (tmp[2] << 1)) - 8; } else { scale = 0; tmp[4] = ((tmp[3] + (tmp[2] >> 1)) / tmp[2]) - 8; } BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffc0, tmp[4]); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffbf, scale << 6); tmp[5] = bwn_phy_lp_roundup(100 * val[0], val[2], 16) * (tmp[4] * 8) * (scale + 1); if (tmp[5] > 150) tmp[5] = 0; BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffe0, tmp[5]); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffdf, scale << 5); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfffb, 0x4); if (freqxtal > 26000000) BWN_RF_SET(mac, BWN_B2063_JTAG_XTAL_12, 0x2); else BWN_RF_MASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfd); if (val[0] == 45) BWN_RF_SET(mac, BWN_B2063_JTAG_VCO1, 0x2); else BWN_RF_MASK(mac, BWN_B2063_JTAG_VCO1, 0xfd); BWN_RF_SET(mac, BWN_B2063_PLL_SP2, 0x3); DELAY(1); BWN_RF_MASK(mac, BWN_B2063_PLL_SP2, 0xfffc); /* VCO Calibration */ BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, ~0x40); tmp16 = BWN_RF_READ(mac, BWN_B2063_JTAG_CALNRST) & 0xf8; BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x4); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x6); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x7); DELAY(300); BWN_RF_SET(mac, BWN_B2063_PLL_SP1, 0x40); BWN_RF_WRITE(mac, BWN_B2063_COM15, old); return (0); } static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *mac, uint8_t chan) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; const struct bwn_b206x_chan *bc = NULL; uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; uint32_t tmp[9]; int i; for (i = 0; i < N(bwn_b2062_chantable); i++) { if (bwn_b2062_chantable[i].bc_chan == chan) { bc = &bwn_b2062_chantable[i]; break; } } if (bc == NULL) return (EINVAL); BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL14, 0x04); BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE0, bc->bc_data[0]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE2, 
bc->bc_data[1]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE3, bc->bc_data[2]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_TUNE, bc->bc_data[3]); BWN_RF_WRITE(mac, BWN_B2062_S_LGENG_CTL1, bc->bc_data[4]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL5, bc->bc_data[5]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL6, bc->bc_data[6]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_PGA, bc->bc_data[7]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_PAD, bc->bc_data[8]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xcc); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0x07); bwn_phy_lp_b2062_reset_pllbias(mac); tmp[0] = freqxtal / 1000; tmp[1] = plp->plp_div * 1000; tmp[2] = tmp[1] * ieee80211_ieee2mhz(chan, 0); if (ieee80211_ieee2mhz(chan, 0) < 4000) tmp[2] *= 2; tmp[3] = 48 * tmp[0]; tmp[5] = tmp[2] / tmp[3]; tmp[6] = tmp[2] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL26, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL27, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL28, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL29, tmp[5] + ((2 * tmp[6]) / tmp[3])); tmp[7] = BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL19); tmp[8] = ((2 * tmp[2] * (tmp[7] + 1)) + (3 * tmp[0])) / (6 * tmp[0]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL23, (tmp[8] >> 8) + 16); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL24, tmp[8] & 0xff); bwn_phy_lp_b2062_vco_calib(mac); if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xfc); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0); bwn_phy_lp_b2062_reset_pllbias(mac); bwn_phy_lp_b2062_vco_calib(mac); if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) { BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04); return (EIO); } } BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04); return (0); } static void bwn_phy_lp_set_anafilter(struct bwn_mac *mac, uint8_t channel) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint16_t tmp = (channel == 14); if (mac->mac_phy.rev < 2) { BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xfcff, tmp << 9); if ((mac->mac_phy.rev == 1) && (plp->plp_rccap)) bwn_phy_lp_set_rccap(mac); return; } BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, 0x3f); } static void bwn_phy_lp_set_gaintbl(struct bwn_mac *mac, uint32_t freq) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t iso, tmp[3]; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) iso = plp->plp_txisoband_m; else if (freq <= 5320) iso = plp->plp_txisoband_l; else if (freq <= 5700) iso = plp->plp_txisoband_m; else iso = plp->plp_txisoband_h; tmp[0] = ((iso - 26) / 12) << 12; tmp[1] = tmp[0] + 0x1000; tmp[2] = tmp[0] + 0x2000; bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), 3, tmp); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), 3, tmp); } static void bwn_phy_lp_digflt_save(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; int i; static const uint16_t addr[] = { BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2), BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4), BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6), BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8), BWN_PHY_OFDM(0xcf), }; static const uint16_t val[] = { 0xde5e, 0xe832, 0xe331, 0x4d26, 0x0026, 0x1420, 0x0020, 0xfe08, 0x0008, }; for (i = 0; i < N(addr); i++) { plp->plp_digfilt[i] = BWN_PHY_READ(mac, addr[i]); BWN_PHY_WRITE(mac, addr[i], val[i]); } } static void 
bwn_phy_lp_get_txpctlmode(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; uint16_t ctl; ctl = BWN_PHY_READ(mac, BWN_PHY_TX_PWR_CTL_CMD); switch (ctl & BWN_PHY_TX_PWR_CTL_CMD_MODE) { case BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_OFF; break; case BWN_PHY_TX_PWR_CTL_CMD_MODE_SW: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_SW; break; case BWN_PHY_TX_PWR_CTL_CMD_MODE_HW: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_HW; break; default: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_UNKNOWN; device_printf(sc->sc_dev, "unknown command mode\n"); break; } } static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *mac, uint8_t mode) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint16_t ctl; uint8_t old; bwn_phy_lp_get_txpctlmode(mac); old = plp->plp_txpctlmode; if (old == mode) return; plp->plp_txpctlmode = mode; if (old != BWN_PHYLP_TXPCTL_ON_HW && mode == BWN_PHYLP_TXPCTL_ON_HW) { BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, 0xff80, plp->plp_tssiidx); BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_NNUM, 0x8fff, ((uint16_t)plp->plp_tssinpt << 16)); /* disable TX GAIN override */ if (mac->mac_phy.rev < 2) BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff); else { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xff7f); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xbfff); } BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xffbf); plp->plp_txpwridx = -1; } if (mac->mac_phy.rev >= 2) { if (mode == BWN_PHYLP_TXPCTL_ON_HW) BWN_PHY_SET(mac, BWN_PHY_OFDM(0xd0), 0x2); else BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xd0), 0xfffd); } /* writes TX Power Control mode */ switch (plp->plp_txpctlmode) { case BWN_PHYLP_TXPCTL_OFF: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF; break; case BWN_PHYLP_TXPCTL_ON_HW: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_HW; break; case BWN_PHYLP_TXPCTL_ON_SW: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_SW; break; default: ctl = 0; KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, (uint16_t)~BWN_PHY_TX_PWR_CTL_CMD_MODE, ctl); } static void bwn_phy_lp_bugfix(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; const unsigned int size = 256; struct bwn_txgain tg; uint32_t rxcomp, txgain, coeff, rfpwr, *tabs; uint16_t tssinpt, tssiidx, value[2]; uint8_t mode; int8_t txpwridx; tabs = (uint32_t *)malloc(sizeof(uint32_t) * size, M_DEVBUF, M_NOWAIT | M_ZERO); if (tabs == NULL) { device_printf(sc->sc_dev, "failed to allocate buffer.\n"); return; } bwn_phy_lp_get_txpctlmode(mac); mode = plp->plp_txpctlmode; txpwridx = plp->plp_txpwridx; tssinpt = plp->plp_tssinpt; tssiidx = plp->plp_tssiidx; bwn_tab_read_multi(mac, (mac->mac_phy.rev < 2) ? BWN_TAB_4(10, 0x140) : BWN_TAB_4(7, 0x140), size, tabs); bwn_phy_lp_tblinit(mac); bwn_phy_lp_bbinit(mac); bwn_phy_lp_txpctl_init(mac); bwn_phy_lp_rf_onoff(mac, 1); bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); bwn_tab_write_multi(mac, (mac->mac_phy.rev < 2) ? 
BWN_TAB_4(10, 0x140) : BWN_TAB_4(7, 0x140), size, tabs); BWN_WRITE_2(mac, BWN_CHANNEL, plp->plp_chan); plp->plp_tssinpt = tssinpt; plp->plp_tssiidx = tssiidx; bwn_phy_lp_set_anafilter(mac, plp->plp_chan); if (txpwridx != -1) { /* set TX power by index */ plp->plp_txpwridx = txpwridx; bwn_phy_lp_get_txpctlmode(mac); if (plp->plp_txpctlmode != BWN_PHYLP_TXPCTL_OFF) bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_ON_SW); if (mac->mac_phy.rev >= 2) { rxcomp = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 320)); txgain = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 192)); tg.tg_pad = (txgain >> 16) & 0xff; tg.tg_gm = txgain & 0xff; tg.tg_pga = (txgain >> 8) & 0xff; tg.tg_dac = (rxcomp >> 28) & 0xff; bwn_phy_lp_set_txgain(mac, &tg); } else { rxcomp = bwn_tab_read(mac, BWN_TAB_4(10, txpwridx + 320)); txgain = bwn_tab_read(mac, BWN_TAB_4(10, txpwridx + 192)); BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800, (txgain >> 4) & 0x7fff); bwn_phy_lp_set_txgain_dac(mac, txgain & 0x7); bwn_phy_lp_set_txgain_pa(mac, (txgain >> 24) & 0x7f); } bwn_phy_lp_set_bbmult(mac, (rxcomp >> 20) & 0xff); /* set TX IQCC */ value[0] = (rxcomp >> 10) & 0x3ff; value[1] = rxcomp & 0x3ff; bwn_tab_write_multi(mac, BWN_TAB_2(0, 80), 2, value); coeff = bwn_tab_read(mac, (mac->mac_phy.rev >= 2) ? BWN_TAB_4(7, txpwridx + 448) : BWN_TAB_4(10, txpwridx + 448)); bwn_tab_write(mac, BWN_TAB_2(0, 85), coeff & 0xffff); if (mac->mac_phy.rev >= 2) { rfpwr = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 576)); BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, rfpwr & 0xffff); } bwn_phy_lp_set_txgain_override(mac); } if (plp->plp_rccap) bwn_phy_lp_set_rccap(mac); bwn_phy_lp_set_antenna(mac, plp->plp_antenna); bwn_phy_lp_set_txpctlmode(mac, mode); free(tabs, M_DEVBUF); } static void bwn_phy_lp_digflt_restore(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; int i; static const uint16_t addr[] = { BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2), BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4), BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6), BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8), BWN_PHY_OFDM(0xcf), }; for (i = 0; i < N(addr); i++) BWN_PHY_WRITE(mac, addr[i], plp->plp_digfilt[i]); } static void bwn_phy_lp_tblinit(struct bwn_mac *mac) { uint32_t freq = ieee80211_ieee2mhz(bwn_phy_lp_get_default_chan(mac), 0); if (mac->mac_phy.rev < 2) { bwn_phy_lp_tblinit_r01(mac); bwn_phy_lp_tblinit_txgain(mac); bwn_phy_lp_set_gaintbl(mac, freq); return; } bwn_phy_lp_tblinit_r2(mac); bwn_phy_lp_tblinit_txgain(mac); } struct bwn_wpair { uint16_t reg; uint16_t value; }; struct bwn_smpair { uint16_t offset; uint16_t mask; uint16_t set; }; static void bwn_phy_lp_bbinit_r2(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_wpair v1[] = { { BWN_PHY_AFE_DAC_CTL, 0x50 }, { BWN_PHY_AFE_CTL, 0x8800 }, { BWN_PHY_AFE_CTL_OVR, 0 }, { BWN_PHY_AFE_CTL_OVRVAL, 0 }, { BWN_PHY_RF_OVERRIDE_0, 0 }, { BWN_PHY_RF_OVERRIDE_2, 0 }, { BWN_PHY_OFDM(0xf9), 0 }, { BWN_PHY_TR_LOOKUP_1, 0 } }; static const struct bwn_smpair v2[] = { { BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0xb4 }, { BWN_PHY_DCOFFSETTRANSIENT, 0xf8ff, 0x200 }, { BWN_PHY_DCOFFSETTRANSIENT, 0xff00, 0x7f }, { BWN_PHY_GAINDIRECTMISMATCH, 0xff0f, 0x40 }, { BWN_PHY_PREAMBLECONFIRMTO, 0xff00, 0x2 } }; static const struct bwn_smpair v3[] = { { BWN_PHY_OFDM(0xfe), 0xffe0, 0x1f }, { BWN_PHY_OFDM(0xff), 0xffe0, 0xc }, { BWN_PHY_OFDM(0x100), 0xff00, 0x19 }, { BWN_PHY_OFDM(0xff), 0x03ff, 0x3c00 }, { BWN_PHY_OFDM(0xfe), 0xfc1f, 
0x3e0 }, { BWN_PHY_OFDM(0xff), 0xffe0, 0xc }, { BWN_PHY_OFDM(0x100), 0x00ff, 0x1900 }, { BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800 }, { BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x12 }, { BWN_PHY_GAINMISMATCH, 0x0fff, 0x9000 }, }; int i; for (i = 0; i < N(v1); i++) BWN_PHY_WRITE(mac, v1[i].reg, v1[i].value); BWN_PHY_SET(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x10); for (i = 0; i < N(v2); i++) BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x4000); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x2000); BWN_PHY_SET(mac, BWN_PHY_OFDM(0x10a), 0x1); if (siba_get_pci_revid(sc->sc_dev) >= 0x18) { bwn_tab_write(mac, BWN_TAB_4(17, 65), 0xec); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x14); } else { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x10); } BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0xff00, 0xf4); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0x00ff, 0xf100); BWN_PHY_WRITE(mac, BWN_PHY_CLIPTHRESH, 0x48); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0xff00, 0x46); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe4), 0xff00, 0x10); BWN_PHY_SETMASK(mac, BWN_PHY_PWR_THRESH1, 0xfff0, 0x9); BWN_PHY_MASK(mac, BWN_PHY_GAINDIRECTMISMATCH, ~0xf); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5500); BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0xa0); BWN_PHY_SETMASK(mac, BWN_PHY_GAINDIRECTMISMATCH, 0xe0ff, 0x300); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2a00); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xa); } else { BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x1e00); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xd); } for (i = 0; i < N(v3); i++) BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { bwn_tab_write(mac, BWN_TAB_2(0x08, 0x14), 0); bwn_tab_write(mac, BWN_TAB_2(0x08, 0x12), 0x40); } if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x40); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0xb00); BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x6); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0x9d00); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0xff00, 0xa1); BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff); } else BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x40); BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0xff00, 0xb3); BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00); BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset); BWN_PHY_SET(mac, BWN_PHY_RESET_CTL, 0x44); BWN_PHY_WRITE(mac, BWN_PHY_RESET_CTL, 0x80); BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, 0xa954); BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_1, 0x2000 | ((uint16_t)plp->plp_rssigs << 10) | ((uint16_t)plp->plp_rssivc << 4) | plp->plp_rssivf); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { BWN_PHY_SET(mac, BWN_PHY_AFE_ADC_CTL_0, 0x1c); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_CTL, 0x00ff, 0x8800); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_1, 0xfc3c, 0x0400); } bwn_phy_lp_digflt_save(mac); } static void bwn_phy_lp_bbinit_r01(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_smpair v1[] = { { BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x0005 }, { BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0x0180 }, { BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x3c00 }, { 
BWN_PHY_GAINDIRECTMISMATCH, 0xfff0, 0x0005 }, { BWN_PHY_GAIN_MISMATCH_LIMIT, 0xffc0, 0x001a }, { BWN_PHY_CRS_ED_THRESH, 0xff00, 0x00b3 }, { BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00 } }; static const struct bwn_smpair v2[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_1, 0x3f00, 0x0900 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0400 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_5, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_5, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_6, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_6, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_7, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_7, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_8, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_8, 0xc0ff, 0x0b00 } }; static const struct bwn_smpair v3[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0001 }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0400 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0001 }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0500 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0800 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0a00 } }; static const struct bwn_smpair v4[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0004 }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0800 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0004 }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0c00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0100 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0300 } }; static const struct bwn_smpair v5[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0006 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0500 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0006 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0700 } }; int i; uint16_t tmp, tmp2; BWN_PHY_MASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf7ff); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL, 0); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, 0); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, 0); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0); BWN_PHY_SET(mac, BWN_PHY_AFE_DAC_CTL, 0x0004); BWN_PHY_SETMASK(mac, BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0x0078); BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800); BWN_PHY_WRITE(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x0016); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_0, 0xfff8, 0x0004); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5400); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2400); BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0x0006); BWN_PHY_MASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfffe); for (i = 0; i < N(v1); i++) BWN_PHY_SETMASK(mac, v1[i].offset, v1[i].mask, v1[i].set); BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset); if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) && ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) || (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF))) { siba_cc_pmu_set_ldovolt(sc->sc_dev, SIBA_LDO_PAREF, 0x28); siba_cc_pmu_set_ldoparef(sc->sc_dev, 1); if (mac->mac_phy.rev == 0) BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 0xffcf, 0x0010); bwn_tab_write(mac, BWN_TAB_2(11, 7), 60); } else { siba_cc_pmu_set_ldoparef(sc->sc_dev, 0); BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 0xffcf, 0x0020); bwn_tab_write(mac, BWN_TAB_2(11, 7), 100); } tmp = plp->plp_rssivf | 
plp->plp_rssivc << 4 | 0xa000; BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, tmp); if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_RSSIINV) BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x0aaa); else BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x02aa); bwn_tab_write(mac, BWN_TAB_2(11, 1), 24); BWN_PHY_SETMASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfff9, (plp->plp_bxarch << 1)); if (mac->mac_phy.rev == 1 && (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT)) { for (i = 0; i < N(v2); i++) BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) || (siba_get_pci_subdevice(sc->sc_dev) == 0x048a) || ((mac->mac_phy.rev == 0) && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM))) { for (i = 0; i < N(v3); i++) BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set); } else if (mac->mac_phy.rev == 1 || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM)) { for (i = 0; i < N(v4); i++) BWN_PHY_SETMASK(mac, v4[i].offset, v4[i].mask, v4[i].set); } else { for (i = 0; i < N(v5); i++) BWN_PHY_SETMASK(mac, v5[i].offset, v5[i].mask, v5[i].set); } if (mac->mac_phy.rev == 1 && (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF)) { BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_5, BWN_PHY_TR_LOOKUP_1); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_6, BWN_PHY_TR_LOOKUP_2); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_7, BWN_PHY_TR_LOOKUP_3); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_8, BWN_PHY_TR_LOOKUP_4); } if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT) && (siba_get_chipid(sc->sc_dev) == 0x5354) && (siba_get_chippkg(sc->sc_dev) == SIBA_CHIPPACK_BCM4712S)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0006); BWN_PHY_WRITE(mac, BWN_PHY_GPIO_SELECT, 0x0005); BWN_PHY_WRITE(mac, BWN_PHY_GPIO_OUTEN, 0xffff); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_PR45960W); } if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x8000); BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0040); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0xa400); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0x0b00); BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x0007); BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xfff8, 0x0003); BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xffc7, 0x0020); BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff); } else { BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0x7fff); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xffbf); } if (mac->mac_phy.rev == 1) { tmp = BWN_PHY_READ(mac, BWN_PHY_CLIPCTRTHRESH); tmp2 = (tmp & 0x03e0) >> 5; tmp2 |= tmp2 << 5; BWN_PHY_WRITE(mac, BWN_PHY_4C3, tmp2); tmp = BWN_PHY_READ(mac, BWN_PHY_GAINDIRECTMISMATCH); tmp2 = (tmp & 0x1f00) >> 8; tmp2 |= tmp2 << 5; BWN_PHY_WRITE(mac, BWN_PHY_4C4, tmp2); tmp = BWN_PHY_READ(mac, BWN_PHY_VERYLOWGAINDB); tmp2 = tmp & 0x00ff; tmp2 |= tmp << 8; BWN_PHY_WRITE(mac, BWN_PHY_4C5, tmp2); } } struct bwn_b2062_freq { uint16_t freq; uint8_t value[6]; }; static void bwn_phy_lp_b2062_init(struct bwn_mac *mac) { #define CALC_CTL7(freq, div) \ (((800000000 * (div) + (freq)) / (2 * (freq)) - 8) & 0xff) #define CALC_CTL18(freq, div) \ ((((100 * (freq) + 16000000 * (div)) / (32000000 * (div))) - 1) & 0xff) #define CALC_CTL19(freq, div) \ ((((2 * (freq) + 1000000 * (div)) / (2000000 * (div))) - 1) & 0xff) struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_b2062_freq freqdata_tab[] = { { 12000, { 6, 6, 6, 6, 10, 6 } }, { 13000, { 4, 4, 4, 4, 11, 7 } }, { 14400, { 3, 3, 3, 3, 12, 7 } }, { 16200, { 3, 3, 3, 3, 13, 8 } }, { 
18000, { 2, 2, 2, 2, 14, 8 } }, { 19200, { 1, 1, 1, 1, 14, 9 } } }; static const struct bwn_wpair v1[] = { { BWN_B2062_N_TXCTL3, 0 }, { BWN_B2062_N_TXCTL4, 0 }, { BWN_B2062_N_TXCTL5, 0 }, { BWN_B2062_N_TXCTL6, 0 }, { BWN_B2062_N_PDNCTL0, 0x40 }, { BWN_B2062_N_PDNCTL0, 0 }, { BWN_B2062_N_CALIB_TS, 0x10 }, { BWN_B2062_N_CALIB_TS, 0 } }; const struct bwn_b2062_freq *f = NULL; uint32_t xtalfreq, ref; unsigned int i; bwn_phy_lp_b2062_tblinit(mac); for (i = 0; i < N(v1); i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); if (mac->mac_phy.rev > 0) BWN_RF_WRITE(mac, BWN_B2062_S_BG_CTL1, (BWN_RF_READ(mac, BWN_B2062_N_COM2) >> 1) | 0x80); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) BWN_RF_SET(mac, BWN_B2062_N_TSSI_CTL0, 0x1); else BWN_RF_MASK(mac, BWN_B2062_N_TSSI_CTL0, ~0x1); KASSERT(siba_get_cc_caps(sc->sc_dev) & SIBA_CC_CAPS_PMU, ("%s:%d: fail", __func__, __LINE__)); xtalfreq = siba_get_cc_pmufreq(sc->sc_dev) * 1000; KASSERT(xtalfreq != 0, ("%s:%d: fail", __func__, __LINE__)); if (xtalfreq <= 30000000) { plp->plp_div = 1; BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL1, 0xfffb); } else { plp->plp_div = 2; BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL1, 0x4); } BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL7, CALC_CTL7(xtalfreq, plp->plp_div)); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL18, CALC_CTL18(xtalfreq, plp->plp_div)); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL19, CALC_CTL19(xtalfreq, plp->plp_div)); ref = (1000 * plp->plp_div + 2 * xtalfreq) / (2000 * plp->plp_div); ref &= 0xffff; for (i = 0; i < N(freqdata_tab); i++) { if (ref < freqdata_tab[i].freq) { f = &freqdata_tab[i]; break; } } if (f == NULL) f = &freqdata_tab[N(freqdata_tab) - 1]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL8, ((uint16_t)(f->value[1]) << 4) | f->value[0]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL9, ((uint16_t)(f->value[3]) << 4) | f->value[2]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL10, f->value[4]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL11, f->value[5]); #undef CALC_CTL7 #undef CALC_CTL18 #undef CALC_CTL19 } static void bwn_phy_lp_b2063_init(struct bwn_mac *mac) { bwn_phy_lp_b2063_tblinit(mac); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_SP5, 0); BWN_RF_SET(mac, BWN_B2063_COM8, 0x38); BWN_RF_WRITE(mac, BWN_B2063_REG_SP1, 0x56); BWN_RF_MASK(mac, BWN_B2063_RX_BB_CTL2, ~0x2); BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP6, 0x20); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP9, 0x40); if (mac->mac_phy.rev == 2) { BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0xa0); BWN_RF_WRITE(mac, BWN_B2063_PA_SP4, 0xa0); BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x18); } else { BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0x20); BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x20); } } static void bwn_phy_lp_rxcal_r2(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; static const struct bwn_wpair v1[] = { { BWN_B2063_RX_BB_SP8, 0x0 }, { BWN_B2063_RC_CALIB_CTL1, 0x7e }, { BWN_B2063_RC_CALIB_CTL1, 0x7c }, { BWN_B2063_RC_CALIB_CTL2, 0x15 }, { BWN_B2063_RC_CALIB_CTL3, 0x70 }, { BWN_B2063_RC_CALIB_CTL4, 0x52 }, { BWN_B2063_RC_CALIB_CTL5, 0x1 }, { BWN_B2063_RC_CALIB_CTL1, 0x7d } }; static const struct bwn_wpair v2[] = { { BWN_B2063_TX_BB_SP3, 0x0 }, { BWN_B2063_RC_CALIB_CTL1, 0x7e }, { BWN_B2063_RC_CALIB_CTL1, 0x7c }, { BWN_B2063_RC_CALIB_CTL2, 0x55 }, { BWN_B2063_RC_CALIB_CTL3, 0x76 } }; uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; int i; uint8_t tmp; tmp = BWN_RF_READ(mac, BWN_B2063_RX_BB_SP8) & 0xff; for (i = 0; i < 2; i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, 0xf7); for (i = 2; i < N(v1); i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); 
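/* Poll for RC calibration completion: RC_CALIB_CTL6 bit 0x2 should go high. Worst case this waits 10000 * DELAY(1000), roughly ten seconds, before falling back to the saved RX_BB_SP8 value below. */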
for (i = 0; i < 10000; i++) { if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2) break; DELAY(1000); } if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)) BWN_RF_WRITE(mac, BWN_B2063_RX_BB_SP8, tmp); tmp = BWN_RF_READ(mac, BWN_B2063_TX_BB_SP3) & 0xff; for (i = 0; i < N(v2); i++) BWN_RF_WRITE(mac, v2[i].reg, v2[i].value); if (freqxtal == 24000000) { BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0xfc); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x0); } else { BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0x13); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x1); } BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0x7d); for (i = 0; i < 10000; i++) { if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2) break; DELAY(1000); } if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)) BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, tmp); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL1, 0x7e); } static void bwn_phy_lp_rccal_r12(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct bwn_phy_lp_iq_est ie; struct bwn_txgain tx_gains; static const uint32_t pwrtbl[21] = { 0x10000, 0x10557, 0x10e2d, 0x113e0, 0x10f22, 0x0ff64, 0x0eda2, 0x0e5d4, 0x0efd1, 0x0fbe8, 0x0b7b8, 0x04b35, 0x01a5e, 0x00a0b, 0x00444, 0x001fd, 0x000ff, 0x00088, 0x0004c, 0x0002c, 0x0001a, }; uint32_t npwr, ipwr, sqpwr, tmp; int loopback, i, j, sum, error; uint16_t save[7]; uint8_t txo, bbmult, txpctlmode; error = bwn_phy_lp_switch_channel(mac, 7); if (error) device_printf(sc->sc_dev, "failed to change channel to 7 (%d)\n", error); txo = (BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40) ? 1 : 0; bbmult = bwn_phy_lp_get_bbmult(mac); if (txo) tx_gains = bwn_phy_lp_get_txgain(mac); save[0] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_0); save[1] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_VAL_0); save[2] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR); save[3] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVRVAL); save[4] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2); save[5] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2_VAL); save[6] = BWN_PHY_READ(mac, BWN_PHY_LP_PHY_CTL); bwn_phy_lp_get_txpctlmode(mac); txpctlmode = plp->plp_txpctlmode; bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); /* disable CRS */ bwn_phy_lp_set_deaf(mac, 1); bwn_phy_lp_set_trsw_over(mac, 0, 1); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffb); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x4); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x10); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffbf); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x7); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x38); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x100); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfdff); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL0, 0); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL1, 1); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL2, 0x20); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xf7ff); BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0); BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, 0x45af); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0x3ff); loopback = bwn_phy_lp_loopback(mac); if (loopback == -1) goto done; bwn_phy_lp_set_rxgain_idx(mac, 
	    loopback);
	BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xffbf, 0x40);
	BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfff8, 0x1);
	BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xffc7, 0x8);
	BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f, 0xc0);

	tmp = 0;
	memset(&ie, 0, sizeof(ie));
	for (i = 128; i <= 159; i++) {
		BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2, i);
		sum = 0;
		for (j = 5; j <= 25; j++) {
			bwn_phy_lp_ddfs_turnon(mac, 1, 1, j, j, 0);
			if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie)))
				goto done;
			sqpwr = ie.ie_ipwr + ie.ie_qpwr;
			ipwr = ((pwrtbl[j - 5] >> 3) + 1) >> 1;
			npwr = bwn_phy_lp_roundup(sqpwr,
			    (j == 5) ? sqpwr : 0, 12);
			sum += ((ipwr - npwr) * (ipwr - npwr));
			if ((i == 128) || (sum < tmp)) {
				plp->plp_rccap = i;
				tmp = sum;
			}
		}
	}
	bwn_phy_lp_ddfs_turnoff(mac);
done:
	/* restore CRS */
	bwn_phy_lp_clear_deaf(mac, 1);
	BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xff80);
	BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfc00);

	BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_VAL_0, save[1]);
	BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, save[0]);
	BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVRVAL, save[3]);
	BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, save[2]);
	BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2_VAL, save[5]);
	BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, save[4]);
	BWN_PHY_WRITE(mac, BWN_PHY_LP_PHY_CTL, save[6]);

	bwn_phy_lp_set_bbmult(mac, bbmult);
	if (txo)
		bwn_phy_lp_set_txgain(mac, &tx_gains);
	bwn_phy_lp_set_txpctlmode(mac, txpctlmode);
	if (plp->plp_rccap)
		bwn_phy_lp_set_rccap(mac);
}

static void
bwn_phy_lp_set_rccap(struct bwn_mac *mac)
{
	struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
	uint8_t rc_cap = (plp->plp_rccap & 0x1f) >> 1;

	if (mac->mac_phy.rev == 1)
		rc_cap = MIN(rc_cap + 5, 15);

	BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2,
	    MAX(plp->plp_rccap - 4, 0x80));
	BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, rc_cap | 0x80);
	BWN_RF_WRITE(mac, BWN_B2062_S_RXG_CNT16,
	    ((plp->plp_rccap & 0x1f) >> 2) | 0x80);
}
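/*
 * bwn_phy_lp_roundup() below is a rounded long division that keeps
 * `pre' extra fraction bits: it computes value/div, shifts `pre'
 * fraction bits into the quotient, and rounds the result to nearest.
 * A worked example (ours, not from the sources): value = 5, div = 2,
 * pre = 2 gives q = 2, r = 1; the first shift carries from r so
 * q = 101b, the second gives q = 1010b with no carry left, and no
 * final round-up applies, so it returns 10, i.e. 2.5 in units of 1/4.
 */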
static uint32_t
bwn_phy_lp_roundup(uint32_t value, uint32_t div, uint8_t pre)
{
	uint32_t i, q, r;

	if (div == 0)
		return (0);

	for (i = 0, q = value / div, r = value % div; i < pre; i++) {
		q <<= 1;
		if (r << 1 >= div) {
			q++;
			r = (r << 1) - div;
		}
	}
	if (r << 1 >= div)
		q++;
	return (q);
}

static void
bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;

	BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0xff);
	DELAY(20);
	if (siba_get_chipid(sc->sc_dev) == 0x5354) {
		BWN_RF_WRITE(mac, BWN_B2062_N_COM1, 4);
		BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 4);
	} else {
		BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0);
	}
	DELAY(5);
}

static void
bwn_phy_lp_b2062_vco_calib(struct bwn_mac *mac)
{

	BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x42);
	BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x62);
	DELAY(200);
}

static void
bwn_phy_lp_b2062_tblinit(struct bwn_mac *mac)
{
#define	FLAG_A	0x01
#define	FLAG_G	0x02
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	static const struct bwn_b206x_rfinit_entry bwn_b2062_init_tab[] = {
		{ BWN_B2062_N_COM4, 0x1, 0x0, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_PDNCTL1, 0x0, 0xca, FLAG_G, },
		{ BWN_B2062_N_PDNCTL3, 0x0, 0x0, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_PDNCTL4, 0x15, 0x2a, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_LGENC, 0xDB, 0xff, FLAG_A, },
		{ BWN_B2062_N_LGENATUNE0, 0xdd, 0x0, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_LGENATUNE2, 0xdd, 0x0, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_LGENATUNE3, 0x77, 0xB5, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_LGENACTL3, 0x0, 0xff, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_LGENACTL7, 0x33, 0x33, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_RXA_CTL1, 0x0, 0x0, FLAG_G, },
		{ BWN_B2062_N_RXBB_CTL0, 0x82, 0x80, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_RXBB_GAIN1, 0x4, 0x4, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_RXBB_GAIN2, 0x0, 0x0, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_TXCTL4, 0x3, 0x3, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_TXCTL5, 0x2, 0x2, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_TX_TUNE, 0x88, 0x1b, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_COM4, 0x1, 0x0, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_PDS_CTL0, 0xff, 0xff, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_LGENG_CTL0, 0xf8, 0xd8, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_LGENG_CTL1, 0x3c, 0x24, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_LGENG_CTL8, 0x88, 0x80, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_LGENG_CTL10, 0x88, 0x80, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL0, 0x98, 0x98, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL1, 0x10, 0x10, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL5, 0x43, 0x43, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL6, 0x47, 0x47, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL7, 0xc, 0xc, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL8, 0x11, 0x11, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL9, 0x11, 0x11, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL10, 0xe, 0xe, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL11, 0x8, 0x8, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL12, 0x33, 0x33, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL13, 0xa, 0xa, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL14, 0x6, 0x6, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL18, 0x3e, 0x3e, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL19, 0x13, 0x13, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL21, 0x62, 0x62, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL22, 0x7, 0x7, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL23, 0x16, 0x16, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL24, 0x5c, 0x5c, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL25, 0x95, 0x95, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL30, 0xa0, 0xa0, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL31, 0x4, 0x4, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL33, 0xcc, 0xcc, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL34, 0x7, 0x7, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RXG_CNT8, 0xf, 0xf, FLAG_A, },
	};
	const struct bwn_b206x_rfinit_entry *br;
	unsigned int i;

	for (i = 0; i < N(bwn_b2062_init_tab); i++) {
		br = &bwn_b2062_init_tab[i];
		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
			if (br->br_flags & FLAG_G)
				BWN_RF_WRITE(mac, br->br_offset,
				    br->br_valueg);
		} else {
			if (br->br_flags & FLAG_A)
				BWN_RF_WRITE(mac, br->br_offset,
				    br->br_valuea);
		}
	}
#undef	FLAG_A
#undef	FLAG_G
}
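/*
 * The 2063 radio is initialized from the same kind of table as the
 * 2062 above: each entry carries one value for A-band (5 GHz) and one
 * for G-band (2 GHz) operation, and the FLAG_A/FLAG_G bits say which
 * bands the entry applies to.  Only the value matching the current
 * channel's band is written to the radio.
 */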
static void
bwn_phy_lp_b2063_tblinit(struct bwn_mac *mac)
{
#define	FLAG_A	0x01
#define	FLAG_G	0x02
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	static const struct bwn_b206x_rfinit_entry bwn_b2063_init_tab[] = {
		{ BWN_B2063_COM1, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM10, 0x1, 0x0, FLAG_A, },
		{ BWN_B2063_COM16, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM17, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM18, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM19, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM20, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM21, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM22, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM23, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM24, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_LOGEN_SP1, 0xe8, 0xd4, FLAG_A | FLAG_G, },
		{ BWN_B2063_LOGEN_SP2, 0xa7, 0x53, FLAG_A | FLAG_G, },
		{ BWN_B2063_LOGEN_SP4, 0xf0, 0xf, FLAG_A | FLAG_G, },
		{ BWN_B2063_G_RX_SP1, 0x1f, 0x5e, FLAG_G, },
		{ BWN_B2063_G_RX_SP2, 0x7f, 0x7e, FLAG_G, },
		{ BWN_B2063_G_RX_SP3, 0x30, 0xf0, FLAG_G, },
		{ BWN_B2063_G_RX_SP7, 0x7f, 0x7f, FLAG_A | FLAG_G, },
		{ BWN_B2063_G_RX_SP10, 0xc, 0xc, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_SP1, 0x3c, 0x3f, FLAG_A, },
		{ BWN_B2063_A_RX_SP2, 0xfc, 0xfe, FLAG_A, },
		{ BWN_B2063_A_RX_SP7, 0x8, 0x8, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_BB_SP4, 0x60, 0x60, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_BB_SP8, 0x30, 0x30, FLAG_A | FLAG_G, },
		{ BWN_B2063_TX_RF_SP3, 0xc, 0xb, FLAG_A | FLAG_G, },
		{ BWN_B2063_TX_RF_SP4, 0x10, 0xf, FLAG_A | FLAG_G, },
		{ BWN_B2063_PA_SP1, 0x3d, 0xfd, FLAG_A | FLAG_G, },
		{ BWN_B2063_TX_BB_SP1, 0x2, 0x2, FLAG_A | FLAG_G, },
		{ BWN_B2063_BANDGAP_CTL1, 0x56, 0x56, FLAG_A | FLAG_G, },
		{ BWN_B2063_JTAG_VCO2, 0xF7, 0xF7, FLAG_A | FLAG_G, },
		{ BWN_B2063_G_RX_MIX3, 0x71, 0x71, FLAG_A | FLAG_G, },
		{ BWN_B2063_G_RX_MIX4, 0x71, 0x71, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_1ST2, 0xf0, 0x30, FLAG_A, },
		{ BWN_B2063_A_RX_PS6, 0x77, 0x77, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_MIX4, 0x3, 0x3, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_MIX5, 0xf, 0xf, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_MIX6, 0xf, 0xf, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_TIA_CTL1, 0x77, 0x77, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_TIA_CTL3, 0x77, 0x77, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_BB_CTL2, 0x4, 0x4, FLAG_A | FLAG_G, },
		{ BWN_B2063_PA_CTL1, 0x0, 0x4, FLAG_A, },
		{ BWN_B2063_VREG_CTL1, 0x3, 0x3, FLAG_A | FLAG_G, },
	};
	const struct bwn_b206x_rfinit_entry *br;
	unsigned int i;

	for (i = 0; i < N(bwn_b2063_init_tab); i++) {
		br = &bwn_b2063_init_tab[i];
		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
			if (br->br_flags & FLAG_G)
				BWN_RF_WRITE(mac, br->br_offset,
				    br->br_valueg);
		} else {
			if (br->br_flags & FLAG_A)
				BWN_RF_WRITE(mac, br->br_offset,
				    br->br_valuea);
		}
	}
#undef	FLAG_A
#undef	FLAG_G
}
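/*
 * PHY tables are addressed through a single (type, table, offset)
 * token built by the BWN_TAB_{1,2,4}() macros and decoded below with
 * BWN_TAB_GETTYPE()/BWN_TAB_GETOFFSET(); the exact bit packing of the
 * token lives in those macros, not here.  For example, the baseband
 * multiplier used by bwn_phy_lp_get_bbmult()/set_bbmult() further down
 * is BWN_TAB_2(0, 87), i.e. 16-bit entry 87 of table 0.  32-bit
 * entries are transferred as a high/low halfword pair through
 * BWN_PHY_TABLEDATAHI/BWN_PHY_TABLEDATALO.
 */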
static void
bwn_tab_read_multi(struct bwn_mac *mac, uint32_t typenoffset, int count,
    void *_data)
{
	unsigned int i;
	uint32_t offset, type;
	uint8_t *data = _data;

	type = BWN_TAB_GETTYPE(typenoffset);
	offset = BWN_TAB_GETOFFSET(typenoffset);
	KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));

	BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);

	for (i = 0; i < count; i++) {
		switch (type) {
		case BWN_TAB_8BIT:
			*data = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff;
			data++;
			break;
		case BWN_TAB_16BIT:
			*((uint16_t *)data) = BWN_PHY_READ(mac,
			    BWN_PHY_TABLEDATALO);
			data += 2;
			break;
		case BWN_TAB_32BIT:
			*((uint32_t *)data) = BWN_PHY_READ(mac,
			    BWN_PHY_TABLEDATAHI);
			*((uint32_t *)data) <<= 16;
			*((uint32_t *)data) |= BWN_PHY_READ(mac,
			    BWN_PHY_TABLEDATALO);
			data += 4;
			break;
		default:
			KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
		}
	}
}

static void
bwn_tab_write_multi(struct bwn_mac *mac, uint32_t typenoffset, int count,
    const void *_data)
{
	uint32_t offset, type, value;
	const uint8_t *data = _data;
	unsigned int i;

	type = BWN_TAB_GETTYPE(typenoffset);
	offset = BWN_TAB_GETOFFSET(typenoffset);
	KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));

	BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);

	for (i = 0; i < count; i++) {
		switch (type) {
		case BWN_TAB_8BIT:
			value = *data;
			data++;
			KASSERT(!(value & ~0xff),
			    ("%s:%d: fail", __func__, __LINE__));
			BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
			break;
		case BWN_TAB_16BIT:
			value = *((const uint16_t *)data);
			data += 2;
			KASSERT(!(value & ~0xffff),
			    ("%s:%d: fail", __func__, __LINE__));
			BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
			break;
		case BWN_TAB_32BIT:
			value = *((const uint32_t *)data);
			data += 4;
			BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16);
			BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
			break;
		default:
			KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
		}
	}
}

static struct bwn_txgain
bwn_phy_lp_get_txgain(struct bwn_mac *mac)
{
	struct bwn_txgain tg;
	uint16_t tmp;

	tg.tg_dac = (BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0x380) >> 7;
	if (mac->mac_phy.rev < 2) {
		tmp = BWN_PHY_READ(mac,
		    BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL) & 0x7ff;
		tg.tg_gm = tmp & 0x0007;
		tg.tg_pga = (tmp & 0x0078) >> 3;
		tg.tg_pad = (tmp & 0x780) >> 7;
		return (tg);
	}
	tmp = BWN_PHY_READ(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL);
	tg.tg_pad = BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0xff;
	tg.tg_gm = tmp & 0xff;
	tg.tg_pga = (tmp >> 8) & 0xff;
	return (tg);
}

static uint8_t
bwn_phy_lp_get_bbmult(struct bwn_mac *mac)
{

	return (bwn_tab_read(mac, BWN_TAB_2(0, 87)) & 0xff00) >> 8;
}

static void
bwn_phy_lp_set_txgain(struct bwn_mac *mac, struct bwn_txgain *tg)
{
	uint16_t pa;

	if (mac->mac_phy.rev < 2) {
		BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800,
		    (tg->tg_pad << 7) | (tg->tg_pga << 3) | tg->tg_gm);
		bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac);
		bwn_phy_lp_set_txgain_override(mac);
		return;
	}

	pa = bwn_phy_lp_get_pa_gain(mac);
	BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL,
	    (tg->tg_pga << 8) | tg->tg_gm);
	BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0x8000,
	    tg->tg_pad | (pa << 6));
	BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xfc), (tg->tg_pga << 8) | tg->tg_gm);
	BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x8000,
	    tg->tg_pad | (pa << 8));
	bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac);
	bwn_phy_lp_set_txgain_override(mac);
}

static void
bwn_phy_lp_set_bbmult(struct bwn_mac *mac, uint8_t bbmult)
{

	bwn_tab_write(mac, BWN_TAB_2(0, 87), (uint16_t)bbmult << 8);
}

static void
bwn_phy_lp_set_trsw_over(struct bwn_mac *mac, uint8_t tx, uint8_t rx)
{
	uint16_t trsw = (tx << 1) | rx;

	BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffc, trsw);
	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x3);
}

static void
bwn_phy_lp_set_rxgain(struct bwn_mac *mac, uint32_t gain)
{
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t ext_lna, high_gain, lna, low_gain, trsw, tmp;

	if (mac->mac_phy.rev < 2) {
		trsw = gain & 0x1;
		lna = (gain & 0xfffc) | ((gain & 0xc) >> 2);
		ext_lna = (gain & 2) >> 1;
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw);
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
		    0xfbff, ext_lna << 10);
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
		    0xf7ff, ext_lna << 11);
		BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, lna);
	} else {
		low_gain = gain & 0xffff;
		high_gain = (gain >> 16) & 0xf;
		ext_lna = (gain >> 21) & 0x1;
		trsw = ~(gain >> 20) & 0x1;
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw);
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
		    0xfdff, ext_lna << 9);
		BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
		    0xfbff, ext_lna << 10);
		BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL,
		    low_gain);
		BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff0, high_gain);
		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
			tmp = (gain >> 2) & 0x3;
			BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
			    0xe7ff, tmp << 11);
			BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe6), 0xffe7,
			    tmp << 3);
		}
	}

	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1);
	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10);
	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40);
	if (mac->mac_phy.rev >= 2) {
		BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100);
		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
			BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x400);
			BWN_PHY_SET(mac, BWN_PHY_OFDM(0xe5), 0x8);
		}
		return;
	}
	BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x200);
}
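/*
 * CRS "deaf" handling: carrier sense is forced off while calibration
 * runs.  Two flags record who asked for it (user vs. system), and the
 * receiver is made to hear again only once both requests have been
 * cleared.  This reading of plp_crsusr_off/plp_crssys_off is inferred
 * from the code below rather than from documentation.
 */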
static void
bwn_phy_lp_set_deaf(struct bwn_mac *mac, uint8_t user)
{
	struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;

	if (user)
		plp->plp_crsusr_off = 1;
	else
		plp->plp_crssys_off = 1;
	BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x80);
}

static void
bwn_phy_lp_clear_deaf(struct bwn_mac *mac, uint8_t user)
{
	struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;

	if (user)
		plp->plp_crsusr_off = 0;
	else
		plp->plp_crssys_off = 0;

	if (plp->plp_crsusr_off || plp->plp_crssys_off)
		return;

	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
		BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x60);
	else
		BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x20);
}

static unsigned int
bwn_sqrt(struct bwn_mac *mac, unsigned int x)
{
	/* Table holding (10 * sqrt(x)) for x between 1 and 256. */
	static uint8_t sqrt_table[256] = {
		10, 14, 17, 20, 22, 24, 26, 28, 30, 31,
		33, 34, 36, 37, 38, 40, 41, 42, 43, 44,
		45, 46, 47, 48, 50, 50, 51, 52, 53, 54,
		55, 56, 57, 58, 59, 60, 60, 61, 62, 63,
		64, 64, 65, 66, 67, 67, 68, 69, 70, 70,
		71, 72, 72, 73, 74, 74, 75, 76, 76, 77,
		78, 78, 79, 80, 80, 81, 81, 82, 83, 83,
		84, 84, 85, 86, 86, 87, 87, 88, 88, 89,
		90, 90, 91, 91, 92, 92, 93, 93, 94, 94,
		95, 95, 96, 96, 97, 97, 98, 98, 99, 100,
		100, 100, 101, 101, 102, 102, 103, 103, 104, 104,
		105, 105, 106, 106, 107, 107, 108, 108, 109, 109,
		110, 110, 110, 111, 111, 112, 112, 113, 113, 114,
		114, 114, 115, 115, 116, 116, 117, 117, 117, 118,
		118, 119, 119, 120, 120, 120, 121, 121, 122, 122,
		122, 123, 123, 124, 124, 124, 125, 125, 126, 126,
		126, 127, 127, 128, 128, 128, 129, 129, 130, 130,
		130, 131, 131, 131, 132, 132, 133, 133, 133, 134,
		134, 134, 135, 135, 136, 136, 136, 137, 137, 137,
		138, 138, 138, 139, 139, 140, 140, 140, 141, 141,
		141, 142, 142, 142, 143, 143, 143, 144, 144, 144,
		145, 145, 145, 146, 146, 146, 147, 147, 147, 148,
		148, 148, 149, 149, 150, 150, 150, 150, 151, 151,
		151, 152, 152, 152, 153, 153, 153, 154, 154, 154,
		155, 155, 155, 156, 156, 156, 157, 157, 157, 158,
		158, 158, 159, 159, 159, 160
	};

	if (x == 0)
		return (0);
	if (x >= 256) {
		unsigned int tmp;

		for (tmp = 0; x >= (2 * tmp) + 1; x -= (2 * tmp++) + 1)
			/* do nothing */ ;
		return (tmp);
	}
	return (sqrt_table[x - 1] / 10);
}

static int
bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *mac, uint16_t sample)
{
#define	CALC_COEFF(_v, _x, _y, _z)	do {				\
	int _t;								\
	_t = _x - 20;							\
	if (_t >= 0) {							\
		_v = ((_y << (30 - _x)) + (_z >> (1 + _t))) / (_z >> _t); \
	} else {							\
		_v = ((_y << (30 - _x)) + (_z << (-1 - _t))) / (_z << -_t); \
	}								\
} while (0)
#define	CALC_COEFF2(_v, _x, _y, _z)	do {				\
	int _t;								\
	_t = _x - 11;							\
	if (_t >= 0)							\
		_v = (_y << (31 - _x)) / (_z >> _t);			\
	else								\
		_v = (_y << (31 - _x)) / (_z << -_t);			\
} while (0)
	struct bwn_phy_lp_iq_est ie;
	uint16_t v0, v1;
	int tmp[2], ret;

	v1 = BWN_PHY_READ(mac, BWN_PHY_RX_COMP_COEFF_S);
	v0 = v1 >> 8;
	v1 &= 0xff;

	BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, 0x00c0);
	BWN_PHY_MASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff);

	ret = bwn_phy_lp_rx_iq_est(mac, sample, 32, &ie);
	if (ret == 0)
		goto done;

	if (ie.ie_ipwr + ie.ie_qpwr < 2) {
		ret = 0;
		goto done;
	}

	CALC_COEFF(tmp[0], bwn_nbits(ie.ie_iqprod), ie.ie_iqprod, ie.ie_ipwr);
	CALC_COEFF2(tmp[1], bwn_nbits(ie.ie_qpwr), ie.ie_qpwr, ie.ie_ipwr);

	tmp[1] = -bwn_sqrt(mac, tmp[1] - (tmp[0] * tmp[0]));
	v0 = tmp[0] >> 3;
	v1 = tmp[1] >> 4;
done:
	BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, v1);
	BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, v0 << 8);
	return ret;
#undef	CALC_COEFF
#undef	CALC_COEFF2
}

static void
bwn_phy_lp_tblinit_r01(struct bwn_mac *mac)
{
	static const uint16_t noisescale[] = {
		0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
		0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa400,
		0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
		0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
0xa4a4, 0x00a4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4c00, 0x2d36, 0x0000, 0x0000, 0x4c00, 0x2d36, }; static const uint16_t crsgainnft[] = { 0x0366, 0x036a, 0x036f, 0x0364, 0x0367, 0x036d, 0x0374, 0x037f, 0x036f, 0x037b, 0x038a, 0x0378, 0x0367, 0x036d, 0x0375, 0x0381, 0x0374, 0x0381, 0x0392, 0x03a9, 0x03c4, 0x03e1, 0x0001, 0x001f, 0x0040, 0x005e, 0x007f, 0x009e, 0x00bd, 0x00dd, 0x00fd, 0x011d, 0x013d, }; static const uint16_t filterctl[] = { 0xa0fc, 0x10fc, 0x10db, 0x20b7, 0xff93, 0x10bf, 0x109b, 0x2077, 0xff53, 0x0127, }; static const uint32_t psctl[] = { 0x00010000, 0x000000a0, 0x00040000, 0x00000048, 0x08080101, 0x00000080, 0x08080101, 0x00000040, 0x08080101, 0x000000c0, 0x08a81501, 0x000000c0, 0x0fe8fd01, 0x000000c0, 0x08300105, 0x000000c0, 0x08080201, 0x000000c0, 0x08280205, 0x000000c0, 0xe80802fe, 0x000000c7, 0x28080206, 0x000000c0, 0x08080202, 0x000000c0, 0x0ba87602, 0x000000c0, 0x1068013d, 0x000000c0, 0x10280105, 0x000000c0, 0x08880102, 0x000000c0, 0x08280106, 0x000000c0, 0xe80801fd, 0x000000c7, 0xa8080115, 0x000000c0, }; static const uint16_t ofdmcckgain_r0[] = { 0x0001, 0x0001, 0x0001, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const uint16_t ofdmcckgain_r1[] = { 0x5000, 0x6000, 0x7000, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const uint16_t gaindelta[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const uint32_t txpwrctl[] = { 0x00000050, 0x0000004f, 0x0000004e, 0x0000004d, 0x0000004c, 0x0000004b, 0x0000004a, 0x00000049, 0x00000048, 0x00000047, 0x00000046, 0x00000045, 0x00000044, 0x00000043, 0x00000042, 0x00000041, 0x00000040, 0x0000003f, 0x0000003e, 0x0000003d, 0x0000003c, 0x0000003b, 0x0000003a, 0x00000039, 0x00000038, 0x00000037, 0x00000036, 0x00000035, 0x00000034, 0x00000033, 0x00000032, 0x00000031, 0x00000030, 0x0000002f, 0x0000002e, 0x0000002d, 0x0000002c, 0x0000002b, 0x0000002a, 0x00000029, 0x00000028, 0x00000027, 0x00000026, 0x00000025, 0x00000024, 0x00000023, 0x00000022, 0x00000021, 0x00000020, 0x0000001f, 0x0000001e, 0x0000001d, 0x0000001c, 0x0000001b, 0x0000001a, 0x00000019, 0x00000018, 0x00000017, 0x00000016, 0x00000015, 0x00000014, 0x00000013, 0x00000012, 0x00000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000075a0, 0x000075a0, 0x000075a1, 0x000075a1, 0x000075a2, 0x000075a2, 0x000075a3, 0x000075a3, 0x000074b0, 0x000074b0, 0x000074b1, 0x000074b1, 0x000074b2, 0x000074b2, 0x000074b3, 0x000074b3, 0x00006d20, 0x00006d20, 0x00006d21, 0x00006d21, 0x00006d22, 0x00006d22, 0x00006d23, 0x00006d23, 0x00004660, 0x00004660, 0x00004661, 0x00004661, 0x00004662, 0x00004662, 0x00004663, 0x00004663, 0x00003e60, 0x00003e60, 0x00003e61, 0x00003e61, 0x00003e62, 0x00003e62, 0x00003e63, 0x00003e63, 0x00003660, 0x00003660, 0x00003661, 0x00003661, 0x00003662, 0x00003662, 0x00003663, 0x00003663, 0x00002e60, 0x00002e60, 0x00002e61, 0x00002e61, 0x00002e62, 0x00002e62, 0x00002e63, 0x00002e63, 0x00002660, 0x00002660, 0x00002661, 0x00002661, 0x00002662, 0x00002662, 0x00002663, 0x00002663, 0x000025e0, 0x000025e0, 0x000025e1, 0x000025e1, 0x000025e2, 0x000025e2, 0x000025e3, 0x000025e3, 0x00001de0, 0x00001de0, 0x00001de1, 0x00001de1, 0x00001de2, 0x00001de2, 0x00001de3, 0x00001de3, 0x00001d60, 0x00001d60, 0x00001d61, 0x00001d61, 0x00001d62, 0x00001d62, 0x00001d63, 0x00001d63, 0x00001560, 0x00001560, 0x00001561, 0x00001561, 0x00001562, 0x00001562, 0x00001563, 0x00001563, 0x00000d60, 0x00000d60, 0x00000d61, 0x00000d61, 0x00000d62, 0x00000d62, 0x00000d63, 0x00000d63, 0x00000ce0, 0x00000ce0, 0x00000ce1, 0x00000ce1, 0x00000ce2, 0x00000ce2, 0x00000ce3, 0x00000ce3, 0x00000e10, 0x00000e10, 0x00000e11, 0x00000e11, 0x00000e12, 0x00000e12, 0x00000e13, 0x00000e13, 0x00000bf0, 0x00000bf0, 0x00000bf1, 0x00000bf1, 0x00000bf2, 0x00000bf2, 0x00000bf3, 0x00000bf3, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x000000ff, 0x000002fc, 0x0000fa08, 0x00000305, 0x00000206, 0x00000304, 0x0000fb04, 0x0000fcff, 0x000005fb, 0x0000fd01, 0x00000401, 0x00000006, 0x0000ff03, 0x000007fc, 0x0000fc08, 0x00000203, 0x0000fffb, 0x00000600, 0x0000fa01, 0x0000fc03, 0x0000fe06, 0x0000fe00, 0x00000102, 0x000007fd, 0x000004fb, 0x000006ff, 0x000004fd, 0x0000fdfa, 0x000007fb, 0x0000fdfa, 0x0000fa06, 0x00000500, 0x0000f902, 0x000007fa, 0x0000fafa, 0x00000500, 0x000007fa, 0x00000700, 0x00000305, 0x000004ff, 0x00000801, 0x00000503, 0x000005f9, 0x00000404, 0x0000fb08, 0x000005fd, 0x00000501, 0x00000405, 0x0000fb03, 0x000007fc, 0x00000403, 0x00000303, 0x00000402, 0x0000faff, 0x0000fe05, 0x000005fd, 0x0000fe01, 0x000007fa, 0x00000202, 0x00000504, 0x00000102, 0x000008fe, 0x0000fa04, 0x0000fafc, 0x0000fe08, 0x000000f9, 0x000002fa, 0x000003fe, 0x00000304, 0x000004f9, 0x00000100, 0x0000fd06, 0x000008fc, 0x00000701, 0x00000504, 0x0000fdfe, 0x0000fdfc, 0x000003fe, 0x00000704, 0x000002fc, 0x000004f9, 0x0000fdfd, 0x0000fa07, 0x00000205, 0x000003fd, 0x000005fb, 0x000004f9, 0x00000804, 0x0000fc06, 0x0000fcf9, 0x00000100, 0x0000fe05, 0x00000408, 0x0000fb02, 0x00000304, 0x000006fe, 0x000004fa, 0x00000305, 0x000008fc, 0x00000102, 0x000001fd, 0x000004fc, 0x0000fe03, 0x00000701, 0x000001fb, 0x000001f9, 0x00000206, 0x000006fd, 0x00000508, 0x00000700, 0x00000304, 0x000005fe, 0x000005ff, 0x0000fa04, 0x00000303, 0x0000fefb, 0x000007f9, 0x0000fefc, 0x000004fd, 0x000005fc, 0x0000fffd, 0x0000fc08, 0x0000fbf9, 0x0000fd07, 0x000008fb, 0x0000fe02, 0x000006fb, 0x00000702, }; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl), bwn_tab_sigsq_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale); bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(crsgainnft), crsgainnft); bwn_tab_write_multi(mac, BWN_TAB_2(8, 0), N(filterctl), filterctl); bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(psctl), psctl); bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl), bwn_tab_pllfrac_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl), bwn_tabl_iqlocal_tbl); if (mac->mac_phy.rev == 0) { bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r0), ofdmcckgain_r0); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r0), ofdmcckgain_r0); } else { bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r1), ofdmcckgain_r1); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r1), ofdmcckgain_r1); } bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(gaindelta), gaindelta); bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(txpwrctl), txpwrctl); } static void bwn_phy_lp_tblinit_r2(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; static const uint16_t noisescale[] = { 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x0000, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4 }; static const uint32_t filterctl[] = { 0x000141fc, 0x000021fc, 0x000021b7, 0x0000416f, 0x0001ff27, 0x0000217f, 0x00002137, 0x000040ef, 0x0001fea7, 0x0000024f }; static const uint32_t psctl[] = { 0x00e38e08, 0x00e08e38, 0x00000000, 0x00000000, 
0x00000000, 0x00002080, 0x00006180, 0x00003002, 0x00000040, 0x00002042, 0x00180047, 0x00080043, 0x00000041, 0x000020c1, 0x00046006, 0x00042002, 0x00040000, 0x00002003, 0x00180006, 0x00080002 }; static const uint32_t gainidx[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a }; static const uint16_t auxgainidx[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016 }; static const uint16_t swctl[] = { 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018 }; static const uint8_t hf[] = { 0x4b, 0x36, 0x24, 0x18, 0x49, 0x34, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17 }; static const uint32_t gainval[] = { 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009, 0x000000f1, 0x00000000, 0x00000000 }; static const uint16_t gain[] = { 0x0000, 0x0400, 0x0800, 0x0802, 0x0804, 0x0806, 0x0807, 0x0808, 0x080a, 0x080b, 0x080c, 0x080e, 0x080f, 0x0810, 0x0812, 0x0813, 0x0814, 0x0816, 0x0817, 0x081a, 0x081b, 0x081f, 0x0820, 0x0824, 0x0830, 0x0834, 0x0837, 0x083b, 0x083f, 0x0840, 0x0844, 0x0857, 0x085b, 0x085f, 0x08d7, 0x08db, 0x08df, 0x0957, 0x095b, 0x095f, 0x0b57, 0x0b5b, 0x0b5f, 0x0f5f, 0x135f, 0x175f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }; static const uint32_t papdeps[] = { 0x00000000, 0x00013ffc, 0x0001dff3, 0x0001bff0, 0x00023fe9, 0x00021fdf, 0x00028fdf, 0x00033fd2, 0x00039fcb, 0x00043fc7, 0x0004efc2, 0x00055fb5, 0x0005cfb0, 0x00063fa8, 0x00068fa3, 0x00071f98, 0x0007ef92, 0x00084f8b, 0x0008df82, 0x00097f77, 0x0009df69, 0x000a3f62, 0x000adf57, 0x000b6f4c, 0x000bff41, 0x000c9f39, 0x000cff30, 0x000dbf27, 0x000e4f1e, 0x000edf16, 0x000f7f13, 0x00102f11, 0x00110f10, 0x0011df11, 0x0012ef15, 0x00143f1c, 0x00158f27, 0x00172f35, 0x00193f47, 0x001baf5f, 0x001e6f7e, 0x0021cfa4, 0x0025bfd2, 0x002a2008, 0x002fb047, 0x00360090, 0x003d40e0, 0x0045c135, 0x004fb189, 0x005ae1d7, 0x0067221d, 0x0075025a, 0x007ff291, 0x007ff2bf, 0x007ff2e3, 0x007ff2ff, 0x007ff315, 0x007ff329, 0x007ff33f, 0x007ff356, 0x007ff36e, 0x007ff39c, 0x007ff441, 0x007ff506 }; static const uint32_t papdmult[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28 }; static const uint32_t gainidx_a0[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28 }; static const uint16_t auxgainidx_a0[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002,
		0x0014, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014
	};
	static const uint32_t gainval_a0[] = {
		0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb,
		0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004,
		0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012,
		0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000,
		0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000,
		0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003,
		0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012,
		0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f,
		0x000000f7, 0x00000000, 0x00000000
	};
	static const uint16_t gain_a0[] = {
		0x0000, 0x0002, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a,
		0x000b, 0x000c, 0x000e, 0x000f, 0x0010, 0x0012, 0x0013,
		0x0014, 0x0016, 0x0017, 0x001a, 0x001b, 0x001f, 0x0020,
		0x0024, 0x0030, 0x0034, 0x0037, 0x003b, 0x003f, 0x0040,
		0x0044, 0x0057, 0x005b, 0x005f, 0x00d7, 0x00db, 0x00df,
		0x0157, 0x015b, 0x015f, 0x0357, 0x035b, 0x035f, 0x075f,
		0x0b5f, 0x0f5f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000
	};

	KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__));

	for (i = 0; i < 704; i++)
		bwn_tab_write(mac, BWN_TAB_4(7, i), 0);

	bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl),
	    bwn_tab_sigsq_tbl);
	bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale);
	bwn_tab_write_multi(mac, BWN_TAB_4(11, 0), N(filterctl), filterctl);
	bwn_tab_write_multi(mac, BWN_TAB_4(12, 0), N(psctl), psctl);
	bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx), gainidx);
	bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx), auxgainidx);
	bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(swctl), swctl);
	bwn_tab_write_multi(mac, BWN_TAB_1(16, 0), N(hf), hf);
	bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval), gainval);
	bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain), gain);
	bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl),
	    bwn_tab_pllfrac_tbl);
	bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl),
	    bwn_tabl_iqlocal_tbl);
	bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(papdeps), papdeps);
	bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(papdmult), papdmult);

	if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
	    (siba_get_chiprev(sc->sc_dev) == 0)) {
		bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx_a0),
		    gainidx_a0);
		bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx_a0),
		    auxgainidx_a0);
		bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval_a0),
		    gainval_a0);
		bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain_a0),
		    gain_a0);
	}
}

static void
bwn_phy_lp_tblinit_txgain(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	static struct bwn_txgain_entry txgain_r2[] = {
		{ 255, 255, 203, 0, 152 }, { 255, 255, 203, 0, 147 }, { 255,
255, 203, 0, 143 }, { 255, 255, 203, 0, 139 }, { 255, 255, 203, 0, 135 }, { 255, 255, 203, 0, 131 }, { 255, 255, 203, 0, 128 }, { 255, 255, 203, 0, 124 }, { 255, 255, 203, 0, 121 }, { 255, 255, 203, 0, 117 }, { 255, 255, 203, 0, 114 }, { 255, 255, 203, 0, 111 }, { 255, 255, 203, 0, 107 }, { 255, 255, 203, 0, 104 }, { 255, 255, 203, 0, 101 }, { 255, 255, 203, 0, 99 }, { 255, 255, 203, 0, 96 }, { 255, 255, 203, 0, 93 }, { 255, 255, 203, 0, 90 }, { 255, 255, 203, 0, 88 }, { 255, 255, 203, 0, 85 }, { 255, 255, 203, 0, 83 }, { 255, 255, 203, 0, 81 }, { 255, 255, 203, 0, 78 }, { 255, 255, 203, 0, 76 }, { 255, 255, 203, 0, 74 }, { 255, 255, 203, 0, 72 }, { 255, 255, 203, 0, 70 }, { 255, 255, 203, 0, 68 }, { 255, 255, 203, 0, 66 }, { 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 }, { 255, 255, 192, 0, 64 }, { 255, 255, 186, 0, 64 }, { 255, 255, 181, 0, 64 }, { 255, 255, 176, 0, 64 }, { 255, 255, 171, 0, 64 }, { 255, 255, 166, 0, 64 }, { 255, 255, 161, 0, 64 }, { 255, 255, 157, 0, 64 }, { 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 }, { 255, 255, 144, 0, 64 }, { 255, 255, 140, 0, 64 }, { 255, 255, 136, 0, 64 }, { 255, 255, 132, 0, 64 }, { 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 }, { 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 }, { 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 }, { 255, 255, 108, 0, 64 }, { 255, 255, 105, 0, 64 }, { 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 }, { 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 }, { 255, 255, 91, 0, 64 }, { 255, 255, 88, 0, 64 }, { 255, 255, 86, 0, 64 }, { 255, 255, 83, 0, 64 }, { 255, 255, 81, 0, 64 }, { 255, 255, 79, 0, 64 }, { 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 }, { 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 }, { 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 248, 64, 0, 64 }, { 255, 248, 62, 0, 64 }, { 255, 241, 62, 0, 64 }, { 255, 241, 60, 0, 64 }, { 255, 234, 60, 0, 64 }, { 255, 234, 59, 0, 64 }, { 255, 227, 59, 0, 64 }, { 255, 227, 57, 0, 64 }, { 255, 221, 57, 0, 64 }, { 255, 221, 55, 0, 64 }, { 255, 215, 55, 0, 64 }, { 255, 215, 54, 0, 64 }, { 255, 208, 54, 0, 64 }, { 255, 208, 52, 0, 64 }, { 255, 203, 52, 0, 64 }, { 255, 203, 51, 0, 64 }, { 255, 197, 51, 0, 64 }, { 255, 197, 49, 0, 64 }, { 255, 191, 49, 0, 64 }, { 255, 191, 48, 0, 64 }, { 255, 186, 48, 0, 64 }, { 255, 186, 47, 0, 64 }, { 255, 181, 47, 0, 64 }, { 255, 181, 45, 0, 64 }, { 255, 175, 45, 0, 64 }, { 255, 175, 44, 0, 64 }, { 255, 170, 44, 0, 64 }, { 255, 170, 43, 0, 64 }, { 255, 166, 43, 0, 64 }, { 255, 166, 42, 0, 64 }, { 255, 161, 42, 0, 64 }, { 255, 161, 40, 0, 64 }, { 255, 156, 40, 0, 64 }, { 255, 156, 39, 0, 64 }, { 255, 152, 39, 0, 64 }, { 255, 152, 38, 0, 64 }, { 255, 148, 38, 0, 64 }, { 255, 148, 37, 0, 64 }, { 255, 143, 37, 0, 64 }, { 255, 143, 36, 0, 64 }, { 255, 139, 36, 0, 64 }, { 255, 139, 35, 0, 64 }, { 255, 135, 35, 0, 64 }, { 255, 135, 34, 0, 64 }, { 255, 132, 34, 0, 64 }, { 255, 132, 33, 0, 64 }, { 255, 128, 33, 0, 64 }, { 255, 128, 32, 0, 64 }, { 255, 124, 32, 0, 64 }, { 255, 124, 31, 0, 64 }, { 255, 121, 31, 0, 64 }, { 255, 121, 30, 0, 64 }, { 255, 117, 30, 0, 64 }, { 255, 117, 29, 0, 64 }, { 255, 114, 29, 0, 64 }, { 255, 114, 29, 0, 64 }, { 255, 111, 29, 0, 64 }, }; static struct bwn_txgain_entry txgain_2ghz_r2[] = { { 7, 99, 255, 0, 64 }, { 7, 96, 255, 0, 64 }, { 7, 93, 255, 0, 64 }, { 7, 90, 255, 0, 64 }, { 7, 88, 255, 0, 64 }, { 7, 85, 255, 0, 64 }, { 7, 83, 255, 0, 64 }, { 7, 81, 255, 0, 64 }, { 7, 78, 255, 0, 64 }, { 7, 76, 255, 0, 64 }, { 7, 74, 255, 0, 64 }, { 7, 72, 255, 0, 64 }, { 7, 70, 255, 
0, 64 }, { 7, 68, 255, 0, 64 }, { 7, 66, 255, 0, 64 }, { 7, 64, 255, 0, 64 }, { 7, 64, 255, 0, 64 }, { 7, 62, 255, 0, 64 }, { 7, 62, 248, 0, 64 }, { 7, 60, 248, 0, 64 }, { 7, 60, 241, 0, 64 }, { 7, 59, 241, 0, 64 }, { 7, 59, 234, 0, 64 }, { 7, 57, 234, 0, 64 }, { 7, 57, 227, 0, 64 }, { 7, 55, 227, 0, 64 }, { 7, 55, 221, 0, 64 }, { 7, 54, 221, 0, 64 }, { 7, 54, 215, 0, 64 }, { 7, 52, 215, 0, 64 }, { 7, 52, 208, 0, 64 }, { 7, 51, 208, 0, 64 }, { 7, 51, 203, 0, 64 }, { 7, 49, 203, 0, 64 }, { 7, 49, 197, 0, 64 }, { 7, 48, 197, 0, 64 }, { 7, 48, 191, 0, 64 }, { 7, 47, 191, 0, 64 }, { 7, 47, 186, 0, 64 }, { 7, 45, 186, 0, 64 }, { 7, 45, 181, 0, 64 }, { 7, 44, 181, 0, 64 }, { 7, 44, 175, 0, 64 }, { 7, 43, 175, 0, 64 }, { 7, 43, 170, 0, 64 }, { 7, 42, 170, 0, 64 }, { 7, 42, 166, 0, 64 }, { 7, 40, 166, 0, 64 }, { 7, 40, 161, 0, 64 }, { 7, 39, 161, 0, 64 }, { 7, 39, 156, 0, 64 }, { 7, 38, 156, 0, 64 }, { 7, 38, 152, 0, 64 }, { 7, 37, 152, 0, 64 }, { 7, 37, 148, 0, 64 }, { 7, 36, 148, 0, 64 }, { 7, 36, 143, 0, 64 }, { 7, 35, 143, 0, 64 }, { 7, 35, 139, 0, 64 }, { 7, 34, 139, 0, 64 }, { 7, 34, 135, 0, 64 }, { 7, 33, 135, 0, 64 }, { 7, 33, 132, 0, 64 }, { 7, 32, 132, 0, 64 }, { 7, 32, 128, 0, 64 }, { 7, 31, 128, 0, 64 }, { 7, 31, 124, 0, 64 }, { 7, 30, 124, 0, 64 }, { 7, 30, 121, 0, 64 }, { 7, 29, 121, 0, 64 }, { 7, 29, 117, 0, 64 }, { 7, 29, 117, 0, 64 }, { 7, 29, 114, 0, 64 }, { 7, 28, 114, 0, 64 }, { 7, 28, 111, 0, 64 }, { 7, 27, 111, 0, 64 }, { 7, 27, 108, 0, 64 }, { 7, 26, 108, 0, 64 }, { 7, 26, 104, 0, 64 }, { 7, 25, 104, 0, 64 }, { 7, 25, 102, 0, 64 }, { 7, 25, 102, 0, 64 }, { 7, 25, 99, 0, 64 }, { 7, 24, 99, 0, 64 }, { 7, 24, 96, 0, 64 }, { 7, 23, 96, 0, 64 }, { 7, 23, 93, 0, 64 }, { 7, 23, 93, 0, 64 }, { 7, 23, 90, 0, 64 }, { 7, 22, 90, 0, 64 }, { 7, 22, 88, 0, 64 }, { 7, 21, 88, 0, 64 }, { 7, 21, 85, 0, 64 }, { 7, 21, 85, 0, 64 }, { 7, 21, 83, 0, 64 }, { 7, 20, 83, 0, 64 }, { 7, 20, 81, 0, 64 }, { 7, 20, 81, 0, 64 }, { 7, 20, 78, 0, 64 }, { 7, 19, 78, 0, 64 }, { 7, 19, 76, 0, 64 }, { 7, 19, 76, 0, 64 }, { 7, 19, 74, 0, 64 }, { 7, 18, 74, 0, 64 }, { 7, 18, 72, 0, 64 }, { 7, 18, 72, 0, 64 }, { 7, 18, 70, 0, 64 }, { 7, 17, 70, 0, 64 }, { 7, 17, 68, 0, 64 }, { 7, 17, 68, 0, 64 }, { 7, 17, 66, 0, 64 }, { 7, 16, 66, 0, 64 }, { 7, 16, 64, 0, 64 }, { 7, 16, 64, 0, 64 }, { 7, 16, 62, 0, 64 }, { 7, 15, 62, 0, 64 }, { 7, 15, 60, 0, 64 }, { 7, 15, 60, 0, 64 }, { 7, 15, 59, 0, 64 }, { 7, 14, 59, 0, 64 }, { 7, 14, 57, 0, 64 }, { 7, 14, 57, 0, 64 }, { 7, 14, 55, 0, 64 }, { 7, 14, 55, 0, 64 }, { 7, 14, 54, 0, 64 }, { 7, 13, 54, 0, 64 }, { 7, 13, 52, 0, 64 }, { 7, 13, 52, 0, 64 }, }; static struct bwn_txgain_entry txgain_5ghz_r2[] = { { 255, 255, 255, 0, 152 }, { 255, 255, 255, 0, 147 }, { 255, 255, 255, 0, 143 }, { 255, 255, 255, 0, 139 }, { 255, 255, 255, 0, 135 }, { 255, 255, 255, 0, 131 }, { 255, 255, 255, 0, 128 }, { 255, 255, 255, 0, 124 }, { 255, 255, 255, 0, 121 }, { 255, 255, 255, 0, 117 }, { 255, 255, 255, 0, 114 }, { 255, 255, 255, 0, 111 }, { 255, 255, 255, 0, 107 }, { 255, 255, 255, 0, 104 }, { 255, 255, 255, 0, 101 }, { 255, 255, 255, 0, 99 }, { 255, 255, 255, 0, 96 }, { 255, 255, 255, 0, 93 }, { 255, 255, 255, 0, 90 }, { 255, 255, 255, 0, 88 }, { 255, 255, 255, 0, 85 }, { 255, 255, 255, 0, 83 }, { 255, 255, 255, 0, 81 }, { 255, 255, 255, 0, 78 }, { 255, 255, 255, 0, 76 }, { 255, 255, 255, 0, 74 }, { 255, 255, 255, 0, 72 }, { 255, 255, 255, 0, 70 }, { 255, 255, 255, 0, 68 }, { 255, 255, 255, 0, 66 }, { 255, 255, 255, 0, 64 }, { 255, 255, 248, 0, 64 }, { 255, 255, 241, 0, 64 }, { 255, 255, 234, 
0, 64 }, { 255, 255, 227, 0, 64 }, { 255, 255, 221, 0, 64 }, { 255, 255, 215, 0, 64 }, { 255, 255, 208, 0, 64 }, { 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 }, { 255, 255, 191, 0, 64 }, { 255, 255, 186, 0, 64 }, { 255, 255, 181, 0, 64 }, { 255, 255, 175, 0, 64 }, { 255, 255, 170, 0, 64 }, { 255, 255, 166, 0, 64 }, { 255, 255, 161, 0, 64 }, { 255, 255, 156, 0, 64 }, { 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 }, { 255, 255, 143, 0, 64 }, { 255, 255, 139, 0, 64 }, { 255, 255, 135, 0, 64 }, { 255, 255, 132, 0, 64 }, { 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 }, { 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 }, { 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 }, { 255, 255, 108, 0, 64 }, { 255, 255, 104, 0, 64 }, { 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 }, { 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 }, { 255, 255, 90, 0, 64 }, { 255, 255, 88, 0, 64 }, { 255, 255, 85, 0, 64 }, { 255, 255, 83, 0, 64 }, { 255, 255, 81, 0, 64 }, { 255, 255, 78, 0, 64 }, { 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 }, { 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 }, { 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 255, 62, 0, 64 }, { 255, 248, 62, 0, 64 }, { 255, 248, 60, 0, 64 }, { 255, 241, 60, 0, 64 }, { 255, 241, 59, 0, 64 }, { 255, 234, 59, 0, 64 }, { 255, 234, 57, 0, 64 }, { 255, 227, 57, 0, 64 }, { 255, 227, 55, 0, 64 }, { 255, 221, 55, 0, 64 }, { 255, 221, 54, 0, 64 }, { 255, 215, 54, 0, 64 }, { 255, 215, 52, 0, 64 }, { 255, 208, 52, 0, 64 }, { 255, 208, 51, 0, 64 }, { 255, 203, 51, 0, 64 }, { 255, 203, 49, 0, 64 }, { 255, 197, 49, 0, 64 }, { 255, 197, 48, 0, 64 }, { 255, 191, 48, 0, 64 }, { 255, 191, 47, 0, 64 }, { 255, 186, 47, 0, 64 }, { 255, 186, 45, 0, 64 }, { 255, 181, 45, 0, 64 }, { 255, 181, 44, 0, 64 }, { 255, 175, 44, 0, 64 }, { 255, 175, 43, 0, 64 }, { 255, 170, 43, 0, 64 }, { 255, 170, 42, 0, 64 }, { 255, 166, 42, 0, 64 }, { 255, 166, 40, 0, 64 }, { 255, 161, 40, 0, 64 }, { 255, 161, 39, 0, 64 }, { 255, 156, 39, 0, 64 }, { 255, 156, 38, 0, 64 }, { 255, 152, 38, 0, 64 }, { 255, 152, 37, 0, 64 }, { 255, 148, 37, 0, 64 }, { 255, 148, 36, 0, 64 }, { 255, 143, 36, 0, 64 }, { 255, 143, 35, 0, 64 }, { 255, 139, 35, 0, 64 }, { 255, 139, 34, 0, 64 }, { 255, 135, 34, 0, 64 }, { 255, 135, 33, 0, 64 }, { 255, 132, 33, 0, 64 }, { 255, 132, 32, 0, 64 }, { 255, 128, 32, 0, 64 } }; static struct bwn_txgain_entry txgain_r0[] = { { 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 }, { 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 }, { 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 }, { 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 }, { 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 }, { 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 }, { 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 }, { 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 }, { 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 }, { 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 }, { 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 }, { 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 }, { 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 }, { 
7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 }, { 7, 15, 11, 0, 66 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 } }; static struct bwn_txgain_entry txgain_2ghz_r0[] = { { 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 }, { 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 }, { 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 }, { 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 }, { 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 }, { 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 }, { 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 }, { 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 }, { 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 }, { 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 }, { 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 }, { 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 }, { 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 }, { 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 }, { 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 }, { 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 }, { 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 }, { 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 }, { 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 }, { 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 }, { 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 }, { 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 }, { 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 }, { 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 }, { 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 }, { 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 }, { 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 }, { 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 }, { 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 }, { 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 }, { 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 }, { 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 }, { 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 }, { 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 }, { 4, 10, 6, 0, 59 }, { 4, 10, 5, 0, 72 }, { 4, 10, 5, 0, 70 }, { 4, 10, 5, 0, 68 }, { 4, 10, 5, 0, 66 }, { 4, 10, 5, 0, 64 }, { 4, 10, 5, 0, 62 }, { 4, 10, 5, 0, 60 }, { 4, 10, 5, 0, 59 }, { 4, 9, 5, 0, 70 }, { 4, 9, 5, 0, 68 }, { 4, 9, 5, 0, 66 }, { 4, 9, 5, 0, 64 }, { 4, 9, 5, 0, 63 }, { 4, 9, 5, 0, 61 }, { 4, 9, 5, 0, 59 }, { 4, 9, 4, 0, 71 }, { 4, 9, 4, 0, 69 }, { 4, 9, 4, 0, 67 }, { 4, 9, 4, 0, 65 }, { 4, 9, 
4, 0, 63 }, { 4, 9, 4, 0, 62 }, { 4, 9, 4, 0, 60 }, { 4, 9, 4, 0, 58 }, { 4, 8, 4, 0, 70 }, { 4, 8, 4, 0, 68 }, { 4, 8, 4, 0, 66 }, { 4, 8, 4, 0, 65 }, { 4, 8, 4, 0, 63 }, { 4, 8, 4, 0, 61 }, { 4, 8, 4, 0, 59 }, { 4, 7, 4, 0, 68 }, { 4, 7, 4, 0, 66 }, { 4, 7, 4, 0, 64 }, { 4, 7, 4, 0, 62 }, { 4, 7, 4, 0, 61 }, { 4, 7, 4, 0, 59 }, { 4, 7, 3, 0, 67 }, { 4, 7, 3, 0, 65 }, { 4, 7, 3, 0, 63 }, { 4, 7, 3, 0, 62 }, { 4, 7, 3, 0, 60 }, { 4, 6, 3, 0, 65 }, { 4, 6, 3, 0, 63 }, { 4, 6, 3, 0, 61 }, { 4, 6, 3, 0, 60 }, { 4, 6, 3, 0, 58 }, { 4, 5, 3, 0, 68 }, { 4, 5, 3, 0, 66 }, { 4, 5, 3, 0, 64 }, { 4, 5, 3, 0, 62 }, { 4, 5, 3, 0, 60 }, { 4, 5, 3, 0, 59 }, { 4, 5, 3, 0, 57 }, { 4, 4, 2, 0, 83 }, { 4, 4, 2, 0, 81 }, { 4, 4, 2, 0, 78 }, { 4, 4, 2, 0, 76 }, { 4, 4, 2, 0, 74 }, { 4, 4, 2, 0, 72 } }; static struct bwn_txgain_entry txgain_5ghz_r0[] = { { 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 }, { 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 }, { 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 }, { 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 }, { 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 }, { 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 }, { 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 }, { 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 }, { 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 }, { 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 }, { 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 }, { 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 }, { 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 }, { 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 }, { 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 }, { 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 }, { 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 }, { 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 }, { 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 }, { 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 }, { 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 }, { 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 }, { 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 }, { 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 }, { 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 }, { 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 }, { 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 }, { 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 }, { 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 
}, { 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 } }; static struct bwn_txgain_entry txgain_r1[] = { { 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 }, { 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 }, { 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 }, { 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 }, { 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 }, { 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 }, { 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 }, { 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 }, { 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 }, { 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 }, { 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 }, { 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 }, { 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 }, { 7, 15, 11, 0, 66 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 } }; static struct bwn_txgain_entry txgain_2ghz_r1[] = { { 4, 15, 15, 0, 90 }, { 4, 15, 15, 0, 88 }, { 4, 15, 15, 0, 85 }, { 4, 15, 15, 0, 83 }, { 4, 15, 15, 0, 81 }, { 4, 15, 15, 0, 78 }, { 4, 15, 15, 0, 76 }, { 4, 15, 15, 0, 74 }, { 4, 15, 15, 0, 72 }, { 4, 15, 15, 0, 70 }, { 4, 15, 15, 0, 68 }, { 4, 15, 15, 0, 66 }, { 4, 15, 15, 0, 64 }, { 4, 15, 15, 0, 62 }, { 4, 15, 15, 0, 60 }, { 4, 15, 15, 0, 59 }, { 4, 15, 14, 0, 72 }, { 4, 15, 14, 0, 70 }, { 4, 15, 14, 0, 68 }, { 4, 15, 14, 0, 
66 }, { 4, 15, 14, 0, 64 }, { 4, 15, 14, 0, 62 }, { 4, 15, 14, 0, 60 }, { 4, 15, 14, 0, 59 }, { 4, 15, 13, 0, 72 }, { 4, 15, 13, 0, 70 }, { 4, 15, 13, 0, 68 }, { 4, 15, 13, 0, 66 }, { 4, 15, 13, 0, 64 }, { 4, 15, 13, 0, 62 }, { 4, 15, 13, 0, 60 }, { 4, 15, 13, 0, 59 }, { 4, 15, 12, 0, 72 }, { 4, 15, 12, 0, 70 }, { 4, 15, 12, 0, 68 }, { 4, 15, 12, 0, 66 }, { 4, 15, 12, 0, 64 }, { 4, 15, 12, 0, 62 }, { 4, 15, 12, 0, 60 }, { 4, 15, 12, 0, 59 }, { 4, 15, 11, 0, 72 }, { 4, 15, 11, 0, 70 }, { 4, 15, 11, 0, 68 }, { 4, 15, 11, 0, 66 }, { 4, 15, 11, 0, 64 }, { 4, 15, 11, 0, 62 }, { 4, 15, 11, 0, 60 }, { 4, 15, 11, 0, 59 }, { 4, 15, 10, 0, 72 }, { 4, 15, 10, 0, 70 }, { 4, 15, 10, 0, 68 }, { 4, 15, 10, 0, 66 }, { 4, 15, 10, 0, 64 }, { 4, 15, 10, 0, 62 }, { 4, 15, 10, 0, 60 }, { 4, 15, 10, 0, 59 }, { 4, 15, 9, 0, 72 }, { 4, 15, 9, 0, 70 }, { 4, 15, 9, 0, 68 }, { 4, 15, 9, 0, 66 }, { 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 }, { 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 }, { 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 }, { 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 }, { 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 }, { 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 }, { 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 }, { 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 }, { 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 }, { 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 }, { 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 }, { 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 }, { 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 }, { 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 }, { 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 }, { 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 }, { 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 }, { 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 }, { 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 }, { 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 }, { 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 }, { 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 }, { 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 }, { 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 }, { 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 }, { 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 }, { 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 }, { 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 }, { 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 }, { 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 }, { 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 }, { 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 }, { 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 }, { 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 } }; static struct bwn_txgain_entry txgain_5ghz_r1[] = { { 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 }, { 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 }, { 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 }, { 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 }, { 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 }, { 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 }, { 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 }, { 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 }, { 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 }, { 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 }, { 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 }, { 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 }, { 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 }, { 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 }, { 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 }, { 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 }, { 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 }, { 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 }, { 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 }, { 
7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 }, { 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 }, { 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 }, { 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 }, { 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 }, { 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 }, { 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 }, { 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 }, { 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 }, { 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 }, { 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 }, { 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 } }; if (mac->mac_phy.rev != 0 && mac->mac_phy.rev != 1) { if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r2); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r2); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r2); return; } if (mac->mac_phy.rev == 0) { if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r0); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r0); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r0); return; } if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r1); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r1); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r1); } static void bwn_tab_write(struct bwn_mac *mac, uint32_t typeoffset, uint32_t value) { uint32_t offset, type; type = BWN_TAB_GETTYPE(typeoffset); offset = BWN_TAB_GETOFFSET(typeoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); switch (type) { case BWN_TAB_8BIT: KASSERT(!(value & ~0xff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_16BIT: KASSERT(!(value & ~0xffff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_32BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static int 
bwn_phy_lp_loopback(struct bwn_mac *mac) { struct bwn_phy_lp_iq_est ie; int i, index = -1; uint32_t tmp; memset(&ie, 0, sizeof(ie)); bwn_phy_lp_set_trsw_over(mac, 1, 1); BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 1); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x8); BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x80); for (i = 0; i < 32; i++) { bwn_phy_lp_set_rxgain_idx(mac, i); bwn_phy_lp_ddfs_turnon(mac, 1, 1, 5, 5, 0); if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie))) continue; tmp = (ie.ie_ipwr + ie.ie_qpwr) / 1000; if ((tmp > 4000) && (tmp < 10000)) { index = i; break; } } bwn_phy_lp_ddfs_turnoff(mac); return (index); } static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *mac, uint16_t idx) { bwn_phy_lp_set_rxgain(mac, bwn_tab_read(mac, BWN_TAB_2(12, idx))); } static void bwn_phy_lp_ddfs_turnon(struct bwn_mac *mac, int i_on, int q_on, int incr1, int incr2, int scale_idx) { bwn_phy_lp_ddfs_turnoff(mac); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0xff80); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0x80ff); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0xff80, incr1); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0x80ff, incr2 << 8); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff7, i_on << 3); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xffef, q_on << 4); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xff9f, scale_idx << 5); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffb); BWN_PHY_SET(mac, BWN_PHY_AFE_DDFS, 0x2); BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x20); } static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *mac, uint16_t sample, uint8_t time, struct bwn_phy_lp_iq_est *ie) { int i; BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfff7); BWN_PHY_WRITE(mac, BWN_PHY_IQ_NUM_SMPLS_ADDR, sample); BWN_PHY_SETMASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xff00, time); BWN_PHY_MASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xfeff); BWN_PHY_SET(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0x200); for (i = 0; i < 500; i++) { if (!(BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) break; DELAY(1000); } if ((BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8); return 0; } ie->ie_iqprod = BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_HI_ADDR); ie->ie_iqprod <<= 16; ie->ie_iqprod |= BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_LO_ADDR); ie->ie_ipwr = BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_HI_ADDR); ie->ie_ipwr <<= 16; ie->ie_ipwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_LO_ADDR); ie->ie_qpwr = BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_HI_ADDR); ie->ie_qpwr <<= 16; ie->ie_qpwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_LO_ADDR); BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8); return 1; } static uint32_t bwn_tab_read(struct bwn_mac *mac, uint32_t typeoffset) { uint32_t offset, type, value; type = BWN_TAB_GETTYPE(typeoffset); offset = BWN_TAB_GETOFFSET(typeoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); switch (type) { case BWN_TAB_8BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff; break; case BWN_TAB_16BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); break; case BWN_TAB_32BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATAHI); 
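/* The high word was read above; the shift and low-word read complete the 32-bit table access. */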
value <<= 16; value |= BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); value = 0; } return (value); } static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *mac) { BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffd); BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0xffdf); } static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *mac, uint16_t dac) { uint16_t ctl; ctl = BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0xc7f; ctl |= dac << 7; BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf000, ctl); } static void bwn_phy_lp_set_txgain_pa(struct bwn_mac *mac, uint16_t gain) { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0xe03f, gain << 6); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x80ff, gain << 8); } static void bwn_phy_lp_set_txgain_override(struct bwn_mac *mac) { if (mac->mac_phy.rev < 2) BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100); else { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x4000); } BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x40); } static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *mac) { return BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0x7f; } static uint8_t bwn_nbits(int32_t val) { uint32_t tmp; uint8_t nbits = 0; for (tmp = abs(val); tmp != 0; tmp >>= 1) nbits++; return (nbits); } static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *mac, int offset, int count, struct bwn_txgain_entry *table) { int i; for (i = offset; i < count; i++) bwn_phy_lp_gaintbl_write(mac, i, table[i]); } static void bwn_phy_lp_gaintbl_write(struct bwn_mac *mac, int offset, struct bwn_txgain_entry data) { if (mac->mac_phy.rev >= 2) bwn_phy_lp_gaintbl_write_r2(mac, offset, data); else bwn_phy_lp_gaintbl_write_r01(mac, offset, data); } static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *mac, int offset, struct bwn_txgain_entry te) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__)); tmp = (te.te_pad << 16) | (te.te_pga << 8) | te.te_gm; if (mac->mac_phy.rev >= 3) { tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ? (0x10 << 24) : (0x70 << 24)); } else { tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ? (0x14 << 24) : (0x7f << 24)); } bwn_tab_write(mac, BWN_TAB_4(7, 0xc0 + offset), tmp); bwn_tab_write(mac, BWN_TAB_4(7, 0x140 + offset), te.te_bbmult << 20 | te.te_dac << 28); } static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *mac, int offset, struct bwn_txgain_entry te) { KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); bwn_tab_write(mac, BWN_TAB_4(10, 0xc0 + offset), (te.te_pad << 11) | (te.te_pga << 7) | (te.te_gm << 4) | te.te_dac); bwn_tab_write(mac, BWN_TAB_4(10, 0x140 + offset), te.te_bbmult << 20); } static void bwn_sysctl_node(struct bwn_softc *sc) { device_t dev = sc->sc_dev; struct bwn_mac *mac; struct bwn_stats *stats; /* XXX assume that count of MAC is only 1. 
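* (a card with more than one MAC core would need per-core nodes here)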
*/ if ((mac = sc->sc_curmac) == NULL) return; stats = &mac->mac_stats; /* XXX "linknoise" exports the same counter as "rts" below. */ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "linknoise", CTLFLAG_RW, &stats->rts, 0, "Noise level"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rts", CTLFLAG_RW, &stats->rts, 0, "RTS"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rtsfail", CTLFLAG_RW, &stats->rtsfail, 0, "RTS failed to send"); #ifdef BWN_DEBUG SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); #endif } static device_method_t bwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bwn_probe), DEVMETHOD(device_attach, bwn_attach), DEVMETHOD(device_detach, bwn_detach), DEVMETHOD(device_suspend, bwn_suspend), DEVMETHOD(device_resume, bwn_resume), DEVMETHOD_END }; static driver_t bwn_driver = { "bwn", bwn_methods, sizeof(struct bwn_softc) }; static devclass_t bwn_devclass; DRIVER_MODULE(bwn, siba_bwn, bwn_driver, bwn_devclass, 0, 0); MODULE_DEPEND(bwn, siba_bwn, 1, 1, 1); MODULE_DEPEND(bwn, wlan, 1, 1, 1); /* 802.11 media layer */ MODULE_DEPEND(bwn, firmware, 1, 1, 1); /* firmware support */ MODULE_DEPEND(bwn, wlan_amrr, 1, 1, 1); diff --git a/sys/dev/cy/cy.c b/sys/dev/cy/cy.c index bfb83dfdcf6a..ad4fa145cf7e 100644 --- a/sys/dev/cy/cy.c +++ b/sys/dev/cy/cy.c @@ -1,2242 +1,2242 @@ /*- * cyclades cyclom-y serial driver * Andrew Herbert , 17 August 1993 * * Copyright (c) 1993 Andrew Herbert. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name Andrew Herbert may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL I BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" /* * TODO: * Atomic COR change. * Consoles. */ /* * Temporary compile-time configuration options. */ #define RxFifoThreshold (CD1400_RX_FIFO_SIZE / 2) /* Number of chars in the receiver FIFO before an * interrupt is generated. Should depend on * line speed. Needs to be about 6 on a 486DX33 * for 4 active ports at 115200 bps. Why doesn't * 10 work? */ #define PollMode /* Use polling-based irq service routine, not the * hardware svcack lines. Must be defined for * Cyclom-16Y boards.
Less efficient for Cyclom-8Ys, * and stops 4 * 115200 bps from working. */ #undef Smarts /* Enable slightly more CD1400 intelligence. Mainly * the output CR/LF processing, plus we can avoid a * few checks usually done in ttyinput(). * * XXX not fully implemented, and not particularly * worthwhile. */ #undef CyDebug /* Include debugging code (not very expensive). */ /* These will go away. */ #undef SOFT_CTS_OFLOW #define SOFT_HOTCHAR #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define NCY 10 /* KLUDGE */ #define NPORTS (NCY * CY_MAX_PORTS) #define CY_MAX_PORTS (CD1400_NO_OF_CHANNELS * CY_MAX_CD1400s) /* We encode the cyclom unit number (cyu) in spare bits in the IVR's. */ #define CD1400_xIVR_CHAN_SHIFT 3 #define CD1400_xIVR_CHAN 0x1F /* * ETC states. com->etc may also contain a hardware ETC command value, * meaning that execution of that command is pending. */ #define ETC_NONE 0 /* we depend on bzero() setting this */ #define ETC_BREAK_STARTING 1 #define ETC_BREAK_STARTED 2 #define ETC_BREAK_ENDING 3 #define ETC_BREAK_ENDED 4 #define LOTS_OF_EVENTS 64 /* helps separate urgent events from input */ /* * com state bits. * (CS_BUSY | CS_TTGO) and (CS_BUSY | CS_TTGO | CS_ODEVREADY) must be higher * than the other bits so that they can be tested as a group without masking * off the low bits. * * The following com and tty flags correspond closely: * CS_BUSY = TS_BUSY (maintained by cystart(), cypoll() and * comstop()) * CS_TTGO = ~TS_TTSTOP (maintained by cyparam() and cystart()) * CS_CTS_OFLOW = CCTS_OFLOW (maintained by cyparam()) * CS_RTS_IFLOW = CRTS_IFLOW (maintained by cyparam()) * TS_FLUSH is not used. * XXX I think TIOCSETA doesn't clear TS_TTSTOP when it clears IXON. * XXX CS_*FLOW should be CF_*FLOW in com->flags (control flags not state). */ #define CS_BUSY 0x80 /* output in progress */ #define CS_TTGO 0x40 /* output not stopped by XOFF */ #define CS_ODEVREADY 0x20 /* external device h/w ready (CTS) */ #define CS_CHECKMSR 1 /* check of MSR scheduled */ #define CS_CTS_OFLOW 2 /* use CTS output flow control */ #define CS_ODONE 4 /* output completed */ #define CS_RTS_IFLOW 8 /* use RTS input flow control */ #define CSE_ODONE 1 /* output transmitted */ static char const * const error_desc[] = { #define CE_OVERRUN 0 "silo overflow", #define CE_INTERRUPT_BUF_OVERFLOW 1 "interrupt-level buffer overflow", #define CE_TTY_BUF_OVERFLOW 2 "tty-level buffer overflow", }; #define CE_NTYPES 3 #define CE_RECORD(com, errnum) (++(com)->delta_error_counts[errnum]) #ifdef SMP #define COM_LOCK() mtx_lock_spin(&cy_lock) #define COM_UNLOCK() mtx_unlock_spin(&cy_lock) #else #define COM_LOCK() #define COM_UNLOCK() #endif /* types. 
XXX - should be elsewhere */ typedef u_char bool_t; /* boolean */ /* queue of linear buffers */ struct lbq { u_char *l_head; /* next char to process */ u_char *l_tail; /* one past the last char to process */ struct lbq *l_next; /* next in queue */ bool_t l_queued; /* nonzero if queued */ }; /* com device structure */ struct com_s { u_char state; /* miscellaneous flag bits */ u_char etc; /* pending Embedded Transmit Command */ u_char extra_state; /* more flag bits, separate for order trick */ u_char gfrcr_image; /* copy of value read from GFRCR */ u_char mcr_dtr; /* MCR bit that is wired to DTR */ u_char mcr_image; /* copy of value written to MCR */ u_char mcr_rts; /* MCR bit that is wired to RTS */ int unit; /* unit number */ /* * The high level of the driver never reads status registers directly * because there would be too many side effects to handle conveniently. * Instead, it reads copies of the registers stored here by the * interrupt handler. */ u_char last_modem_status; /* last MSR read by intr handler */ u_char prev_modem_status; /* last MSR handled by high level */ u_char *ibuf; /* start of input buffer */ u_char *ibufend; /* end of input buffer */ u_char *ibufold; /* old input buffer, to be freed */ u_char *ihighwater; /* threshold in input buffer */ u_char *iptr; /* next free spot in input buffer */ int ibufsize; /* size of ibuf (not including error bytes) */ int ierroff; /* offset of error bytes in ibuf */ struct lbq obufq; /* head of queue of output buffers */ struct lbq obufs[2]; /* output buffers */ int cy_align; /* index for register alignment */ cy_addr cy_iobase; /* base address of this port's cyclom */ cy_addr iobase; /* base address of this port's cd1400 */ int mcr_rts_reg; /* cd1400 reg number of reg holding mcr_rts */ struct tty *tp; /* cross reference */ u_long bytes_in; /* statistics */ u_long bytes_out; u_int delta_error_counts[CE_NTYPES]; u_long error_counts[CE_NTYPES]; u_int recv_exception; /* exception chars received */ u_int mdm; /* modem signal changes */ #ifdef CyDebug u_int start_count; /* no. of calls to cystart() */ u_int start_real; /* no. of calls that did something */ #endif u_char car; /* CD1400 CAR shadow (if first unit in cd) */ u_char channel_control;/* CD1400 CCR control command shadow */ u_char cor[3]; /* CD1400 COR1-3 shadows */ u_char intr_enable; /* CD1400 SRER shadow */ /* * Data area for output buffers. Someday we should build the output * buffer queue without copying data.
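* For now cystart() copies from the tty output queue into these two * fixed buffers and links them onto obufq alternately.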
*/ u_char obuf1[256]; u_char obuf2[256]; }; devclass_t cy_devclass; char cy_driver_name[] = "cy"; static void cd1400_channel_cmd(struct com_s *com, int cmd); static void cd1400_channel_cmd_wait(struct com_s *com); static void cd_etc(struct com_s *com, int etc); static int cd_getreg(struct com_s *com, int reg); static void cd_setreg(struct com_s *com, int reg, int val); static void cyinput(struct com_s *com); static int cyparam(struct tty *tp, struct termios *t); static void cypoll(void *arg); static void cysettimeout(void); static int cysetwater(struct com_s *com, speed_t speed); static int cyspeed(speed_t speed, u_long cy_clock, int *prescaler_io); static void cystart(struct tty *tp); static void comstop(struct tty *tp, int rw); static timeout_t cywakeup; static void disc_optim(struct tty *tp, struct termios *t, struct com_s *com); static t_break_t cybreak; static t_modem_t cymodem; static t_open_t cyopen; static t_close_t cyclose; #ifdef CyDebug void cystatus(int unit); #endif static struct mtx cy_lock; static int cy_inited; /* table and macro for fast conversion from a unit number to its com struct */ static struct com_s *p_cy_addr[NPORTS]; #define cy_addr(unit) (p_cy_addr[unit]) static u_int cy_events; /* input chars + weighted output completions */ static void *cy_fast_ih; static void *cy_slow_ih; static int cy_timeout; static int cy_timeouts_until_log; static struct callout_handle cy_timeout_handle = CALLOUT_HANDLE_INITIALIZER(&cy_timeout_handle); #ifdef CyDebug static u_int cd_inbs; static u_int cy_inbs; static u_int cd_outbs; static u_int cy_outbs; static u_int cy_svrr_probes; static u_int cy_timeouts; #endif static int cy_chip_offset[] = { 0x0000, 0x0400, 0x0800, 0x0c00, 0x0200, 0x0600, 0x0a00, 0x0e00, }; static int cy_nr_cd1400s[NCY]; static int cy_total_devices; #undef RxFifoThreshold static int volatile RxFifoThreshold = (CD1400_RX_FIFO_SIZE / 2); int cy_units(cy_addr cy_iobase, int cy_align) { int cyu; u_char firmware_version; int i; cy_addr iobase; for (cyu = 0; cyu < CY_MAX_CD1400s; ++cyu) { iobase = cy_iobase + (cy_chip_offset[cyu] << cy_align); /* wait for chip to become ready for new command */ for (i = 0; i < 10; i++) { DELAY(50); if (!cd_inb(iobase, CD1400_CCR, cy_align)) break; } /* clear the GFRCR register */ cd_outb(iobase, CD1400_GFRCR, cy_align, 0); /* issue a reset command */ cd_outb(iobase, CD1400_CCR, cy_align, CD1400_CCR_CMDRESET | CD1400_CCR_FULLRESET); /* XXX bogus initialization to avoid a gcc bug/warning. */ firmware_version = 0; /* wait for the CD1400 to initialize itself */ for (i = 0; i < 200; i++) { DELAY(50); /* retrieve firmware version */ firmware_version = cd_inb(iobase, CD1400_GFRCR, cy_align); if ((firmware_version & 0xf0) == 0x40) break; } /* * Anything in the 0x40-0x4F range is fine. * If one CD1400 is bad then we don't support higher * numbered good ones on this board. 
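* The probe loop below therefore stops at the first bad chip, and * cy_units() returns the count of consecutive good CD1400s from chip 0.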
*/ if ((firmware_version & 0xf0) != 0x40) break; } return (cyu); } void * cyattach_common(cy_addr cy_iobase, int cy_align) { int adapter; int cyu; u_char firmware_version; cy_addr iobase; int ncyu; int unit; struct tty *tp; while (cy_inited != 2) if (atomic_cmpset_int(&cy_inited, 0, 1)) { mtx_init(&cy_lock, cy_driver_name, NULL, MTX_SPIN); atomic_store_rel_int(&cy_inited, 2); } adapter = cy_total_devices; if ((u_int)adapter >= NCY) { printf( "cy%d: can't attach adapter: insufficient cy devices configured\n", adapter); return (NULL); } ncyu = cy_units(cy_iobase, cy_align); if (ncyu == 0) return (NULL); cy_nr_cd1400s[adapter] = ncyu; cy_total_devices++; unit = adapter * CY_MAX_PORTS; for (cyu = 0; cyu < ncyu; ++cyu) { int cdu; iobase = (cy_addr) (cy_iobase + (cy_chip_offset[cyu] << cy_align)); firmware_version = cd_inb(iobase, CD1400_GFRCR, cy_align); /* Set up a receive timeout period of 1+ ms. */ cd_outb(iobase, CD1400_PPR, cy_align, howmany(CY_CLOCK(firmware_version) / CD1400_PPR_PRESCALER, 1000)); for (cdu = 0; cdu < CD1400_NO_OF_CHANNELS; ++cdu, ++unit) { struct com_s *com; int s; com = malloc(sizeof *com, M_DEVBUF, M_NOWAIT | M_ZERO); if (com == NULL) break; com->unit = unit; com->gfrcr_image = firmware_version; if (CY_RTS_DTR_SWAPPED(firmware_version)) { com->mcr_dtr = CD1400_MSVR1_RTS; com->mcr_rts = CD1400_MSVR2_DTR; com->mcr_rts_reg = CD1400_MSVR2; } else { com->mcr_dtr = CD1400_MSVR2_DTR; com->mcr_rts = CD1400_MSVR1_RTS; com->mcr_rts_reg = CD1400_MSVR1; } com->obufs[0].l_head = com->obuf1; com->obufs[1].l_head = com->obuf2; com->cy_align = cy_align; com->cy_iobase = cy_iobase; com->iobase = iobase; com->car = ~CD1400_CAR_CHAN; tp = com->tp = ttyalloc(); tp->t_open = cyopen; tp->t_close = cyclose; tp->t_oproc = cystart; tp->t_stop = comstop; tp->t_param = cyparam; tp->t_break = cybreak; tp->t_modem = cymodem; tp->t_sc = com; if (cysetwater(com, tp->t_init_in.c_ispeed) != 0) { free(com, M_DEVBUF); return (NULL); } s = spltty(); cy_addr(unit) = com; splx(s); if (cy_fast_ih == NULL) { swi_add(&tty_intr_event, "cy", cypoll, NULL, SWI_TTY, 0, &cy_fast_ih); swi_add(&clk_intr_event, "cy", cypoll, NULL, SWI_CLOCK, 0, &cy_slow_ih); } ttycreate(tp, TS_CALLOUT, "c%r%r", adapter, unit % CY_MAX_PORTS); } } /* ensure an edge for the next interrupt */ cy_outb(cy_iobase, CY_CLEAR_INTR, cy_align, 0); return (cy_addr(adapter * CY_MAX_PORTS)); } static int cyopen(struct tty *tp, struct cdev *dev) { struct com_s *com; int s; com = tp->t_sc; s = spltty(); /* * We jump to this label after all non-interrupted sleeps to pick * up any changes of the device state. */ /* Encode per-board unit in LIVR for access in intr routines. */ cd_setreg(com, CD1400_LIVR, (com->unit & CD1400_xIVR_CHAN) << CD1400_xIVR_CHAN_SHIFT); /* * Flush fifos. This requires a full channel reset which * also disables the transmitter and receiver. Recover * from this.
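* Reissuing the saved channel_control command restores the previous * transmitter/receiver state.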
*/ cd1400_channel_cmd(com, CD1400_CCR_CMDRESET | CD1400_CCR_CHANRESET); cd1400_channel_cmd(com, com->channel_control); critical_enter(); COM_LOCK(); com->prev_modem_status = com->last_modem_status = cd_getreg(com, CD1400_MSVR2); cd_setreg(com, CD1400_SRER, com->intr_enable = CD1400_SRER_MDMCH | CD1400_SRER_RXDATA); COM_UNLOCK(); critical_exit(); cysettimeout(); return (0); } static void cyclose(struct tty *tp) { cy_addr iobase; struct com_s *com; int s; int unit; com = tp->t_sc; unit = com->unit; iobase = com->iobase; s = spltty(); /* XXX */ critical_enter(); COM_LOCK(); com->etc = ETC_NONE; cd_setreg(com, CD1400_COR2, com->cor[1] &= ~CD1400_COR2_ETC); COM_UNLOCK(); critical_exit(); cd_etc(com, CD1400_ETC_STOPBREAK); cd1400_channel_cmd(com, CD1400_CCR_CMDRESET | CD1400_CCR_FTF); { critical_enter(); COM_LOCK(); cd_setreg(com, CD1400_SRER, com->intr_enable = 0); COM_UNLOCK(); critical_exit(); tp = com->tp; if ((tp->t_cflag & HUPCL) /* * XXX we will miss any carrier drop between here and the * next open. Perhaps we should watch DCD even when the * port is closed; it is not sufficient to check it at * the next open because it might go up and down while * we're not watching. */ || (!tp->t_actout && !(com->prev_modem_status & CD1400_MSVR2_CD) && !(tp->t_init_in.c_cflag & CLOCAL)) || !(tp->t_state & TS_ISOPEN)) { (void)cymodem(tp, 0, SER_DTR); /* Disable receiver (leave transmitter enabled). */ com->channel_control = CD1400_CCR_CMDCHANCTL | CD1400_CCR_XMTEN | CD1400_CCR_RCVDIS; cd1400_channel_cmd(com, com->channel_control); ttydtrwaitstart(tp); } } tp->t_actout = FALSE; wakeup(&tp->t_actout); wakeup(TSA_CARR_ON(tp)); /* restart any wopeners */ splx(s); } /* * This function: * a) needs to be called with COM_LOCK() held, and * b) needs to return with COM_LOCK() held. */ static void cyinput(struct com_s *com) { u_char *buf; int incc; u_char line_status; int recv_data; struct tty *tp; buf = com->ibuf; tp = com->tp; if (!(tp->t_state & TS_ISOPEN)) { cy_events -= (com->iptr - com->ibuf); com->iptr = com->ibuf; return; } if (tp->t_state & TS_CAN_BYPASS_L_RINT) { /* * Avoid the grotesquely inefficient lineswitch routine * (ttyinput) in "raw" mode. It usually takes about 450 * instructions (that's without canonical processing or echo!). * slinput is reasonably fast (usually 40 instructions plus * call overhead). */ do { /* * This may look odd, but it is using save-and-enable * semantics instead of the save-and-disable semantics * that are used everywhere else. */ COM_UNLOCK(); critical_exit(); incc = com->iptr - buf; if (tp->t_rawq.c_cc + incc > tp->t_ihiwat && (com->state & CS_RTS_IFLOW || tp->t_iflag & IXOFF) && !(tp->t_state & TS_TBLOCK)) ttyblock(tp); com->delta_error_counts[CE_TTY_BUF_OVERFLOW] += b_to_q((char *)buf, incc, &tp->t_rawq); buf += incc; tk_nin += incc; tk_rawcc += incc; tp->t_rawcc += incc; ttwakeup(tp); if (tp->t_state & TS_TTSTOP && (tp->t_iflag & IXANY || tp->t_cc[VSTART] == tp->t_cc[VSTOP])) { tp->t_state &= ~TS_TTSTOP; tp->t_lflag &= ~FLUSHO; cystart(tp); } critical_enter(); COM_LOCK(); } while (buf < com->iptr); } else { do { /* * This may look odd, but it is using save-and-enable * semantics instead of the save-and-disable semantics * that are used everywhere else. 
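* The spin lock is dropped while each character is handed to the line * discipline and re-taken before the loop condition is tested.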
*/ COM_UNLOCK(); critical_exit(); line_status = buf[com->ierroff]; recv_data = *buf++; if (line_status & (CD1400_RDSR_BREAK | CD1400_RDSR_FE | CD1400_RDSR_OE | CD1400_RDSR_PE)) { if (line_status & CD1400_RDSR_BREAK) recv_data |= TTY_BI; if (line_status & CD1400_RDSR_FE) recv_data |= TTY_FE; if (line_status & CD1400_RDSR_OE) recv_data |= TTY_OE; if (line_status & CD1400_RDSR_PE) recv_data |= TTY_PE; } ttyld_rint(tp, recv_data); critical_enter(); COM_LOCK(); } while (buf < com->iptr); } cy_events -= (com->iptr - com->ibuf); com->iptr = com->ibuf; /* * There is now room for another low-level buffer full of input, * so enable RTS if it is now disabled and there is room in the * high-level buffer. */ if ((com->state & CS_RTS_IFLOW) && !(com->mcr_image & com->mcr_rts) && !(tp->t_state & TS_TBLOCK)) cd_setreg(com, com->mcr_rts_reg, com->mcr_image |= com->mcr_rts); } int cyintr(void *vcom) { struct com_s *basecom; int baseu; int cy_align; cy_addr cy_iobase; int cyu; cy_addr iobase; u_char status; int unit; COM_LOCK(); /* XXX could this be placed down lower in the loop? */ basecom = (struct com_s *)vcom; baseu = basecom->unit; cy_align = basecom->cy_align; cy_iobase = basecom->cy_iobase; unit = baseu / CY_MAX_PORTS; /* check each CD1400 in turn */ for (cyu = 0; cyu < cy_nr_cd1400s[unit]; ++cyu) { iobase = (cy_addr) (cy_iobase + (cy_chip_offset[cyu] << cy_align)); /* poll to see if it has any work */ status = cd_inb(iobase, CD1400_SVRR, cy_align); if (status == 0) continue; // XXX - FILTER_STRAY? #ifdef CyDebug ++cy_svrr_probes; #endif /* service requests as appropriate, giving priority to RX */ if (status & CD1400_SVRR_RXRDY) { struct com_s *com; u_int count; u_char *ioptr; u_char line_status; u_char recv_data; u_char serv_type; #ifdef PollMode u_char save_rir; #endif #ifdef PollMode save_rir = cd_inb(iobase, CD1400_RIR, cy_align); /* enter rx service */ cd_outb(iobase, CD1400_CAR, cy_align, save_rir); cy_addr(baseu + cyu * CD1400_NO_OF_CHANNELS)->car = save_rir & CD1400_CAR_CHAN; serv_type = cd_inb(iobase, CD1400_RIVR, cy_align); com = cy_addr(baseu + ((serv_type >> CD1400_xIVR_CHAN_SHIFT) & CD1400_xIVR_CHAN)); #else /* ack receive service */ serv_type = cy_inb(iobase, CY8_SVCACKR, cy_align); com = cy_addr(baseu + ((serv_type >> CD1400_xIVR_CHAN_SHIFT) & CD1400_xIVR_CHAN)); #endif if (serv_type & CD1400_RIVR_EXCEPTION) { ++com->recv_exception; line_status = cd_inb(iobase, CD1400_RDSR, cy_align); /* break/unattached error bits or real input? */ recv_data = cd_inb(iobase, CD1400_RDSR, cy_align); #ifndef SOFT_HOTCHAR if (line_status & CD1400_RDSR_SPECIAL && com->tp->t_hotchar != 0) swi_sched(cy_fast_ih, 0); #endif #if 1 /* XXX "intelligent" PFO error handling would break O error handling */ if (line_status & (CD1400_RDSR_PE|CD1400_RDSR_FE|CD1400_RDSR_BREAK)) { /* Don't store PE if IGNPAR and BI if IGNBRK, this hack allows the "raw" tty optimization to work even if IGN* is set.
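* The tests below mirror what ttyinput() would do: discard the character * for IGNPAR/IGNBRK, and substitute a NUL for errored characters when * the bypass path is active.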
*/ if ( com->tp == NULL || !(com->tp->t_state & TS_ISOPEN) || ((line_status & (CD1400_RDSR_PE|CD1400_RDSR_FE)) && (com->tp->t_iflag & IGNPAR)) || ((line_status & CD1400_RDSR_BREAK) && (com->tp->t_iflag & IGNBRK))) goto cont; if ( (line_status & (CD1400_RDSR_PE|CD1400_RDSR_FE)) && (com->tp->t_state & TS_CAN_BYPASS_L_RINT) && ((line_status & CD1400_RDSR_FE) || ((line_status & CD1400_RDSR_PE) && (com->tp->t_iflag & INPCK)))) recv_data = 0; } #endif /* 1 */ ++com->bytes_in; #ifdef SOFT_HOTCHAR if (com->tp->t_hotchar != 0 && recv_data == com->tp->t_hotchar) swi_sched(cy_fast_ih, 0); #endif ioptr = com->iptr; if (ioptr >= com->ibufend) CE_RECORD(com, CE_INTERRUPT_BUF_OVERFLOW); else { if (com->tp != NULL && com->tp->t_do_timestamp) microtime(&com->tp->t_timestamp); ++cy_events; ioptr[0] = recv_data; ioptr[com->ierroff] = line_status; com->iptr = ++ioptr; if (ioptr == com->ihighwater && com->state & CS_RTS_IFLOW) cd_outb(iobase, com->mcr_rts_reg, cy_align, com->mcr_image &= ~com->mcr_rts); if (line_status & CD1400_RDSR_OE) CE_RECORD(com, CE_OVERRUN); } goto cont; } else { int ifree; count = cd_inb(iobase, CD1400_RDCR, cy_align); if (!count) goto cont; com->bytes_in += count; ioptr = com->iptr; ifree = com->ibufend - ioptr; if (count > ifree) { count -= ifree; cy_events += ifree; if (ifree != 0) { if (com->tp != NULL && com->tp->t_do_timestamp) microtime(&com->tp->t_timestamp); do { recv_data = cd_inb(iobase, CD1400_RDSR, cy_align); #ifdef SOFT_HOTCHAR if (com->tp->t_hotchar != 0 && recv_data == com->tp->t_hotchar) swi_sched(cy_fast_ih, 0); #endif ioptr[0] = recv_data; ioptr[com->ierroff] = 0; ++ioptr; } while (--ifree != 0); } com->delta_error_counts [CE_INTERRUPT_BUF_OVERFLOW] += count; do { recv_data = cd_inb(iobase, CD1400_RDSR, cy_align); #ifdef SOFT_HOTCHAR if (com->tp->t_hotchar != 0 && recv_data == com->tp->t_hotchar) swi_sched(cy_fast_ih, 0); #endif } while (--count != 0); } else { if (com->tp != NULL && com->tp->t_do_timestamp) microtime(&com->tp->t_timestamp); if (ioptr <= com->ihighwater && ioptr + count > com->ihighwater && com->state & CS_RTS_IFLOW) cd_outb(iobase, com->mcr_rts_reg, cy_align, com->mcr_image &= ~com->mcr_rts); cy_events += count; do { recv_data = cd_inb(iobase, CD1400_RDSR, cy_align); #ifdef SOFT_HOTCHAR if (com->tp->t_hotchar != 0 && recv_data == com->tp->t_hotchar) swi_sched(cy_fast_ih, 0); #endif ioptr[0] = recv_data; ioptr[com->ierroff] = 0; ++ioptr; } while (--count != 0); } com->iptr = ioptr; } cont: /* terminate service context */ #ifdef PollMode cd_outb(iobase, CD1400_RIR, cy_align, save_rir & ~(CD1400_RIR_RDIREQ | CD1400_RIR_RBUSY)); #else cd_outb(iobase, CD1400_EOSRR, cy_align, 0); #endif } if (status & CD1400_SVRR_MDMCH) { struct com_s *com; u_char modem_status; #ifdef PollMode u_char save_mir; #else u_char vector; #endif #ifdef PollMode save_mir = cd_inb(iobase, CD1400_MIR, cy_align); /* enter modem service */ cd_outb(iobase, CD1400_CAR, cy_align, save_mir); cy_addr(baseu + cyu * CD1400_NO_OF_CHANNELS)->car = save_mir & CD1400_CAR_CHAN; com = cy_addr(baseu + cyu * CD1400_NO_OF_CHANNELS + (save_mir & CD1400_MIR_CHAN)); #else /* ack modem service */ vector = cy_inb(iobase, CY8_SVCACKM, cy_align); com = cy_addr(baseu + ((vector >> CD1400_xIVR_CHAN_SHIFT) & CD1400_xIVR_CHAN)); #endif ++com->mdm; modem_status = cd_inb(iobase, CD1400_MSVR2, cy_align); if (modem_status != com->last_modem_status) { /* * Schedule high level to handle DCD changes. Note * that we don't use the delta bits anywhere. 
Some * UARTs mess them up, and it's easy to remember the * previous bits and calculate the delta. */ com->last_modem_status = modem_status; if (!(com->state & CS_CHECKMSR)) { cy_events += LOTS_OF_EVENTS; com->state |= CS_CHECKMSR; swi_sched(cy_fast_ih, 0); } #ifdef SOFT_CTS_OFLOW /* handle CTS change immediately for crisp flow ctl */ if (com->state & CS_CTS_OFLOW) { if (modem_status & CD1400_MSVR2_CTS) { com->state |= CS_ODEVREADY; if (com->state >= (CS_BUSY | CS_TTGO | CS_ODEVREADY) && !(com->intr_enable & CD1400_SRER_TXRDY)) cd_outb(iobase, CD1400_SRER, cy_align, com->intr_enable = com->intr_enable & ~CD1400_SRER_TXMPTY | CD1400_SRER_TXRDY); } else { com->state &= ~CS_ODEVREADY; if (com->intr_enable & CD1400_SRER_TXRDY) cd_outb(iobase, CD1400_SRER, cy_align, com->intr_enable = com->intr_enable & ~CD1400_SRER_TXRDY | CD1400_SRER_TXMPTY); } } #endif } /* terminate service context */ #ifdef PollMode cd_outb(iobase, CD1400_MIR, cy_align, save_mir & ~(CD1400_MIR_RDIREQ | CD1400_MIR_RBUSY)); #else cd_outb(iobase, CD1400_EOSRR, cy_align, 0); #endif } if (status & CD1400_SVRR_TXRDY) { struct com_s *com; #ifdef PollMode u_char save_tir; #else u_char vector; #endif #ifdef PollMode save_tir = cd_inb(iobase, CD1400_TIR, cy_align); /* enter tx service */ cd_outb(iobase, CD1400_CAR, cy_align, save_tir); cy_addr(baseu + cyu * CD1400_NO_OF_CHANNELS)->car = save_tir & CD1400_CAR_CHAN; com = cy_addr(baseu + cyu * CD1400_NO_OF_CHANNELS + (save_tir & CD1400_TIR_CHAN)); #else /* ack transmit service */ vector = cy_inb(iobase, CY8_SVCACKT, cy_align); com = cy_addr(baseu + ((vector >> CD1400_xIVR_CHAN_SHIFT) & CD1400_xIVR_CHAN)); #endif if (com->etc != ETC_NONE) { if (com->intr_enable & CD1400_SRER_TXRDY) { /* * Here due to sloppy SRER_TXRDY * enabling. Ignore. Come back when * tx is empty. */ cd_outb(iobase, CD1400_SRER, cy_align, com->intr_enable = (com->intr_enable & ~CD1400_SRER_TXRDY) | CD1400_SRER_TXMPTY); goto terminate_tx_service; } switch (com->etc) { case CD1400_ETC_SENDBREAK: case CD1400_ETC_STOPBREAK: /* * Start the command. Come back on * next tx empty interrupt, hopefully * after command has been executed. */ cd_outb(iobase, CD1400_COR2, cy_align, com->cor[1] |= CD1400_COR2_ETC); cd_outb(iobase, CD1400_TDR, cy_align, CD1400_ETC_CMD); cd_outb(iobase, CD1400_TDR, cy_align, com->etc); if (com->etc == CD1400_ETC_SENDBREAK) com->etc = ETC_BREAK_STARTING; else com->etc = ETC_BREAK_ENDING; goto terminate_tx_service; case ETC_BREAK_STARTING: /* * BREAK is now on. Continue with * SRER_TXMPTY processing, hopefully * don't come back. */ com->etc = ETC_BREAK_STARTED; break; case ETC_BREAK_STARTED: /* * Came back due to sloppy SRER_TXMPTY * enabling. Hope again. */ break; case ETC_BREAK_ENDING: /* * BREAK is now off. Continue with * SRER_TXMPTY processing and don't * come back. The SWI handler will * restart tx interrupts if necessary. */ cd_outb(iobase, CD1400_COR2, cy_align, com->cor[1] &= ~CD1400_COR2_ETC); com->etc = ETC_BREAK_ENDED; if (!(com->state & CS_ODONE)) { cy_events += LOTS_OF_EVENTS; com->state |= CS_ODONE; swi_sched(cy_fast_ih, 0); } break; case ETC_BREAK_ENDED: /* * Shouldn't get here. Hope again. 
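* (cypoll() resets ETC_BREAK_ENDED back to ETC_NONE once the SWI runs)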
*/ break; } } if (com->intr_enable & CD1400_SRER_TXMPTY) { if (!(com->extra_state & CSE_ODONE)) { cy_events += LOTS_OF_EVENTS; com->extra_state |= CSE_ODONE; swi_sched(cy_fast_ih, 0); } cd_outb(iobase, CD1400_SRER, cy_align, com->intr_enable &= ~CD1400_SRER_TXMPTY); goto terminate_tx_service; } if (com->state >= (CS_BUSY | CS_TTGO | CS_ODEVREADY)) { u_char *ioptr; u_int ocount; ioptr = com->obufq.l_head; ocount = com->obufq.l_tail - ioptr; if (ocount > CD1400_TX_FIFO_SIZE) ocount = CD1400_TX_FIFO_SIZE; com->bytes_out += ocount; do cd_outb(iobase, CD1400_TDR, cy_align, *ioptr++); while (--ocount != 0); com->obufq.l_head = ioptr; if (ioptr >= com->obufq.l_tail) { struct lbq *qp; qp = com->obufq.l_next; qp->l_queued = FALSE; qp = qp->l_next; if (qp != NULL) { com->obufq.l_head = qp->l_head; com->obufq.l_tail = qp->l_tail; com->obufq.l_next = qp; } else { /* output just completed */ com->state &= ~CS_BUSY; /* * The setting of CSE_ODONE may be * stale here. We currently only * use it when CS_BUSY is set, and * fixing it when we clear CS_BUSY * is easiest. */ if (com->extra_state & CSE_ODONE) { cy_events -= LOTS_OF_EVENTS; com->extra_state &= ~CSE_ODONE; } cd_outb(iobase, CD1400_SRER, cy_align, com->intr_enable = (com->intr_enable & ~CD1400_SRER_TXRDY) | CD1400_SRER_TXMPTY); } if (!(com->state & CS_ODONE)) { cy_events += LOTS_OF_EVENTS; com->state |= CS_ODONE; /* handle at high level ASAP */ swi_sched(cy_fast_ih, 0); } } } /* terminate service context */ terminate_tx_service: #ifdef PollMode cd_outb(iobase, CD1400_TIR, cy_align, save_tir & ~(CD1400_TIR_RDIREQ | CD1400_TIR_RBUSY)); #else cd_outb(iobase, CD1400_EOSRR, cy_align, 0); #endif } } /* ensure an edge for the next interrupt */ cy_outb(cy_iobase, CY_CLEAR_INTR, cy_align, 0); swi_sched(cy_slow_ih, SWI_DELAY); COM_UNLOCK(); return (FILTER_HANDLED); } static void cybreak(struct tty *tp, int sig) { struct com_s *com; com = tp->t_sc; if (sig) cd_etc(com, CD1400_ETC_SENDBREAK); else cd_etc(com, CD1400_ETC_STOPBREAK); } static void cypoll(void *arg) { int unit; #ifdef CyDebug ++cy_timeouts; #endif if (cy_events == 0) return; repeat: for (unit = 0; unit < NPORTS; ++unit) { struct com_s *com; int incc; struct tty *tp; com = cy_addr(unit); if (com == NULL) continue; tp = com->tp; if (tp == NULL) { /* * XXX forget any events related to closed devices * (actually never opened devices) so that we don't * loop. 
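* Discarding the queued input and any CS_CHECKMSR credit keeps cy_events * consistent so that this scan can terminate.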
*/ critical_enter(); COM_LOCK(); incc = com->iptr - com->ibuf; com->iptr = com->ibuf; if (com->state & CS_CHECKMSR) { incc += LOTS_OF_EVENTS; com->state &= ~CS_CHECKMSR; } cy_events -= incc; COM_UNLOCK(); critical_exit(); if (incc != 0) log(LOG_DEBUG, "cy%d: %d events for device with no tp\n", unit, incc); continue; } if (com->iptr != com->ibuf) { critical_enter(); COM_LOCK(); cyinput(com); COM_UNLOCK(); critical_exit(); } if (com->state & CS_CHECKMSR) { u_char delta_modem_status; critical_enter(); COM_LOCK(); cyinput(com); delta_modem_status = com->last_modem_status ^ com->prev_modem_status; com->prev_modem_status = com->last_modem_status; cy_events -= LOTS_OF_EVENTS; com->state &= ~CS_CHECKMSR; COM_UNLOCK(); critical_exit(); if (delta_modem_status & CD1400_MSVR2_CD) ttyld_modem(tp, com->prev_modem_status & CD1400_MSVR2_CD); } if (com->extra_state & CSE_ODONE) { critical_enter(); COM_LOCK(); cy_events -= LOTS_OF_EVENTS; com->extra_state &= ~CSE_ODONE; COM_UNLOCK(); critical_exit(); if (!(com->state & CS_BUSY)) { tp->t_state &= ~TS_BUSY; ttwwakeup(com->tp); } if (com->etc != ETC_NONE) { if (com->etc == ETC_BREAK_ENDED) com->etc = ETC_NONE; wakeup(&com->etc); } } if (com->state & CS_ODONE) { critical_enter(); COM_LOCK(); cy_events -= LOTS_OF_EVENTS; com->state &= ~CS_ODONE; COM_UNLOCK(); critical_exit(); ttyld_start(tp); } if (cy_events == 0) break; } if (cy_events >= LOTS_OF_EVENTS) goto repeat; } static int cyparam(struct tty *tp, struct termios *t) { int bits; int cflag; struct com_s *com; u_char cor_change; u_long cy_clock; int idivisor; int iflag; int iprescaler; int itimeout; int odivisor; int oprescaler; u_char opt; int s; com = tp->t_sc; /* check requested parameters */ cy_clock = CY_CLOCK(com->gfrcr_image); idivisor = cyspeed(t->c_ispeed, cy_clock, &iprescaler); if (idivisor <= 0) return (EINVAL); odivisor = cyspeed(t->c_ospeed != 0 ? t->c_ospeed : tp->t_ospeed, cy_clock, &oprescaler); if (odivisor <= 0) return (EINVAL); /* parameters are OK, convert them to the com struct and the device */ s = spltty(); if (t->c_ospeed == 0) (void)cymodem(tp, 0, SER_DTR); else (void)cymodem(tp, SER_DTR, 0); (void) cysetwater(com, t->c_ispeed); /* XXX we don't actually change the speed atomically. */ cd_setreg(com, CD1400_RBPR, idivisor); cd_setreg(com, CD1400_RCOR, iprescaler); cd_setreg(com, CD1400_TBPR, odivisor); cd_setreg(com, CD1400_TCOR, oprescaler); /* * channel control * receiver enable * transmitter enable (always set) */ cflag = t->c_cflag; opt = CD1400_CCR_CMDCHANCTL | CD1400_CCR_XMTEN | (cflag & CREAD ? 
CD1400_CCR_RCVEN : CD1400_CCR_RCVDIS); if (opt != com->channel_control) { com->channel_control = opt; cd1400_channel_cmd(com, opt); } #ifdef Smarts /* set special chars */ /* XXX if one is _POSIX_VDISABLE, can't use some others */ if (t->c_cc[VSTOP] != _POSIX_VDISABLE) cd_setreg(com, CD1400_SCHR1, t->c_cc[VSTOP]); if (t->c_cc[VSTART] != _POSIX_VDISABLE) cd_setreg(com, CD1400_SCHR2, t->c_cc[VSTART]); if (t->c_cc[VINTR] != _POSIX_VDISABLE) cd_setreg(com, CD1400_SCHR3, t->c_cc[VINTR]); if (t->c_cc[VSUSP] != _POSIX_VDISABLE) cd_setreg(com, CD1400_SCHR4, t->c_cc[VSUSP]); #endif /* * set channel option register 1 - * parity mode * stop bits * char length */ opt = 0; /* parity */ if (cflag & PARENB) { if (cflag & PARODD) opt |= CD1400_COR1_PARODD; opt |= CD1400_COR1_PARNORMAL; } iflag = t->c_iflag; if (!(iflag & INPCK)) opt |= CD1400_COR1_NOINPCK; bits = 1 + 1; /* stop bits */ if (cflag & CSTOPB) { ++bits; opt |= CD1400_COR1_STOP2; } /* char length */ switch (cflag & CSIZE) { case CS5: bits += 5; opt |= CD1400_COR1_CS5; break; case CS6: bits += 6; opt |= CD1400_COR1_CS6; break; case CS7: bits += 7; opt |= CD1400_COR1_CS7; break; default: bits += 8; opt |= CD1400_COR1_CS8; break; } cor_change = 0; if (opt != com->cor[0]) { cor_change |= CD1400_CCR_COR1; cd_setreg(com, CD1400_COR1, com->cor[0] = opt); } /* * Set receive time-out period, normally to max(one char time, 5 ms). */ - itimeout = (1000 * bits + t->c_ispeed - 1) / t->c_ispeed; + itimeout = howmany(1000 * bits, t->c_ispeed); #ifdef SOFT_HOTCHAR #define MIN_RTP 1 #else #define MIN_RTP 5 #endif if (itimeout < MIN_RTP) itimeout = MIN_RTP; if (!(t->c_lflag & ICANON) && t->c_cc[VMIN] != 0 && t->c_cc[VTIME] != 0 && t->c_cc[VTIME] * 10 > itimeout) itimeout = t->c_cc[VTIME] * 10; if (itimeout > 255) itimeout = 255; cd_setreg(com, CD1400_RTPR, itimeout); /* * set channel option register 2 - * flow control */ opt = 0; #ifdef Smarts if (iflag & IXANY) opt |= CD1400_COR2_IXANY; if (iflag & IXOFF) opt |= CD1400_COR2_IXOFF; #endif #ifndef SOFT_CTS_OFLOW if (cflag & CCTS_OFLOW) opt |= CD1400_COR2_CCTS_OFLOW; #endif critical_enter(); COM_LOCK(); if (opt != com->cor[1]) { cor_change |= CD1400_CCR_COR2; cd_setreg(com, CD1400_COR2, com->cor[1] = opt); } COM_UNLOCK(); critical_exit(); /* * set channel option register 3 - * receiver FIFO interrupt threshold * flow control */ opt = RxFifoThreshold; #ifdef Smarts if (t->c_lflag & ICANON) opt |= CD1400_COR3_SCD34; /* detect INTR & SUSP chars */ if (iflag & IXOFF) /* detect and transparently handle START and STOP chars */ opt |= CD1400_COR3_FCT | CD1400_COR3_SCD12; #endif if (opt != com->cor[2]) { cor_change |= CD1400_CCR_COR3; cd_setreg(com, CD1400_COR3, com->cor[2] = opt); } /* notify the CD1400 if COR1-3 have changed */ if (cor_change) cd1400_channel_cmd(com, CD1400_CCR_CMDCORCHG | cor_change); /* * set channel option register 4 - * CR/NL processing * break processing * received exception processing */ opt = 0; if (iflag & IGNCR) opt |= CD1400_COR4_IGNCR; #ifdef Smarts /* * we need a new ttyinput() for this, as we don't want to * have ICRNL && INLCR being done in both layers, or to have * synchronisation problems */ if (iflag & ICRNL) opt |= CD1400_COR4_ICRNL; if (iflag & INLCR) opt |= CD1400_COR4_INLCR; #endif if (iflag & IGNBRK) opt |= CD1400_COR4_IGNBRK | CD1400_COR4_NOBRKINT; /* * The `-ignbrk -brkint parmrk' case is not handled by the hardware, * so only tell the hardware about -brkint if -parmrk. 
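* (With PARMRK set, a break must still be delivered as the marked * sequence \377 \0 \0, so break exception processing stays enabled.)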
*/ if (!(iflag & (BRKINT | PARMRK))) opt |= CD1400_COR4_NOBRKINT; #if 0 /* XXX using this "intelligence" breaks reporting of overruns. */ if (iflag & IGNPAR) opt |= CD1400_COR4_PFO_DISCARD; else { if (iflag & PARMRK) opt |= CD1400_COR4_PFO_ESC; else opt |= CD1400_COR4_PFO_NUL; } #else opt |= CD1400_COR4_PFO_EXCEPTION; #endif cd_setreg(com, CD1400_COR4, opt); /* * set channel option register 5 - */ opt = 0; if (iflag & ISTRIP) opt |= CD1400_COR5_ISTRIP; if (t->c_iflag & IEXTEN) /* enable LNEXT (e.g. ctrl-v quoting) handling */ opt |= CD1400_COR5_LNEXT; #ifdef Smarts if (t->c_oflag & ONLCR) opt |= CD1400_COR5_ONLCR; if (t->c_oflag & OCRNL) opt |= CD1400_COR5_OCRNL; #endif cd_setreg(com, CD1400_COR5, opt); /* * We always generate modem status change interrupts for CD changes. * Among other things, this is necessary to track TS_CARR_ON for * pstat to print even when the driver doesn't care. CD changes * should be rare so interrupts for them are not worth extra code to * avoid. We avoid interrupts for other modem status changes (except * for CTS changes when SOFT_CTS_OFLOW is configured) since this is * simplest and best. */ /* * set modem change option register 1 * generate modem interrupts on which 1 -> 0 input transitions * also controls auto-DTR output flow-control, which we don't use */ opt = CD1400_MCOR1_CDzd; #ifdef SOFT_CTS_OFLOW if (cflag & CCTS_OFLOW) opt |= CD1400_MCOR1_CTSzd; #endif cd_setreg(com, CD1400_MCOR1, opt); /* * set modem change option register 2 * generate modem interrupts on specific 0 -> 1 input transitions */ opt = CD1400_MCOR2_CDod; #ifdef SOFT_CTS_OFLOW if (cflag & CCTS_OFLOW) opt |= CD1400_MCOR2_CTSod; #endif cd_setreg(com, CD1400_MCOR2, opt); /* * XXX should have done this long ago, but there is too much state * to change all atomically. */ critical_enter(); COM_LOCK(); com->state &= ~CS_TTGO; if (!(tp->t_state & TS_TTSTOP)) com->state |= CS_TTGO; if (cflag & CRTS_IFLOW) { com->state |= CS_RTS_IFLOW; /* * If CS_RTS_IFLOW just changed from off to on, the change * needs to be propagated to CD1400_MSVR1_RTS. This isn't urgent, * so do it later by calling cystart() instead of repeating * a lot of code from cystart() here. */ } else if (com->state & CS_RTS_IFLOW) { com->state &= ~CS_RTS_IFLOW; /* * CS_RTS_IFLOW just changed from on to off. Force CD1400_MSVR1_RTS * on here, since cystart() won't do it later. */ cd_setreg(com, com->mcr_rts_reg, com->mcr_image |= com->mcr_rts); } /* * Set up state to handle output flow control. * XXX - worth handling MDMBUF (DCD) flow control at the lowest level? * Now has 10+ msec latency, while CTS flow has 50- usec latency. */ com->state |= CS_ODEVREADY; #ifdef SOFT_CTS_OFLOW com->state &= ~CS_CTS_OFLOW; if (cflag & CCTS_OFLOW) { com->state |= CS_CTS_OFLOW; if (!(com->last_modem_status & CD1400_MSVR2_CTS)) com->state &= ~CS_ODEVREADY; } #endif /* XXX shouldn't call functions while intrs are disabled. */ disc_optim(tp, t, com); #if 0 /* * Recover from fiddling with CS_TTGO. We used to call cyintr1() * unconditionally, but that defeated the careful discarding of * stale input in cyopen(). 
*/ if (com->state >= (CS_BUSY | CS_TTGO)) cyintr1(com); #endif if (com->state >= (CS_BUSY | CS_TTGO | CS_ODEVREADY)) { if (!(com->intr_enable & CD1400_SRER_TXRDY)) cd_setreg(com, CD1400_SRER, com->intr_enable = (com->intr_enable & ~CD1400_SRER_TXMPTY) | CD1400_SRER_TXRDY); } else { if (com->intr_enable & CD1400_SRER_TXRDY) cd_setreg(com, CD1400_SRER, com->intr_enable = (com->intr_enable & ~CD1400_SRER_TXRDY) | CD1400_SRER_TXMPTY); } COM_UNLOCK(); critical_exit(); splx(s); cystart(tp); if (com->ibufold != NULL) { free(com->ibufold, M_DEVBUF); com->ibufold = NULL; } return (0); } static int cysetwater(struct com_s *com, speed_t speed) { int cp4ticks; u_char *ibuf; int ibufsize; struct tty *tp; /* * Make the buffer size large enough to handle a softtty interrupt * latency of about 2 ticks without loss of throughput or data * (about 3 ticks if input flow control is not used or not honoured, * but a bit less for CS5-CS7 modes). */ cp4ticks = speed / 10 / hz * 4; for (ibufsize = 128; ibufsize < cp4ticks;) ibufsize <<= 1; if (ibufsize == com->ibufsize) { return (0); } /* * Allocate input buffer. The extra factor of 2 in the size is * to allow for an error byte for each input byte. */ ibuf = malloc(2 * ibufsize, M_DEVBUF, M_NOWAIT); if (ibuf == NULL) { return (ENOMEM); } /* Initialize non-critical variables. */ com->ibufold = com->ibuf; com->ibufsize = ibufsize; tp = com->tp; if (tp != NULL) { tp->t_ififosize = 2 * ibufsize; tp->t_ispeedwat = (speed_t)-1; tp->t_ospeedwat = (speed_t)-1; } /* * Read current input buffer, if any. Continue with interrupts * disabled. */ critical_enter(); COM_LOCK(); if (com->iptr != com->ibuf) cyinput(com); /*- * Initialize critical variables, including input buffer watermarks. * The external device is asked to stop sending when the buffer * exactly reaches high water, or when the high level requests it. * The high level is notified immediately (rather than at a later * clock tick) when this watermark is reached. * The buffer size is chosen so the watermark should almost never * be reached. * The low watermark is invisibly 0 since the buffer is always * emptied all at once. 
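* The high watermark is set below at 3/4 of the buffer; each data byte * has a matching status byte stored ierroff (== ibufsize) bytes later.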
*/ com->iptr = com->ibuf = ibuf; com->ibufend = ibuf + ibufsize; com->ierroff = ibufsize; com->ihighwater = ibuf + 3 * ibufsize / 4; COM_UNLOCK(); critical_exit(); return (0); } static void cystart(struct tty *tp) { struct com_s *com; int s; #ifdef CyDebug bool_t started; #endif com = tp->t_sc; s = spltty(); #ifdef CyDebug ++com->start_count; started = FALSE; #endif critical_enter(); COM_LOCK(); if (tp->t_state & TS_TTSTOP) { com->state &= ~CS_TTGO; if (com->intr_enable & CD1400_SRER_TXRDY) cd_setreg(com, CD1400_SRER, com->intr_enable = (com->intr_enable & ~CD1400_SRER_TXRDY) | CD1400_SRER_TXMPTY); } else { com->state |= CS_TTGO; if (com->state >= (CS_BUSY | CS_TTGO | CS_ODEVREADY) && !(com->intr_enable & CD1400_SRER_TXRDY)) cd_setreg(com, CD1400_SRER, com->intr_enable = (com->intr_enable & ~CD1400_SRER_TXMPTY) | CD1400_SRER_TXRDY); } if (tp->t_state & TS_TBLOCK) { if (com->mcr_image & com->mcr_rts && com->state & CS_RTS_IFLOW) #if 0 outb(com->modem_ctl_port, com->mcr_image &= ~CD1400_MSVR1_RTS); #else cd_setreg(com, com->mcr_rts_reg, com->mcr_image &= ~com->mcr_rts); #endif } else { if (!(com->mcr_image & com->mcr_rts) && com->iptr < com->ihighwater && com->state & CS_RTS_IFLOW) #if 0 outb(com->modem_ctl_port, com->mcr_image |= CD1400_MSVR1_RTS); #else cd_setreg(com, com->mcr_rts_reg, com->mcr_image |= com->mcr_rts); #endif } COM_UNLOCK(); critical_exit(); if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) { ttwwakeup(tp); splx(s); return; } if (tp->t_outq.c_cc != 0) { struct lbq *qp; struct lbq *next; if (!com->obufs[0].l_queued) { #ifdef CyDebug started = TRUE; #endif com->obufs[0].l_tail = com->obuf1 + q_to_b(&tp->t_outq, com->obuf1, sizeof com->obuf1); com->obufs[0].l_next = NULL; com->obufs[0].l_queued = TRUE; critical_enter(); COM_LOCK(); if (com->state & CS_BUSY) { qp = com->obufq.l_next; while ((next = qp->l_next) != NULL) qp = next; qp->l_next = &com->obufs[0]; } else { com->obufq.l_head = com->obufs[0].l_head; com->obufq.l_tail = com->obufs[0].l_tail; com->obufq.l_next = &com->obufs[0]; com->state |= CS_BUSY; if (com->state >= (CS_BUSY | CS_TTGO | CS_ODEVREADY)) cd_setreg(com, CD1400_SRER, com->intr_enable = (com->intr_enable & ~CD1400_SRER_TXMPTY) | CD1400_SRER_TXRDY); } COM_UNLOCK(); critical_exit(); } if (tp->t_outq.c_cc != 0 && !com->obufs[1].l_queued) { #ifdef CyDebug started = TRUE; #endif com->obufs[1].l_tail = com->obuf2 + q_to_b(&tp->t_outq, com->obuf2, sizeof com->obuf2); com->obufs[1].l_next = NULL; com->obufs[1].l_queued = TRUE; critical_enter(); COM_LOCK(); if (com->state & CS_BUSY) { qp = com->obufq.l_next; while ((next = qp->l_next) != NULL) qp = next; qp->l_next = &com->obufs[1]; } else { com->obufq.l_head = com->obufs[1].l_head; com->obufq.l_tail = com->obufs[1].l_tail; com->obufq.l_next = &com->obufs[1]; com->state |= CS_BUSY; if (com->state >= (CS_BUSY | CS_TTGO | CS_ODEVREADY)) cd_setreg(com, CD1400_SRER, com->intr_enable = (com->intr_enable & ~CD1400_SRER_TXMPTY) | CD1400_SRER_TXRDY); } COM_UNLOCK(); critical_exit(); } tp->t_state |= TS_BUSY; } #ifdef CyDebug if (started) ++com->start_real; #endif #if 0 critical_enter(); COM_LOCK(); if (com->state >= (CS_BUSY | CS_TTGO)) cyintr1(com); /* fake interrupt to start output */ COM_UNLOCK(); critical_exit(); #endif ttwwakeup(tp); splx(s); } static void comstop(struct tty *tp, int rw) { struct com_s *com; bool_t wakeup_etc; com = tp->t_sc; wakeup_etc = FALSE; critical_enter(); COM_LOCK(); if (rw & FWRITE) { com->obufs[0].l_queued = FALSE; com->obufs[1].l_queued = FALSE; if (com->extra_state & CSE_ODONE) { cy_events -= 
LOTS_OF_EVENTS; com->extra_state &= ~CSE_ODONE; if (com->etc != ETC_NONE) { if (com->etc == ETC_BREAK_ENDED) com->etc = ETC_NONE; wakeup_etc = TRUE; } } com->tp->t_state &= ~TS_BUSY; if (com->state & CS_ODONE) cy_events -= LOTS_OF_EVENTS; com->state &= ~(CS_ODONE | CS_BUSY); } if (rw & FREAD) { /* XXX no way to reset only input fifo. */ cy_events -= (com->iptr - com->ibuf); com->iptr = com->ibuf; } COM_UNLOCK(); critical_exit(); if (wakeup_etc) wakeup(&com->etc); if (rw & FWRITE && com->etc == ETC_NONE) cd1400_channel_cmd(com, CD1400_CCR_CMDRESET | CD1400_CCR_FTF); cystart(tp); } static int cymodem(struct tty *tp, int sigon, int sigoff) { struct com_s *com; int mcr; int msr; com = tp->t_sc; if (sigon == 0 && sigoff == 0) { sigon = 0; mcr = com->mcr_image; if (mcr & com->mcr_dtr) sigon |= SER_DTR; if (mcr & com->mcr_rts) /* XXX wired on for Cyclom-8Ys */ sigon |= SER_RTS; /* * We must read the modem status from the hardware because * we don't generate modem status change interrupts for all * changes, so com->prev_modem_status is not guaranteed to * be up to date. This is safe, unlike for sio, because * reading the status register doesn't clear pending modem * status change interrupts. */ msr = cd_getreg(com, CD1400_MSVR2); if (msr & CD1400_MSVR2_CTS) sigon |= SER_CTS; if (msr & CD1400_MSVR2_CD) sigon |= SER_DCD; if (msr & CD1400_MSVR2_DSR) sigon |= SER_DSR; if (msr & CD1400_MSVR2_RI) /* XXX not connected except for Cyclom-16Y? */ sigon |= SER_RI; return (sigon); } mcr = com->mcr_image; if (sigon & SER_DTR) mcr |= com->mcr_dtr; if (sigoff & SER_DTR) mcr &= ~com->mcr_dtr; if (sigon & SER_RTS) mcr |= com->mcr_rts; if (sigoff & SER_RTS) mcr &= ~com->mcr_rts; critical_enter(); COM_LOCK(); com->mcr_image = mcr; cd_setreg(com, CD1400_MSVR1, mcr); cd_setreg(com, CD1400_MSVR2, mcr); COM_UNLOCK(); critical_exit(); return (0); } static void cysettimeout() { struct com_s *com; bool_t someopen; int unit; /* * Set our timeout period to 1 second if no polled devices are open. * Otherwise set it to max(1/200, 1/hz). * Enable timeouts iff some device is open. */ untimeout(cywakeup, (void *)NULL, cy_timeout_handle); cy_timeout = hz; someopen = FALSE; for (unit = 0; unit < NPORTS; ++unit) { com = cy_addr(unit); if (com != NULL && com->tp != NULL && com->tp->t_state & TS_ISOPEN) { someopen = TRUE; } } if (someopen) { cy_timeouts_until_log = hz / cy_timeout; cy_timeout_handle = timeout(cywakeup, (void *)NULL, cy_timeout); } else { /* Flush error messages, if any. */ cy_timeouts_until_log = 1; cywakeup((void *)NULL); untimeout(cywakeup, (void *)NULL, cy_timeout_handle); } } static void cywakeup(void *chan) { struct com_s *com; int unit; cy_timeout_handle = timeout(cywakeup, (void *)NULL, cy_timeout); /* * Check for and log errors, but not too often. */ if (--cy_timeouts_until_log > 0) return; cy_timeouts_until_log = hz / cy_timeout; for (unit = 0; unit < NPORTS; ++unit) { int errnum; com = cy_addr(unit); if (com == NULL) continue; for (errnum = 0; errnum < CE_NTYPES; ++errnum) { u_int delta; u_long total; critical_enter(); COM_LOCK(); delta = com->delta_error_counts[errnum]; com->delta_error_counts[errnum] = 0; COM_UNLOCK(); critical_exit(); if (delta == 0) continue; total = com->error_counts[errnum] += delta; log(LOG_ERR, "cy%d: %u more %s%s (total %lu)\n", unit, delta, error_desc[errnum], delta == 1 ? 
"" : "s", total); } } } static void disc_optim(struct tty *tp, struct termios *t, struct com_s *com) { #ifndef SOFT_HOTCHAR u_char opt; #endif ttyldoptim(tp); #ifndef SOFT_HOTCHAR opt = com->cor[2] & ~CD1400_COR3_SCD34; if (com->tp->t_hotchar != 0) { cd_setreg(com, CD1400_SCHR3, com->tp->t_hotchar); cd_setreg(com, CD1400_SCHR4, com->tp->t_hotchar); opt |= CD1400_COR3_SCD34; } if (opt != com->cor[2]) { cd_setreg(com, CD1400_COR3, com->cor[2] = opt); cd1400_channel_cmd(com, CD1400_CCR_CMDCORCHG | CD1400_CCR_COR3); } #endif } #ifdef Smarts /* standard line discipline input routine */ int cyinput(int c, struct tty *tp) { /* XXX duplicate ttyinput(), but without the IXOFF/IXON/ISTRIP/IPARMRK * bits, as they are done by the CD1400. Hardly worth the effort, * given that high-throughput session are raw anyhow. */ } #endif /* Smarts */ static int cyspeed(speed_t speed, u_long cy_clock, int *prescaler_io) { int actual; int error; int divider; int prescaler; int prescaler_unit; if (speed == 0) return (0); if (speed < 0 || speed > 150000) return (-1); /* determine which prescaler to use */ for (prescaler_unit = 4, prescaler = 2048; prescaler_unit; prescaler_unit--, prescaler >>= 2) { if (cy_clock / prescaler / speed > 63) break; } divider = (cy_clock / prescaler * 2 / speed + 1) / 2; /* round off */ if (divider > 255) divider = 255; actual = cy_clock/prescaler/divider; /* 10 times error in percent: */ error = ((actual - (long)speed) * 2000 / (long)speed + 1) / 2; /* 3.0% max error tolerance */ if (error < -30 || error > 30) return (-1); *prescaler_io = prescaler_unit; return (divider); } static void cd1400_channel_cmd(struct com_s *com, int cmd) { cd1400_channel_cmd_wait(com); cd_setreg(com, CD1400_CCR, cmd); cd1400_channel_cmd_wait(com); } static void cd1400_channel_cmd_wait(struct com_s *com) { struct timeval start; struct timeval tv; long usec; if (cd_getreg(com, CD1400_CCR) == 0) return; microtime(&start); for (;;) { if (cd_getreg(com, CD1400_CCR) == 0) return; microtime(&tv); usec = 1000000 * (tv.tv_sec - start.tv_sec) + tv.tv_usec - start.tv_usec; if (usec >= 5000) { log(LOG_ERR, "cy%d: channel command timeout (%ld usec)\n", com->unit, usec); return; } } } static void cd_etc(struct com_s *com, int etc) { /* * We can't change the hardware's ETC state while there are any * characters in the tx fifo, since those characters would be * interpreted as commands! Unputting characters from the fifo * is difficult, so we wait up to 12 character times for the fifo * to drain. The command will be delayed for up to 2 character * times for the tx to become empty. Unputting characters from * the tx holding and shift registers is impossible, so we wait * for the tx to become empty so that the command is sure to be * executed soon after we issue it. 
*/ critical_enter(); COM_LOCK(); if (com->etc == etc) goto wait; if ((etc == CD1400_ETC_SENDBREAK && (com->etc == ETC_BREAK_STARTING || com->etc == ETC_BREAK_STARTED)) || (etc == CD1400_ETC_STOPBREAK && (com->etc == ETC_BREAK_ENDING || com->etc == ETC_BREAK_ENDED || com->etc == ETC_NONE))) { COM_UNLOCK(); critical_exit(); return; } com->etc = etc; cd_setreg(com, CD1400_SRER, com->intr_enable = (com->intr_enable & ~CD1400_SRER_TXRDY) | CD1400_SRER_TXMPTY); wait: COM_UNLOCK(); critical_exit(); while (com->etc == etc && tsleep(&com->etc, TTIPRI | PCATCH, "cyetc", 0) == 0) continue; } static int cd_getreg(struct com_s *com, int reg) { struct com_s *basecom; u_char car; int cy_align; cy_addr iobase; #ifdef SMP int need_unlock; #endif int val; basecom = cy_addr(com->unit & ~(CD1400_NO_OF_CHANNELS - 1)); car = com->unit & CD1400_CAR_CHAN; cy_align = com->cy_align; iobase = com->iobase; critical_enter(); #ifdef SMP need_unlock = 0; if (!mtx_owned(&cy_lock)) { COM_LOCK(); need_unlock = 1; } #endif if (basecom->car != car) cd_outb(iobase, CD1400_CAR, cy_align, basecom->car = car); val = cd_inb(iobase, reg, cy_align); #ifdef SMP if (need_unlock) COM_UNLOCK(); #endif critical_exit(); return (val); } static void cd_setreg(struct com_s *com, int reg, int val) { struct com_s *basecom; u_char car; int cy_align; cy_addr iobase; #ifdef SMP int need_unlock; #endif basecom = cy_addr(com->unit & ~(CD1400_NO_OF_CHANNELS - 1)); car = com->unit & CD1400_CAR_CHAN; cy_align = com->cy_align; iobase = com->iobase; critical_enter(); #ifdef SMP need_unlock = 0; if (!mtx_owned(&cy_lock)) { COM_LOCK(); need_unlock = 1; } #endif if (basecom->car != car) cd_outb(iobase, CD1400_CAR, cy_align, basecom->car = car); cd_outb(iobase, reg, cy_align, val); #ifdef SMP if (need_unlock) COM_UNLOCK(); #endif critical_exit(); } #ifdef CyDebug /* useful in ddb */ void cystatus(int unit) { struct com_s *com; cy_addr iobase; u_int ocount; struct tty *tp; com = cy_addr(unit); printf("info for channel %d\n", unit); printf("------------------\n"); printf("total cyclom service probes:\t%d\n", cy_svrr_probes); printf("calls to upper layer:\t\t%d\n", cy_timeouts); if (com == NULL) return; iobase = com->iobase; printf("\n"); printf("cd1400 base address:\t\t%p\n", iobase); printf("saved channel_control:\t\t0x%02x\n", com->channel_control); printf("saved cor1-3:\t\t\t0x%02x 0x%02x 0x%02x\n", com->cor[0], com->cor[1], com->cor[2]); printf("service request enable reg:\t0x%02x (0x%02x cached)\n", cd_getreg(com, CD1400_SRER), com->intr_enable); printf("service request register:\t0x%02x\n", cd_inb(iobase, CD1400_SVRR, com->cy_align)); printf("modem status:\t\t\t0x%02x (0x%02x cached)\n", cd_getreg(com, CD1400_MSVR2), com->prev_modem_status); printf("rx/tx/mdm interrupt registers:\t0x%02x 0x%02x 0x%02x\n", cd_inb(iobase, CD1400_RIR, com->cy_align), cd_inb(iobase, CD1400_TIR, com->cy_align), cd_inb(iobase, CD1400_MIR, com->cy_align)); printf("\n"); printf("com state:\t\t\t0x%02x\n", com->state); printf("calls to cystart():\t\t%d (%d useful)\n", com->start_count, com->start_real); printf("rx buffer chars free:\t\t%d\n", com->iptr - com->ibuf); ocount = 0; if (com->obufs[0].l_queued) ocount += com->obufs[0].l_tail - com->obufs[0].l_head; if (com->obufs[1].l_queued) ocount += com->obufs[1].l_tail - com->obufs[1].l_head; printf("tx buffer chars:\t\t%u\n", ocount); printf("received chars:\t\t\t%d\n", com->bytes_in); printf("received exceptions:\t\t%d\n", com->recv_exception); printf("modem signal deltas:\t\t%d\n", com->mdm); printf("transmitted
chars:\t\t%d\n", com->bytes_out); printf("\n"); tp = com->tp; if (tp != NULL) { printf("tty state:\t\t\t0x%08x\n", tp->t_state); printf( "upper layer queue lengths:\t%d raw, %d canon, %d output\n", tp->t_rawq.c_cc, tp->t_canq.c_cc, tp->t_outq.c_cc); } else printf("tty state:\t\t\tclosed\n"); } #endif /* CyDebug */ diff --git a/sys/dev/fatm/if_fatm.c b/sys/dev/fatm/if_fatm.c index 9a42b932955a..83f43fd4ed52 100644 --- a/sys/dev/fatm/if_fatm.c +++ b/sys/dev/fatm/if_fatm.c @@ -1,3091 +1,3091 @@ /*- * Copyright (c) 2001-2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Hartmut Brandt * * Fore PCA200E driver for NATM */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #ifdef INET #include #include #endif #include #include #include #include #include #include #include #include #include #include devclass_t fatm_devclass; static const struct { uint16_t vid; uint16_t did; const char *name; } fatm_devs[] = { { 0x1127, 0x300, "FORE PCA200E" }, { 0, 0, NULL } }; static const struct rate { uint32_t ratio; uint32_t cell_rate; } rate_table[] = { #include }; #define RATE_TABLE_SIZE (sizeof(rate_table) / sizeof(rate_table[0])) SYSCTL_DECL(_hw_atm); MODULE_DEPEND(fatm, utopia, 1, 1, 1); static int fatm_utopia_readregs(struct ifatm *, u_int, uint8_t *, u_int *); static int fatm_utopia_writereg(struct ifatm *, u_int, u_int, u_int); static const struct utopia_methods fatm_utopia_methods = { fatm_utopia_readregs, fatm_utopia_writereg }; #define VC_OK(SC, VPI, VCI) \ (rounddown2(VPI, 1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) == 0 && \ (VCI) != 0 && rounddown2(VCI, 1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) == 0) static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc); /* * Probing is easy: step trough the list of known vendor and device * ids and compare. If one is found - it's our. 
*/ static int fatm_probe(device_t dev) { int i; for (i = 0; fatm_devs[i].name; i++) if (pci_get_vendor(dev) == fatm_devs[i].vid && pci_get_device(dev) == fatm_devs[i].did) { device_set_desc(dev, fatm_devs[i].name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Function called at completion of a SUNI writeregs/readregs command. * This is called from the interrupt handler while holding the softc lock. * We use the queue entry as the rendezvous point. */ static void fatm_utopia_writeregs_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.suni_reg_errors++; q->error = EIO; } wakeup(q); } /* * Write a SUNI register. The bits that are 1 in mask are written from val * into register reg. We wait for the command to complete by sleeping on * the register memory. * * We assume that we already hold the softc mutex. */ static int fatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val) { int error; struct cmdqueue *q; struct fatm_softc *sc; sc = ifatm->ifp->if_softc; FATM_CHECKLOCK(sc); if (!(ifatm->ifp->if_drv_flags & IFF_DRV_RUNNING)) return (EIO); /* get queue element and fill it */ q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (EIO); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = fatm_utopia_writeregs_complete; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, 0); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_MAKE_SETOC3(reg, val, mask) | FATM_OP_INTERRUPT_SEL); BARRIER_W(sc); /* * Wait for the command to complete */ error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_setreg", hz); switch (error) { case EWOULDBLOCK: error = EIO; break; case ERESTART: error = EINTR; break; case 0: error = q->error; break; } return (error); } /* * Function called at completion of a SUNI readregs command. * This is called from the interrupt handler while holding the softc lock. * We use reg_mem as the rendezvous point. */ static void fatm_utopia_readregs_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.suni_reg_errors++; q->error = EIO; } wakeup(&sc->reg_mem); } /* * Read SUNI registers. * * We use a preallocated buffer to read the registers. Therefore we need * to protect against multiple threads trying to read registers. We do this * with a condition variable and a flag. We wait for the command to complete * by sleeping on the register memory. * * We assume that we already hold the softc mutex.
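 *
 * The acquire half of that protocol is a standard condition-variable
 * loop; a minimal sketch (the function below additionally re-checks
 * that the interface is still running before each wait):
 *
 *	while (sc->flags & FATM_REGS_INUSE)
 *		cv_wait(&sc->cv_regs, &sc->mtx);
 *	sc->flags |= FATM_REGS_INUSE;
 *
 * The release half clears FATM_REGS_INUSE and cv_signal()s cv_regs so
 * that exactly one waiter retries.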
*/ static int fatm_utopia_readregs_internal(struct fatm_softc *sc) { int error, i; uint32_t *ptr; struct cmdqueue *q; /* get the buffer */ for (;;) { if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) return (EIO); if (!(sc->flags & FATM_REGS_INUSE)) break; cv_wait(&sc->cv_regs, &sc->mtx); } sc->flags |= FATM_REGS_INUSE; q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; /* release the buffer again so other readers don't block forever */ sc->flags &= ~FATM_REGS_INUSE; cv_signal(&sc->cv_regs); return (EIO); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = fatm_utopia_readregs_complete; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_PREREAD); WRITE4(sc, q->q.card + FATMOC_GETOC3_BUF, sc->reg_mem.paddr); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_OC3_GET_REG | FATM_OP_INTERRUPT_SEL); BARRIER_W(sc); /* * Wait for the command to complete */ error = msleep(&sc->reg_mem, &sc->mtx, PZERO | PCATCH, "fatm_getreg", hz); switch (error) { case EWOULDBLOCK: error = EIO; break; case ERESTART: error = EINTR; break; case 0: bus_dmamap_sync(sc->reg_mem.dmat, sc->reg_mem.map, BUS_DMASYNC_POSTREAD); error = q->error; break; } if (error != 0) { /* declare buffer to be free */ sc->flags &= ~FATM_REGS_INUSE; cv_signal(&sc->cv_regs); return (error); } /* swap if needed */ ptr = (uint32_t *)sc->reg_mem.mem; for (i = 0; i < FATM_NREGS; i++) ptr[i] = le32toh(ptr[i]) & 0xff; return (0); } /* * Read SUNI registers for the SUNI module. * * We assume that we already hold the mutex. */ static int fatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *valp, u_int *np) { int err; int i; struct fatm_softc *sc; if (reg >= FATM_NREGS) return (EINVAL); if (reg + *np > FATM_NREGS) *np = FATM_NREGS - reg; sc = ifatm->ifp->if_softc; FATM_CHECKLOCK(sc); err = fatm_utopia_readregs_internal(sc); if (err != 0) return (err); for (i = 0; i < *np; i++) valp[i] = ((uint32_t *)sc->reg_mem.mem)[reg + i]; /* declare buffer to be free */ sc->flags &= ~FATM_REGS_INUSE; cv_signal(&sc->cv_regs); return (0); } /* * Check whether the heart is beating. We remember the last heartbeat and * compare it to the current one. If it appears stuck for 10 successive * checks, we have a problem. * * Assume we hold the lock. */ static void fatm_check_heartbeat(struct fatm_softc *sc) { uint32_t h; FATM_CHECKLOCK(sc); h = READ4(sc, FATMO_HEARTBEAT); DBG(sc, BEAT, ("heartbeat %08x", h)); if (sc->stop_cnt == 10) return; if (h == sc->heartbeat) { if (++sc->stop_cnt == 10) { log(LOG_ERR, "i960 stopped???\n"); WRITE4(sc, FATMO_HIMR, 1); } return; } sc->stop_cnt = 0; sc->heartbeat = h; } /* * Ensure that the heart is still beating. */ static void fatm_watchdog(void *arg) { struct fatm_softc *sc; sc = arg; FATM_CHECKLOCK(sc); fatm_check_heartbeat(sc); callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc); } /* * Hard reset the i960 on the board. This is done by initializing registers, * clearing interrupts and waiting for the selftest to finish. Not sure * whether all these barriers are actually needed. * * Assumes that we hold the lock.
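 *
 * As a rough budget, the selftest poll below gives the i960 about
 * 100 ms: up to 100 reads of FATMO_BOOT_STATUS with a DELAY(1000)
 * (1 ms) between them, failing with EIO if neither SELF_TEST_OK nor
 * SELF_TEST_FAIL shows up in time.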
*/ static int fatm_reset(struct fatm_softc *sc) { int w; uint32_t val; FATM_CHECKLOCK(sc); WRITE4(sc, FATMO_APP_BASE, FATMO_COMMON_ORIGIN); BARRIER_W(sc); WRITE4(sc, FATMO_UART_TO_960, XMIT_READY); BARRIER_W(sc); WRITE4(sc, FATMO_UART_TO_HOST, XMIT_READY); BARRIER_W(sc); WRITE4(sc, FATMO_BOOT_STATUS, COLD_START); BARRIER_W(sc); WRITE1(sc, FATMO_HCR, FATM_HCR_RESET); BARRIER_W(sc); DELAY(1000); WRITE1(sc, FATMO_HCR, 0); BARRIER_RW(sc); DELAY(1000); for (w = 100; w; w--) { BARRIER_R(sc); val = READ4(sc, FATMO_BOOT_STATUS); switch (val) { case SELF_TEST_OK: return (0); case SELF_TEST_FAIL: return (EIO); } DELAY(1000); } return (EIO); } /* * Stop the card. Must be called WITH the lock held * Reset, free transmit and receive buffers. Wakeup everybody who may sleep. */ static void fatm_stop(struct fatm_softc *sc) { int i; struct cmdqueue *q; struct rbuf *rb; struct txqueue *tx; uint32_t stat; FATM_CHECKLOCK(sc); /* Stop the board */ utopia_stop(&sc->utopia); (void)fatm_reset(sc); /* stop watchdog */ callout_stop(&sc->watchdog_timer); if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp), sc->utopia.carrier == UTP_CARR_OK); /* * Collect transmit mbufs, partial receive mbufs and * supplied mbufs */ for (i = 0; i < FATM_TX_QLEN; i++) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i); if (tx->m) { bus_dmamap_unload(sc->tx_tag, tx->map); m_freem(tx->m); tx->m = NULL; } } /* Collect supplied mbufs */ while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) { LIST_REMOVE(rb, link); bus_dmamap_unload(sc->rbuf_tag, rb->map); m_free(rb->m); rb->m = NULL; LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } /* Unwait any waiters */ wakeup(&sc->sadi_mem); /* wakeup all threads waiting for STAT or REG buffers */ cv_broadcast(&sc->cv_stat); cv_broadcast(&sc->cv_regs); sc->flags &= ~(FATM_STAT_INUSE | FATM_REGS_INUSE); /* wakeup all threads waiting on commands */ for (i = 0; i < FATM_CMD_QLEN; i++) { q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, i); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if ((stat = H_GETSTAT(q->q.statp)) != FATM_STAT_FREE) { H_SETSTAT(q->q.statp, stat | FATM_STAT_ERROR); H_SYNCSTAT_PREWRITE(sc, q->q.statp); wakeup(q); } } utopia_reset_media(&sc->utopia); } sc->small_cnt = sc->large_cnt = 0; /* Reset vcc info */ if (sc->vccs != NULL) { sc->open_vccs = 0; for (i = 0; i < FORE_MAX_VCC + 1; i++) { if (sc->vccs[i] != NULL) { if ((sc->vccs[i]->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN)) == 0) { uma_zfree(sc->vcc_zone, sc->vccs[i]); sc->vccs[i] = NULL; } else { sc->vccs[i]->vflags = 0; sc->open_vccs++; } } } } } /* * Load the firmware into the board and save the entry point. */ static uint32_t firmware_load(struct fatm_softc *sc) { struct firmware *fw = (struct firmware *)firmware; DBG(sc, INIT, ("loading - entry=%x", fw->entry)); bus_space_write_region_4(sc->memt, sc->memh, fw->offset, firmware, sizeof(firmware) / sizeof(firmware[0])); BARRIER_RW(sc); return (fw->entry); } /* * Read a character from the virtual UART. The availability of a character * is signaled by a non-null value of the 32 bit register. The eating of * the character by us is signalled to the card by setting that register * to zero. 
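 *
 * The register thus acts as a one-character mailbox in each direction;
 * a minimal sketch of a single receive step (rx_getc() below wraps
 * this in a retry loop with a timeout):
 *
 *	c = READ4(sc, FATMO_UART_TO_HOST);
 *	if (c != 0) {
 *		WRITE4(sc, FATMO_UART_TO_HOST, 0);
 *		return (c & 0xff);
 *	}
 *
 * The transmit side (tx_putc()) mirrors this: it waits for
 * FATMO_UART_TO_960 to read as zero, then writes the character or'ed
 * with CHAR_AVAIL.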
*/ static int rx_getc(struct fatm_softc *sc) { int w = 50; int c; while (w--) { c = READ4(sc, FATMO_UART_TO_HOST); BARRIER_RW(sc); if (c != 0) { WRITE4(sc, FATMO_UART_TO_HOST, 0); DBGC(sc, UART, ("%c", c & 0xff)); return (c & 0xff); } DELAY(1000); } return (-1); } /* * Eat up characters from the board and stuff them in the bit-bucket. */ static void rx_flush(struct fatm_softc *sc) { int w = 10000; while (w-- && rx_getc(sc) >= 0) ; } /* * Write a character to the card. The UART is available if the register * is zero. */ static int tx_putc(struct fatm_softc *sc, u_char c) { int w = 10; int c1; while (w--) { c1 = READ4(sc, FATMO_UART_TO_960); BARRIER_RW(sc); if (c1 == 0) { WRITE4(sc, FATMO_UART_TO_960, c | CHAR_AVAIL); DBGC(sc, UART, ("%c", c & 0xff)); return (0); } DELAY(1000); } return (-1); } /* * Start the firmware. This is doing by issuing a 'go' command with * the hex entry address of the firmware. Then we wait for the self-test to * succeed. */ static int fatm_start_firmware(struct fatm_softc *sc, uint32_t start) { static char hex[] = "0123456789abcdef"; u_int w, val; DBG(sc, INIT, ("starting")); rx_flush(sc); tx_putc(sc, '\r'); DELAY(1000); rx_flush(sc); tx_putc(sc, 'g'); (void)rx_getc(sc); tx_putc(sc, 'o'); (void)rx_getc(sc); tx_putc(sc, ' '); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 12) & 0xf]); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 8) & 0xf]); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 4) & 0xf]); (void)rx_getc(sc); tx_putc(sc, hex[(start >> 0) & 0xf]); (void)rx_getc(sc); tx_putc(sc, '\r'); rx_flush(sc); for (w = 100; w; w--) { BARRIER_R(sc); val = READ4(sc, FATMO_BOOT_STATUS); switch (val) { case CP_RUNNING: return (0); case SELF_TEST_FAIL: return (EIO); } DELAY(1000); } return (EIO); } /* * Initialize one card and host queue. */ static void init_card_queue(struct fatm_softc *sc, struct fqueue *queue, int qlen, size_t qel_size, size_t desc_size, cardoff_t off, u_char **statpp, uint32_t *cardstat, u_char *descp, uint32_t carddesc) { struct fqelem *el = queue->chunk; while (qlen--) { el->card = off; off += 8; /* size of card entry */ el->statp = (uint32_t *)(*statpp); (*statpp) += sizeof(uint32_t); H_SETSTAT(el->statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, el->statp); WRITE4(sc, el->card + FATMOS_STATP, (*cardstat)); (*cardstat) += sizeof(uint32_t); el->ioblk = descp; descp += desc_size; el->card_ioblk = carddesc; carddesc += desc_size; el = (struct fqelem *)((u_char *)el + qel_size); } queue->tail = queue->head = 0; } /* * Issue the initialize operation to the card, wait for completion and * initialize the on-board and host queue structures with offsets and * addresses. 
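 *
 * For scale: init_card_queue() above walks the card-side ring in
 * 8-byte entries and carves one 32-bit status word per entry out of
 * the host status memory, so a queue of N entries consumes N * 4 bytes
 * of stat_mem; the command queue set up below is the exception, with
 * 32-byte card entries.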
*/ static int fatm_init_cmd(struct fatm_softc *sc) { int w, c; u_char *statp; uint32_t card_stat; u_int cnt; struct fqelem *el; cardoff_t off; DBG(sc, INIT, ("command")); WRITE4(sc, FATMO_ISTAT, 0); WRITE4(sc, FATMO_IMASK, 1); WRITE4(sc, FATMO_HLOGGER, 0); WRITE4(sc, FATMO_INIT + FATMOI_RECEIVE_TRESHOLD, 0); WRITE4(sc, FATMO_INIT + FATMOI_NUM_CONNECT, FORE_MAX_VCC); WRITE4(sc, FATMO_INIT + FATMOI_CQUEUE_LEN, FATM_CMD_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_TQUEUE_LEN, FATM_TX_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_RQUEUE_LEN, FATM_RX_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_RPD_EXTENSION, RPD_EXTENSIONS); WRITE4(sc, FATMO_INIT + FATMOI_TPD_EXTENSION, TPD_EXTENSIONS); /* * initialize buffer descriptors */ WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_QUEUE_LENGTH, SMALL_SUPPLY_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_BUFFER_SIZE, SMALL_BUFFER_LEN); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_POOL_SIZE, SMALL_POOL_SIZE); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B1 + FATMOB_SUPPLY_BLKSIZE, SMALL_SUPPLY_BLKSIZE); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_QUEUE_LENGTH, LARGE_SUPPLY_QLEN); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_BUFFER_SIZE, LARGE_BUFFER_LEN); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_POOL_SIZE, LARGE_POOL_SIZE); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B1 + FATMOB_SUPPLY_BLKSIZE, LARGE_SUPPLY_BLKSIZE); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_QUEUE_LENGTH, 0); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_BUFFER_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_POOL_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_SMALL_B2 + FATMOB_SUPPLY_BLKSIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_QUEUE_LENGTH, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_BUFFER_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_POOL_SIZE, 0); WRITE4(sc, FATMO_INIT + FATMOI_LARGE_B2 + FATMOB_SUPPLY_BLKSIZE, 0); /* * Start the command */ BARRIER_W(sc); WRITE4(sc, FATMO_INIT + FATMOI_STATUS, FATM_STAT_PENDING); BARRIER_W(sc); WRITE4(sc, FATMO_INIT + FATMOI_OP, FATM_OP_INITIALIZE); BARRIER_W(sc); /* * Busy wait for completion */ w = 100; while (w--) { c = READ4(sc, FATMO_INIT + FATMOI_STATUS); BARRIER_R(sc); if (c & FATM_STAT_COMPLETE) break; DELAY(1000); } if (c & FATM_STAT_ERROR) return (EIO); /* * Initialize the queues */ statp = sc->stat_mem.mem; card_stat = sc->stat_mem.paddr; /* * Command queue. This is special in that it's on the card. */ el = sc->cmdqueue.chunk; off = READ4(sc, FATMO_COMMAND_QUEUE); DBG(sc, INIT, ("cmd queue=%x", off)); for (cnt = 0; cnt < FATM_CMD_QLEN; cnt++) { el = &((struct cmdqueue *)sc->cmdqueue.chunk + cnt)->q; el->card = off; off += 32; /* size of card structure */ el->statp = (uint32_t *)statp; statp += sizeof(uint32_t); H_SETSTAT(el->statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, el->statp); WRITE4(sc, el->card + FATMOC_STATP, card_stat); card_stat += sizeof(uint32_t); } sc->cmdqueue.tail = sc->cmdqueue.head = 0; /* * Now the other queues. 
These are in memory */ init_card_queue(sc, &sc->txqueue, FATM_TX_QLEN, sizeof(struct txqueue), TPD_SIZE, READ4(sc, FATMO_TRANSMIT_QUEUE), &statp, &card_stat, sc->txq_mem.mem, sc->txq_mem.paddr); init_card_queue(sc, &sc->rxqueue, FATM_RX_QLEN, sizeof(struct rxqueue), RPD_SIZE, READ4(sc, FATMO_RECEIVE_QUEUE), &statp, &card_stat, sc->rxq_mem.mem, sc->rxq_mem.paddr); init_card_queue(sc, &sc->s1queue, SMALL_SUPPLY_QLEN, sizeof(struct supqueue), BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE), READ4(sc, FATMO_SMALL_B1_QUEUE), &statp, &card_stat, sc->s1q_mem.mem, sc->s1q_mem.paddr); init_card_queue(sc, &sc->l1queue, LARGE_SUPPLY_QLEN, sizeof(struct supqueue), BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE), READ4(sc, FATMO_LARGE_B1_QUEUE), &statp, &card_stat, sc->l1q_mem.mem, sc->l1q_mem.paddr); sc->txcnt = 0; return (0); } /* * Read PROM. Called only from attach code. Here we spin because the interrupt * handler is not yet set up. */ static int fatm_getprom(struct fatm_softc *sc) { int i; struct prom *prom; struct cmdqueue *q; DBG(sc, INIT, ("reading prom")); q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = NULL; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map, BUS_DMASYNC_PREREAD); WRITE4(sc, q->q.card + FATMOC_GPROM_BUF, sc->prom_mem.paddr); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_GET_PROM_DATA); BARRIER_W(sc); for (i = 0; i < 1000; i++) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & (FATM_STAT_COMPLETE | FATM_STAT_ERROR)) break; DELAY(1000); } if (i == 1000) { if_printf(sc->ifp, "getprom timeout\n"); return (EIO); } H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { if_printf(sc->ifp, "getprom error\n"); return (EIO); } H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN); bus_dmamap_sync(sc->prom_mem.dmat, sc->prom_mem.map, BUS_DMASYNC_POSTREAD); #ifdef notdef { u_int i; printf("PROM: "); u_char *ptr = (u_char *)sc->prom_mem.mem; for (i = 0; i < sizeof(struct prom); i++) printf("%02x ", *ptr++); printf("\n"); } #endif prom = (struct prom *)sc->prom_mem.mem; bcopy(prom->mac + 2, IFP2IFATM(sc->ifp)->mib.esi, 6); IFP2IFATM(sc->ifp)->mib.serial = le32toh(prom->serial); IFP2IFATM(sc->ifp)->mib.hw_version = le32toh(prom->version); IFP2IFATM(sc->ifp)->mib.sw_version = READ4(sc, FATMO_FIRMWARE_RELEASE); if_printf(sc->ifp, "ESI=%02x:%02x:%02x:%02x:%02x:%02x " "serial=%u hw=0x%x sw=0x%x\n", IFP2IFATM(sc->ifp)->mib.esi[0], IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2], IFP2IFATM(sc->ifp)->mib.esi[3], IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5], IFP2IFATM(sc->ifp)->mib.serial, IFP2IFATM(sc->ifp)->mib.hw_version, IFP2IFATM(sc->ifp)->mib.sw_version); return (0); } /* * This is the callback function for bus_dmamap_load. We assume, that we * have a 32-bit bus and so have always one segment. */ static void dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *ptr = (bus_addr_t *)arg; if (error != 0) { printf("%s: error=%d\n", __func__, error); return; } KASSERT(nsegs == 1, ("too many DMA segments")); KASSERT(segs[0].ds_addr <= 0xffffffff, ("DMA address too large %lx", (u_long)segs[0].ds_addr)); *ptr = segs[0].ds_addr; } /* * Allocate a chunk of DMA-able memory and map it. 
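 *
 * A minimal caller sketch with hypothetical size/alignment values (the
 * real callers live in the attach code, which is not part of this
 * hunk):
 *
 *	mem->size = 4096;	hypothetical size in bytes
 *	mem->align = 4;		hypothetical alignment
 *	if (alloc_dma_memory(sc, "stat", mem) != 0)
 *		return (ENOMEM);
 *
 * On success mem->mem holds the kernel virtual address and mem->paddr
 * the bus address of the chunk.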
*/ static int alloc_dma_memory(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem) { int error; mem->mem = NULL; if (bus_dma_tag_create(sc->parent_dmat, mem->align, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, mem->size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) { if_printf(sc->ifp, "could not allocate %s DMA tag\n", nm); return (ENOMEM); } error = bus_dmamem_alloc(mem->dmat, &mem->mem, 0, &mem->map); if (error) { if_printf(sc->ifp, "could not allocate %s DMA memory: " "%d\n", nm, error); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size, dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "could not load %s DMA memory: " "%d\n", nm, error); bus_dmamem_free(mem->dmat, mem->mem, mem->map); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } DBG(sc, DMA, ("DMA %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem, (u_long)mem->paddr, mem->size, mem->align)); return (0); } #ifdef TEST_DMA_SYNC static int alloc_dma_memoryX(struct fatm_softc *sc, const char *nm, struct fatm_mem *mem) { int error; mem->mem = NULL; if (bus_dma_tag_create(NULL, mem->align, 0, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, NULL, NULL, mem->size, 1, mem->size, BUS_DMA_ALLOCNOW, NULL, NULL, &mem->dmat)) { if_printf(sc->ifp, "could not allocate %s DMA tag\n", nm); return (ENOMEM); } mem->mem = contigmalloc(mem->size, M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR_32BIT, mem->align, 0); error = bus_dmamap_create(mem->dmat, 0, &mem->map); if (error) { if_printf(sc->ifp, "could not allocate %s DMA map: " "%d\n", nm, error); contigfree(mem->mem, mem->size, M_DEVBUF); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } error = bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size, dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "could not load %s DMA memory: " "%d\n", nm, error); bus_dmamap_destroy(mem->dmat, mem->map); contigfree(mem->mem, mem->size, M_DEVBUF); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; return (error); } DBG(sc, DMA, ("DMAX %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem, (u_long)mem->paddr, mem->size, mem->align)); printf("DMAX: %s V/P/S/Z %p/%lx/%x/%x", nm, mem->mem, (u_long)mem->paddr, mem->size, mem->align); return (0); } #endif /* TEST_DMA_SYNC */ /* * Destroy all resources of an dma-able memory chunk */ static void destroy_dma_memory(struct fatm_mem *mem) { if (mem->mem != NULL) { bus_dmamap_unload(mem->dmat, mem->map); bus_dmamem_free(mem->dmat, mem->mem, mem->map); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; } } #ifdef TEST_DMA_SYNC static void destroy_dma_memoryX(struct fatm_mem *mem) { if (mem->mem != NULL) { bus_dmamap_unload(mem->dmat, mem->map); bus_dmamap_destroy(mem->dmat, mem->map); contigfree(mem->mem, mem->size, M_DEVBUF); bus_dma_tag_destroy(mem->dmat); mem->mem = NULL; } } #endif /* TEST_DMA_SYNC */ /* * Try to supply buffers to the card if there are free entries in the queues */ static void fatm_supply_small_buffers(struct fatm_softc *sc) { int nblocks, nbufs; struct supqueue *q; struct rbd *bd; int i, j, error, cnt; struct mbuf *m; struct rbuf *rb; bus_addr_t phys; nbufs = max(4 * sc->open_vccs, 32); nbufs = min(nbufs, SMALL_POOL_SIZE); nbufs -= sc->small_cnt; - nblocks = (nbufs + SMALL_SUPPLY_BLKSIZE - 1) / SMALL_SUPPLY_BLKSIZE; + nblocks = howmany(nbufs, SMALL_SUPPLY_BLKSIZE); for (cnt = 0; cnt < nblocks; cnt++) { q = GET_QUEUE(sc->s1queue, struct supqueue, 
sc->s1queue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) break; bd = (struct rbd *)q->q.ioblk; for (i = 0; i < SMALL_SUPPLY_BLKSIZE; i++) { if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) { if_printf(sc->ifp, "out of rbufs\n"); break; } MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } M_ALIGN(m, SMALL_BUFFER_LEN); error = bus_dmamap_load(sc->rbuf_tag, rb->map, m->m_data, SMALL_BUFFER_LEN, dmaload_helper, &phys, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "dmamap_load mbuf failed %d", error); m_freem(m); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } bus_dmamap_sync(sc->rbuf_tag, rb->map, BUS_DMASYNC_PREREAD); LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_used, rb, link); rb->m = m; bd[i].handle = rb - sc->rbufs; H_SETDESC(bd[i].buffer, phys); } if (i < SMALL_SUPPLY_BLKSIZE) { for (j = 0; j < i; j++) { rb = sc->rbufs + bd[j].handle; bus_dmamap_unload(sc->rbuf_tag, rb->map); m_free(rb->m); rb->m = NULL; LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } break; } H_SYNCQ_PREWRITE(&sc->s1q_mem, bd, sizeof(struct rbd) * SMALL_SUPPLY_BLKSIZE); H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card, q->q.card_ioblk); BARRIER_W(sc); sc->small_cnt += SMALL_SUPPLY_BLKSIZE; NEXT_QUEUE_ENTRY(sc->s1queue.head, SMALL_SUPPLY_QLEN); } } /* * Try to supply buffers to the card if there are free entries in the queues * We assume that all buffers are within the address space accessible by the * card (32-bit), so we don't need bounce buffers. */ static void fatm_supply_large_buffers(struct fatm_softc *sc) { int nbufs, nblocks, cnt; struct supqueue *q; struct rbd *bd; int i, j, error; struct mbuf *m; struct rbuf *rb; bus_addr_t phys; nbufs = max(4 * sc->open_vccs, 32); nbufs = min(nbufs, LARGE_POOL_SIZE); nbufs -= sc->large_cnt; - nblocks = (nbufs + LARGE_SUPPLY_BLKSIZE - 1) / LARGE_SUPPLY_BLKSIZE; + nblocks = howmany(nbufs, LARGE_SUPPLY_BLKSIZE); for (cnt = 0; cnt < nblocks; cnt++) { q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) break; bd = (struct rbd *)q->q.ioblk; for (i = 0; i < LARGE_SUPPLY_BLKSIZE; i++) { if ((rb = LIST_FIRST(&sc->rbuf_free)) == NULL) { if_printf(sc->ifp, "out of rbufs\n"); break; } if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL) { LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } /* No MEXT_ALIGN */ m->m_data += MCLBYTES - LARGE_BUFFER_LEN; error = bus_dmamap_load(sc->rbuf_tag, rb->map, m->m_data, LARGE_BUFFER_LEN, dmaload_helper, &phys, BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "dmamap_load mbuf failed %d", error); m_freem(m); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); break; } bus_dmamap_sync(sc->rbuf_tag, rb->map, BUS_DMASYNC_PREREAD); LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_used, rb, link); rb->m = m; bd[i].handle = rb - sc->rbufs; H_SETDESC(bd[i].buffer, phys); } if (i < LARGE_SUPPLY_BLKSIZE) { for (j = 0; j < i; j++) { rb = sc->rbufs + bd[j].handle; bus_dmamap_unload(sc->rbuf_tag, rb->map); m_free(rb->m); rb->m = NULL; LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } break; } H_SYNCQ_PREWRITE(&sc->l1q_mem, bd, sizeof(struct rbd) * LARGE_SUPPLY_BLKSIZE); H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card, q->q.card_ioblk); BARRIER_W(sc); sc->large_cnt += LARGE_SUPPLY_BLKSIZE; NEXT_QUEUE_ENTRY(sc->l1queue.head, 
LARGE_SUPPLY_QLEN); } } /* * Actually start the card. The lock must be held here. * Reset, load the firmware, start it, initializes queues, read the PROM * and supply receive buffers to the card. */ static void fatm_init_locked(struct fatm_softc *sc) { struct rxqueue *q; int i, c, error; uint32_t start; DBG(sc, INIT, ("initialize")); if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) fatm_stop(sc); /* * Hard reset the board */ if (fatm_reset(sc)) return; start = firmware_load(sc); if (fatm_start_firmware(sc, start) || fatm_init_cmd(sc) || fatm_getprom(sc)) { fatm_reset(sc); return; } /* * Handle media */ c = READ4(sc, FATMO_MEDIA_TYPE); switch (c) { case FORE_MT_TAXI_100: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_100; IFP2IFATM(sc->ifp)->mib.pcr = 227273; break; case FORE_MT_TAXI_140: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_TAXI_140; IFP2IFATM(sc->ifp)->mib.pcr = 318181; break; case FORE_MT_UTP_SONET: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; case FORE_MT_MM_OC3_ST: case FORE_MT_MM_OC3_SC: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; case FORE_MT_SM_OC3_ST: case FORE_MT_SM_OC3_SC: IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; default: log(LOG_ERR, "fatm: unknown media type %d\n", c); IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN; IFP2IFATM(sc->ifp)->mib.pcr = 353207; break; } sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr; utopia_init_media(&sc->utopia); /* * Initialize the RBDs */ for (i = 0; i < FATM_RX_QLEN; i++) { q = GET_QUEUE(sc->rxqueue, struct rxqueue, i); WRITE4(sc, q->q.card + 0, q->q.card_ioblk); } BARRIER_W(sc); /* * Supply buffers to the card */ fatm_supply_small_buffers(sc); fatm_supply_large_buffers(sc); /* * Now set flags, that we are ready */ sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; /* * Start the watchdog timer */ callout_reset(&sc->watchdog_timer, hz * 5, fatm_watchdog, sc); /* start SUNI */ utopia_start(&sc->utopia); ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp), sc->utopia.carrier == UTP_CARR_OK); /* start all channels */ for (i = 0; i < FORE_MAX_VCC + 1; i++) if (sc->vccs[i] != NULL) { sc->vccs[i]->vflags |= FATM_VCC_REOPEN; error = fatm_load_vc(sc, sc->vccs[i]); if (error != 0) { if_printf(sc->ifp, "reopening %u " "failed: %d\n", i, error); sc->vccs[i]->vflags &= ~FATM_VCC_REOPEN; } } DBG(sc, INIT, ("done")); } /* * This is the exported as initialisation function. */ static void fatm_init(void *p) { struct fatm_softc *sc = p; FATM_LOCK(sc); fatm_init_locked(sc); FATM_UNLOCK(sc); } /************************************************************/ /* * The INTERRUPT handling */ /* * Check the command queue. If a command was completed, call the completion * function for that command. */ static void fatm_intr_drain_cmd(struct fatm_softc *sc) { struct cmdqueue *q; int stat; /* * Drain command queue */ for (;;) { q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if (stat != FATM_STAT_COMPLETE && stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) && stat != FATM_STAT_ERROR) break; (*q->cb)(sc, q); H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->cmdqueue.tail, FATM_CMD_QLEN); } } /* * Drain the small buffer supply queue. 
*/ static void fatm_intr_drain_small_buffers(struct fatm_softc *sc) { struct supqueue *q; int stat; for (;;) { q = GET_QUEUE(sc->s1queue, struct supqueue, sc->s1queue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if ((stat & FATM_STAT_COMPLETE) == 0) break; if (stat & FATM_STAT_ERROR) log(LOG_ERR, "%s: status %x\n", __func__, stat); H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->s1queue.tail, SMALL_SUPPLY_QLEN); } } /* * Drain the large buffer supply queue. */ static void fatm_intr_drain_large_buffers(struct fatm_softc *sc) { struct supqueue *q; int stat; for (;;) { q = GET_QUEUE(sc->l1queue, struct supqueue, sc->l1queue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if ((stat & FATM_STAT_COMPLETE) == 0) break; if (stat & FATM_STAT_ERROR) log(LOG_ERR, "%s status %x\n", __func__, stat); H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); NEXT_QUEUE_ENTRY(sc->l1queue.tail, LARGE_SUPPLY_QLEN); } } /* * Check the receive queue. Send any received PDU up the protocol stack * (except when there was an error or the VCI appears to be closed. In this * case discard the PDU). */ static void fatm_intr_drain_rx(struct fatm_softc *sc) { struct rxqueue *q; int stat, mlen; u_int i; uint32_t h; struct mbuf *last, *m0; struct rpd *rpd; struct rbuf *rb; u_int vci, vpi, pt; struct atm_pseudohdr aph; struct ifnet *ifp; struct card_vcc *vc; for (;;) { q = GET_QUEUE(sc->rxqueue, struct rxqueue, sc->rxqueue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if ((stat & FATM_STAT_COMPLETE) == 0) break; rpd = (struct rpd *)q->q.ioblk; H_SYNCQ_POSTREAD(&sc->rxq_mem, rpd, RPD_SIZE); rpd->nseg = le32toh(rpd->nseg); mlen = 0; m0 = last = 0; for (i = 0; i < rpd->nseg; i++) { rb = sc->rbufs + rpd->segment[i].handle; if (m0 == NULL) { m0 = last = rb->m; } else { last->m_next = rb->m; last = rb->m; } last->m_next = NULL; if (last->m_flags & M_EXT) sc->large_cnt--; else sc->small_cnt--; bus_dmamap_sync(sc->rbuf_tag, rb->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rbuf_tag, rb->map); rb->m = NULL; LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); last->m_len = le32toh(rpd->segment[i].length); mlen += last->m_len; } m0->m_pkthdr.len = mlen; m0->m_pkthdr.rcvif = sc->ifp; h = le32toh(rpd->atm_header); vpi = (h >> 20) & 0xff; vci = (h >> 4 ) & 0xffff; pt = (h >> 1 ) & 0x7; /* * Locate the VCC this packet belongs to */ if (!VC_OK(sc, vpi, vci)) vc = NULL; else if ((vc = sc->vccs[vci]) == NULL || !(sc->vccs[vci]->vflags & FATM_VCC_OPEN)) { sc->istats.rx_closed++; vc = NULL; } DBG(sc, RCV, ("RCV: vc=%u.%u pt=%u mlen=%d %s", vpi, vci, pt, mlen, vc == NULL ? "dropped" : "")); if (vc == NULL) { m_freem(m0); } else { #ifdef ENABLE_BPF if (!(vc->param.flags & ATMIO_FLAG_NG) && vc->param.aal == ATMIO_AAL_5 && (vc->param.flags & ATM_PH_LLCSNAP)) BPF_MTAP(sc->ifp, m0); #endif ATM_PH_FLAGS(&aph) = vc->param.flags; ATM_PH_VPI(&aph) = vpi; ATM_PH_SETVCI(&aph, vci); ifp = sc->ifp; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); vc->ipackets++; vc->ibytes += m0->m_pkthdr.len; atm_input(ifp, &aph, m0, vc->rxhand); } H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card, q->q.card_ioblk); BARRIER_W(sc); NEXT_QUEUE_ENTRY(sc->rxqueue.tail, FATM_RX_QLEN); } } /* * Check the transmit queue. Free the mbuf chains that we were transmitting. 
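 *
 * An entry counts as finished once its status word shows
 * FATM_STAT_COMPLETE, FATM_STAT_ERROR or both; the loop below stops at
 * the first entry that is still FATM_STAT_FREE or FATM_STAT_PENDING.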
*/ static void fatm_intr_drain_tx(struct fatm_softc *sc) { struct txqueue *q; int stat; /* * Drain tx queue */ for (;;) { q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.tail); H_SYNCSTAT_POSTREAD(sc, q->q.statp); stat = H_GETSTAT(q->q.statp); if (stat != FATM_STAT_COMPLETE && stat != (FATM_STAT_COMPLETE | FATM_STAT_ERROR) && stat != FATM_STAT_ERROR) break; H_SETSTAT(q->q.statp, FATM_STAT_FREE); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx_tag, q->map); m_freem(q->m); q->m = NULL; sc->txcnt--; NEXT_QUEUE_ENTRY(sc->txqueue.tail, FATM_TX_QLEN); } } /* * Interrupt handler */ static void fatm_intr(void *p) { struct fatm_softc *sc = (struct fatm_softc *)p; FATM_LOCK(sc); if (!READ4(sc, FATMO_PSR)) { FATM_UNLOCK(sc); return; } WRITE4(sc, FATMO_HCR, FATM_HCR_CLRIRQ); if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) { FATM_UNLOCK(sc); return; } fatm_intr_drain_cmd(sc); fatm_intr_drain_rx(sc); fatm_intr_drain_tx(sc); fatm_intr_drain_small_buffers(sc); fatm_intr_drain_large_buffers(sc); fatm_supply_small_buffers(sc); fatm_supply_large_buffers(sc); FATM_UNLOCK(sc); if (sc->retry_tx && _IF_QLEN(&sc->ifp->if_snd)) (*sc->ifp->if_start)(sc->ifp); } /* * Get device statistics. This must be called with the softc locked. * We use a preallocated buffer, so we need to protect this buffer. * We do this by using a condition variable and a flag. If the flag is set * the buffer is in use by one thread (one thread is executing a GETSTAT * card command). In this case all other threads that are trying to get * statistics block on that condition variable. When the thread finishes * using the buffer it resets the flag and signals the condition variable. This * will wake up the next thread that is waiting for the buffer. If the interface * is stopped the stopping function will broadcast the cv. All threads will * find that the interface has been stopped and return. * * Acquiring of the buffer is done by the fatm_getstat() function. The freeing * must be done by the caller when it has finished using the buffer.
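 *
 * A sketch of the success path only, mirroring the release pattern
 * used for the register buffer above:
 *
 *	error = fatm_getstat(sc);
 *	if (error == 0) {
 *		consume sc->sadi_mem.mem here, then release:
 *		sc->flags &= ~FATM_STAT_INUSE;
 *		cv_signal(&sc->cv_stat);
 *	}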
*/ static void fatm_getstat_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; q->error = EIO; } wakeup(&sc->sadi_mem); } static int fatm_getstat(struct fatm_softc *sc) { int error; struct cmdqueue *q; /* * Wait until either the interface is stopped or we can get the * statistics buffer */ for (;;) { if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) return (EIO); if (!(sc->flags & FATM_STAT_INUSE)) break; cv_wait(&sc->cv_stat, &sc->mtx); } sc->flags |= FATM_STAT_INUSE; q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; /* release the buffer again so other threads don't block forever */ sc->flags &= ~FATM_STAT_INUSE; cv_signal(&sc->cv_stat); return (EIO); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = fatm_getstat_complete; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map, BUS_DMASYNC_PREREAD); WRITE4(sc, q->q.card + FATMOC_GSTAT_BUF, sc->sadi_mem.paddr); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, FATM_OP_REQUEST_STATS | FATM_OP_INTERRUPT_SEL); BARRIER_W(sc); /* * Wait for the command to complete */ error = msleep(&sc->sadi_mem, &sc->mtx, PZERO | PCATCH, "fatm_stat", hz); switch (error) { case EWOULDBLOCK: error = EIO; break; case ERESTART: error = EINTR; break; case 0: bus_dmamap_sync(sc->sadi_mem.dmat, sc->sadi_mem.map, BUS_DMASYNC_POSTREAD); error = q->error; break; } /* * Swap statistics */ if (q->error == 0) { u_int i; uint32_t *p = (uint32_t *)sc->sadi_mem.mem; for (i = 0; i < sizeof(struct fatm_stats) / sizeof(uint32_t); i++, p++) *p = be32toh(*p); } return (error); } /* * Create a copy of a single mbuf. It can have either internal or * external data, it may have a packet header. External data is really * copied, so the new buffer is writeable. */ static struct mbuf * copy_mbuf(struct mbuf *m) { struct mbuf *new; MGET(new, M_NOWAIT, MT_DATA); if (new == NULL) return (NULL); if (m->m_flags & M_PKTHDR) { M_MOVE_PKTHDR(new, m); if (m->m_len > MHLEN) MCLGET(new, M_WAITOK); } else { if (m->m_len > MLEN) MCLGET(new, M_WAITOK); } bcopy(m->m_data, new->m_data, m->m_len); new->m_len = m->m_len; new->m_flags &= ~M_RDONLY; return (new); } /* * All segments must have a four byte aligned buffer address and a four * byte aligned length. Step through an mbuf chain and check these conditions. * If the buffer address is not aligned and this is a normal mbuf, move * the data down. Else make a copy of the mbuf with aligned data. * If the buffer length is not aligned, steal data from the next mbuf. * We don't need to check whether this has more than one external reference, * because stealing data doesn't change the external cluster. * If the last mbuf is not aligned, fill with zeroes. * * Return packet length (well we should have this in the packet header), * but be careful not to count the zero fill at the end. * * If fixing fails, free the chain and zero the pointer. * * We assume that aligning the virtual address also aligns the mapped bus * address.
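 *
 * A worked example on a hypothetical chain: segment lengths 10 and 6
 * with aligned buffers leave the first mbuf with 10 % 4 == 2 odd
 * bytes, so two bytes are stolen from the second mbuf, giving lengths
 * 12 and 4; had the unaligned mbuf been the last of the chain it would
 * instead have been padded with zeroes, which are not counted in the
 * returned length.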
*/ static u_int fatm_fix_chain(struct fatm_softc *sc, struct mbuf **mp) { struct mbuf *m = *mp, *prev = NULL, *next, *new; u_int mlen = 0, fill = 0; int first, off; u_char *d, *cp; do { next = m->m_next; if ((uintptr_t)mtod(m, void *) % 4 != 0 || (m->m_len % 4 != 0 && next)) { /* * Needs fixing */ first = (m == *mp); d = mtod(m, u_char *); if ((off = (uintptr_t)(void *)d % 4) != 0) { if (M_WRITABLE(m)) { sc->istats.fix_addr_copy++; bcopy(d, d - off, m->m_len); m->m_data = (caddr_t)(d - off); } else { if ((new = copy_mbuf(m)) == NULL) { sc->istats.fix_addr_noext++; goto fail; } sc->istats.fix_addr_ext++; if (prev) prev->m_next = new; new->m_next = next; m_free(m); m = new; } } if ((off = m->m_len % 4) != 0) { if (!M_WRITABLE(m)) { if ((new = copy_mbuf(m)) == NULL) { sc->istats.fix_len_noext++; goto fail; } sc->istats.fix_len_copy++; if (prev) prev->m_next = new; new->m_next = next; m_free(m); m = new; } else sc->istats.fix_len++; d = mtod(m, u_char *) + m->m_len; off = 4 - off; while (off) { if (next == NULL) { *d++ = 0; fill++; } else if (next->m_len == 0) { sc->istats.fix_empty++; next = m_free(next); continue; } else { cp = mtod(next, u_char *); *d++ = *cp++; next->m_len--; next->m_data = (caddr_t)cp; } off--; m->m_len++; } } if (first) *mp = m; } mlen += m->m_len; prev = m; } while ((m = next) != NULL); return (mlen - fill); fail: m_freem(*mp); *mp = NULL; return (0); } /* * The helper function is used to load the computed physical addresses * into the transmit descriptor. */ static void fatm_tpd_load(void *varg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, int error) { struct tpd *tpd = varg; if (error) return; KASSERT(nsegs <= TPD_EXTENSIONS + TXD_FIXED, ("too many segments")); tpd->spec = 0; while (nsegs--) { H_SETDESC(tpd->segment[tpd->spec].buffer, segs->ds_addr); H_SETDESC(tpd->segment[tpd->spec].length, segs->ds_len); tpd->spec++; segs++; } } /* * Start output. * * Note, that we update the internal statistics without the lock here. */ static int fatm_tx(struct fatm_softc *sc, struct mbuf *m, struct card_vcc *vc, u_int mlen) { struct txqueue *q; u_int nblks; int error, aal, nsegs; struct tpd *tpd; /* * Get a queue element. * If there isn't one - try to drain the transmit queue * We used to sleep here if that doesn't help, but we * should not sleep here, because we are called with locks. */ q = GET_QUEUE(sc->txqueue, struct txqueue, sc->txqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) { fatm_intr_drain_tx(sc); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) != FATM_STAT_FREE) { if (sc->retry_tx) { sc->istats.tx_retry++; IF_PREPEND(&sc->ifp->if_snd, m); return (1); } sc->istats.tx_queue_full++; m_freem(m); return (0); } sc->istats.tx_queue_almost_full++; } tpd = q->q.ioblk; m->m_data += sizeof(struct atm_pseudohdr); m->m_len -= sizeof(struct atm_pseudohdr); #ifdef ENABLE_BPF if (!(vc->param.flags & ATMIO_FLAG_NG) && vc->param.aal == ATMIO_AAL_5 && (vc->param.flags & ATM_PH_LLCSNAP)) BPF_MTAP(sc->ifp, m); #endif /* map the mbuf */ error = bus_dmamap_load_mbuf(sc->tx_tag, q->map, m, fatm_tpd_load, tpd, BUS_DMA_NOWAIT); if(error) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); if_printf(sc->ifp, "mbuf loaded error=%d\n", error); m_freem(m); return (0); } nsegs = tpd->spec; bus_dmamap_sync(sc->tx_tag, q->map, BUS_DMASYNC_PREWRITE); /* * OK. Now go and do it. */ aal = (vc->param.aal == ATMIO_AAL_5) ? 
5 : 0; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); q->m = m; /* * If the transmit queue is almost full, schedule a * transmit interrupt so that transmit descriptors can * be recycled. */ H_SETDESC(tpd->spec, TDX_MKSPEC((sc->txcnt >= (4 * FATM_TX_QLEN) / 5), aal, nsegs, mlen)); H_SETDESC(tpd->atm_header, TDX_MKHDR(vc->param.vpi, vc->param.vci, 0, 0)); if (vc->param.traffic == ATMIO_TRAFFIC_UBR) H_SETDESC(tpd->stream, 0); else { u_int i; for (i = 0; i < RATE_TABLE_SIZE; i++) if (rate_table[i].cell_rate < vc->param.tparam.pcr) break; if (i > 0) i--; H_SETDESC(tpd->stream, rate_table[i].ratio); } H_SYNCQ_PREWRITE(&sc->txq_mem, tpd, TPD_SIZE); nblks = TDX_SEGS2BLKS(nsegs); DBG(sc, XMIT, ("XMIT: mlen=%d spec=0x%x nsegs=%d blocks=%d", mlen, le32toh(tpd->spec), nsegs, nblks)); WRITE4(sc, q->q.card + 0, q->q.card_ioblk | nblks); BARRIER_W(sc); sc->txcnt++; if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); vc->obytes += m->m_pkthdr.len; vc->opackets++; NEXT_QUEUE_ENTRY(sc->txqueue.head, FATM_TX_QLEN); return (0); } static void fatm_start(struct ifnet *ifp) { struct atm_pseudohdr aph; struct fatm_softc *sc; struct mbuf *m; u_int mlen, vpi, vci; struct card_vcc *vc; sc = ifp->if_softc; while (1) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; /* * Loop through the mbuf chain and compute the total length * of the packet. Check that all data pointers are * 4-byte aligned. If they are not, call fatm_fix_chain to * fix that problem. This comes more or less from the * en driver. */ mlen = fatm_fix_chain(sc, &m); if (m == NULL) continue; if (m->m_len < sizeof(struct atm_pseudohdr) && (m = m_pullup(m, sizeof(struct atm_pseudohdr))) == NULL) continue; aph = *mtod(m, struct atm_pseudohdr *); mlen -= sizeof(struct atm_pseudohdr); if (mlen == 0) { m_freem(m); continue; } if (mlen > FATM_MAXPDU) { sc->istats.tx_pdu2big++; m_freem(m); continue; } vci = ATM_PH_VCI(&aph); vpi = ATM_PH_VPI(&aph); /* * From here on we need the softc */ FATM_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { FATM_UNLOCK(sc); m_freem(m); break; } if (!VC_OK(sc, vpi, vci) || (vc = sc->vccs[vci]) == NULL || !(vc->vflags & FATM_VCC_OPEN)) { FATM_UNLOCK(sc); m_freem(m); continue; } if (fatm_tx(sc, m, vc, mlen)) { FATM_UNLOCK(sc); break; } FATM_UNLOCK(sc); } } /* * VCC management * * This may seem complicated. The reason for this is that we need an * asynchronous open/close for the NATM VCCs because our ioctl handler * is called with the radix node head of the routing table locked. Therefore * we cannot sleep there and wait for the open/close to succeed. For this * reason we just initiate the operation from the ioctl. */ /* * Command the card to open/close a VC. * Return the queue entry for waiting if we are successful.
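 *
 * Synchronous callers pair this with fatm_waitvcc(), as fatm_load_vc()
 * below does; sketched:
 *
 *	q = fatm_start_vcc(sc, vpi, vci, cmd, 1, fatm_cmd_complete);
 *	if (q == NULL)
 *		return (EIO);
 *	error = fatm_waitvcc(sc, q);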
*/ static struct cmdqueue * fatm_start_vcc(struct fatm_softc *sc, u_int vpi, u_int vci, uint32_t cmd, u_int mtu, void (*func)(struct fatm_softc *, struct cmdqueue *)) { struct cmdqueue *q; q = GET_QUEUE(sc->cmdqueue, struct cmdqueue, sc->cmdqueue.head); H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (!(H_GETSTAT(q->q.statp) & FATM_STAT_FREE)) { sc->istats.cmd_queue_full++; return (NULL); } NEXT_QUEUE_ENTRY(sc->cmdqueue.head, FATM_CMD_QLEN); q->error = 0; q->cb = func; H_SETSTAT(q->q.statp, FATM_STAT_PENDING); H_SYNCSTAT_PREWRITE(sc, q->q.statp); WRITE4(sc, q->q.card + FATMOC_ACTIN_VPVC, MKVPVC(vpi, vci)); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_ACTIN_MTU, mtu); BARRIER_W(sc); WRITE4(sc, q->q.card + FATMOC_OP, cmd); BARRIER_W(sc); return (q); } /* * The VC has been opened/closed and somebody has been waiting for this. * Wake him up. */ static void fatm_cmd_complete(struct fatm_softc *sc, struct cmdqueue *q) { H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; q->error = EIO; } wakeup(q); } /* * Open complete */ static void fatm_open_finish(struct fatm_softc *sc, struct card_vcc *vc) { vc->vflags &= ~FATM_VCC_TRY_OPEN; vc->vflags |= FATM_VCC_OPEN; if (vc->vflags & FATM_VCC_REOPEN) { vc->vflags &= ~FATM_VCC_REOPEN; return; } /* inform management if this is not an NG * VCC or it's an NG PVC. */ if (!(vc->param.flags & ATMIO_FLAG_NG) || (vc->param.flags & ATMIO_FLAG_PVC)) ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 1); } /* * The VC that we have tried to open asynchronuosly has been opened. */ static void fatm_open_complete(struct fatm_softc *sc, struct cmdqueue *q) { u_int vci; struct card_vcc *vc; vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC)); vc = sc->vccs[vci]; H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; sc->vccs[vci] = NULL; uma_zfree(sc->vcc_zone, vc); if_printf(sc->ifp, "opening VCI %u failed\n", vci); return; } fatm_open_finish(sc, vc); } /* * Wait on the queue entry until the VCC is opened/closed. */ static int fatm_waitvcc(struct fatm_softc *sc, struct cmdqueue *q) { int error; /* * Wait for the command to complete */ error = msleep(q, &sc->mtx, PZERO | PCATCH, "fatm_vci", hz); if (error != 0) return (error); return (q->error); } /* * Start to open a VCC. This just initiates the operation. 
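 *
 * A hypothetical request that would reach this function (field names
 * from struct atmio_openvcc as used below; the values are made up):
 *
 *	struct atmio_openvcc op;
 *
 *	op.param.vpi = 0;
 *	op.param.vci = 42;
 *	op.param.aal = ATMIO_AAL_5;
 *	op.param.traffic = ATMIO_TRAFFIC_UBR;
 *
 * The checks below reject VPI/VCI pairs outside the card's configured
 * bit ranges, AALs other than 0 and 5, and flag combinations that
 * allow neither transmit nor receive.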
*/ static int fatm_open_vcc(struct fatm_softc *sc, struct atmio_openvcc *op) { int error; struct card_vcc *vc; /* * Check parameters */ if ((op->param.flags & ATMIO_FLAG_NOTX) && (op->param.flags & ATMIO_FLAG_NORX)) return (EINVAL); if (!VC_OK(sc, op->param.vpi, op->param.vci)) return (EINVAL); if (op->param.aal != ATMIO_AAL_0 && op->param.aal != ATMIO_AAL_5) return (EINVAL); vc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO); if (vc == NULL) return (ENOMEM); error = 0; FATM_LOCK(sc); if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) { error = EIO; goto done; } if (sc->vccs[op->param.vci] != NULL) { error = EBUSY; goto done; } vc->param = op->param; vc->rxhand = op->rxhand; switch (op->param.traffic) { case ATMIO_TRAFFIC_UBR: break; case ATMIO_TRAFFIC_CBR: if (op->param.tparam.pcr == 0 || op->param.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) { error = EINVAL; goto done; } break; default: error = EINVAL; goto done; } vc->ibytes = vc->obytes = 0; vc->ipackets = vc->opackets = 0; vc->vflags = FATM_VCC_TRY_OPEN; sc->vccs[op->param.vci] = vc; sc->open_vccs++; error = fatm_load_vc(sc, vc); if (error != 0) { sc->vccs[op->param.vci] = NULL; sc->open_vccs--; goto done; } /* don't free below */ vc = NULL; done: FATM_UNLOCK(sc); if (vc != NULL) uma_zfree(sc->vcc_zone, vc); return (error); } /* * Try to initialize the given VC */ static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc) { uint32_t cmd; struct cmdqueue *q; int error; /* Command and buffer strategy */ cmd = FATM_OP_ACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL | (0 << 16); if (vc->param.aal == ATMIO_AAL_0) cmd |= (0 << 8); else cmd |= (5 << 8); q = fatm_start_vcc(sc, vc->param.vpi, vc->param.vci, cmd, 1, (vc->param.flags & ATMIO_FLAG_ASYNC) ? fatm_open_complete : fatm_cmd_complete); if (q == NULL) return (EIO); if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) { error = fatm_waitvcc(sc, q); if (error != 0) return (error); fatm_open_finish(sc, vc); } return (0); } /* * Finish close */ static void fatm_close_finish(struct fatm_softc *sc, struct card_vcc *vc) { /* inform management if this is not an NG * VCC or it's an NG PVC. */ if (!(vc->param.flags & ATMIO_FLAG_NG) || (vc->param.flags & ATMIO_FLAG_PVC)) ATMEV_SEND_VCC_CHANGED(IFP2IFATM(sc->ifp), 0, vc->param.vci, 0); sc->vccs[vc->param.vci] = NULL; sc->open_vccs--; uma_zfree(sc->vcc_zone, vc); } /* * The VC has been closed. */ static void fatm_close_complete(struct fatm_softc *sc, struct cmdqueue *q) { u_int vci; struct card_vcc *vc; vci = GETVCI(READ4(sc, q->q.card + FATMOC_ACTIN_VPVC)); vc = sc->vccs[vci]; H_SYNCSTAT_POSTREAD(sc, q->q.statp); if (H_GETSTAT(q->q.statp) & FATM_STAT_ERROR) { sc->istats.get_stat_errors++; /* keep the VCC in that state */ if_printf(sc->ifp, "closing VCI %u failed\n", vci); return; } fatm_close_finish(sc, vc); } /* * Initiate closing a VCC */ static int fatm_close_vcc(struct fatm_softc *sc, struct atmio_closevcc *cl) { int error; struct cmdqueue *q; struct card_vcc *vc; if (!VC_OK(sc, cl->vpi, cl->vci)) return (EINVAL); error = 0; FATM_LOCK(sc); if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) { error = EIO; goto done; } vc = sc->vccs[cl->vci]; if (vc == NULL || !(vc->vflags & (FATM_VCC_OPEN | FATM_VCC_TRY_OPEN))) { error = ENOENT; goto done; } q = fatm_start_vcc(sc, cl->vpi, cl->vci, FATM_OP_DEACTIVATE_VCIN | FATM_OP_INTERRUPT_SEL, 1, (vc->param.flags & ATMIO_FLAG_ASYNC) ?
fatm_close_complete : fatm_cmd_complete); if (q == NULL) { error = EIO; goto done; } vc->vflags &= ~(FATM_VCC_OPEN | FATM_VCC_TRY_OPEN); vc->vflags |= FATM_VCC_TRY_CLOSE; if (!(vc->param.flags & ATMIO_FLAG_ASYNC)) { error = fatm_waitvcc(sc, q); if (error != 0) goto done; fatm_close_finish(sc, vc); } done: FATM_UNLOCK(sc); return (error); } /* * IOCTL handler */ static int fatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t arg) { int error; struct fatm_softc *sc = ifp->if_softc; struct ifaddr *ifa = (struct ifaddr *)arg; struct ifreq *ifr = (struct ifreq *)arg; struct atmio_closevcc *cl = (struct atmio_closevcc *)arg; struct atmio_openvcc *op = (struct atmio_openvcc *)arg; struct atmio_vcctable *vtab; error = 0; switch (cmd) { case SIOCATMOPENVCC: /* kernel internal use */ error = fatm_open_vcc(sc, op); break; case SIOCATMCLOSEVCC: /* kernel internal use */ error = fatm_close_vcc(sc, cl); break; case SIOCSIFADDR: FATM_LOCK(sc); ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) fatm_init_locked(sc); switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: case AF_INET6: ifa->ifa_rtrequest = atm_rtrequest; break; #endif default: break; } FATM_UNLOCK(sc); break; case SIOCSIFFLAGS: FATM_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { fatm_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { fatm_stop(sc); } } FATM_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (ifp->if_drv_flags & IFF_DRV_RUNNING) error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); else error = EINVAL; break; case SIOCATMGVCCS: /* return vcc table */ vtab = atm_getvccs((struct atmio_vcc **)sc->vccs, FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 1); error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) + vtab->count * sizeof(vtab->vccs[0])); free(vtab, M_DEVBUF); break; case SIOCATMGETVCCS: /* internal netgraph use */ vtab = atm_getvccs((struct atmio_vcc **)sc->vccs, FORE_MAX_VCC + 1, sc->open_vccs, &sc->mtx, 0); if (vtab == NULL) { error = ENOMEM; break; } *(void **)arg = vtab; break; default: DBG(sc, IOCTL, ("+++ cmd=%08lx arg=%p", cmd, arg)); error = EINVAL; break; } return (error); } /* * Detach from the interface and free all resources allocated during * initialisation and later. 
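 *
 * The teardown below runs roughly in the reverse order of fatm_attach():
 * stop the card, drain the watchdog, tear down the interrupt, destroy
 * the DMA maps, free the queue and buffer memory, destroy the DMA tags,
 * release the bus resources and finally the locks. A condensed sketch
 * (the real code below also copes with partially failed attaches where
 * some of these pointers are still NULL):
 *
 *	fatm_stop(sc);
 *	callout_drain(&sc->watchdog_timer);
 *	bus_teardown_intr(dev, sc->irqres, sc->ih);
 *	... unload/destroy maps, free queue chunks and rbufs ...
 *	bus_dma_tag_destroy(sc->tx_tag);
 *	bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres);
 *	mtx_destroy(&sc->mtx);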
*/ static int fatm_detach(device_t dev) { u_int i; struct rbuf *rb; struct fatm_softc *sc; struct txqueue *tx; sc = device_get_softc(dev); if (device_is_alive(dev)) { FATM_LOCK(sc); fatm_stop(sc); utopia_detach(&sc->utopia); FATM_UNLOCK(sc); atm_ifdetach(sc->ifp); /* XXX race */ } callout_drain(&sc->watchdog_timer); if (sc->ih != NULL) bus_teardown_intr(dev, sc->irqres, sc->ih); while ((rb = LIST_FIRST(&sc->rbuf_used)) != NULL) { if_printf(sc->ifp, "rbuf %p still in use!\n", rb); bus_dmamap_unload(sc->rbuf_tag, rb->map); m_freem(rb->m); LIST_REMOVE(rb, link); LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } if (sc->txqueue.chunk != NULL) { for (i = 0; i < FATM_TX_QLEN; i++) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i); bus_dmamap_destroy(sc->tx_tag, tx->map); } } while ((rb = LIST_FIRST(&sc->rbuf_free)) != NULL) { bus_dmamap_destroy(sc->rbuf_tag, rb->map); LIST_REMOVE(rb, link); } if (sc->rbufs != NULL) free(sc->rbufs, M_DEVBUF); if (sc->vccs != NULL) { for (i = 0; i < FORE_MAX_VCC + 1; i++) if (sc->vccs[i] != NULL) { uma_zfree(sc->vcc_zone, sc->vccs[i]); sc->vccs[i] = NULL; } free(sc->vccs, M_DEVBUF); } if (sc->vcc_zone != NULL) uma_zdestroy(sc->vcc_zone); if (sc->l1queue.chunk != NULL) free(sc->l1queue.chunk, M_DEVBUF); if (sc->s1queue.chunk != NULL) free(sc->s1queue.chunk, M_DEVBUF); if (sc->rxqueue.chunk != NULL) free(sc->rxqueue.chunk, M_DEVBUF); if (sc->txqueue.chunk != NULL) free(sc->txqueue.chunk, M_DEVBUF); if (sc->cmdqueue.chunk != NULL) free(sc->cmdqueue.chunk, M_DEVBUF); destroy_dma_memory(&sc->reg_mem); destroy_dma_memory(&sc->sadi_mem); destroy_dma_memory(&sc->prom_mem); #ifdef TEST_DMA_SYNC destroy_dma_memoryX(&sc->s1q_mem); destroy_dma_memoryX(&sc->l1q_mem); destroy_dma_memoryX(&sc->rxq_mem); destroy_dma_memoryX(&sc->txq_mem); destroy_dma_memoryX(&sc->stat_mem); #endif if (sc->tx_tag != NULL) if (bus_dma_tag_destroy(sc->tx_tag)) printf("tx DMA tag busy!\n"); if (sc->rbuf_tag != NULL) if (bus_dma_tag_destroy(sc->rbuf_tag)) printf("rbuf DMA tag busy!\n"); if (sc->parent_dmat != NULL) if (bus_dma_tag_destroy(sc->parent_dmat)) printf("parent DMA tag busy!\n"); if (sc->irqres != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irqres); if (sc->memres != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->memid, sc->memres); (void)sysctl_ctx_free(&sc->sysctl_ctx); cv_destroy(&sc->cv_stat); cv_destroy(&sc->cv_regs); mtx_destroy(&sc->mtx); if_free(sc->ifp); return (0); } /* * Sysctl handler */ static int fatm_sysctl_istats(SYSCTL_HANDLER_ARGS) { struct fatm_softc *sc = arg1; u_long *ret; int error; ret = malloc(sizeof(sc->istats), M_TEMP, M_WAITOK); FATM_LOCK(sc); bcopy(&sc->istats, ret, sizeof(sc->istats)); FATM_UNLOCK(sc); error = SYSCTL_OUT(req, ret, sizeof(sc->istats)); free(ret, M_TEMP); return (error); } /* * Sysctl handler for card statistics * This is disabled because it destroys the PHY statistics.
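 *
 * For reference, a userland consumer would read the exported array
 * roughly like this (a sketch only; "fatm0" is an example unit name,
 * and the handler below emits FATM_NSTATS u_longs in the order they
 * are filled in):
 *
 *	u_long stats[FATM_NSTATS];
 *	size_t len = sizeof(stats);
 *
 *	if (sysctlbyname("hw.atm.fatm0.stats", stats, &len, NULL, 0) == -1)
 *		err(1, "sysctlbyname");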
*/ static int fatm_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct fatm_softc *sc = arg1; int error; const struct fatm_stats *s; u_long *ret; u_int i; ret = malloc(sizeof(u_long) * FATM_NSTATS, M_TEMP, M_WAITOK); FATM_LOCK(sc); if ((error = fatm_getstat(sc)) == 0) { s = sc->sadi_mem.mem; i = 0; ret[i++] = s->phy_4b5b.crc_header_errors; ret[i++] = s->phy_4b5b.framing_errors; ret[i++] = s->phy_oc3.section_bip8_errors; ret[i++] = s->phy_oc3.path_bip8_errors; ret[i++] = s->phy_oc3.line_bip24_errors; ret[i++] = s->phy_oc3.line_febe_errors; ret[i++] = s->phy_oc3.path_febe_errors; ret[i++] = s->phy_oc3.corr_hcs_errors; ret[i++] = s->phy_oc3.ucorr_hcs_errors; ret[i++] = s->atm.cells_transmitted; ret[i++] = s->atm.cells_received; ret[i++] = s->atm.vpi_bad_range; ret[i++] = s->atm.vpi_no_conn; ret[i++] = s->atm.vci_bad_range; ret[i++] = s->atm.vci_no_conn; ret[i++] = s->aal0.cells_transmitted; ret[i++] = s->aal0.cells_received; ret[i++] = s->aal0.cells_dropped; ret[i++] = s->aal4.cells_transmitted; ret[i++] = s->aal4.cells_received; ret[i++] = s->aal4.cells_crc_errors; ret[i++] = s->aal4.cels_protocol_errors; ret[i++] = s->aal4.cells_dropped; ret[i++] = s->aal4.cspdus_transmitted; ret[i++] = s->aal4.cspdus_received; ret[i++] = s->aal4.cspdus_protocol_errors; ret[i++] = s->aal4.cspdus_dropped; ret[i++] = s->aal5.cells_transmitted; ret[i++] = s->aal5.cells_received; ret[i++] = s->aal5.congestion_experienced; ret[i++] = s->aal5.cells_dropped; ret[i++] = s->aal5.cspdus_transmitted; ret[i++] = s->aal5.cspdus_received; ret[i++] = s->aal5.cspdus_crc_errors; ret[i++] = s->aal5.cspdus_protocol_errors; ret[i++] = s->aal5.cspdus_dropped; ret[i++] = s->aux.small_b1_failed; ret[i++] = s->aux.large_b1_failed; ret[i++] = s->aux.small_b2_failed; ret[i++] = s->aux.large_b2_failed; ret[i++] = s->aux.rpd_alloc_failed; ret[i++] = s->aux.receive_carrier; } /* declare the buffer free */ sc->flags &= ~FATM_STAT_INUSE; cv_signal(&sc->cv_stat); FATM_UNLOCK(sc); if (error == 0) error = SYSCTL_OUT(req, ret, sizeof(u_long) * FATM_NSTATS); free(ret, M_TEMP); return (error); } #define MAXDMASEGS 32 /* maximum number of receive descriptors */ /* * Attach to the device. * * We assume, that there is a global lock (Giant in this case) that protects * multiple threads from entering this function. This makes sense, doesn't it? */ static int fatm_attach(device_t dev) { struct ifnet *ifp; struct fatm_softc *sc; int unit; uint16_t cfg; int error = 0; struct rbuf *rb; u_int i; struct txqueue *tx; sc = device_get_softc(dev); unit = device_get_unit(dev); ifp = sc->ifp = if_alloc(IFT_ATM); if (ifp == NULL) { error = ENOSPC; goto fail; } IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_PCA200E; IFP2IFATM(sc->ifp)->mib.serial = 0; IFP2IFATM(sc->ifp)->mib.hw_version = 0; IFP2IFATM(sc->ifp)->mib.sw_version = 0; IFP2IFATM(sc->ifp)->mib.vpi_bits = 0; IFP2IFATM(sc->ifp)->mib.vci_bits = FORE_VCIBITS; IFP2IFATM(sc->ifp)->mib.max_vpcs = 0; IFP2IFATM(sc->ifp)->mib.max_vccs = FORE_MAX_VCC; IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN; IFP2IFATM(sc->ifp)->phy = &sc->utopia; LIST_INIT(&sc->rbuf_free); LIST_INIT(&sc->rbuf_used); /* * Initialize mutex and condition variables. 
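 *
 * cv_stat serializes use of the single statistics DMA buffer: the
 * sysctl handler above clears FATM_STAT_INUSE and cv_signal()s once it
 * has copied the numbers out. The waiting side lives in fatm_getstat()
 * (not part of this hunk) and is presumably the usual condvar idiom:
 *
 *	while (sc->flags & FATM_STAT_INUSE)
 *		cv_wait(&sc->cv_stat, &sc->mtx);
 *	sc->flags |= FATM_STAT_INUSE;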
*/ mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); cv_init(&sc->cv_stat, "fatm_stat"); cv_init(&sc->cv_regs, "fatm_regs"); sysctl_ctx_init(&sc->sysctl_ctx); callout_init_mtx(&sc->watchdog_timer, &sc->mtx, 0); /* * Make the sysctl tree */ if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL) goto fail; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "istats", CTLTYPE_ULONG | CTLFLAG_RD, sc, 0, fatm_sysctl_istats, "LU", "internal statistics") == NULL) goto fail; if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "stats", CTLTYPE_ULONG | CTLFLAG_RD, sc, 0, fatm_sysctl_stats, "LU", "card statistics") == NULL) goto fail; if (SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "retry_tx", CTLFLAG_RW, &sc->retry_tx, 0, "retry flag") == NULL) goto fail; #ifdef FATM_DEBUG if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug flags") == NULL) goto fail; sc->debug = FATM_DEBUG; #endif /* * Network subsystem stuff */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_SIMPLEX; ifp->if_ioctl = fatm_ioctl; ifp->if_start = fatm_start; ifp->if_init = fatm_init; ifp->if_linkmib = &IFP2IFATM(sc->ifp)->mib; ifp->if_linkmiblen = sizeof(IFP2IFATM(sc->ifp)->mib); /* * Enable busmaster */ pci_enable_busmaster(dev); /* * Map memory */ sc->memid = 0x10; sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid, RF_ACTIVE); if (sc->memres == NULL) { if_printf(ifp, "could not map memory\n"); error = ENXIO; goto fail; } sc->memh = rman_get_bushandle(sc->memres); sc->memt = rman_get_bustag(sc->memres); /* * Convert endianess of slave access */ cfg = pci_read_config(dev, FATM_PCIR_MCTL, 1); cfg |= FATM_PCIM_SWAB; pci_write_config(dev, FATM_PCIR_MCTL, cfg, 1); /* * Allocate interrupt (activate at the end) */ sc->irqid = 0; sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, RF_SHAREABLE | RF_ACTIVE); if (sc->irqres == NULL) { if_printf(ifp, "could not allocate irq\n"); error = ENXIO; goto fail; } /* * Allocate the parent DMA tag. This is used simply to hold overall * restrictions for the controller (and PCI bus) and is never used * to do anything. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, MAXDMASEGS, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->parent_dmat)) { if_printf(ifp, "could not allocate parent DMA tag\n"); error = ENOMEM; goto fail; } /* * Allocate the receive buffer DMA tag. This tag must map a maximum of * a mbuf cluster. */ if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rbuf_tag)) { if_printf(ifp, "could not allocate rbuf DMA tag\n"); error = ENOMEM; goto fail; } /* * Allocate the transmission DMA tag. Must add 1, because * rounded up PDU will be 65536 bytes long. */ if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, FATM_MAXPDU + 1, TPD_EXTENSIONS + TXD_FIXED, MCLBYTES, 0, NULL, NULL, &sc->tx_tag)) { if_printf(ifp, "could not allocate tx DMA tag\n"); error = ENOMEM; goto fail; } /* * Allocate DMAable memory. 
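 *
 * Each area below is handed to alloc_dma_memory() with a size and an
 * alignment. Sketched from its use here (the helper itself is outside
 * this hunk and the mem->dmat/mem->map/mem->paddr field names are
 * assumptions), this boils down to the standard busdma sequence:
 *
 *	bus_dma_tag_create(sc->parent_dmat, mem->align, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    mem->size, 1, mem->size, 0, NULL, NULL, &mem->dmat);
 *	bus_dmamem_alloc(mem->dmat, &mem->mem, BUS_DMA_NOWAIT, &mem->map);
 *	bus_dmamap_load(mem->dmat, mem->map, mem->mem, mem->size,
 *	    dmaload_cb, &mem->paddr, BUS_DMA_NOWAIT);
 *
 * where dmaload_cb is a hypothetical callback that stores the single
 * segment's physical address.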
*/ sc->stat_mem.size = sizeof(uint32_t) * (FATM_CMD_QLEN + FATM_TX_QLEN + FATM_RX_QLEN + SMALL_SUPPLY_QLEN + LARGE_SUPPLY_QLEN); sc->stat_mem.align = 4; sc->txq_mem.size = FATM_TX_QLEN * TPD_SIZE; sc->txq_mem.align = 32; sc->rxq_mem.size = FATM_RX_QLEN * RPD_SIZE; sc->rxq_mem.align = 32; sc->s1q_mem.size = SMALL_SUPPLY_QLEN * BSUP_BLK2SIZE(SMALL_SUPPLY_BLKSIZE); sc->s1q_mem.align = 32; sc->l1q_mem.size = LARGE_SUPPLY_QLEN * BSUP_BLK2SIZE(LARGE_SUPPLY_BLKSIZE); sc->l1q_mem.align = 32; #ifdef TEST_DMA_SYNC if ((error = alloc_dma_memoryX(sc, "STATUS", &sc->stat_mem)) != 0 || (error = alloc_dma_memoryX(sc, "TXQ", &sc->txq_mem)) != 0 || (error = alloc_dma_memoryX(sc, "RXQ", &sc->rxq_mem)) != 0 || (error = alloc_dma_memoryX(sc, "S1Q", &sc->s1q_mem)) != 0 || (error = alloc_dma_memoryX(sc, "L1Q", &sc->l1q_mem)) != 0) goto fail; #else if ((error = alloc_dma_memory(sc, "STATUS", &sc->stat_mem)) != 0 || (error = alloc_dma_memory(sc, "TXQ", &sc->txq_mem)) != 0 || (error = alloc_dma_memory(sc, "RXQ", &sc->rxq_mem)) != 0 || (error = alloc_dma_memory(sc, "S1Q", &sc->s1q_mem)) != 0 || (error = alloc_dma_memory(sc, "L1Q", &sc->l1q_mem)) != 0) goto fail; #endif sc->prom_mem.size = sizeof(struct prom); sc->prom_mem.align = 32; if ((error = alloc_dma_memory(sc, "PROM", &sc->prom_mem)) != 0) goto fail; sc->sadi_mem.size = sizeof(struct fatm_stats); sc->sadi_mem.align = 32; if ((error = alloc_dma_memory(sc, "STATISTICS", &sc->sadi_mem)) != 0) goto fail; sc->reg_mem.size = sizeof(uint32_t) * FATM_NREGS; sc->reg_mem.align = 32; if ((error = alloc_dma_memory(sc, "REGISTERS", &sc->reg_mem)) != 0) goto fail; /* * Allocate queues */ sc->cmdqueue.chunk = malloc(FATM_CMD_QLEN * sizeof(struct cmdqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->txqueue.chunk = malloc(FATM_TX_QLEN * sizeof(struct txqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->rxqueue.chunk = malloc(FATM_RX_QLEN * sizeof(struct rxqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->s1queue.chunk = malloc(SMALL_SUPPLY_QLEN * sizeof(struct supqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->l1queue.chunk = malloc(LARGE_SUPPLY_QLEN * sizeof(struct supqueue), M_DEVBUF, M_ZERO | M_WAITOK); sc->vccs = malloc((FORE_MAX_VCC + 1) * sizeof(sc->vccs[0]), M_DEVBUF, M_ZERO | M_WAITOK); sc->vcc_zone = uma_zcreate("FATM vccs", sizeof(struct card_vcc), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); if (sc->vcc_zone == NULL) { error = ENOMEM; goto fail; } /* * Allocate memory for the receive buffer headers. The total number * of headers should probably also include the maximum number of * buffers on the receive queue. */ sc->rbuf_total = SMALL_POOL_SIZE + LARGE_POOL_SIZE; sc->rbufs = malloc(sc->rbuf_total * sizeof(struct rbuf), M_DEVBUF, M_ZERO | M_WAITOK); /* * Put all rbuf headers on the free list and create DMA maps. */ for (rb = sc->rbufs, i = 0; i < sc->rbuf_total; i++, rb++) { if ((error = bus_dmamap_create(sc->rbuf_tag, 0, &rb->map))) { if_printf(sc->ifp, "creating rx map: %d\n", error); goto fail; } LIST_INSERT_HEAD(&sc->rbuf_free, rb, link); } /* * Create dma maps for transmission. In case of an error, free the * allocated DMA maps, because on some architectures maps are NULL * and we cannot distinguish between a failure and a NULL map in * the detach routine. 
*/ for (i = 0; i < FATM_TX_QLEN; i++) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i); if ((error = bus_dmamap_create(sc->tx_tag, 0, &tx->map))) { if_printf(sc->ifp, "creating tx map: %d\n", error); while (i > 0) { tx = GET_QUEUE(sc->txqueue, struct txqueue, i - 1); bus_dmamap_destroy(sc->tx_tag, tx->map); i--; } goto fail; } } utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx, &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), &fatm_utopia_methods); sc->utopia.flags |= UTP_FL_NORESET | UTP_FL_POLL_CARRIER; /* * Attach the interface */ atm_ifattach(ifp); ifp->if_snd.ifq_maxlen = 512; #ifdef ENABLE_BPF bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc)); #endif error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET | INTR_MPSAFE, NULL, fatm_intr, sc, &sc->ih); if (error) { if_printf(ifp, "couldn't setup irq\n"); goto fail; } fail: if (error) fatm_detach(dev); return (error); } #if defined(FATM_DEBUG) && 0 static void dump_s1_queue(struct fatm_softc *sc) { int i; struct supqueue *q; for(i = 0; i < SMALL_SUPPLY_QLEN; i++) { q = GET_QUEUE(sc->s1queue, struct supqueue, i); printf("%2d: card=%x(%x,%x) stat=%x\n", i, q->q.card, READ4(sc, q->q.card), READ4(sc, q->q.card + 4), *q->q.statp); } } #endif /* * Driver infrastructure. */ static device_method_t fatm_methods[] = { DEVMETHOD(device_probe, fatm_probe), DEVMETHOD(device_attach, fatm_attach), DEVMETHOD(device_detach, fatm_detach), { 0, 0 } }; static driver_t fatm_driver = { "fatm", fatm_methods, sizeof(struct fatm_softc), }; DRIVER_MODULE(fatm, pci, fatm_driver, fatm_devclass, 0, 0); diff --git a/sys/dev/fb/vga.c b/sys/dev/fb/vga.c index 365fca4491d9..d0239b3a18b8 100644 --- a/sys/dev/fb/vga.c +++ b/sys/dev/fb/vga.c @@ -1,3072 +1,3072 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * Copyright (c) 1992-1998 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include "opt_vga.h" #include "opt_fb.h" #ifndef FB_DEBUG #define FB_DEBUG 0 #endif #include "opt_syscons.h" /* should be removed in the future, XXX */ #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif #include #include #include #include #ifndef VGA_DEBUG #define VGA_DEBUG 0 #endif /* XXX machine/pc/bios.h has got too much i386-specific stuff in it */ #ifndef BIOS_PADDRTOVADDR #define BIOS_PADDRTOVADDR(x) (x) #endif int vga_probe_unit(int unit, video_adapter_t *buf, int flags) { video_adapter_t *adp; video_switch_t *sw; int error; sw = vid_get_switch(VGA_DRIVER_NAME); if (sw == NULL) return 0; error = (*sw->probe)(unit, &adp, NULL, flags); if (error) return error; bcopy(adp, buf, sizeof(*buf)); return 0; } int vga_attach_unit(int unit, vga_softc_t *sc, int flags) { video_switch_t *sw; int error; sw = vid_get_switch(VGA_DRIVER_NAME); if (sw == NULL) return ENXIO; error = (*sw->probe)(unit, &sc->adp, NULL, flags); if (error) return error; return (*sw->init)(unit, sc->adp, flags); } /* cdev driver functions */ #ifdef FB_INSTALL_CDEV int vga_open(struct cdev *dev, vga_softc_t *sc, int flag, int mode, struct thread *td) { if (sc == NULL) return ENXIO; if (mode & (O_CREAT | O_APPEND | O_TRUNC)) return ENODEV; return genfbopen(&sc->gensc, sc->adp, flag, mode, td); } int vga_close(struct cdev *dev, vga_softc_t *sc, int flag, int mode, struct thread *td) { return genfbclose(&sc->gensc, sc->adp, flag, mode, td); } int vga_read(struct cdev *dev, vga_softc_t *sc, struct uio *uio, int flag) { return genfbread(&sc->gensc, sc->adp, uio, flag); } int vga_write(struct cdev *dev, vga_softc_t *sc, struct uio *uio, int flag) { return genfbwrite(&sc->gensc, sc->adp, uio, flag); } int vga_ioctl(struct cdev *dev, vga_softc_t *sc, u_long cmd, caddr_t arg, int flag, struct thread *td) { return genfbioctl(&sc->gensc, sc->adp, cmd, arg, flag, td); } int vga_mmap(struct cdev *dev, vga_softc_t *sc, vm_ooffset_t offset, vm_offset_t *paddr, int prot, vm_memattr_t *memattr) { return genfbmmap(&sc->gensc, sc->adp, offset, paddr, prot, memattr); } #endif /* FB_INSTALL_CDEV */ /* LOW-LEVEL */ #include #ifdef __i386__ #include #endif #define probe_done(adp) ((adp)->va_flags & V_ADP_PROBED) #define init_done(adp) ((adp)->va_flags & V_ADP_INITIALIZED) #define config_done(adp) ((adp)->va_flags & V_ADP_REGISTERED) /* for compatibility with old kernel options */ #ifdef SC_ALT_SEQACCESS #undef SC_ALT_SEQACCESS #undef VGA_ALT_SEQACCESS #define VGA_ALT_SEQACCESS 1 #endif #ifdef SLOW_VGA #undef SLOW_VGA #undef VGA_SLOW_IOACCESS #define VGA_SLOW_IOACCESS #endif /* architecture dependent option */ #if !defined(__i386__) && !defined(__amd64__) #define VGA_NO_BIOS 1 #endif /* this should really be in `rtc.h' */ #define RTC_EQUIPMENT 0x14 /* various sizes */ #define V_MODE_MAP_SIZE (M_VGA_CG320 + 1) #define V_MODE_PARAM_SIZE 64 /* video adapter state buffer */ struct adp_state { int sig; #define V_STATE_SIG 0x736f6962 u_char regs[V_MODE_PARAM_SIZE]; }; typedef struct adp_state adp_state_t; /* video adapter information */ #define DCC_MONO 0 #define DCC_CGA40 1 #define DCC_CGA80 2 #define DCC_EGAMONO 3 #define DCC_EGA40 4 #define DCC_EGA80 5 /* * NOTE: `va_window' should have a virtual address, but is initialized * with a physical address in the following table, as verify_adapter() * will perform address conversion at run-time.
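 *
 * That is, the entries below store e.g. MDA_BUF_BASE (a physical
 * address); the first access converts it, as verify_adapter() does:
 *
 *	buf = BIOS_PADDRTOVADDR(adp->va_window);
 *	v = readw(buf);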
*/ static video_adapter_t adapter_init_value[] = { /* DCC_MONO */ { 0, KD_MONO, "mda", 0, 0, 0, IO_MDA, IO_MDASIZE, MONO_CRTC, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, 0, 0, 7, 0, }, /* DCC_CGA40 */ { 0, KD_CGA, "cga", 0, 0, V_ADP_COLOR, IO_CGA, IO_CGASIZE, COLOR_CRTC, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, 0, 0, 3, 0, }, /* DCC_CGA80 */ { 0, KD_CGA, "cga", 0, 0, V_ADP_COLOR, IO_CGA, IO_CGASIZE, COLOR_CRTC, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, 0, 0, 3, 0, }, /* DCC_EGAMONO */ { 0, KD_EGA, "ega", 0, 0, 0, IO_MDA, 48, MONO_CRTC, EGA_BUF_BASE, EGA_BUF_SIZE, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, 0, 0, 7, 0, }, /* DCC_EGA40 */ { 0, KD_EGA, "ega", 0, 0, V_ADP_COLOR, IO_MDA, 48, COLOR_CRTC, EGA_BUF_BASE, EGA_BUF_SIZE, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, 0, 0, 3, 0, }, /* DCC_EGA80 */ { 0, KD_EGA, "ega", 0, 0, V_ADP_COLOR, IO_MDA, 48, COLOR_CRTC, EGA_BUF_BASE, EGA_BUF_SIZE, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, 0, 0, 3, 0, }, }; static video_adapter_t biosadapter[2]; static int biosadapters = 0; /* video driver declarations */ static int vga_configure(int flags); int (*vga_sub_configure)(int flags); #if 0 static int vga_nop(void); #endif static int vga_error(void); static vi_probe_t vga_probe; static vi_init_t vga_init; static vi_get_info_t vga_get_info; static vi_query_mode_t vga_query_mode; static vi_set_mode_t vga_set_mode; static vi_save_font_t vga_save_font; static vi_load_font_t vga_load_font; static vi_show_font_t vga_show_font; static vi_save_palette_t vga_save_palette; static vi_load_palette_t vga_load_palette; static vi_set_border_t vga_set_border; static vi_save_state_t vga_save_state; static vi_load_state_t vga_load_state; static vi_set_win_org_t vga_set_origin; static vi_read_hw_cursor_t vga_read_hw_cursor; static vi_set_hw_cursor_t vga_set_hw_cursor; static vi_set_hw_cursor_shape_t vga_set_hw_cursor_shape; static vi_blank_display_t vga_blank_display; static vi_mmap_t vga_mmap_buf; static vi_ioctl_t vga_dev_ioctl; #ifndef VGA_NO_MODE_CHANGE static vi_clear_t vga_clear; static vi_fill_rect_t vga_fill_rect; static vi_bitblt_t vga_bitblt; #else /* VGA_NO_MODE_CHANGE */ #define vga_clear (vi_clear_t *)vga_error #define vga_fill_rect (vi_fill_rect_t *)vga_error #define vga_bitblt (vi_bitblt_t *)vga_error #endif static vi_diag_t vga_diag; static video_switch_t vgavidsw = { vga_probe, vga_init, vga_get_info, vga_query_mode, vga_set_mode, vga_save_font, vga_load_font, vga_show_font, vga_save_palette, vga_load_palette, vga_set_border, vga_save_state, vga_load_state, vga_set_origin, vga_read_hw_cursor, vga_set_hw_cursor, vga_set_hw_cursor_shape, vga_blank_display, vga_mmap_buf, vga_dev_ioctl, vga_clear, vga_fill_rect, vga_bitblt, vga_error, vga_error, vga_diag, }; VIDEO_DRIVER(mda, vgavidsw, NULL); VIDEO_DRIVER(cga, vgavidsw, NULL); VIDEO_DRIVER(ega, vgavidsw, NULL); VIDEO_DRIVER(vga, vgavidsw, vga_configure); /* VGA BIOS standard video modes */ #define EOT (-1) #define NA (-2) static video_info_t bios_vmode[] = { /* CGA */ { M_B40x25, V_INFO_COLOR, 40, 25, 8, 8, 2, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_C40x25, V_INFO_COLOR, 40, 25, 8, 8, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_B80x25, V_INFO_COLOR, 80, 25, 8, 8, 2, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_C80x25, V_INFO_COLOR, 80, 25, 8, 8, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, 
V_INFO_MM_TEXT }, /* EGA */ { M_ENH_B40x25, V_INFO_COLOR, 40, 25, 8, 14, 2, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_ENH_C40x25, V_INFO_COLOR, 40, 25, 8, 14, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_ENH_B80x25, V_INFO_COLOR, 80, 25, 8, 14, 2, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_ENH_C80x25, V_INFO_COLOR, 80, 25, 8, 14, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, /* VGA */ { M_VGA_C40x25, V_INFO_COLOR, 40, 25, 8, 16, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_M80x25, 0, 80, 25, 8, 16, 2, 1, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_C80x25, V_INFO_COLOR, 80, 25, 8, 16, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, /* MDA */ { M_EGAMONO80x25, 0, 80, 25, 8, 14, 2, 1, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, /* EGA */ { M_ENH_B80x43, 0, 80, 43, 8, 8, 2, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_ENH_C80x43, V_INFO_COLOR, 80, 43, 8, 8, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, /* VGA */ { M_VGA_M80x30, 0, 80, 30, 8, 16, 2, 1, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_C80x30, V_INFO_COLOR, 80, 30, 8, 16, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_M80x50, 0, 80, 50, 8, 8, 2, 1, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_C80x50, V_INFO_COLOR, 80, 50, 8, 8, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_M80x60, 0, 80, 60, 8, 8, 2, 1, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_C80x60, V_INFO_COLOR, 80, 60, 8, 8, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, #ifndef VGA_NO_MODE_CHANGE #ifdef VGA_WIDTH90 { M_VGA_M90x25, 0, 90, 25, 8, 16, 2, 1, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_C90x25, V_INFO_COLOR, 90, 25, 8, 16, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_M90x30, 0, 90, 30, 8, 16, 2, 1, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_C90x30, V_INFO_COLOR, 90, 30, 8, 16, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_M90x43, 0, 90, 43, 8, 8, 2, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_C90x43, V_INFO_COLOR, 90, 43, 8, 8, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_M90x50, 0, 90, 50, 8, 8, 2, 1, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_C90x50, V_INFO_COLOR, 90, 50, 8, 8, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_M90x60, 0, 90, 60, 8, 8, 2, 1, MDA_BUF_BASE, MDA_BUF_SIZE, MDA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, { M_VGA_C90x60, V_INFO_COLOR, 90, 60, 8, 8, 4, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_TEXT }, #endif /* VGA_WIDTH90 */ /* CGA */ { M_BG320, V_INFO_COLOR | V_INFO_GRAPHICS, 320, 200, 8, 8, 2, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_CGA }, { M_CG320, V_INFO_COLOR | V_INFO_GRAPHICS, 320, 200, 8, 8, 2, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_CGA }, { M_BG640, V_INFO_COLOR | V_INFO_GRAPHICS, 640, 200, 8, 8, 1, 1, CGA_BUF_BASE, CGA_BUF_SIZE, CGA_BUF_SIZE, 0, 0, V_INFO_MM_CGA }, /* EGA */ { M_CG320_D, V_INFO_COLOR | V_INFO_GRAPHICS, 320, 200, 8, 8, 4, 4, GRAPHICS_BUF_BASE, 
GRAPHICS_BUF_SIZE, GRAPHICS_BUF_SIZE, 0, 0, V_INFO_MM_PLANAR }, { M_CG640_E, V_INFO_COLOR | V_INFO_GRAPHICS, 640, 200, 8, 8, 4, 4, GRAPHICS_BUF_BASE, GRAPHICS_BUF_SIZE, GRAPHICS_BUF_SIZE, 0, 0 , V_INFO_MM_PLANAR }, { M_EGAMONOAPA, V_INFO_GRAPHICS, 640, 350, 8, 14, 4, 4, GRAPHICS_BUF_BASE, GRAPHICS_BUF_SIZE, 64*1024, 0, 0 , V_INFO_MM_PLANAR }, { M_ENHMONOAPA2,V_INFO_GRAPHICS, 640, 350, 8, 14, 4, 4, GRAPHICS_BUF_BASE, GRAPHICS_BUF_SIZE, GRAPHICS_BUF_SIZE, 0, 0 , V_INFO_MM_PLANAR }, { M_CG640x350, V_INFO_COLOR | V_INFO_GRAPHICS, 640, 350, 8, 14, 2, 2, GRAPHICS_BUF_BASE, GRAPHICS_BUF_SIZE, GRAPHICS_BUF_SIZE, 0, 0 , V_INFO_MM_PLANAR }, { M_ENH_CG640, V_INFO_COLOR | V_INFO_GRAPHICS, 640, 350, 8, 14, 4, 4, GRAPHICS_BUF_BASE, GRAPHICS_BUF_SIZE, GRAPHICS_BUF_SIZE, 0, 0 , V_INFO_MM_PLANAR }, /* VGA */ { M_BG640x480, V_INFO_COLOR | V_INFO_GRAPHICS, 640, 480, 8, 16, 4, 4, GRAPHICS_BUF_BASE, GRAPHICS_BUF_SIZE, GRAPHICS_BUF_SIZE, 0, 0 , V_INFO_MM_PLANAR }, { M_CG640x480, V_INFO_COLOR | V_INFO_GRAPHICS, 640, 480, 8, 16, 4, 4, GRAPHICS_BUF_BASE, GRAPHICS_BUF_SIZE, GRAPHICS_BUF_SIZE, 0, 0 , V_INFO_MM_PLANAR }, { M_VGA_CG320, V_INFO_COLOR | V_INFO_GRAPHICS, 320, 200, 8, 8, 8, 1, GRAPHICS_BUF_BASE, GRAPHICS_BUF_SIZE, GRAPHICS_BUF_SIZE, 0, 0, V_INFO_MM_PACKED, 1 }, { M_VGA_MODEX, V_INFO_COLOR | V_INFO_GRAPHICS, 320, 240, 8, 8, 8, 4, GRAPHICS_BUF_BASE, GRAPHICS_BUF_SIZE, GRAPHICS_BUF_SIZE, 0, 0, V_INFO_MM_VGAX, 1 }, #endif /* VGA_NO_MODE_CHANGE */ { EOT }, }; static int vga_init_done = FALSE; #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) static u_char *video_mode_ptr = NULL; /* EGA/VGA */ static u_char *video_mode_ptr2 = NULL; /* CGA/MDA */ #endif static u_char *mode_map[V_MODE_MAP_SIZE]; static adp_state_t adpstate; static adp_state_t adpstate2; static int rows_offset = 1; /* local macros and functions */ #define BIOS_SADDRTOLADDR(p) ((((p) & 0xffff0000) >> 12) + ((p) & 0x0000ffff)) #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) static void map_mode_table(u_char *map[], u_char *table, int max); #endif static void clear_mode_map(video_adapter_t *adp, u_char *map[], int max, int color); #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) static int map_mode_num(int mode); #endif static int map_gen_mode_num(int type, int color, int mode); static int map_bios_mode_num(int type, int color, int bios_mode); static u_char *get_mode_param(int mode); #ifndef VGA_NO_BIOS static void fill_adapter_param(int code, video_adapter_t *adp); #endif static int verify_adapter(video_adapter_t *adp); static void update_adapter_info(video_adapter_t *adp, video_info_t *info); #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) #define COMP_IDENTICAL 0 #define COMP_SIMILAR 1 #define COMP_DIFFERENT 2 static int comp_adpregs(u_char *buf1, u_char *buf2); #endif static int probe_adapters(void); static int set_line_length(video_adapter_t *adp, int pixel); static int set_display_start(video_adapter_t *adp, int x, int y); #ifndef VGA_NO_MODE_CHANGE #ifdef VGA_WIDTH90 static void set_width90(adp_state_t *params); #endif #endif /* !VGA_NO_MODE_CHANGE */ #ifndef VGA_NO_FONT_LOADING #define PARAM_BUFSIZE 6 static void set_font_mode(video_adapter_t *adp, u_char *buf); static void set_normal_mode(video_adapter_t *adp, u_char *buf); #endif #ifndef VGA_NO_MODE_CHANGE static void filll_io(int val, vm_offset_t d, size_t size); static void planar_fill(video_adapter_t *adp, int val); static void packed_fill(video_adapter_t *adp, int val); static void direct_fill(video_adapter_t *adp, int val); #ifdef notyet static void 
planar_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy); static void packed_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy); static void direct_fill_rect16(video_adapter_t *adp, int val, int x, int y, int cx, int cy); static void direct_fill_rect24(video_adapter_t *adp, int val, int x, int y, int cx, int cy); static void direct_fill_rect32(video_adapter_t *adp, int val, int x, int y, int cx, int cy); #endif /* notyet */ #endif /* !VGA_NO_MODE_CHANGE */ static void dump_buffer(u_char *buf, size_t len); #define ISMAPPED(pa, width) \ (((pa) <= (u_long)0x1000 - (width)) \ || ((pa) >= ISA_HOLE_START && (pa) <= 0x100000 - (width))) #define prologue(adp, flag, err) \ if (!vga_init_done || !((adp)->va_flags & (flag))) \ return (err) /* a backdoor for the console driver */ static int vga_configure(int flags) { int i; probe_adapters(); for (i = 0; i < biosadapters; ++i) { if (!probe_done(&biosadapter[i])) continue; biosadapter[i].va_flags |= V_ADP_INITIALIZED; if (!config_done(&biosadapter[i])) { if (vid_register(&biosadapter[i]) < 0) continue; biosadapter[i].va_flags |= V_ADP_REGISTERED; } } if (vga_sub_configure != NULL) (*vga_sub_configure)(flags); return biosadapters; } /* local subroutines */ #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) /* construct the mode parameter map */ static void map_mode_table(u_char *map[], u_char *table, int max) { int i; for(i = 0; i < max; ++i) map[i] = table + i*V_MODE_PARAM_SIZE; for(; i < V_MODE_MAP_SIZE; ++i) map[i] = NULL; } #endif /* !VGA_NO_BIOS && !VGA_NO_MODE_CHANGE */ static void clear_mode_map(video_adapter_t *adp, u_char *map[], int max, int color) { video_info_t info; int i; /* * NOTE: we don't touch `bios_vmode[]' because it is shared * by all adapters. */ for(i = 0; i < max; ++i) { if (vga_get_info(adp, i, &info)) continue; if ((info.vi_flags & V_INFO_COLOR) != color) map[i] = NULL; } } #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) /* map the non-standard video mode to a known mode number */ static int map_mode_num(int mode) { static struct { int from; int to; } mode_map[] = { { M_ENH_B80x43, M_ENH_B80x25 }, { M_ENH_C80x43, M_ENH_C80x25 }, { M_VGA_M80x30, M_VGA_M80x25 }, { M_VGA_C80x30, M_VGA_C80x25 }, { M_VGA_M80x50, M_VGA_M80x25 }, { M_VGA_C80x50, M_VGA_C80x25 }, { M_VGA_M80x60, M_VGA_M80x25 }, { M_VGA_C80x60, M_VGA_C80x25 }, #ifdef VGA_WIDTH90 { M_VGA_M90x25, M_VGA_M80x25 }, { M_VGA_C90x25, M_VGA_C80x25 }, { M_VGA_M90x30, M_VGA_M80x25 }, { M_VGA_C90x30, M_VGA_C80x25 }, { M_VGA_M90x43, M_ENH_B80x25 }, { M_VGA_C90x43, M_ENH_C80x25 }, { M_VGA_M90x50, M_VGA_M80x25 }, { M_VGA_C90x50, M_VGA_C80x25 }, { M_VGA_M90x60, M_VGA_M80x25 }, { M_VGA_C90x60, M_VGA_C80x25 }, #endif { M_VGA_MODEX, M_VGA_CG320 }, }; int i; for (i = 0; i < nitems(mode_map); ++i) { if (mode_map[i].from == mode) return mode_map[i].to; } return mode; } #endif /* !VGA_NO_BIOS && !VGA_NO_MODE_CHANGE */ /* map a generic video mode to a known mode number */ static int map_gen_mode_num(int type, int color, int mode) { static struct { int from; int to_color; int to_mono; } mode_map[] = { { M_TEXT_80x30, M_VGA_C80x30, M_VGA_M80x30, }, { M_TEXT_80x43, M_ENH_C80x43, M_ENH_B80x43, }, { M_TEXT_80x50, M_VGA_C80x50, M_VGA_M80x50, }, { M_TEXT_80x60, M_VGA_C80x60, M_VGA_M80x60, }, }; int i; if (mode == M_TEXT_80x25) { switch (type) { case KD_VGA: if (color) return M_VGA_C80x25; else return M_VGA_M80x25; break; case KD_EGA: if (color) return M_ENH_C80x25; else return M_EGAMONO80x25; break; case KD_CGA: return M_C80x25; case KD_MONO: 
case KD_HERCULES: return M_EGAMONO80x25; /* XXX: this name is confusing */ default: return -1; } } for (i = 0; i < nitems(mode_map); ++i) { if (mode_map[i].from == mode) return ((color) ? mode_map[i].to_color : mode_map[i].to_mono); } return mode; } /* turn the BIOS video number into our video mode number */ static int map_bios_mode_num(int type, int color, int bios_mode) { static int cga_modes[7] = { M_B40x25, M_C40x25, /* 0, 1 */ M_B80x25, M_C80x25, /* 2, 3 */ M_BG320, M_CG320, M_BG640, }; static int ega_modes[17] = { M_ENH_B40x25, M_ENH_C40x25, /* 0, 1 */ M_ENH_B80x25, M_ENH_C80x25, /* 2, 3 */ M_BG320, M_CG320, M_BG640, M_EGAMONO80x25, /* 7 */ 8, 9, 10, 11, 12, M_CG320_D, M_CG640_E, M_ENHMONOAPA2, /* XXX: video memory > 64K */ M_ENH_CG640, /* XXX: video memory > 64K */ }; static int vga_modes[20] = { M_VGA_C40x25, M_VGA_C40x25, /* 0, 1 */ M_VGA_C80x25, M_VGA_C80x25, /* 2, 3 */ M_BG320, M_CG320, M_BG640, M_VGA_M80x25, /* 7 */ 8, 9, 10, 11, 12, M_CG320_D, M_CG640_E, M_ENHMONOAPA2, M_ENH_CG640, M_BG640x480, M_CG640x480, M_VGA_CG320, }; switch (type) { case KD_VGA: if (bios_mode < nitems(vga_modes)) return vga_modes[bios_mode]; else if (color) return M_VGA_C80x25; else return M_VGA_M80x25; break; case KD_EGA: if (bios_mode < nitems(ega_modes)) return ega_modes[bios_mode]; else if (color) return M_ENH_C80x25; else return M_EGAMONO80x25; break; case KD_CGA: if (bios_mode < nitems(cga_modes)) return cga_modes[bios_mode]; else return M_C80x25; break; case KD_MONO: case KD_HERCULES: return M_EGAMONO80x25; /* XXX: this name is confusing */ default: break; } return -1; } /* look up a parameter table entry */ static u_char *get_mode_param(int mode) { #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) if (mode >= V_MODE_MAP_SIZE) mode = map_mode_num(mode); #endif if ((mode >= 0) && (mode < V_MODE_MAP_SIZE)) return mode_map[mode]; else return NULL; } #ifndef VGA_NO_BIOS static void fill_adapter_param(int code, video_adapter_t *adp) { static struct { int primary; int secondary; } dcc[] = { { DCC_MONO, DCC_EGA40 /* CGA monitor */ }, { DCC_MONO, DCC_EGA80 /* CGA monitor */ }, { DCC_MONO, DCC_EGA80 }, { DCC_MONO, DCC_EGA80 }, { DCC_CGA40, DCC_EGAMONO }, { DCC_CGA80, DCC_EGAMONO }, { DCC_EGA40 /* CGA monitor */, DCC_MONO}, { DCC_EGA80 /* CGA monitor */, DCC_MONO}, { DCC_EGA80, DCC_MONO }, { DCC_EGA80, DCC_MONO }, { DCC_EGAMONO, DCC_CGA40 }, { DCC_EGAMONO, DCC_CGA80 }, }; if ((code < 0) || (code >= nitems(dcc))) { adp[V_ADP_PRIMARY] = adapter_init_value[DCC_MONO]; adp[V_ADP_SECONDARY] = adapter_init_value[DCC_CGA80]; } else { adp[V_ADP_PRIMARY] = adapter_init_value[dcc[code].primary]; adp[V_ADP_SECONDARY] = adapter_init_value[dcc[code].secondary]; } } #endif /* VGA_NO_BIOS */ static int verify_adapter(video_adapter_t *adp) { vm_offset_t buf; u_int16_t v; #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) u_int32_t p; #endif buf = BIOS_PADDRTOVADDR(adp->va_window); v = readw(buf); writew(buf, 0xA55A); if (readw(buf) != 0xA55A) return ENXIO; writew(buf, v); switch (adp->va_type) { case KD_EGA: outb(adp->va_crtc_addr, 7); if (inb(adp->va_crtc_addr) == 7) { adp->va_type = KD_VGA; adp->va_name = "vga"; adp->va_flags |= V_ADP_STATESAVE | V_ADP_PALETTE; } adp->va_flags |= V_ADP_STATELOAD | V_ADP_BORDER; /* the color adapter may be in the 40x25 mode...
XXX */ #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) /* get the BIOS video mode pointer */ p = *(u_int32_t *)BIOS_PADDRTOVADDR(0x4a8); p = BIOS_SADDRTOLADDR(p); if (ISMAPPED(p, sizeof(u_int32_t))) { p = *(u_int32_t *)BIOS_PADDRTOVADDR(p); p = BIOS_SADDRTOLADDR(p); if (ISMAPPED(p, V_MODE_PARAM_SIZE)) video_mode_ptr = (u_char *)BIOS_PADDRTOVADDR(p); } #endif break; case KD_CGA: adp->va_flags |= V_ADP_COLOR | V_ADP_BORDER; /* may be in the 40x25 mode... XXX */ #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) /* get the BIOS video mode pointer */ p = *(u_int32_t *)BIOS_PADDRTOVADDR(0x1d*4); p = BIOS_SADDRTOLADDR(p); video_mode_ptr2 = (u_char *)BIOS_PADDRTOVADDR(p); #endif break; case KD_MONO: #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) /* get the BIOS video mode pointer */ p = *(u_int32_t *)BIOS_PADDRTOVADDR(0x1d*4); p = BIOS_SADDRTOLADDR(p); video_mode_ptr2 = (u_char *)BIOS_PADDRTOVADDR(p); #endif break; } return 0; } static void update_adapter_info(video_adapter_t *adp, video_info_t *info) { adp->va_flags &= ~V_ADP_COLOR; adp->va_flags |= (info->vi_flags & V_INFO_COLOR) ? V_ADP_COLOR : 0; adp->va_crtc_addr = (adp->va_flags & V_ADP_COLOR) ? COLOR_CRTC : MONO_CRTC; adp->va_window = BIOS_PADDRTOVADDR(info->vi_window); adp->va_window_size = info->vi_window_size; adp->va_window_gran = info->vi_window_gran; adp->va_window_orig = 0; /* XXX */ adp->va_buffer = info->vi_buffer; adp->va_buffer_size = info->vi_buffer_size; if (info->vi_mem_model == V_INFO_MM_VGAX) { adp->va_line_width = info->vi_width/2; } else if (info->vi_flags & V_INFO_GRAPHICS) { switch (info->vi_depth/info->vi_planes) { case 1: adp->va_line_width = info->vi_width/8; break; case 2: adp->va_line_width = info->vi_width/4; break; case 4: adp->va_line_width = info->vi_width/2; break; case 8: default: /* shouldn't happen */ adp->va_line_width = info->vi_width; break; } } else { adp->va_line_width = info->vi_width; } adp->va_disp_start.x = 0; adp->va_disp_start.y = 0; bcopy(info, &adp->va_info, sizeof(adp->va_info)); } #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) /* compare two parameter table entries */ static int comp_adpregs(u_char *buf1, u_char *buf2) { static struct { u_char mask; } params[V_MODE_PARAM_SIZE] = { {0xff}, {0x00}, {0xff}, /* COLS}, ROWS}, POINTS */ {0x00}, {0x00}, /* page length */ {0xfe}, {0xff}, {0xff}, {0xff}, /* sequencer registers */ {0xf3}, /* misc register */ {0xff}, {0xff}, {0xff}, {0x7f}, {0xff}, /* CRTC */ {0xff}, {0xff}, {0xff}, {0x7f}, {0xff}, {0x00}, {0x00}, {0x00}, {0x00}, {0x00}, {0x00}, {0xff}, {0x7f}, {0xff}, {0xff}, {0x7f}, {0xff}, {0xff}, {0xef}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, /* attribute controller regs */ {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, {0xf0}, {0xff}, {0xff}, {0xff}, {0xff}, {0xff}, /* GDC register */ {0xff}, {0xff}, {0xff}, {0xff}, }; int identical = TRUE; int i; if ((buf1 == NULL) || (buf2 == NULL)) return COMP_DIFFERENT; for (i = 0; i < nitems(params); ++i) { if (params[i].mask == 0) /* don't care */ continue; if ((buf1[i] & params[i].mask) != (buf2[i] & params[i].mask)) return COMP_DIFFERENT; if (buf1[i] != buf2[i]) identical = FALSE; } return (identical) ? 
COMP_IDENTICAL : COMP_SIMILAR; } #endif /* !VGA_NO_BIOS && !VGA_NO_MODE_CHANGE */ /* probe video adapters and return the number of detected adapters */ static int probe_adapters(void) { video_adapter_t *adp; video_info_t info; #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) u_char *mp; #endif int i; /* do this test only once */ if (vga_init_done) return biosadapters; vga_init_done = TRUE; /* * Locate display adapters. * The AT architecture supports up to two adapters. `syscons' allows * the following combinations of adapters: * 1) MDA + CGA * 2) MDA + EGA/VGA color * 3) CGA + EGA/VGA mono * Note that `syscons' doesn't bother with MCGA as it is only * available for low end PS/2 models which have 80286 or earlier CPUs, * thus, they are not running FreeBSD! * When there are two adapters in the system, one becomes `primary' * and the other `secondary'. The EGA adapter has a set of DIP * switches on board for this information and the EGA BIOS copies * it in the BIOS data area BIOSDATA_VIDEOSWITCH (40:88). * The VGA BIOS has a more sophisticated mechanism and has this * information in BIOSDATA_DCCINDEX (40:8a), but it also maintains * compatibility with the EGA BIOS by updating BIOSDATA_VIDEOSWITCH. */ /* * Check rtc and BIOS data area. * XXX: we don't use BIOSDATA_EQUIPMENT, since it is not a dead * copy of RTC_EQUIPMENT. Bits 4 and 5 of RTC_EQUIPMENT are * zeros for EGA and VGA. However, the EGA/VGA BIOS sets * these bits in BIOSDATA_EQUIPMENT according to the monitor * type detected. */ #ifndef VGA_NO_BIOS if (*(u_int32_t *)BIOS_PADDRTOVADDR(0x4a8)) { /* EGA/VGA BIOS is present */ fill_adapter_param(readb(BIOS_PADDRTOVADDR(0x488)) & 0x0f, biosadapter); } else { switch ((rtcin(RTC_EQUIPMENT) >> 4) & 3) { /* bit 4 and 5 */ case 0: /* EGA/VGA: shouldn't be happening */ fill_adapter_param(readb(BIOS_PADDRTOVADDR(0x488)) & 0x0f, biosadapter); break; case 1: /* CGA 40x25 */ /* FIXME: switch to the 80x25 mode? XXX */ biosadapter[V_ADP_PRIMARY] = adapter_init_value[DCC_CGA40]; biosadapter[V_ADP_SECONDARY] = adapter_init_value[DCC_MONO]; break; case 2: /* CGA 80x25 */ biosadapter[V_ADP_PRIMARY] = adapter_init_value[DCC_CGA80]; biosadapter[V_ADP_SECONDARY] = adapter_init_value[DCC_MONO]; break; case 3: /* MDA */ biosadapter[V_ADP_PRIMARY] = adapter_init_value[DCC_MONO]; biosadapter[V_ADP_SECONDARY] = adapter_init_value[DCC_CGA80]; break; } } #else /* assume EGA/VGA?
XXX */ biosadapter[V_ADP_PRIMARY] = adapter_init_value[DCC_EGA80]; biosadapter[V_ADP_SECONDARY] = adapter_init_value[DCC_MONO]; #endif /* VGA_NO_BIOS */ biosadapters = 0; if (verify_adapter(&biosadapter[V_ADP_SECONDARY]) == 0) { ++biosadapters; biosadapter[V_ADP_SECONDARY].va_flags |= V_ADP_PROBED; biosadapter[V_ADP_SECONDARY].va_mode = biosadapter[V_ADP_SECONDARY].va_initial_mode = map_bios_mode_num(biosadapter[V_ADP_SECONDARY].va_type, biosadapter[V_ADP_SECONDARY].va_flags & V_ADP_COLOR, biosadapter[V_ADP_SECONDARY].va_initial_bios_mode); } else { biosadapter[V_ADP_SECONDARY].va_type = -1; } if (verify_adapter(&biosadapter[V_ADP_PRIMARY]) == 0) { ++biosadapters; biosadapter[V_ADP_PRIMARY].va_flags |= V_ADP_PROBED; #ifndef VGA_NO_BIOS biosadapter[V_ADP_PRIMARY].va_initial_bios_mode = readb(BIOS_PADDRTOVADDR(0x449)); #else biosadapter[V_ADP_PRIMARY].va_initial_bios_mode = 3; /* XXX */ #endif biosadapter[V_ADP_PRIMARY].va_mode = biosadapter[V_ADP_PRIMARY].va_initial_mode = map_bios_mode_num(biosadapter[V_ADP_PRIMARY].va_type, biosadapter[V_ADP_PRIMARY].va_flags & V_ADP_COLOR, biosadapter[V_ADP_PRIMARY].va_initial_bios_mode); } else { biosadapter[V_ADP_PRIMARY] = biosadapter[V_ADP_SECONDARY]; biosadapter[V_ADP_SECONDARY].va_type = -1; } if (biosadapters == 0) return biosadapters; biosadapter[V_ADP_PRIMARY].va_unit = V_ADP_PRIMARY; biosadapter[V_ADP_SECONDARY].va_unit = V_ADP_SECONDARY; #if 0 /* we don't need these... */ fb_init_struct(&biosadapter[V_ADP_PRIMARY], ...); fb_init_struct(&biosadapter[V_ADP_SECONDARY], ...); #endif #ifdef notyet /* * We cannot have two video adapter of the same type; there must be * only one of color or mono adapter, or one each of them. */ if (biosadapters > 1) { if (!((biosadapter[0].va_flags ^ biosadapter[1].va_flags) & V_ADP_COLOR)) /* we have two mono or color adapters!! */ return (biosadapters = 0); } #endif /* * Ensure a zero start address. The registers are w/o * for old hardware so it's too hard to relocate the active screen * memory. * This must be done before vga_save_state() for VGA. */ outb(biosadapter[V_ADP_PRIMARY].va_crtc_addr, 12); outb(biosadapter[V_ADP_PRIMARY].va_crtc_addr + 1, 0); outb(biosadapter[V_ADP_PRIMARY].va_crtc_addr, 13); outb(biosadapter[V_ADP_PRIMARY].va_crtc_addr + 1, 0); /* the video mode parameter table in EGA/VGA BIOS */ /* NOTE: there can be only one EGA/VGA, wheather color or mono, * recognized by the video BIOS. */ if ((biosadapter[V_ADP_PRIMARY].va_type == KD_EGA) || (biosadapter[V_ADP_PRIMARY].va_type == KD_VGA)) { adp = &biosadapter[V_ADP_PRIMARY]; } else if ((biosadapter[V_ADP_SECONDARY].va_type == KD_EGA) || (biosadapter[V_ADP_SECONDARY].va_type == KD_VGA)) { adp = &biosadapter[V_ADP_SECONDARY]; } else { adp = NULL; } bzero(mode_map, sizeof(mode_map)); if (adp != NULL) { if (adp->va_type == KD_VGA) { vga_save_state(adp, &adpstate, sizeof(adpstate)); #if defined(VGA_NO_BIOS) || defined(VGA_NO_MODE_CHANGE) mode_map[adp->va_initial_mode] = adpstate.regs; rows_offset = 1; #else /* VGA_NO_BIOS || VGA_NO_MODE_CHANGE */ if (video_mode_ptr == NULL) { mode_map[adp->va_initial_mode] = adpstate.regs; rows_offset = 1; } else { /* discard the table if we are not familiar with it... */ map_mode_table(mode_map, video_mode_ptr, M_VGA_CG320 + 1); mp = get_mode_param(adp->va_initial_mode); if (mp != NULL) bcopy(mp, adpstate2.regs, sizeof(adpstate2.regs)); switch (comp_adpregs(adpstate.regs, mp)) { case COMP_IDENTICAL: /* * OK, this parameter table looks reasonably familiar * to us... 
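 *
 * A worked illustration of the rows_offset computation just below
 * (values assumed for the standard 25-row text mode, and assuming the
 * saved state keeps the BIOS convention of rows - 1 at offset 1): a
 * conventional BIOS has 24 both in the saved state and in its table
 * entry, so
 *
 *	rows_offset = adpstate.regs[1] + 1 - mp[1] = 24 + 1 - 24 = 1;
 *
 * while the DynaBook table stores the actual 25 rows there, giving
 * rows_offset = 0. Adding rows_offset back to a table value yields 25
 * rows in either case.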
*/ /* * This is a kludge for Toshiba DynaBook SS433 * whose BIOS video mode table entry has the actual # * of rows at the offset 1; BIOSes from other * manufacturers store the # of rows - 1 there. XXX */ rows_offset = adpstate.regs[1] + 1 - mp[1]; break; case COMP_SIMILAR: /* * Not exactly the same, but similar enough to be * trusted. However, use the saved register values * for the initial mode and other modes which are * based on the initial mode. */ mode_map[adp->va_initial_mode] = adpstate.regs; rows_offset = adpstate.regs[1] + 1 - mp[1]; adpstate.regs[1] -= rows_offset - 1; break; case COMP_DIFFERENT: default: /* * Don't use the paramter table in BIOS. It doesn't * look familiar to us. Video mode switching is allowed * only if the new mode is the same as or based on * the initial mode. */ video_mode_ptr = NULL; bzero(mode_map, sizeof(mode_map)); mode_map[adp->va_initial_mode] = adpstate.regs; rows_offset = 1; break; } } #endif /* VGA_NO_BIOS || VGA_NO_MODE_CHANGE */ #ifndef VGA_NO_MODE_CHANGE adp->va_flags |= V_ADP_MODECHANGE; #endif #ifndef VGA_NO_FONT_LOADING adp->va_flags |= V_ADP_FONT; #endif } else if (adp->va_type == KD_EGA) { #if defined(VGA_NO_BIOS) || defined(VGA_NO_MODE_CHANGE) rows_offset = 1; #else /* VGA_NO_BIOS || VGA_NO_MODE_CHANGE */ if (video_mode_ptr == NULL) { rows_offset = 1; } else { map_mode_table(mode_map, video_mode_ptr, M_ENH_C80x25 + 1); /* XXX how can one validate the EGA table... */ mp = get_mode_param(adp->va_initial_mode); if (mp != NULL) { adp->va_flags |= V_ADP_MODECHANGE; #ifndef VGA_NO_FONT_LOADING adp->va_flags |= V_ADP_FONT; #endif rows_offset = 1; } else { /* * This is serious. We will not be able to switch video * modes at all... */ video_mode_ptr = NULL; bzero(mode_map, sizeof(mode_map)); rows_offset = 1; } } #endif /* VGA_NO_BIOS || VGA_NO_MODE_CHANGE */ } } /* remove conflicting modes if we have more than one adapter */ if (biosadapters > 0) { for (i = 0; i < biosadapters; ++i) { if (!(biosadapter[i].va_flags & V_ADP_MODECHANGE)) continue; clear_mode_map(&biosadapter[i], mode_map, M_VGA_CG320 + 1, (biosadapter[i].va_flags & V_ADP_COLOR) ? V_INFO_COLOR : 0); if ((biosadapter[i].va_type == KD_VGA) || (biosadapter[i].va_type == KD_EGA)) { biosadapter[i].va_io_base = (biosadapter[i].va_flags & V_ADP_COLOR) ? IO_VGA : IO_MDA; biosadapter[i].va_io_size = 32; } } } /* buffer address */ vga_get_info(&biosadapter[V_ADP_PRIMARY], biosadapter[V_ADP_PRIMARY].va_initial_mode, &info); info.vi_flags &= ~V_INFO_LINEAR; /* XXX */ update_adapter_info(&biosadapter[V_ADP_PRIMARY], &info); if (biosadapters > 1) { vga_get_info(&biosadapter[V_ADP_SECONDARY], biosadapter[V_ADP_SECONDARY].va_initial_mode, &info); info.vi_flags &= ~V_INFO_LINEAR; /* XXX */ update_adapter_info(&biosadapter[V_ADP_SECONDARY], &info); } /* * XXX: we should verify the following values for the primary adapter... * crtc I/O port address: *(u_int16_t *)BIOS_PADDRTOVADDR(0x463); * color/mono display: (*(u_int8_t *)BIOS_PADDRTOVADDR(0x487) & 0x02) * ? 
0 : V_ADP_COLOR; * columns: *(u_int8_t *)BIOS_PADDRTOVADDR(0x44a); * rows: *(u_int8_t *)BIOS_PADDRTOVADDR(0x484); * font size: *(u_int8_t *)BIOS_PADDRTOVADDR(0x485); * buffer size: *(u_int16_t *)BIOS_PADDRTOVADDR(0x44c); */ return biosadapters; } /* set the scan line length in pixel */ static int set_line_length(video_adapter_t *adp, int pixel) { u_char *mp; int ppw; /* pixels per word */ int bpl; /* bytes per line */ int count; if ((adp->va_type != KD_VGA) && (adp->va_type != KD_EGA)) return ENODEV; mp = get_mode_param(adp->va_mode); if (mp == NULL) return EINVAL; switch (adp->va_info.vi_mem_model) { case V_INFO_MM_PLANAR: ppw = 16/(adp->va_info.vi_depth/adp->va_info.vi_planes); - count = (pixel + ppw - 1)/ppw/2; - bpl = ((pixel + ppw - 1)/ppw/2)*4; + count = howmany(pixel, ppw)/2; + bpl = (howmany(pixel, ppw)/2)*4; break; case V_INFO_MM_PACKED: count = (pixel + 7)/8; bpl = ((pixel + 7)/8)*8; break; case V_INFO_MM_TEXT: count = (pixel + 7)/8; /* columns */ bpl = (pixel + 7)/8; /* columns */ break; default: return ENODEV; } if (mp[10 + 0x17] & 0x40) /* CRTC mode control reg */ count *= 2; /* byte mode */ outb(adp->va_crtc_addr, 0x13); outb(adp->va_crtc_addr + 1, count); adp->va_line_width = bpl; return 0; } static int set_display_start(video_adapter_t *adp, int x, int y) { int off; /* byte offset (graphics mode)/word offset (text mode) */ int poff; /* pixel offset */ int roff; /* row offset */ int ppb; /* pixels per byte */ if ((adp->va_type != KD_VGA) && (adp->va_type != KD_EGA)) x &= ~7; if (adp->va_info.vi_flags & V_INFO_GRAPHICS) { ppb = 8/(adp->va_info.vi_depth/adp->va_info.vi_planes); off = y*adp->va_line_width + x/ppb; roff = 0; poff = x%ppb; } else { if ((adp->va_type == KD_VGA) || (adp->va_type == KD_EGA)) { outb(TSIDX, 1); if (inb(TSREG) & 1) ppb = 9; else ppb = 8; } else { ppb = 8; } off = y/adp->va_info.vi_cheight*adp->va_line_width + x/ppb; roff = y%adp->va_info.vi_cheight; /* FIXME: is this correct? XXX */ if (ppb == 8) poff = x%ppb; else poff = (x + 8)%ppb; } /* start address */ outb(adp->va_crtc_addr, 0xc); /* high */ outb(adp->va_crtc_addr + 1, off >> 8); outb(adp->va_crtc_addr, 0xd); /* low */ outb(adp->va_crtc_addr + 1, off & 0xff); /* horizontal pel pan */ if ((adp->va_type == KD_VGA) || (adp->va_type == KD_EGA)) { inb(adp->va_crtc_addr + 6); outb(ATC, 0x13 | 0x20); outb(ATC, poff); inb(adp->va_crtc_addr + 6); outb(ATC, 0x20); } /* preset raw scan */ outb(adp->va_crtc_addr, 8); outb(adp->va_crtc_addr + 1, roff); adp->va_disp_start.x = x; adp->va_disp_start.y = y; return 0; } #ifndef VGA_NO_MODE_CHANGE #if defined(__i386__) || defined(__amd64__) /* XXX */ static void fill(int val, void *d, size_t size) { u_char *p = d; while (size-- > 0) *p++ = val; } #endif /* __i386__ */ static void filll_io(int val, vm_offset_t d, size_t size) { while (size-- > 0) { writel(d, val); d += sizeof(u_int32_t); } } #endif /* !VGA_NO_MODE_CHANGE */ /* entry points */ #if 0 static int vga_nop(void) { return 0; } #endif static int vga_error(void) { return ENODEV; } static int vga_probe(int unit, video_adapter_t **adpp, void *arg, int flags) { probe_adapters(); if (unit >= biosadapters) return ENXIO; *adpp = &biosadapter[unit]; return 0; } static int vga_init(int unit, video_adapter_t *adp, int flags) { if ((unit >= biosadapters) || (adp == NULL) || !probe_done(adp)) return ENXIO; if (!init_done(adp)) { /* nothing to do really... 
*/ adp->va_flags |= V_ADP_INITIALIZED; } if (!config_done(adp)) { if (vid_register(adp) < 0) return ENXIO; adp->va_flags |= V_ADP_REGISTERED; } if (vga_sub_configure != NULL) (*vga_sub_configure)(0); return 0; } /* * get_info(): * Return the video_info structure of the requested video mode. * * all adapters */ static int vga_get_info(video_adapter_t *adp, int mode, video_info_t *info) { int i; if (!vga_init_done) return ENXIO; mode = map_gen_mode_num(adp->va_type, adp->va_flags & V_ADP_COLOR, mode); #ifndef VGA_NO_MODE_CHANGE if (adp->va_flags & V_ADP_MODECHANGE) { /* * If the parameter table entry for this mode is not found, * the mode is not supported... */ if (get_mode_param(mode) == NULL) return EINVAL; } else #endif /* VGA_NO_MODE_CHANGE */ { /* * Even if we don't support video mode switching on this adapter, * the information on the initial (thus current) video mode * should be made available. */ if (mode != adp->va_initial_mode) return EINVAL; } for (i = 0; bios_vmode[i].vi_mode != EOT; ++i) { if (bios_vmode[i].vi_mode == NA) continue; if (mode == bios_vmode[i].vi_mode) { *info = bios_vmode[i]; /* XXX */ info->vi_buffer_size = info->vi_window_size*info->vi_planes; return 0; } } return EINVAL; } /* * query_mode(): * Find a video mode matching the requested parameters. * Fields filled with 0 are considered "don't care" fields and * match any modes. * * all adapters */ static int vga_query_mode(video_adapter_t *adp, video_info_t *info) { int i; if (!vga_init_done) return ENXIO; for (i = 0; bios_vmode[i].vi_mode != EOT; ++i) { if (bios_vmode[i].vi_mode == NA) continue; if ((info->vi_width != 0) && (info->vi_width != bios_vmode[i].vi_width)) continue; if ((info->vi_height != 0) && (info->vi_height != bios_vmode[i].vi_height)) continue; if ((info->vi_cwidth != 0) && (info->vi_cwidth != bios_vmode[i].vi_cwidth)) continue; if ((info->vi_cheight != 0) && (info->vi_cheight != bios_vmode[i].vi_cheight)) continue; if ((info->vi_depth != 0) && (info->vi_depth != bios_vmode[i].vi_depth)) continue; if ((info->vi_planes != 0) && (info->vi_planes != bios_vmode[i].vi_planes)) continue; /* XXX: should check pixel format, memory model */ if ((info->vi_flags != 0) && (info->vi_flags != bios_vmode[i].vi_flags)) continue; /* verify if this mode is supported on this adapter */ if (vga_get_info(adp, bios_vmode[i].vi_mode, info)) continue; return 0; } return ENODEV; } /* * set_mode(): * Change the video mode. * * EGA/VGA */ #ifndef VGA_NO_MODE_CHANGE #ifdef VGA_WIDTH90 static void set_width90(adp_state_t *params) { /* * Based on code submitted by Kelly Yancey (kbyanc@freedomnet.com) * and alexv@sui.gda.itesm.mx. 
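* The registers tweaked below select 8-dot characters, widen the CRTC * horizontal timing for 90 columns, set the offset register to 45 * (90 columns / 2 words per scan line) and zero the ATC pixel-panning * register.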
*/ params->regs[5] |= 1; /* toggle 8 pixel wide fonts */ params->regs[10+0x0] = 0x6b; params->regs[10+0x1] = 0x59; params->regs[10+0x2] = 0x5a; params->regs[10+0x3] = 0x8e; params->regs[10+0x4] = 0x5e; params->regs[10+0x5] = 0x8a; params->regs[10+0x13] = 45; params->regs[35+0x13] = 0; } #endif /* VGA_WIDTH90 */ #endif /* !VGA_NO_MODE_CHANGE */ static int vga_set_mode(video_adapter_t *adp, int mode) { #ifndef VGA_NO_MODE_CHANGE video_info_t info; adp_state_t params; prologue(adp, V_ADP_MODECHANGE, ENODEV); mode = map_gen_mode_num(adp->va_type, adp->va_flags & V_ADP_COLOR, mode); if (vga_get_info(adp, mode, &info)) return EINVAL; #if VGA_DEBUG > 1 printf("vga_set_mode(): setting mode %d\n", mode); #endif params.sig = V_STATE_SIG; bcopy(get_mode_param(mode), params.regs, sizeof(params.regs)); switch (mode) { #ifdef VGA_WIDTH90 case M_VGA_C90x60: case M_VGA_M90x60: set_width90(&params); /* FALLTHROUGH */ #endif case M_VGA_C80x60: case M_VGA_M80x60: params.regs[2] = 0x08; params.regs[19] = 0x47; goto special_480l; #ifdef VGA_WIDTH90 case M_VGA_C90x30: case M_VGA_M90x30: set_width90(&params); /* FALLTHROUGH */ #endif case M_VGA_C80x30: case M_VGA_M80x30: params.regs[19] = 0x4f; special_480l: params.regs[9] |= 0xc0; params.regs[16] = 0x08; params.regs[17] = 0x3e; params.regs[26] = 0xea; params.regs[28] = 0xdf; params.regs[31] = 0xe7; params.regs[32] = 0x04; goto setup_mode; #ifdef VGA_WIDTH90 case M_VGA_C90x43: case M_VGA_M90x43: set_width90(&params); /* FALLTHROUGH */ #endif case M_ENH_C80x43: case M_ENH_B80x43: params.regs[28] = 87; goto special_80x50; #ifdef VGA_WIDTH90 case M_VGA_C90x50: case M_VGA_M90x50: set_width90(&params); /* FALLTHROUGH */ #endif case M_VGA_C80x50: case M_VGA_M80x50: special_80x50: params.regs[2] = 8; params.regs[19] = 7; goto setup_mode; #ifdef VGA_WIDTH90 case M_VGA_C90x25: case M_VGA_M90x25: set_width90(&params); /* FALLTHROUGH */ #endif case M_VGA_C40x25: case M_VGA_C80x25: case M_VGA_M80x25: case M_B40x25: case M_C40x25: case M_B80x25: case M_C80x25: case M_ENH_B40x25: case M_ENH_C40x25: case M_ENH_B80x25: case M_ENH_C80x25: case M_EGAMONO80x25: setup_mode: vga_load_state(adp, &params); break; case M_VGA_MODEX: /* "unchain" the VGA mode */ params.regs[5-1+0x04] &= 0xf7; params.regs[5-1+0x04] |= 0x04; /* turn off doubleword mode */ params.regs[10+0x14] &= 0xbf; /* turn off word addressing */ params.regs[10+0x17] |= 0x40; /* set logical screen width */ params.regs[10+0x13] = 80; /* set 240 lines */ params.regs[10+0x11] = 0x2c; params.regs[10+0x06] = 0x0d; params.regs[10+0x07] = 0x3e; params.regs[10+0x10] = 0xea; params.regs[10+0x11] = 0xac; params.regs[10+0x12] = 0xdf; params.regs[10+0x15] = 0xe7; params.regs[10+0x16] = 0x06; /* set vertical sync polarity to reflect aspect ratio */ params.regs[9] = 0xe3; goto setup_grmode; case M_BG320: case M_CG320: case M_BG640: case M_CG320_D: case M_CG640_E: case M_CG640x350: case M_ENH_CG640: case M_BG640x480: case M_CG640x480: case M_VGA_CG320: setup_grmode: vga_load_state(adp, &params); break; default: return EINVAL; } adp->va_mode = mode; info.vi_flags &= ~V_INFO_LINEAR; /* XXX */ update_adapter_info(adp, &info); /* move hardware cursor out of the way */ vidd_set_hw_cursor(adp, -1, -1); return 0; #else /* VGA_NO_MODE_CHANGE */ return ENODEV; #endif /* VGA_NO_MODE_CHANGE */ } #ifndef VGA_NO_FONT_LOADING static void set_font_mode(video_adapter_t *adp, u_char *buf) { u_char *mp; int s; s = splhigh(); /* save register values */ if (adp->va_type == KD_VGA) { outb(TSIDX, 0x02); buf[0] = inb(TSREG); outb(TSIDX, 0x04); buf[1] = inb(TSREG); outb(GDCIDX, 0x04);
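/* GDC regs 4 (read map select), 5 (graphics mode) and 6 (miscellaneous) * and ATC reg 0x10 are saved next so set_normal_mode() can restore them. */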
buf[2] = inb(GDCREG); outb(GDCIDX, 0x05); buf[3] = inb(GDCREG); outb(GDCIDX, 0x06); buf[4] = inb(GDCREG); inb(adp->va_crtc_addr + 6); outb(ATC, 0x10); buf[5] = inb(ATC + 1); } else /* if (adp->va_type == KD_EGA) */ { /* * EGA cannot be read; copy parameters from the mode parameter * table. */ mp = get_mode_param(adp->va_mode); buf[0] = mp[5 + 0x02 - 1]; buf[1] = mp[5 + 0x04 - 1]; buf[2] = mp[55 + 0x04]; buf[3] = mp[55 + 0x05]; buf[4] = mp[55 + 0x06]; buf[5] = mp[35 + 0x10]; } /* setup vga for loading fonts */ inb(adp->va_crtc_addr + 6); /* reset flip-flop */ outb(ATC, 0x10); outb(ATC, buf[5] & ~0x01); inb(adp->va_crtc_addr + 6); /* reset flip-flop */ outb(ATC, 0x20); /* enable palette */ #ifdef VGA_SLOW_IOACCESS #ifdef VGA_ALT_SEQACCESS outb(TSIDX, 0x00); outb(TSREG, 0x01); #endif outb(TSIDX, 0x02); outb(TSREG, 0x04); outb(TSIDX, 0x04); outb(TSREG, 0x07); #ifdef VGA_ALT_SEQACCESS outb(TSIDX, 0x00); outb(TSREG, 0x03); #endif outb(GDCIDX, 0x04); outb(GDCREG, 0x02); outb(GDCIDX, 0x05); outb(GDCREG, 0x00); outb(GDCIDX, 0x06); outb(GDCREG, 0x04); #else /* VGA_SLOW_IOACCESS */ #ifdef VGA_ALT_SEQACCESS outw(TSIDX, 0x0100); #endif outw(TSIDX, 0x0402); outw(TSIDX, 0x0704); #ifdef VGA_ALT_SEQACCESS outw(TSIDX, 0x0300); #endif outw(GDCIDX, 0x0204); outw(GDCIDX, 0x0005); outw(GDCIDX, 0x0406); /* addr = a0000, 64kb */ #endif /* VGA_SLOW_IOACCESS */ splx(s); } static void set_normal_mode(video_adapter_t *adp, u_char *buf) { int s; s = splhigh(); /* setup vga for normal operation mode again */ inb(adp->va_crtc_addr + 6); /* reset flip-flop */ outb(ATC, 0x10); outb(ATC, buf[5]); inb(adp->va_crtc_addr + 6); /* reset flip-flop */ outb(ATC, 0x20); /* enable palette */ #ifdef VGA_SLOW_IOACCESS #ifdef VGA_ALT_SEQACCESS outb(TSIDX, 0x00); outb(TSREG, 0x01); #endif outb(TSIDX, 0x02); outb(TSREG, buf[0]); outb(TSIDX, 0x04); outb(TSREG, buf[1]); #ifdef VGA_ALT_SEQACCESS outb(TSIDX, 0x00); outb(TSREG, 0x03); #endif outb(GDCIDX, 0x04); outb(GDCREG, buf[2]); outb(GDCIDX, 0x05); outb(GDCREG, buf[3]); if (adp->va_crtc_addr == MONO_CRTC) { outb(GDCIDX, 0x06); outb(GDCREG,(buf[4] & 0x03) | 0x08); } else { outb(GDCIDX, 0x06); outb(GDCREG,(buf[4] & 0x03) | 0x0c); } #else /* VGA_SLOW_IOACCESS */ #ifdef VGA_ALT_SEQACCESS outw(TSIDX, 0x0100); #endif outw(TSIDX, 0x0002 | (buf[0] << 8)); outw(TSIDX, 0x0004 | (buf[1] << 8)); #ifdef VGA_ALT_SEQACCESS outw(TSIDX, 0x0300); #endif outw(GDCIDX, 0x0004 | (buf[2] << 8)); outw(GDCIDX, 0x0005 | (buf[3] << 8)); if (adp->va_crtc_addr == MONO_CRTC) outw(GDCIDX, 0x0006 | (((buf[4] & 0x03) | 0x08)<<8)); else outw(GDCIDX, 0x0006 | (((buf[4] & 0x03) | 0x0c)<<8)); #endif /* VGA_SLOW_IOACCESS */ splx(s); } #endif /* VGA_NO_FONT_LOADING */ /* * save_font(): * Read the font data in the requested font page from the video adapter. * * EGA/VGA */ static int vga_save_font(video_adapter_t *adp, int page, int fontsize, int fontwidth, u_char *data, int ch, int count) { #ifndef VGA_NO_FONT_LOADING u_char buf[PARAM_BUFSIZE]; vm_offset_t segment; int c; #ifdef VGA_ALT_SEQACCESS int s; u_char val = 0; #endif prologue(adp, V_ADP_FONT, ENODEV); if (fontsize < 14) { /* FONT_8 */ fontsize = 8; } else if (fontsize >= 32) { fontsize = 32; } else if (fontsize >= 16) { /* FONT_16 */ fontsize = 16; } else { /* FONT_14 */ fontsize = 14; } if (page < 0 || page >= 8 || fontwidth != 8) return EINVAL; segment = FONT_BUF + 0x4000*page; if (page > 3) segment -= 0xe000; #ifdef VGA_ALT_SEQACCESS if (adp->va_type == KD_VGA) { /* what about EGA? 
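(the sequencer cannot be read back on EGA, so this screen blank/unblank * sequence is done for VGA only)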
XXX */ s = splhigh(); outb(TSIDX, 0x00); outb(TSREG, 0x01); outb(TSIDX, 0x01); val = inb(TSREG); /* disable screen */ outb(TSIDX, 0x01); outb(TSREG, val | 0x20); outb(TSIDX, 0x00); outb(TSREG, 0x03); splx(s); } #endif set_font_mode(adp, buf); if (fontsize == 32) { bcopy_fromio((uintptr_t)segment + ch*32, data, fontsize*count); } else { for (c = ch; count > 0; ++c, --count) { bcopy_fromio((uintptr_t)segment + c*32, data, fontsize); data += fontsize; } } set_normal_mode(adp, buf); #ifdef VGA_ALT_SEQACCESS if (adp->va_type == KD_VGA) { s = splhigh(); outb(TSIDX, 0x00); outb(TSREG, 0x01); outb(TSIDX, 0x01); outb(TSREG, val & 0xdf); /* enable screen */ outb(TSIDX, 0x00); outb(TSREG, 0x03); splx(s); } #endif return 0; #else /* VGA_NO_FONT_LOADING */ return ENODEV; #endif /* VGA_NO_FONT_LOADING */ } /* * load_font(): * Set the font data in the requested font page. * NOTE: it appears that some recent video adapters do not support * font pages other than 0... XXX * * EGA/VGA */ static int vga_load_font(video_adapter_t *adp, int page, int fontsize, int fontwidth, u_char *data, int ch, int count) { #ifndef VGA_NO_FONT_LOADING u_char buf[PARAM_BUFSIZE]; vm_offset_t segment; int c; #ifdef VGA_ALT_SEQACCESS int s; u_char val = 0; #endif prologue(adp, V_ADP_FONT, ENODEV); if (fontsize < 14) { /* FONT_8 */ fontsize = 8; } else if (fontsize >= 32) { fontsize = 32; } else if (fontsize >= 16) { /* FONT_16 */ fontsize = 16; } else { /* FONT_14 */ fontsize = 14; } if (page < 0 || page >= 8 || fontwidth != 8) return EINVAL; segment = FONT_BUF + 0x4000*page; if (page > 3) segment -= 0xe000; #ifdef VGA_ALT_SEQACCESS if (adp->va_type == KD_VGA) { /* what about EGA? XXX */ s = splhigh(); outb(TSIDX, 0x00); outb(TSREG, 0x01); outb(TSIDX, 0x01); val = inb(TSREG); /* disable screen */ outb(TSIDX, 0x01); outb(TSREG, val | 0x20); outb(TSIDX, 0x00); outb(TSREG, 0x03); splx(s); } #endif set_font_mode(adp, buf); if (fontsize == 32) { bcopy_toio(data, (uintptr_t)segment + ch*32, fontsize*count); } else { for (c = ch; count > 0; ++c, --count) { bcopy_toio(data, (uintptr_t)segment + c*32, fontsize); data += fontsize; } } set_normal_mode(adp, buf); #ifdef VGA_ALT_SEQACCESS if (adp->va_type == KD_VGA) { s = splhigh(); outb(TSIDX, 0x00); outb(TSREG, 0x01); outb(TSIDX, 0x01); outb(TSREG, val & 0xdf); /* enable screen */ outb(TSIDX, 0x00); outb(TSREG, 0x03); splx(s); } #endif return 0; #else /* VGA_NO_FONT_LOADING */ return ENODEV; #endif /* VGA_NO_FONT_LOADING */ } /* * show_font(): * Activate the requested font page. * NOTE: it appears that some recent video adapters do not support * font pages other than 0... XXX * * EGA/VGA */ static int vga_show_font(video_adapter_t *adp, int page) { #ifndef VGA_NO_FONT_LOADING static u_char cg[] = { 0x00, 0x05, 0x0a, 0x0f, 0x30, 0x35, 0x3a, 0x3f }; int s; prologue(adp, V_ADP_FONT, ENODEV); if (page < 0 || page >= 8) return EINVAL; s = splhigh(); outb(TSIDX, 0x03); outb(TSREG, cg[page]); splx(s); return 0; #else /* VGA_NO_FONT_LOADING */ return ENODEV; #endif /* VGA_NO_FONT_LOADING */ } /* * save_palette(): * Read DAC values. The values are expressed in 8 bits. * * VGA */ static int vga_save_palette(video_adapter_t *adp, u_char *palette) { int bits; int i; prologue(adp, V_ADP_PALETTE, ENODEV); /* * We store 8 bit values in the palette buffer, while the standard * VGA has a 6 bit DAC. */ outb(PALRADR, 0x00); bits = (adp->va_flags & V_ADP_DAC8) != 0 ?
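/* an 8-bit DAC passes values through; a 6-bit DAC needs a << 2 scale-up */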
0 : 2; for (i = 0; i < 256*3; ++i) palette[i] = inb(PALDATA) << bits; inb(adp->va_crtc_addr + 6); /* reset flip/flop */ return 0; } static int vga_save_palette2(video_adapter_t *adp, int base, int count, u_char *r, u_char *g, u_char *b) { int bits; int i; prologue(adp, V_ADP_PALETTE, ENODEV); outb(PALRADR, base); bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 0 : 2; for (i = 0; i < count; ++i) { r[i] = inb(PALDATA) << bits; g[i] = inb(PALDATA) << bits; b[i] = inb(PALDATA) << bits; } inb(adp->va_crtc_addr + 6); /* reset flip/flop */ return 0; } /* * load_palette(): * Set DAC values. * * VGA */ static int vga_load_palette(video_adapter_t *adp, u_char *palette) { int bits; int i; prologue(adp, V_ADP_PALETTE, ENODEV); outb(PIXMASK, 0xff); /* no pixelmask */ outb(PALWADR, 0x00); bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 0 : 2; for (i = 0; i < 256*3; ++i) outb(PALDATA, palette[i] >> bits); inb(adp->va_crtc_addr + 6); /* reset flip/flop */ outb(ATC, 0x20); /* enable palette */ return 0; } static int vga_load_palette2(video_adapter_t *adp, int base, int count, u_char *r, u_char *g, u_char *b) { int bits; int i; prologue(adp, V_ADP_PALETTE, ENODEV); outb(PIXMASK, 0xff); /* no pixelmask */ outb(PALWADR, base); bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 0 : 2; for (i = 0; i < count; ++i) { outb(PALDATA, r[i] >> bits); outb(PALDATA, g[i] >> bits); outb(PALDATA, b[i] >> bits); } inb(adp->va_crtc_addr + 6); /* reset flip/flop */ outb(ATC, 0x20); /* enable palette */ return 0; } /* * set_border(): * Change the border color. * * CGA/EGA/VGA */ static int vga_set_border(video_adapter_t *adp, int color) { prologue(adp, V_ADP_BORDER, ENODEV); switch (adp->va_type) { case KD_EGA: case KD_VGA: inb(adp->va_crtc_addr + 6); /* reset flip-flop */ outb(ATC, 0x31); outb(ATC, color & 0xff); break; case KD_CGA: outb(adp->va_crtc_addr + 5, color & 0x0f); /* color select register */ break; case KD_MONO: case KD_HERCULES: default: break; } return 0; } /* * save_state(): * Read video register values. * NOTE: this function only reads the standard EGA/VGA registers. * any extra/extended registers of SVGA adapters are not saved. 
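* Layout of the regs[] buffer filled in below: [0..2] hold the * columns/rows - 1/font height triple, [5..8] sequencer registers 1-4, * [9] the miscellaneous output register, [10..34] CRTC 0-24, [35..54] * attribute controller 0-19 and [55..63] graphics controller 0-8.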
* * VGA */ static int vga_save_state(video_adapter_t *adp, void *p, size_t size) { video_info_t info; u_char *buf; int crtc_addr; int i, j; int s; if (size == 0) { /* return the required buffer size */ prologue(adp, V_ADP_STATESAVE, 0); return sizeof(adp_state_t); } else { prologue(adp, V_ADP_STATESAVE, ENODEV); if (size < sizeof(adp_state_t)) return EINVAL; } ((adp_state_t *)p)->sig = V_STATE_SIG; buf = ((adp_state_t *)p)->regs; bzero(buf, V_MODE_PARAM_SIZE); crtc_addr = adp->va_crtc_addr; s = splhigh(); outb(TSIDX, 0x00); outb(TSREG, 0x01); /* stop sequencer */ for (i = 0, j = 5; i < 4; i++) { outb(TSIDX, i + 1); buf[j++] = inb(TSREG); } buf[9] = inb(MISC + 10); /* dot-clock */ outb(TSIDX, 0x00); outb(TSREG, 0x03); /* start sequencer */ for (i = 0, j = 10; i < 25; i++) { /* crtc */ outb(crtc_addr, i); buf[j++] = inb(crtc_addr + 1); } for (i = 0, j = 35; i < 20; i++) { /* attribute ctrl */ inb(crtc_addr + 6); /* reset flip-flop */ outb(ATC, i); buf[j++] = inb(ATC + 1); } for (i = 0, j = 55; i < 9; i++) { /* graph data ctrl */ outb(GDCIDX, i); buf[j++] = inb(GDCREG); } inb(crtc_addr + 6); /* reset flip-flop */ outb(ATC, 0x20); /* enable palette */ splx(s); #if 1 if (vga_get_info(adp, adp->va_mode, &info) == 0) { if (info.vi_flags & V_INFO_GRAPHICS) { buf[0] = info.vi_width/info.vi_cwidth; /* COLS */ buf[1] = info.vi_height/info.vi_cheight - 1; /* ROWS */ } else { buf[0] = info.vi_width; /* COLS */ buf[1] = info.vi_height - 1; /* ROWS */ } buf[2] = info.vi_cheight; /* POINTS */ } #else buf[0] = readb(BIOS_PADDRTOVADDR(0x44a)); /* COLS */ buf[1] = readb(BIOS_PADDRTOVADDR(0x484)); /* ROWS */ buf[2] = readb(BIOS_PADDRTOVADDR(0x485)); /* POINTS */ buf[3] = readb(BIOS_PADDRTOVADDR(0x44c)); buf[4] = readb(BIOS_PADDRTOVADDR(0x44d)); #endif return 0; } /* * load_state(): * Set video registers at once. * NOTE: this function only updates the standard EGA/VGA registers. * any extra/extended registers of SVGA adapters are not changed. 
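* CRTC registers 0-7 may be write-protected by bit 7 of CRTC register * 0x11, so that bit is cleared below before the CRTC block is * reprogrammed.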
* * EGA/VGA */ static int vga_load_state(video_adapter_t *adp, void *p) { u_char *buf; int crtc_addr; int s; int i; prologue(adp, V_ADP_STATELOAD, ENODEV); if (((adp_state_t *)p)->sig != V_STATE_SIG) return EINVAL; buf = ((adp_state_t *)p)->regs; crtc_addr = adp->va_crtc_addr; #if VGA_DEBUG > 1 dump_buffer(buf, V_MODE_PARAM_SIZE); #endif s = splhigh(); outb(TSIDX, 0x00); outb(TSREG, 0x01); /* stop sequencer */ for (i = 0; i < 4; ++i) { /* program sequencer */ outb(TSIDX, i + 1); outb(TSREG, buf[i + 5]); } outb(MISC, buf[9]); /* set dot-clock */ outb(TSIDX, 0x00); outb(TSREG, 0x03); /* start sequencer */ outb(crtc_addr, 0x11); outb(crtc_addr + 1, inb(crtc_addr + 1) & 0x7F); for (i = 0; i < 25; ++i) { /* program crtc */ outb(crtc_addr, i); outb(crtc_addr + 1, buf[i + 10]); } inb(crtc_addr+6); /* reset flip-flop */ for (i = 0; i < 20; ++i) { /* program attribute ctrl */ outb(ATC, i); outb(ATC, buf[i + 35]); } for (i = 0; i < 9; ++i) { /* program graph data ctrl */ outb(GDCIDX, i); outb(GDCREG, buf[i + 55]); } inb(crtc_addr + 6); /* reset flip-flop */ outb(ATC, 0x20); /* enable palette */ #ifdef notyet /* a temporary workaround for kernel panic, XXX */ #ifndef VGA_NO_BIOS if (adp->va_unit == V_ADP_PRIMARY) { writeb(BIOS_PADDRTOVADDR(0x44a), buf[0]); /* COLS */ writeb(BIOS_PADDRTOVADDR(0x484), buf[1] + rows_offset - 1); /* ROWS */ writeb(BIOS_PADDRTOVADDR(0x485), buf[2]); /* POINTS */ #if 0 writeb(BIOS_PADDRTOVADDR(0x44c), buf[3]); writeb(BIOS_PADDRTOVADDR(0x44d), buf[4]); #endif } #endif /* VGA_NO_BIOS */ #endif /* notyet */ splx(s); return 0; } /* * set_origin(): * Change the origin (window mapping) of the banked frame buffer. */ static int vga_set_origin(video_adapter_t *adp, off_t offset) { /* * The standard video modes do not require window mapping; * always return error. */ return ENODEV; } /* * read_hw_cursor(): * Read the position of the hardware text cursor. * * all adapters */ static int vga_read_hw_cursor(video_adapter_t *adp, int *col, int *row) { u_int16_t off; int s; if (!vga_init_done) return ENXIO; if (adp->va_info.vi_flags & V_INFO_GRAPHICS) return ENODEV; s = spltty(); outb(adp->va_crtc_addr, 14); off = inb(adp->va_crtc_addr + 1); outb(adp->va_crtc_addr, 15); off = (off << 8) | inb(adp->va_crtc_addr + 1); splx(s); *row = off / adp->va_info.vi_width; *col = off % adp->va_info.vi_width; return 0; } /* * set_hw_cursor(): * Move the hardware text cursor. If col and row are both -1, * the cursor won't be shown. * * all adapters */ static int vga_set_hw_cursor(video_adapter_t *adp, int col, int row) { u_int16_t off; int s; if (!vga_init_done) return ENXIO; if ((col == -1) && (row == -1)) { off = -1; } else { if (adp->va_info.vi_flags & V_INFO_GRAPHICS) return ENODEV; off = row*adp->va_info.vi_width + col; } s = spltty(); outb(adp->va_crtc_addr, 14); outb(adp->va_crtc_addr + 1, off >> 8); outb(adp->va_crtc_addr, 15); outb(adp->va_crtc_addr + 1, off & 0x00ff); splx(s); return 0; } /* * set_hw_cursor_shape(): * Change the shape of the hardware text cursor. If the height is * zero or negative, the cursor won't be shown. 
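* The shape is programmed through CRTC registers 10 (cursor start) and * 11 (cursor end); writing a start line beyond the character cell (or * the value 32 on VGA, which sets the cursor-off bit) hides the cursor.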
* * all adapters */ static int vga_set_hw_cursor_shape(video_adapter_t *adp, int base, int height, int celsize, int blink) { int s; if (!vga_init_done) return ENXIO; s = spltty(); switch (adp->va_type) { case KD_VGA: case KD_CGA: case KD_MONO: case KD_HERCULES: default: if (height <= 0) { /* make the cursor invisible */ outb(adp->va_crtc_addr, 10); outb(adp->va_crtc_addr + 1, 32); outb(adp->va_crtc_addr, 11); outb(adp->va_crtc_addr + 1, 0); } else { outb(adp->va_crtc_addr, 10); outb(adp->va_crtc_addr + 1, celsize - base - height); outb(adp->va_crtc_addr, 11); outb(adp->va_crtc_addr + 1, celsize - base - 1); } break; case KD_EGA: if (height <= 0) { /* make the cursor invisible */ outb(adp->va_crtc_addr, 10); outb(adp->va_crtc_addr + 1, celsize); outb(adp->va_crtc_addr, 11); outb(adp->va_crtc_addr + 1, 0); } else { outb(adp->va_crtc_addr, 10); outb(adp->va_crtc_addr + 1, celsize - base - height); outb(adp->va_crtc_addr, 11); outb(adp->va_crtc_addr + 1, celsize - base); } break; } splx(s); return 0; } /* * blank_display() * Put the display in power save/power off mode. * * all adapters */ static int vga_blank_display(video_adapter_t *adp, int mode) { u_char val; int s; s = splhigh(); switch (adp->va_type) { case KD_VGA: switch (mode) { case V_DISPLAY_SUSPEND: case V_DISPLAY_STAND_BY: outb(TSIDX, 0x01); val = inb(TSREG); outb(TSIDX, 0x01); outb(TSREG, val | 0x20); outb(adp->va_crtc_addr, 0x17); val = inb(adp->va_crtc_addr + 1); outb(adp->va_crtc_addr + 1, val & ~0x80); break; case V_DISPLAY_BLANK: outb(TSIDX, 0x01); val = inb(TSREG); outb(TSIDX, 0x01); outb(TSREG, val | 0x20); break; case V_DISPLAY_ON: outb(TSIDX, 0x01); val = inb(TSREG); outb(TSIDX, 0x01); outb(TSREG, val & 0xDF); outb(adp->va_crtc_addr, 0x17); val = inb(adp->va_crtc_addr + 1); outb(adp->va_crtc_addr + 1, val | 0x80); break; } break; case KD_EGA: /* no support yet */ splx(s); return ENODEV; case KD_CGA: switch (mode) { case V_DISPLAY_SUSPEND: case V_DISPLAY_STAND_BY: case V_DISPLAY_BLANK: outb(adp->va_crtc_addr + 4, 0x25); break; case V_DISPLAY_ON: outb(adp->va_crtc_addr + 4, 0x2d); break; } break; case KD_MONO: case KD_HERCULES: switch (mode) { case V_DISPLAY_SUSPEND: case V_DISPLAY_STAND_BY: case V_DISPLAY_BLANK: outb(adp->va_crtc_addr + 4, 0x21); break; case V_DISPLAY_ON: outb(adp->va_crtc_addr + 4, 0x29); break; } break; default: break; } splx(s); return 0; } /* * mmap(): * Mmap frame buffer. * * all adapters */ static int vga_mmap_buf(video_adapter_t *adp, vm_ooffset_t offset, vm_paddr_t *paddr, int prot, vm_memattr_t *memattr) { if (adp->va_info.vi_flags & V_INFO_LINEAR) return -1; #if VGA_DEBUG > 0 printf("vga_mmap_buf(): window:0x%jx, offset:0x%jx\n", (uintmax_t)adp->va_info.vi_window, (uintmax_t)offset); #endif /* XXX: is this correct? 
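The check below only rejects offsets whose page would extend past the * bank window, so every returned physical page lies entirely within * vi_window.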
*/ if (offset > adp->va_window_size - PAGE_SIZE) return -1; *paddr = adp->va_info.vi_window + offset; return 0; } #ifndef VGA_NO_MODE_CHANGE static void planar_fill(video_adapter_t *adp, int val) { int length; int at; /* position in the frame buffer */ int l; outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, 0x0003); /* data rotate/function select */ outw(GDCIDX, 0x0f01); /* set/reset enable */ outw(GDCIDX, 0xff08); /* bit mask */ outw(GDCIDX, (val << 8) | 0x00); /* set/reset */ at = 0; length = adp->va_line_width*adp->va_info.vi_height; while (length > 0) { l = imin(length, adp->va_window_size); vidd_set_win_org(adp, at); bzero_io(adp->va_window, l); length -= l; at += l; } outw(GDCIDX, 0x0000); /* set/reset */ outw(GDCIDX, 0x0001); /* set/reset enable */ } static void packed_fill(video_adapter_t *adp, int val) { int length; int at; /* position in the frame buffer */ int l; at = 0; length = adp->va_line_width*adp->va_info.vi_height; while (length > 0) { l = imin(length, adp->va_window_size); vidd_set_win_org(adp, at); fill_io(val, adp->va_window, l); length -= l; at += l; } } static void direct_fill(video_adapter_t *adp, int val) { int length; int at; /* position in the frame buffer */ int l; at = 0; length = adp->va_line_width*adp->va_info.vi_height; while (length > 0) { l = imin(length, adp->va_window_size); vidd_set_win_org(adp, at); switch (adp->va_info.vi_pixel_size) { case sizeof(u_int16_t): fillw_io(val, adp->va_window, l/sizeof(u_int16_t)); break; case 3: /* FIXME */ break; case sizeof(u_int32_t): filll_io(val, adp->va_window, l/sizeof(u_int32_t)); break; } length -= l; at += l; } } static int vga_clear(video_adapter_t *adp) { switch (adp->va_info.vi_mem_model) { case V_INFO_MM_TEXT: /* do nothing? XXX */ break; case V_INFO_MM_PLANAR: planar_fill(adp, 0); break; case V_INFO_MM_PACKED: packed_fill(adp, 0); break; case V_INFO_MM_DIRECT: direct_fill(adp, 0); break; } return 0; } #ifdef notyet static void planar_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { int banksize; int bank; int pos; int offset; /* offset within window */ int bx; int l; outw(GDCIDX, 0x0005); /* read mode 0, write mode 0 */ outw(GDCIDX, 0x0003); /* data rotate/function select */ outw(GDCIDX, 0x0f01); /* set/reset enable */ outw(GDCIDX, 0xff08); /* bit mask */ outw(GDCIDX, (val << 8) | 0x00); /* set/reset */ banksize = adp->va_window_size; bank = -1; while (cy > 0) { pos = adp->va_line_width*y + x/8; if (bank != pos/banksize) { vidd_set_win_org(adp, pos); bank = pos/banksize; } offset = pos%banksize; bx = (x + cx)/8 - x/8; if (x % 8) { outw(GDCIDX, ((0xff00 >> (x % 8)) & 0xff00) | 0x08); writeb(adp->va_window + offset, 0); ++offset; --bx; if (offset >= banksize) { offset = 0; ++bank; /* next bank */ vidd_set_win_org(adp, bank*banksize); } outw(GDCIDX, 0xff08); /* bit mask */ } while (bx > 0) { l = imin(bx, banksize); bzero_io(adp->va_window + offset, l); offset += l; bx -= l; if (offset >= banksize) { offset = 0; ++bank; /* next bank */ vidd_set_win_org(adp, bank*banksize); } } if ((x + cx) % 8) { outw(GDCIDX, (~(0xff00 >> ((x + cx) % 8)) & 0xff00) | 0x08); writeb(adp->va_window + offset, 0); ++offset; if (offset >= banksize) { offset = 0; ++bank; /* next bank */ vidd_set_win_org(adp, bank*banksize); } outw(GDCIDX, 0xff08); /* bit mask */ } ++y; --cy; } outw(GDCIDX, 0xff08); /* bit mask */ outw(GDCIDX, 0x0000); /* set/reset */ outw(GDCIDX, 0x0001); /* set/reset enable */ } static void packed_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { int 
banksize; int bank; int pos; int offset; /* offset within window */ int end; banksize = adp->va_window_size; bank = -1; cx *= adp->va_info.vi_pixel_size; while (cy > 0) { pos = adp->va_line_width*y + x*adp->va_info.vi_pixel_size; if (bank != pos/banksize) { vidd_set_win_org(adp, pos); bank = pos/banksize; } offset = pos%banksize; end = imin(offset + cx, banksize); fill_io(val, adp->va_window + offset, (end - offset)/adp->va_info.vi_pixel_size); /* the line may cross the window boundary */ if (offset + cx > banksize) { ++bank; /* next bank */ vidd_set_win_org(adp, bank*banksize); end = offset + cx - banksize; fill_io(val, adp->va_window, end/adp->va_info.vi_pixel_size); } ++y; --cy; } } static void direct_fill_rect16(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { int banksize; int bank; int pos; int offset; /* offset within window */ int end; /* * XXX: the function assumes that banksize is a multiple of * sizeof(u_int16_t). */ banksize = adp->va_window_size; bank = -1; cx *= sizeof(u_int16_t); while (cy > 0) { pos = adp->va_line_width*y + x*sizeof(u_int16_t); if (bank != pos/banksize) { vidd_set_win_org(adp, pos); bank = pos/banksize; } offset = pos%banksize; end = imin(offset + cx, banksize); fillw_io(val, adp->va_window + offset, (end - offset)/sizeof(u_int16_t)); /* the line may cross the window boundary */ if (offset + cx > banksize) { ++bank; /* next bank */ vidd_set_win_org(adp, bank*banksize); end = offset + cx - banksize; fillw_io(val, adp->va_window, end/sizeof(u_int16_t)); } ++y; --cy; } } static void direct_fill_rect24(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { int banksize; int bank; int pos; int offset; /* offset within window */ int end; int i; int j; u_int8_t b[3]; b[0] = val & 0x0000ff; b[1] = (val >> 8) & 0x0000ff; b[2] = (val >> 16) & 0x0000ff; banksize = adp->va_window_size; bank = -1; cx *= 3; while (cy > 0) { pos = adp->va_line_width*y + x*3; if (bank != pos/banksize) { vidd_set_win_org(adp, pos); bank = pos/banksize; } offset = pos%banksize; end = imin(offset + cx, banksize); for (i = 0, j = offset; j < end; i = (i + 1)%3, ++j) { writeb(adp->va_window + j, b[i]); } /* the line may cross the window boundary */ if (offset + cx >= banksize) { ++bank; /* next bank */ vidd_set_win_org(adp, bank*banksize); j = 0; end = offset + cx - banksize; for (; j < end; i = (i + 1)%3, ++j) { writeb(adp->va_window + j, b[i]); } } ++y; --cy; } } static void direct_fill_rect32(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { int banksize; int bank; int pos; int offset; /* offset within window */ int end; /* * XXX: the function assumes that banksize is a multiple of * sizeof(u_int32_t). */ banksize = adp->va_window_size; bank = -1; cx *= sizeof(u_int32_t); while (cy > 0) { pos = adp->va_line_width*y + x*sizeof(u_int32_t); if (bank != pos/banksize) { vidd_set_win_org(adp, pos); bank = pos/banksize; } offset = pos%banksize; end = imin(offset + cx, banksize); filll_io(val, adp->va_window + offset, (end - offset)/sizeof(u_int32_t)); /* the line may cross the window boundary */ if (offset + cx > banksize) { ++bank; /* next bank */ vidd_set_win_org(adp, bank*banksize); end = offset + cx - banksize; filll_io(val, adp->va_window, end/sizeof(u_int32_t)); } ++y; --cy; } } static int vga_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { switch (adp->va_info.vi_mem_model) { case V_INFO_MM_TEXT: /* do nothing?
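(text cells are written straight into the buffer by the terminal code, * so there is no pixel fill to perform here)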
XXX */ break; case V_INFO_MM_PLANAR: planar_fill_rect(adp, val, x, y, cx, cy); break; case V_INFO_MM_PACKED: packed_fill_rect(adp, val, x, y, cx, cy); break; case V_INFO_MM_DIRECT: switch (adp->va_info.vi_pixel_size) { case sizeof(u_int16_t): direct_fill_rect16(adp, val, x, y, cx, cy); break; case 3: direct_fill_rect24(adp, val, x, y, cx, cy); break; case sizeof(u_int32_t): direct_fill_rect32(adp, val, x, y, cx, cy); break; } break; } return 0; } #else /* !notyet */ static int vga_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { return ENODEV; } #endif /* notyet */ static int vga_bitblt(video_adapter_t *adp,...) { /* FIXME */ return ENODEV; } #endif /* !VGA_NO_MODE_CHANGE */ static int get_palette(video_adapter_t *adp, int base, int count, u_char *red, u_char *green, u_char *blue, u_char *trans) { u_char *r; u_char *g; u_char *b; if (count < 0 || base < 0 || count > 256 || base > 256 || base + count > 256) return EINVAL; r = malloc(count*3, M_DEVBUF, M_WAITOK); g = r + count; b = g + count; if (vga_save_palette2(adp, base, count, r, g, b)) { free(r, M_DEVBUF); return ENODEV; } copyout(r, red, count); copyout(g, green, count); copyout(b, blue, count); if (trans != NULL) { bzero(r, count); copyout(r, trans, count); } free(r, M_DEVBUF); return 0; } static int set_palette(video_adapter_t *adp, int base, int count, u_char *red, u_char *green, u_char *blue, u_char *trans) { u_char *r; u_char *g; u_char *b; int err; if (count < 0 || base < 0 || count > 256 || base > 256 || base + count > 256) return EINVAL; r = malloc(count*3, M_DEVBUF, M_WAITOK); g = r + count; b = g + count; err = copyin(red, r, count); if (!err) err = copyin(green, g, count); if (!err) err = copyin(blue, b, count); if (!err) err = vga_load_palette2(adp, base, count, r, g, b); free(r, M_DEVBUF); return (err ? ENODEV : 0); } static int vga_dev_ioctl(video_adapter_t *adp, u_long cmd, caddr_t arg) { switch (cmd) { case FBIO_GETWINORG: /* get frame buffer window origin */ *(u_int *)arg = 0; return 0; case FBIO_SETWINORG: /* set frame buffer window origin */ return ENODEV; case FBIO_SETDISPSTART: /* set display start address */ return (set_display_start(adp, ((video_display_start_t *)arg)->x, ((video_display_start_t *)arg)->y) ? ENODEV : 0); case FBIO_SETLINEWIDTH: /* set scan line length in pixel */ return (set_line_length(adp, *(u_int *)arg) ? ENODEV : 0); case FBIO_GETPALETTE: /* get color palette */ return get_palette(adp, ((video_color_palette_t *)arg)->index, ((video_color_palette_t *)arg)->count, ((video_color_palette_t *)arg)->red, ((video_color_palette_t *)arg)->green, ((video_color_palette_t *)arg)->blue, ((video_color_palette_t *)arg)->transparent); case FBIO_SETPALETTE: /* set color palette */ return set_palette(adp, ((video_color_palette_t *)arg)->index, ((video_color_palette_t *)arg)->count, ((video_color_palette_t *)arg)->red, ((video_color_palette_t *)arg)->green, ((video_color_palette_t *)arg)->blue, ((video_color_palette_t *)arg)->transparent); case FBIOGTYPE: /* get frame buffer type info. 
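The colormap size below is reported as 1 << depth for indexed 2-8 bit * modes and as 0 for monochrome and direct-color modes.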
*/ ((struct fbtype *)arg)->fb_type = fb_type(adp->va_type); ((struct fbtype *)arg)->fb_height = adp->va_info.vi_height; ((struct fbtype *)arg)->fb_width = adp->va_info.vi_width; ((struct fbtype *)arg)->fb_depth = adp->va_info.vi_depth; if ((adp->va_info.vi_depth <= 1) || (adp->va_info.vi_depth > 8)) ((struct fbtype *)arg)->fb_cmsize = 0; else ((struct fbtype *)arg)->fb_cmsize = 1 << adp->va_info.vi_depth; ((struct fbtype *)arg)->fb_size = adp->va_buffer_size; return 0; case FBIOGETCMAP: /* get color palette */ return get_palette(adp, ((struct fbcmap *)arg)->index, ((struct fbcmap *)arg)->count, ((struct fbcmap *)arg)->red, ((struct fbcmap *)arg)->green, ((struct fbcmap *)arg)->blue, NULL); case FBIOPUTCMAP: /* set color palette */ return set_palette(adp, ((struct fbcmap *)arg)->index, ((struct fbcmap *)arg)->count, ((struct fbcmap *)arg)->red, ((struct fbcmap *)arg)->green, ((struct fbcmap *)arg)->blue, NULL); default: return fb_commonioctl(adp, cmd, arg); } } static void dump_buffer(u_char *buf, size_t len) { int i; for(i = 0; i < len;) { printf("%02x ", buf[i]); if ((++i % 16) == 0) printf("\n"); } } /* * diag(): * Print some information about the video adapter and video modes, * with requested level of details. * * all adapters */ static int vga_diag(video_adapter_t *adp, int level) { u_char *mp; #if FB_DEBUG > 1 video_info_t info; int i; #endif if (!vga_init_done) return ENXIO; #if FB_DEBUG > 1 #ifndef VGA_NO_BIOS printf("vga: RTC equip. code:0x%02x, DCC code:0x%02x\n", rtcin(RTC_EQUIPMENT), readb(BIOS_PADDRTOVADDR(0x488))); printf("vga: CRTC:0x%x, video option:0x%02x, ", readw(BIOS_PADDRTOVADDR(0x463)), readb(BIOS_PADDRTOVADDR(0x487))); printf("rows:%d, cols:%d, font height:%d\n", readb(BIOS_PADDRTOVADDR(0x44a)), readb(BIOS_PADDRTOVADDR(0x484)) + 1, readb(BIOS_PADDRTOVADDR(0x485))); #endif /* VGA_NO_BIOS */ #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) printf("vga: param table EGA/VGA:%p", video_mode_ptr); printf(", CGA/MDA:%p\n", video_mode_ptr2); printf("vga: rows_offset:%d\n", rows_offset); #endif #endif /* FB_DEBUG > 1 */ fb_dump_adp_info(VGA_DRIVER_NAME, adp, level); #if FB_DEBUG > 1 if (adp->va_flags & V_ADP_MODECHANGE) { for (i = 0; bios_vmode[i].vi_mode != EOT; ++i) { if (bios_vmode[i].vi_mode == NA) continue; if (get_mode_param(bios_vmode[i].vi_mode) == NULL) continue; fb_dump_mode_info(VGA_DRIVER_NAME, adp, &bios_vmode[i], level); } } else { vga_get_info(adp, adp->va_initial_mode, &info); /* shouldn't fail */ fb_dump_mode_info(VGA_DRIVER_NAME, adp, &info, level); } #endif /* FB_DEBUG > 1 */ if ((adp->va_type != KD_EGA) && (adp->va_type != KD_VGA)) return 0; #if !defined(VGA_NO_BIOS) && !defined(VGA_NO_MODE_CHANGE) if (video_mode_ptr == NULL) printf("vga%d: %s: WARNING: video mode switching is not " "fully supported on this adapter\n", adp->va_unit, adp->va_name); #endif if (level <= 0) return 0; if (adp->va_type == KD_VGA) { printf("VGA parameters upon power-up\n"); dump_buffer(adpstate.regs, sizeof(adpstate.regs)); printf("VGA parameters in BIOS for mode %d\n", adp->va_initial_mode); dump_buffer(adpstate2.regs, sizeof(adpstate2.regs)); } mp = get_mode_param(adp->va_initial_mode); if (mp == NULL) /* this shouldn't be happening */ return 0; printf("EGA/VGA parameters to be used for mode %d\n", adp->va_initial_mode); dump_buffer(mp, V_MODE_PARAM_SIZE); return 0; } diff --git a/sys/dev/jme/if_jme.c b/sys/dev/jme/if_jme.c index f2567ccf0ee3..647447032b83 100644 --- a/sys/dev/jme/if_jme.c +++ b/sys/dev/jme/if_jme.c @@ -1,3456 +1,3456 @@ /*- * Copyright (c) 2008, Pyun 
YongHyeon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* Define the following to disable printing Rx errors. */ #undef JME_SHOW_ERRORS #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) MODULE_DEPEND(jme, pci, 1, 1, 1); MODULE_DEPEND(jme, ether, 1, 1, 1); MODULE_DEPEND(jme, miibus, 1, 1, 1); /* Tunables. */ static int msi_disable = 0; static int msix_disable = 0; TUNABLE_INT("hw.jme.msi_disable", &msi_disable); TUNABLE_INT("hw.jme.msix_disable", &msix_disable); /* * Devices supported by this driver. 
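* The two IDs below cover the whole JMC25x/JMC26x families; individual * chip revisions are told apart later in jme_attach() from the PCI * device ID and the CHIPMODE register.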
*/ static struct jme_dev { uint16_t jme_vendorid; uint16_t jme_deviceid; const char *jme_name; } jme_devs[] = { { VENDORID_JMICRON, DEVICEID_JMC250, "JMicron Inc, JMC25x Gigabit Ethernet" }, { VENDORID_JMICRON, DEVICEID_JMC260, "JMicron Inc, JMC26x Fast Ethernet" }, }; static int jme_miibus_readreg(device_t, int, int); static int jme_miibus_writereg(device_t, int, int, int); static void jme_miibus_statchg(device_t); static void jme_mediastatus(struct ifnet *, struct ifmediareq *); static int jme_mediachange(struct ifnet *); static int jme_probe(device_t); static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *); static int jme_eeprom_macaddr(struct jme_softc *); static int jme_efuse_macaddr(struct jme_softc *); static void jme_reg_macaddr(struct jme_softc *); static void jme_set_macaddr(struct jme_softc *, uint8_t *); static void jme_map_intr_vector(struct jme_softc *); static int jme_attach(device_t); static int jme_detach(device_t); static void jme_sysctl_node(struct jme_softc *); static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int jme_dma_alloc(struct jme_softc *); static void jme_dma_free(struct jme_softc *); static int jme_shutdown(device_t); static void jme_setlinkspeed(struct jme_softc *); static void jme_setwol(struct jme_softc *); static int jme_suspend(device_t); static int jme_resume(device_t); static int jme_encap(struct jme_softc *, struct mbuf **); static void jme_start(struct ifnet *); static void jme_start_locked(struct ifnet *); static void jme_watchdog(struct jme_softc *); static int jme_ioctl(struct ifnet *, u_long, caddr_t); static void jme_mac_config(struct jme_softc *); static void jme_link_task(void *, int); static int jme_intr(void *); static void jme_int_task(void *, int); static void jme_txeof(struct jme_softc *); static __inline void jme_discard_rxbuf(struct jme_softc *, int); static void jme_rxeof(struct jme_softc *); static int jme_rxintr(struct jme_softc *, int); static void jme_tick(void *); static void jme_reset(struct jme_softc *); static void jme_init(void *); static void jme_init_locked(struct jme_softc *); static void jme_stop(struct jme_softc *); static void jme_stop_tx(struct jme_softc *); static void jme_stop_rx(struct jme_softc *); static int jme_init_rx_ring(struct jme_softc *); static void jme_init_tx_ring(struct jme_softc *); static void jme_init_ssb(struct jme_softc *); static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *); static void jme_set_vlan(struct jme_softc *); static void jme_set_filter(struct jme_softc *); static void jme_stats_clear(struct jme_softc *); static void jme_stats_save(struct jme_softc *); static void jme_stats_update(struct jme_softc *); static void jme_phy_down(struct jme_softc *); static void jme_phy_up(struct jme_softc *); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS); static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS); static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS); static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS); static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS); static device_method_t jme_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, jme_probe), DEVMETHOD(device_attach, jme_attach), DEVMETHOD(device_detach, jme_detach), DEVMETHOD(device_shutdown, jme_shutdown), DEVMETHOD(device_suspend, jme_suspend), DEVMETHOD(device_resume, jme_resume), /* MII interface. 
*/ DEVMETHOD(miibus_readreg, jme_miibus_readreg), DEVMETHOD(miibus_writereg, jme_miibus_writereg), DEVMETHOD(miibus_statchg, jme_miibus_statchg), { NULL, NULL } }; static driver_t jme_driver = { "jme", jme_methods, sizeof(struct jme_softc) }; static devclass_t jme_devclass; DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0); DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0); static struct resource_spec jme_res_spec_mem[] = { { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, { -1, 0, 0 } }; static struct resource_spec jme_irq_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec jme_irq_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; /* * Read a PHY register on the MII of the JMC250. */ static int jme_miibus_readreg(device_t dev, int phy, int reg) { struct jme_softc *sc; uint32_t val; int i; sc = device_get_softc(dev); /* For FPGA version, PHY address 0 should be ignored. */ if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0) return (0); CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE | SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); for (i = JME_PHY_TIMEOUT; i > 0; i--) { DELAY(1); if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) break; } if (i == 0) { device_printf(sc->jme_dev, "phy read timeout : %d\n", reg); return (0); } return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT); } /* * Write a PHY register on the MII of the JMC250. */ static int jme_miibus_writereg(device_t dev, int phy, int reg, int val) { struct jme_softc *sc; int i; sc = device_get_softc(dev); /* For FPGA version, PHY address 0 should be ignored. */ if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0) return (0); CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE | ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); for (i = JME_PHY_TIMEOUT; i > 0; i--) { DELAY(1); if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) break; } if (i == 0) device_printf(sc->jme_dev, "phy write timeout : %d\n", reg); return (0); } /* * Callback from MII layer when media changes. */ static void jme_miibus_statchg(device_t dev) { struct jme_softc *sc; sc = device_get_softc(dev); taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task); } /* * Get the current interface media status. */ static void jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct jme_softc *sc; struct mii_data *mii; sc = ifp->if_softc; JME_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { JME_UNLOCK(sc); return; } mii = device_get_softc(sc->jme_miibus); mii_pollstat(mii); ifmr->ifm_status = mii->mii_media_status; ifmr->ifm_active = mii->mii_media_active; JME_UNLOCK(sc); } /* * Set hardware to newly-selected media. 
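* Each PHY instance is reset before mii_mediachg() so that * autonegotiation restarts from a clean state; the softc mutex is held * across the change.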
*/ static int jme_mediachange(struct ifnet *ifp) { struct jme_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = ifp->if_softc; JME_LOCK(sc); mii = device_get_softc(sc->jme_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); JME_UNLOCK(sc); return (error); } static int jme_probe(device_t dev) { struct jme_dev *sp; int i; uint16_t vendor, devid; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); sp = jme_devs; for (i = 0; i < nitems(jme_devs); i++, sp++) { if (vendor == sp->jme_vendorid && devid == sp->jme_deviceid) { device_set_desc(dev, sp->jme_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val) { uint32_t reg; int i; *val = 0; for (i = JME_TIMEOUT; i > 0; i--) { reg = CSR_READ_4(sc, JME_SMBCSR); if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE) break; DELAY(1); } if (i == 0) { device_printf(sc->jme_dev, "EEPROM idle timeout!\n"); return (ETIMEDOUT); } reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK; CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER); for (i = JME_TIMEOUT; i > 0; i--) { DELAY(1); reg = CSR_READ_4(sc, JME_SMBINTF); if ((reg & SMBINTF_CMD_TRIGGER) == 0) break; } if (i == 0) { device_printf(sc->jme_dev, "EEPROM read timeout!\n"); return (ETIMEDOUT); } reg = CSR_READ_4(sc, JME_SMBINTF); *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT; return (0); } static int jme_eeprom_macaddr(struct jme_softc *sc) { uint8_t eaddr[ETHER_ADDR_LEN]; uint8_t fup, reg, val; uint32_t offset; int match; offset = 0; if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || fup != JME_EEPROM_SIG0) return (ENOENT); if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 || fup != JME_EEPROM_SIG1) return (ENOENT); match = 0; do { if (jme_eeprom_read_byte(sc, offset, &fup) != 0) break; if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) == (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) { if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0) break; if (reg >= JME_PAR0 && reg < JME_PAR0 + ETHER_ADDR_LEN) { if (jme_eeprom_read_byte(sc, offset + 2, &val) != 0) break; eaddr[reg - JME_PAR0] = val; match++; } } /* Check for the end of EEPROM descriptor. */ if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END) break; /* Try next eeprom descriptor. */ offset += JME_EEPROM_DESC_BYTES; } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END); if (match == ETHER_ADDR_LEN) { bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN); return (0); } return (ENOENT); } static int jme_efuse_macaddr(struct jme_softc *sc) { uint32_t reg; int i; reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4); if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) return (ENOENT); /* Reset eFuse controller. */ reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4); reg |= EFUSE_CTL2_RESET; pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4); reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4); reg &= ~EFUSE_CTL2_RESET; pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4); /* Have eFuse reload station address to MAC controller. */ reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4); reg &= ~EFUSE_CTL1_CMD_MASK; reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE; pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4); /* * Verify completion of eFuse autoload command. It should be * completed within 108us.
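* An initial 110us delay covers the common case; after that the * EXECUTE bit is polled up to ten more times at 20us intervals before * the command is declared timed out.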
*/ DELAY(110); for (i = 10; i > 0; i--) { reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4); if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) { DELAY(20); continue; } if ((reg & EFUSE_CTL1_EXECUTE) == 0) break; /* Station address loading is still in progress. */ DELAY(20); } if (i == 0) { device_printf(sc->jme_dev, "eFuse autoload timed out.\n"); return (ETIMEDOUT); } return (0); } static void jme_reg_macaddr(struct jme_softc *sc) { uint32_t par0, par1; /* Read station address. */ par0 = CSR_READ_4(sc, JME_PAR0); par1 = CSR_READ_4(sc, JME_PAR1); par1 &= 0xFFFF; if ((par0 == 0 && par1 == 0) || (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) { device_printf(sc->jme_dev, "Failed to retrieve Ethernet address.\n"); } else { /* * For controllers that use eFuse, the station address * could also be extracted from the JME_PCI_PAR0 and * JME_PCI_PAR1 registers in PCI configuration space. * Each register holds exactly half of the station address * (24 bits), so use the JME_PAR0 and JME_PAR1 registers instead. */ sc->jme_eaddr[0] = (par0 >> 0) & 0xFF; sc->jme_eaddr[1] = (par0 >> 8) & 0xFF; sc->jme_eaddr[2] = (par0 >> 16) & 0xFF; sc->jme_eaddr[3] = (par0 >> 24) & 0xFF; sc->jme_eaddr[4] = (par1 >> 0) & 0xFF; sc->jme_eaddr[5] = (par1 >> 8) & 0xFF; } } static void jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr) { uint32_t val; int i; if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) { /* * Avoid reprogramming the station address if it is the * same as the previous one. Note, a reprogrammed * station address is permanent as if it was written * to EEPROM. So if the station address was changed by the * administrator it's possible to lose the factory configured * address when the driver fails to restore its address. * (e.g. reboot or system crash) */ if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) { for (i = 0; i < ETHER_ADDR_LEN; i++) { val = JME_EFUSE_EEPROM_FUNC0 << JME_EFUSE_EEPROM_FUNC_SHIFT; val |= JME_EFUSE_EEPROM_PAGE_BAR1 << JME_EFUSE_EEPROM_PAGE_SHIFT; val |= (JME_PAR0 + i) << JME_EFUSE_EEPROM_ADDR_SHIFT; val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT; pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM, val | JME_EFUSE_EEPROM_WRITE, 4); } } } else { CSR_WRITE_4(sc, JME_PAR0, eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]); } } static void jme_map_intr_vector(struct jme_softc *sc) { uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES]; bzero(map, sizeof(map)); /* Map Tx interrupt sources to MSI/MSIX vector 2. */ map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] = MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP); map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |= MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP); map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |= MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP); map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |= MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP); map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |= MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP); map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |= MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP); map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |= MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP); map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |= MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP); map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |= MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL); map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |= MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO); /* Map Rx interrupt sources to MSI/MSIX vector 1.
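Completion, descriptor-empty and coalescing events for all four Rx * queues are routed here; Tx uses vector 2 and any source left zero in * map[] stays on vector 0.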
*/ map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP); map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP); map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP); map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP); map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY); map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY); map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY); map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY); map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL); map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL); map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL); map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL); map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO); map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO); map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO); map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] = MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO); /* Map all other interrupt sources to MSI/MSIX vector 0. */ CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]); CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]); CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]); CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]); } static int jme_attach(device_t dev) { struct jme_softc *sc; struct ifnet *ifp; struct mii_softc *miisc; struct mii_data *mii; uint32_t reg; uint16_t burst; int error, i, mii_flags, msic, msixc, pmc; error = 0; sc = device_get_softc(dev); sc->jme_dev = dev; mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0); TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc); TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc); /* * Map the device. JMC250 supports both memory mapped and I/O * register space access. Because I/O register access should * use different BARs to access registers it's a waste of time * to use I/O register space access. JMC250 uses 16K to map the * entire memory space. */ pci_enable_busmaster(dev); sc->jme_res_spec = jme_res_spec_mem; sc->jme_irq_spec = jme_irq_spec_legacy; error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res); if (error != 0) { device_printf(dev, "cannot allocate memory resources.\n"); goto fail; } /* Allocate IRQ resources. */ msixc = pci_msix_count(dev); msic = pci_msi_count(dev); if (bootverbose) { device_printf(dev, "MSIX count : %d\n", msixc); device_printf(dev, "MSI count : %d\n", msic); } /* Use 1 MSI/MSI-X. */ if (msixc > 1) msixc = 1; if (msic > 1) msic = 1; /* Prefer MSIX over MSI.
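Only a single message is used either way: MSI-X is tried first, then * MSI, and if neither yields exactly one vector the driver falls back * to the shared legacy INTx resource.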
*/ if (msix_disable == 0 || msi_disable == 0) { if (msix_disable == 0 && msixc > 0 && pci_alloc_msix(dev, &msixc) == 0) { if (msixc == 1) { device_printf(dev, "Using %d MSIX messages.\n", msixc); sc->jme_flags |= JME_FLAG_MSIX; sc->jme_irq_spec = jme_irq_spec_msi; } else pci_release_msi(dev); } if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 && msic > 0 && pci_alloc_msi(dev, &msic) == 0) { if (msic == 1) { device_printf(dev, "Using %d MSI messages.\n", msic); sc->jme_flags |= JME_FLAG_MSI; sc->jme_irq_spec = jme_irq_spec_msi; } else pci_release_msi(dev); } /* Map interrupt vector 0, 1 and 2. */ if ((sc->jme_flags & JME_FLAG_MSI) != 0 || (sc->jme_flags & JME_FLAG_MSIX) != 0) jme_map_intr_vector(sc); } error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq); if (error != 0) { device_printf(dev, "cannot allocate IRQ resources.\n"); goto fail; } sc->jme_rev = pci_get_device(dev); if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) { sc->jme_flags |= JME_FLAG_FASTETH; sc->jme_flags |= JME_FLAG_NOJUMBO; } reg = CSR_READ_4(sc, JME_CHIPMODE); sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT; if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) != CHIPMODE_NOT_FPGA) sc->jme_flags |= JME_FLAG_FPGA; if (bootverbose) { device_printf(dev, "PCI device revision : 0x%04x\n", sc->jme_rev); device_printf(dev, "Chip revision : 0x%02x\n", sc->jme_chip_rev); if ((sc->jme_flags & JME_FLAG_FPGA) != 0) device_printf(dev, "FPGA revision : 0x%04x\n", (reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT); } if (sc->jme_chip_rev == 0xFF) { device_printf(dev, "Unknown chip revision : 0x%02x\n", sc->jme_rev); error = ENXIO; goto fail; } /* Identify controller features and bugs. */ if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) { if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 && CHIPMODE_REVFM(sc->jme_chip_rev) == 2) sc->jme_flags |= JME_FLAG_DMA32BIT; if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD; sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK; sc->jme_flags |= JME_FLAG_HWMIB; } /* Reset the ethernet controller. */ jme_reset(sc); /* Get station address. */ if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) { error = jme_efuse_macaddr(sc); if (error == 0) jme_reg_macaddr(sc); } else { error = ENOENT; reg = CSR_READ_4(sc, JME_SMBCSR); if ((reg & SMBCSR_EEPROM_PRESENT) != 0) error = jme_eeprom_macaddr(sc); if (error != 0 && bootverbose) device_printf(sc->jme_dev, "ethernet hardware address not found in EEPROM.\n"); if (error != 0) jme_reg_macaddr(sc); } /* * Save PHY address. * Integrated JR0211 has fixed PHY address whereas FPGA version * requires PHY probing to get correct PHY address. */ if ((sc->jme_flags & JME_FLAG_FPGA) == 0) { sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) & GPREG0_PHY_ADDR_MASK; if (bootverbose) device_printf(dev, "PHY is at address %d.\n", sc->jme_phyaddr); } else sc->jme_phyaddr = 0; /* Set max allowable DMA size. 
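On PCIe parts the Tx DMA burst is derived from the maximum read * request size field (bits 14:12 of the device control register) and * capped at 512 bytes; Rx DMA always uses 128-byte bursts.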
*/ if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { sc->jme_flags |= JME_FLAG_PCIE; burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2); if (bootverbose) { device_printf(dev, "Read request size : %d bytes.\n", 128 << ((burst >> 12) & 0x07)); device_printf(dev, "TLP payload size : %d bytes.\n", 128 << ((burst >> 5) & 0x07)); } switch ((burst >> 12) & 0x07) { case 0: sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128; break; case 1: sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256; break; default: sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; break; } sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; } else { sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; } /* Create coalescing sysctl node. */ jme_sysctl_node(sc); if ((error = jme_dma_alloc(sc)) != 0) goto fail; ifp = sc->jme_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet structure.\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = jme_ioctl; ifp->if_start = jme_start; ifp->if_init = jme_init; ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); /* JMC250 supports Tx/Rx checksum offload as well as TSO. */ ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO; if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) { sc->jme_flags |= JME_FLAG_PMCAP; ifp->if_capabilities |= IFCAP_WOL_MAGIC; } ifp->if_capenable = ifp->if_capabilities; /* Wakeup PHY. */ jme_phy_up(sc); mii_flags = MIIF_DOPAUSE; /* Ask PHY calibration to PHY driver. */ if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) mii_flags |= MIIF_MACPRIV0; /* Set up MII bus. */ error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange, jme_mediastatus, BMSR_DEFCAPMASK, sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr, MII_OFFSET_ANY, mii_flags); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* * Force PHY to FPGA mode. */ if ((sc->jme_flags & JME_FLAG_FPGA) != 0) { mii = device_get_softc(sc->jme_miibus); if (mii->mii_instance != 0) { LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (miisc->mii_phy != 0) { sc->jme_phyaddr = miisc->mii_phy; break; } } if (sc->jme_phyaddr != 0) { device_printf(sc->jme_dev, "FPGA PHY is at %d\n", sc->jme_phyaddr); /* vendor magic. */ jme_miibus_writereg(dev, sc->jme_phyaddr, 27, 0x0004); } } } ether_ifattach(ifp, sc->jme_eaddr); /* VLAN capability setup */ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO; ifp->if_capenable = ifp->if_capabilities; /* Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* Create local taskq. 
*/ sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->jme_tq); if (sc->jme_tq == NULL) { device_printf(dev, "could not create taskqueue.\n"); ether_ifdetach(ifp); error = ENXIO; goto fail; } taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->jme_dev)); for (i = 0; i < 1; i++) { error = bus_setup_intr(dev, sc->jme_irq[i], INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc, &sc->jme_intrhand[i]); if (error != 0) break; } if (error != 0) { device_printf(dev, "could not set up interrupt handler.\n"); taskqueue_free(sc->jme_tq); sc->jme_tq = NULL; ether_ifdetach(ifp); goto fail; } fail: if (error != 0) jme_detach(dev); return (error); } static int jme_detach(device_t dev) { struct jme_softc *sc; struct ifnet *ifp; int i; sc = device_get_softc(dev); ifp = sc->jme_ifp; if (device_is_attached(dev)) { JME_LOCK(sc); sc->jme_flags |= JME_FLAG_DETACH; jme_stop(sc); JME_UNLOCK(sc); callout_drain(&sc->jme_tick_ch); taskqueue_drain(sc->jme_tq, &sc->jme_int_task); taskqueue_drain(taskqueue_swi, &sc->jme_link_task); /* Restore possibly modified station address. */ if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) jme_set_macaddr(sc, sc->jme_eaddr); ether_ifdetach(ifp); } if (sc->jme_tq != NULL) { taskqueue_drain(sc->jme_tq, &sc->jme_int_task); taskqueue_free(sc->jme_tq); sc->jme_tq = NULL; } if (sc->jme_miibus != NULL) { device_delete_child(dev, sc->jme_miibus); sc->jme_miibus = NULL; } bus_generic_detach(dev); jme_dma_free(sc); if (ifp != NULL) { if_free(ifp); sc->jme_ifp = NULL; } for (i = 0; i < 1; i++) { if (sc->jme_intrhand[i] != NULL) { bus_teardown_intr(dev, sc->jme_irq[i], sc->jme_intrhand[i]); sc->jme_intrhand[i] = NULL; } } if (sc->jme_irq[0] != NULL) bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq); if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0) pci_release_msi(dev); if (sc->jme_res[0] != NULL) bus_release_resources(dev, sc->jme_res_spec, sc->jme_res); mtx_destroy(&sc->jme_mtx); return (0); } #define JME_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) static void jme_sysctl_node(struct jme_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child, *parent; struct sysctl_oid *tree; struct jme_hw_stats *stats; int error; stats = &sc->jme_stats; ctx = device_get_sysctl_ctx(sc->jme_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt, 0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to, 0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt, 0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit, 0, sysctl_hw_jme_proc_limit, "I", "max number of Rx events to process"); /* Pull in device tunables. 
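* These are read with resource_int_value(9), i.e. from device.hints(5) or loader tunables such as hint.jme.0.process_limit="64" (unit 0 here is just an example); values outside the allowed min/max range are reset to the defaults below.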
*/ sc->jme_process_limit = JME_PROC_DEFAULT; error = resource_int_value(device_get_name(sc->jme_dev), device_get_unit(sc->jme_dev), "process_limit", &sc->jme_process_limit); if (error == 0) { if (sc->jme_process_limit < JME_PROC_MIN || sc->jme_process_limit > JME_PROC_MAX) { device_printf(sc->jme_dev, "process_limit value out of range; " "using default: %d\n", JME_PROC_DEFAULT); sc->jme_process_limit = JME_PROC_DEFAULT; } } sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; error = resource_int_value(device_get_name(sc->jme_dev), device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to); if (error == 0) { if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN || sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) { device_printf(sc->jme_dev, "tx_coal_to value out of range; " "using default: %d\n", PCCTX_COAL_TO_DEFAULT); sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; } } sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; error = resource_int_value(device_get_name(sc->jme_dev), device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt); if (error == 0) { if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN || sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) { device_printf(sc->jme_dev, "tx_coal_pkt value out of range; " "using default: %d\n", PCCTX_COAL_PKT_DEFAULT); sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; } } sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; error = resource_int_value(device_get_name(sc->jme_dev), device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to); if (error == 0) { if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN || sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) { device_printf(sc->jme_dev, "rx_coal_to value out of range; " "using default: %d\n", PCCRX_COAL_TO_DEFAULT); sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; } } sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; error = resource_int_value(device_get_name(sc->jme_dev), device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt); if (error == 0) { if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN || sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) { device_printf(sc->jme_dev, "rx_coal_pkt value out of range; " "using default: %d\n", PCCRX_COAL_PKT_DEFAULT); sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; } } if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) return; tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "JME statistics"); parent = SYSCTL_CHILDREN(tree); /* Rx statistics. */ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, NULL, "Rx MAC statistics"); child = SYSCTL_CHILDREN(tree); JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames", &stats->rx_good_frames, "Good frames"); JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", &stats->rx_crc_errs, "CRC errors"); JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs", &stats->rx_mii_errs, "MII errors"); JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", &stats->rx_fifo_oflows, "FIFO overflows"); JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty", &stats->rx_desc_empty, "Descriptor empty"); JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames", &stats->rx_bad_frames, "Bad frames"); /* Tx statistics.
*/ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "Tx MAC statistics"); child = SYSCTL_CHILDREN(tree); JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames", &stats->tx_good_frames, "Good frames"); JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames", &stats->tx_bad_frames, "Bad frames"); } #undef JME_SYSCTL_STAT_ADD32 struct jme_dmamap_arg { bus_addr_t jme_busaddr; }; static void jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct jme_dmamap_arg *ctx; if (error != 0) return; KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); ctx = (struct jme_dmamap_arg *)arg; ctx->jme_busaddr = segs[0].ds_addr; } static int jme_dma_alloc(struct jme_softc *sc) { struct jme_dmamap_arg ctx; struct jme_txdesc *txd; struct jme_rxdesc *rxd; bus_addr_t lowaddr, rx_ring_end, tx_ring_end; int error, i; lowaddr = BUS_SPACE_MAXADDR; if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; again: /* Create parent ring tag. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */ 1, 0, /* algnmnt, boundary */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->jme_cdata.jme_ring_tag); if (error != 0) { device_printf(sc->jme_dev, "could not create parent ring DMA tag.\n"); goto fail; } /* Create tag for Tx ring. */ error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */ JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ JME_TX_RING_SIZE, /* maxsize */ 1, /* nsegments */ JME_TX_RING_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->jme_cdata.jme_tx_ring_tag); if (error != 0) { device_printf(sc->jme_dev, "could not allocate Tx ring DMA tag.\n"); goto fail; } /* Create tag for Rx ring. */ error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */ JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ JME_RX_RING_SIZE, /* maxsize */ 1, /* nsegments */ JME_RX_RING_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->jme_cdata.jme_rx_ring_tag); if (error != 0) { device_printf(sc->jme_dev, "could not allocate Rx ring DMA tag.\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag, (void **)&sc->jme_rdata.jme_tx_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->jme_cdata.jme_tx_ring_map); if (error != 0) { device_printf(sc->jme_dev, "could not allocate DMA'able memory for Tx ring.\n"); goto fail; } ctx.jme_busaddr = 0; error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag, sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring, JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0 || ctx.jme_busaddr == 0) { device_printf(sc->jme_dev, "could not load DMA'able memory for Tx ring.\n"); goto fail; } sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr; /* Allocate DMA'able memory and load the DMA map for Rx ring. 
*/ error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag, (void **)&sc->jme_rdata.jme_rx_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->jme_cdata.jme_rx_ring_map); if (error != 0) { device_printf(sc->jme_dev, "could not allocate DMA'able memory for Rx ring.\n"); goto fail; } ctx.jme_busaddr = 0; error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag, sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring, JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0 || ctx.jme_busaddr == 0) { device_printf(sc->jme_dev, "could not load DMA'able memory for Rx ring.\n"); goto fail; } sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr; if (lowaddr != BUS_SPACE_MAXADDR_32BIT) { /* Tx/Rx descriptor queue should reside within 4GB boundary. */ tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE; rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE; if ((JME_ADDR_HI(tx_ring_end) != JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) || (JME_ADDR_HI(rx_ring_end) != JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) { device_printf(sc->jme_dev, "4GB boundary crossed, " "switching to 32bit DMA address mode.\n"); jme_dma_free(sc); /* Limit DMA address space to 32bit and try again. */ lowaddr = BUS_SPACE_MAXADDR_32BIT; goto again; } } lowaddr = BUS_SPACE_MAXADDR; if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; /* Create parent buffer tag. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */ 1, 0, /* algnmnt, boundary */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->jme_cdata.jme_buffer_tag); if (error != 0) { device_printf(sc->jme_dev, "could not create parent buffer DMA tag.\n"); goto fail; } /* Create shadow status block tag. */ error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ JME_SSB_ALIGN, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ JME_SSB_SIZE, /* maxsize */ 1, /* nsegments */ JME_SSB_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->jme_cdata.jme_ssb_tag); if (error != 0) { device_printf(sc->jme_dev, "could not create shared status block DMA tag.\n"); goto fail; } /* Create tag for Tx buffers. */ error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ JME_TSO_MAXSIZE, /* maxsize */ JME_MAXTXSEGS, /* nsegments */ JME_TSO_MAXSEGSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->jme_cdata.jme_tx_tag); if (error != 0) { device_printf(sc->jme_dev, "could not create Tx DMA tag.\n"); goto fail; } /* Create tag for Rx buffers. */ error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->jme_cdata.jme_rx_tag); if (error != 0) { device_printf(sc->jme_dev, "could not create Rx DMA tag.\n"); goto fail; } /* * Allocate DMA'able memory and load the DMA map for shared * status block. 
*/ error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag, (void **)&sc->jme_rdata.jme_ssb_block, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->jme_cdata.jme_ssb_map); if (error != 0) { device_printf(sc->jme_dev, "could not allocate DMA'able " "memory for shared status block.\n"); goto fail; } ctx.jme_busaddr = 0; error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block, JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0 || ctx.jme_busaddr == 0) { device_printf(sc->jme_dev, "could not load DMA'able memory " "for shared status block.\n"); goto fail; } sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr; /* Create DMA maps for Tx buffers. */ for (i = 0; i < JME_TX_RING_CNT; i++) { txd = &sc->jme_cdata.jme_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->jme_dev, "could not create Tx dmamap.\n"); goto fail; } } /* Create DMA maps for Rx buffers. */ if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0, &sc->jme_cdata.jme_rx_sparemap)) != 0) { device_printf(sc->jme_dev, "could not create spare Rx dmamap.\n"); goto fail; } for (i = 0; i < JME_RX_RING_CNT; i++) { rxd = &sc->jme_cdata.jme_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->jme_dev, "could not create Rx dmamap.\n"); goto fail; } } fail: return (error); } static void jme_dma_free(struct jme_softc *sc) { struct jme_txdesc *txd; struct jme_rxdesc *rxd; int i; /* Tx ring */ if (sc->jme_cdata.jme_tx_ring_tag != NULL) { if (sc->jme_rdata.jme_tx_ring_paddr) bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag, sc->jme_cdata.jme_tx_ring_map); if (sc->jme_rdata.jme_tx_ring) bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag, sc->jme_rdata.jme_tx_ring, sc->jme_cdata.jme_tx_ring_map); sc->jme_rdata.jme_tx_ring = NULL; sc->jme_rdata.jme_tx_ring_paddr = 0; bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag); sc->jme_cdata.jme_tx_ring_tag = NULL; } /* Rx ring */ if (sc->jme_cdata.jme_rx_ring_tag != NULL) { if (sc->jme_rdata.jme_rx_ring_paddr) bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag, sc->jme_cdata.jme_rx_ring_map); if (sc->jme_rdata.jme_rx_ring) bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag, sc->jme_rdata.jme_rx_ring, sc->jme_cdata.jme_rx_ring_map); sc->jme_rdata.jme_rx_ring = NULL; sc->jme_rdata.jme_rx_ring_paddr = 0; bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag); sc->jme_cdata.jme_rx_ring_tag = NULL; } /* Tx buffers */ if (sc->jme_cdata.jme_tx_tag != NULL) { for (i = 0; i < JME_TX_RING_CNT; i++) { txd = &sc->jme_cdata.jme_txdesc[i]; if (txd->tx_dmamap != NULL) { bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag); sc->jme_cdata.jme_tx_tag = NULL; } /* Rx buffers */ if (sc->jme_cdata.jme_rx_tag != NULL) { for (i = 0; i < JME_RX_RING_CNT; i++) { rxd = &sc->jme_cdata.jme_rxdesc[i]; if (rxd->rx_dmamap != NULL) { bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc->jme_cdata.jme_rx_sparemap != NULL) { bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag, sc->jme_cdata.jme_rx_sparemap); sc->jme_cdata.jme_rx_sparemap = NULL; } bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag); sc->jme_cdata.jme_rx_tag = NULL; } /* Shared status block. 
*/ if (sc->jme_cdata.jme_ssb_tag != NULL) { if (sc->jme_rdata.jme_ssb_block_paddr) bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map); if (sc->jme_rdata.jme_ssb_block) bus_dmamem_free(sc->jme_cdata.jme_ssb_tag, sc->jme_rdata.jme_ssb_block, sc->jme_cdata.jme_ssb_map); sc->jme_rdata.jme_ssb_block = NULL; sc->jme_rdata.jme_ssb_block_paddr = 0; bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag); sc->jme_cdata.jme_ssb_tag = NULL; } if (sc->jme_cdata.jme_buffer_tag != NULL) { bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag); sc->jme_cdata.jme_buffer_tag = NULL; } if (sc->jme_cdata.jme_ring_tag != NULL) { bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag); sc->jme_cdata.jme_ring_tag = NULL; } } /* * Make sure the interface is stopped at reboot time. */ static int jme_shutdown(device_t dev) { return (jme_suspend(dev)); } /* * Unlike other ethernet controllers, JMC250 requires * explicitly resetting the link speed to 10/100Mbps as a gigabit * link will consume more power than 375mA. * Note, we reset the link speed to 10/100Mbps with * auto-negotiation but we don't know whether that operation * would succeed or not as we have no control after powering * off. If the renegotiation fails, WOL may not work. Running * at 1Gbps draws more power than the 375mA at 3.3V specified * in the PCI specification, and that could result in power to * the ethernet controller being shut down completely. * * TODO * Save current negotiated media speed/duplex/flow-control * to softc and restore the same link again after resuming. * PHY handling such as power down/resetting to 100Mbps * may be better handled in the suspend method of the PHY driver. */ static void jme_setlinkspeed(struct jme_softc *sc) { struct mii_data *mii; int aneg, i; JME_LOCK_ASSERT(sc); mii = device_get_softc(sc->jme_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & IFM_AVALID) != 0) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: return; case IFM_1000_T: aneg++; default: break; } } jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0); jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* Poll link state until jme(4) gets a 10/100 link. */ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & IFM_AVALID) != 0) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: jme_mac_config(sc); return; default: break; } } JME_UNLOCK(sc); pause("jmelnk", hz); JME_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->jme_dev, "establishing link failed, " "WOL may not work!"); } /* * No link, force the MAC to have a 100Mbps, full-duplex link. * This is the last resort and may or may not work. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; jme_mac_config(sc); } static void jme_setwol(struct jme_softc *sc) { struct ifnet *ifp; uint32_t gpr, pmcs; uint16_t pmstat; int pmc; JME_LOCK_ASSERT(sc); if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { /* Remove Tx MAC/offload clock to save more power.
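* Without PME capability the controller cannot wake the system anyway, so only gate the Tx/Rx MAC clocks to save power and put the PHY to sleep.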
*/ if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) & ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 | GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000)); if ((sc->jme_flags & JME_FLAG_RXCLK) != 0) CSR_WRITE_4(sc, JME_GPREG1, CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS); /* No PME capability, PHY power down. */ jme_phy_down(sc); return; } ifp = sc->jme_ifp; gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB; pmcs = CSR_READ_4(sc, JME_PMCS); pmcs &= ~PMCS_WOL_ENB_MASK; if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB; /* Enable PME message. */ gpr |= GPREG0_PME_ENB; /* For gigabit controllers, reset link speed to 10/100. */ if ((sc->jme_flags & JME_FLAG_FASTETH) == 0) jme_setlinkspeed(sc); } CSR_WRITE_4(sc, JME_PMCS, pmcs); CSR_WRITE_4(sc, JME_GPREG0, gpr); /* Remove Tx MAC/offload clock to save more power. */ if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) & ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 | GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000)); /* Request PME. */ pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); if ((ifp->if_capenable & IFCAP_WOL) == 0) { /* No WOL, PHY power down. */ jme_phy_down(sc); } } static int jme_suspend(device_t dev) { struct jme_softc *sc; sc = device_get_softc(dev); JME_LOCK(sc); jme_stop(sc); jme_setwol(sc); JME_UNLOCK(sc); return (0); } static int jme_resume(device_t dev) { struct jme_softc *sc; struct ifnet *ifp; uint16_t pmstat; int pmc; sc = device_get_softc(dev); JME_LOCK(sc); if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) == 0) { pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2); /* Disable PME clear PME status. */ pmstat &= ~PCIM_PSTAT_PMEENABLE; pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } /* Wakeup PHY. */ jme_phy_up(sc); ifp = sc->jme_ifp; if ((ifp->if_flags & IFF_UP) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; jme_init_locked(sc); } JME_UNLOCK(sc); return (0); } static int jme_encap(struct jme_softc *sc, struct mbuf **m_head) { struct jme_txdesc *txd; struct jme_desc *desc; struct mbuf *m; bus_dma_segment_t txsegs[JME_MAXTXSEGS]; int error, i, nsegs, prod; uint32_t cflags, tsosegsz; JME_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* * Due to the adherence to NDIS specification JMC250 * assumes upper stack computed TCP pseudo checksum * without including payload length. This breaks * checksum offload for TSO case so recompute TCP * pseudo checksum for JMC250. Hopefully this wouldn't * be much burden on modern CPUs. */ struct ether_header *eh; struct ip *ip; struct tcphdr *tcp; uint32_t ip_off, poff; if (M_WRITABLE(*m_head) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } ip_off = sizeof(struct ether_header); m = m_pullup(*m_head, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } eh = mtod(m, struct ether_header *); /* Check the existence of VLAN tag. 
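* A tagged frame carries an extra 4 byte 802.1Q header, so in that case the IP header starts at sizeof(struct ether_vlan_header) rather than sizeof(struct ether_header).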
*/ if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ip_off = sizeof(struct ether_vlan_header); m = m_pullup(m, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } } m = m_pullup(m, ip_off + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, char *) + ip_off); poff = ip_off + (ip->ip_hl << 2); m = m_pullup(m, poff + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } /* * Reset the IP checksum and recompute the TCP pseudo * checksum that the NDIS specification requires. */ ip = (struct ip *)(mtod(m, char *) + ip_off); tcp = (struct tcphdr *)(mtod(m, char *) + poff); ip->ip_sum = 0; if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) { tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons((tcp->th_off << 2) + IPPROTO_TCP)); /* No need to TSO, force IP checksum offload. */ (*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO; (*m_head)->m_pkthdr.csum_flags |= CSUM_IP; } else tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); *m_head = m; } prod = sc->jme_cdata.jme_tx_prod; txd = &sc->jme_cdata.jme_txdesc[prod]; error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* * Check descriptor overrun. Leave one free descriptor. * Since we always use 64bit address mode for transmitting, * each Tx request requires one more dummy descriptor. */ if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) { bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); return (ENOBUFS); } m = *m_head; cflags = 0; tsosegsz = 0; /* Configure checksum offload and TSO. */ if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT; cflags |= JME_TD_TSO; } else { if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) cflags |= JME_TD_IPCSUM; if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) cflags |= JME_TD_TCPCSUM; if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) cflags |= JME_TD_UDPCSUM; } /* Configure VLAN. */ if ((m->m_flags & M_VLANTAG) != 0) { cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK); cflags |= JME_TD_VLAN_TAG; } desc = &sc->jme_rdata.jme_tx_ring[prod]; desc->flags = htole32(cflags); desc->buflen = htole32(tsosegsz); desc->addr_hi = htole32(m->m_pkthdr.len); desc->addr_lo = 0; sc->jme_cdata.jme_tx_cnt++; JME_DESC_INC(prod, JME_TX_RING_CNT); for (i = 0; i < nsegs; i++) { desc = &sc->jme_rdata.jme_tx_ring[prod]; desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT); desc->buflen = htole32(txsegs[i].ds_len); desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr)); desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr)); sc->jme_cdata.jme_tx_cnt++; JME_DESC_INC(prod, JME_TX_RING_CNT); } /* Update producer index. */ sc->jme_cdata.jme_tx_prod = prod; /* * Finally, request an interrupt and give ownership of the * first descriptor to the hardware. */ desc = txd->tx_desc; desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR); txd->tx_m = m; txd->tx_ndesc = nsegs + 1; /* Sync descriptors.
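* The PREWRITE syncs push the mbuf data and the descriptor updates, including the OWN bit set above, out to memory before the Tx queue is kicked from jme_start_locked().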
*/ bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void jme_start(struct ifnet *ifp) { struct jme_softc *sc; sc = ifp->if_softc; JME_LOCK(sc); jme_start_locked(ifp); JME_UNLOCK(sc); } static void jme_start_locked(struct ifnet *ifp) { struct jme_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; JME_LOCK_ASSERT(sc); if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT) jme_txeof(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) return; for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (jme_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to it. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { /* * Reading TXCSR takes a very long time under heavy load, * so cache the TXCSR value and write the ORed value with * the kick command to the TXCSR. This saves one register * access cycle. */ CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0)); /* Set a timeout in case the chip goes out to lunch. */ sc->jme_watchdog_timer = JME_TX_TIMEOUT; } } static void jme_watchdog(struct jme_softc *sc) { struct ifnet *ifp; JME_LOCK_ASSERT(sc); if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer) return; ifp = sc->jme_ifp; if ((sc->jme_flags & JME_FLAG_LINK) == 0) { if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; jme_init_locked(sc); return; } jme_txeof(sc); if (sc->jme_cdata.jme_tx_cnt == 0) { if_printf(sc->jme_ifp, "watchdog timeout (missed Tx interrupts) -- recovering\n"); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) jme_start_locked(ifp); return; } if_printf(sc->jme_ifp, "watchdog timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; jme_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) jme_start_locked(ifp); } static int jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct jme_softc *sc; struct ifreq *ifr; struct mii_data *mii; uint32_t reg; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU || ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 && ifr->ifr_mtu > JME_MAX_MTU)) { error = EINVAL; break; } if (ifp->if_mtu != ifr->ifr_mtu) { /* * No special configuration is required when the interface * MTU is changed, but availability of TSO/Tx checksum * offload should be checked against the new MTU size as * the FIFO size is just 2K.
*/ JME_LOCK(sc); if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) { ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); ifp->if_hwassist &= ~(JME_CSUM_FEATURES | CSUM_TSO); VLAN_CAPABILITIES(ifp); } ifp->if_mtu = ifr->ifr_mtu; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; jme_init_locked(sc); } JME_UNLOCK(sc); } break; case SIOCSIFFLAGS: JME_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->jme_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) jme_set_filter(sc); } else { if ((sc->jme_flags & JME_FLAG_DETACH) == 0) jme_init_locked(sc); } } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) jme_stop(sc); } sc->jme_if_flags = ifp->if_flags; JME_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: JME_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) jme_set_filter(sc); JME_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->jme_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: JME_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if ((mask & IFCAP_TXCSUM) != 0 && ifp->if_mtu < JME_TX_FIFO_SIZE) { if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) ifp->if_hwassist |= JME_CSUM_FEATURES; else ifp->if_hwassist &= ~JME_CSUM_FEATURES; } } if ((mask & IFCAP_RXCSUM) != 0 && (IFCAP_RXCSUM & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; reg = CSR_READ_4(sc, JME_RXMAC); reg &= ~RXMAC_CSUM_ENB; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) reg |= RXMAC_CSUM_ENB; CSR_WRITE_4(sc, JME_RXMAC, reg); } if ((mask & IFCAP_TSO4) != 0 && ifp->if_mtu < JME_TX_FIFO_SIZE) { if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((IFCAP_TSO4 & ifp->if_capenable) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; } } if ((mask & IFCAP_WOL_MAGIC) != 0 && (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; if ((mask & IFCAP_VLAN_HWCSUM) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; jme_set_vlan(sc); } JME_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void jme_mac_config(struct jme_softc *sc) { struct mii_data *mii; uint32_t ghc, gpreg, rxmac, txmac, txpause; uint32_t txclk; JME_LOCK_ASSERT(sc); mii = device_get_softc(sc->jme_miibus); CSR_WRITE_4(sc, JME_GHC, GHC_RESET); DELAY(10); CSR_WRITE_4(sc, JME_GHC, 0); ghc = 0; txclk = 0; rxmac = CSR_READ_4(sc, JME_RXMAC); rxmac &= ~RXMAC_FC_ENB; txmac = CSR_READ_4(sc, JME_TXMAC); txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST); txpause = CSR_READ_4(sc, JME_TXPFC); txpause &= ~TXPFC_PAUSE_ENB; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { ghc |= GHC_FULL_DUPLEX; rxmac &= ~RXMAC_COLL_DET_ENB; txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF | TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) txpause |= TXPFC_PAUSE_ENB; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) rxmac |= RXMAC_FC_ENB; /* Disable retry transmit timer/retry 
limit. */ CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) & ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB)); } else { rxmac |= RXMAC_COLL_DET_ENB; txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF; /* Enable retry transmit timer/retry limit. */ CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) | TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB); } /* Reprogram Tx/Rx MACs with resolved speed/duplex. */ switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: ghc |= GHC_SPEED_10; txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100; break; case IFM_100_TX: ghc |= GHC_SPEED_100; txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100; break; case IFM_1000_T: if ((sc->jme_flags & JME_FLAG_FASTETH) != 0) break; ghc |= GHC_SPEED_1000; txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST; break; default: break; } if (sc->jme_rev == DEVICEID_JMC250 && sc->jme_chip_rev == DEVICEREVID_JMC250_A2) { /* * Workaround occasional packet loss issue of JMC250 A2 * when it runs on half-duplex media. */ gpreg = CSR_READ_4(sc, JME_GPREG1); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) gpreg &= ~GPREG1_HDPX_FIX; else gpreg |= GPREG1_HDPX_FIX; CSR_WRITE_4(sc, JME_GPREG1, gpreg); /* Workaround CRC errors at 100Mbps on JMC250 A2. */ if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { /* Extend interface FIFO depth. */ jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 0x1B, 0x0000); } else { /* Select default interface FIFO depth. */ jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 0x1B, 0x0004); } } if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) ghc |= txclk; CSR_WRITE_4(sc, JME_GHC, ghc); CSR_WRITE_4(sc, JME_RXMAC, rxmac); CSR_WRITE_4(sc, JME_TXMAC, txmac); CSR_WRITE_4(sc, JME_TXPFC, txpause); } static void jme_link_task(void *arg, int pending) { struct jme_softc *sc; struct mii_data *mii; struct ifnet *ifp; struct jme_txdesc *txd; bus_addr_t paddr; int i; sc = (struct jme_softc *)arg; JME_LOCK(sc); mii = device_get_softc(sc->jme_miibus); ifp = sc->jme_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { JME_UNLOCK(sc); return; } sc->jme_flags &= ~JME_FLAG_LINK; if ((mii->mii_media_status & IFM_AVALID) != 0) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->jme_flags |= JME_FLAG_LINK; break; case IFM_1000_T: if ((sc->jme_flags & JME_FLAG_FASTETH) != 0) break; sc->jme_flags |= JME_FLAG_LINK; break; default: break; } } /* * Disabling the Rx/Tx MACs has a side-effect of resetting * the JME_TXNDA/JME_RXNDA registers to the first address of * the Tx/Rx descriptor rings. So the driver should reset its * internal producer/consumer pointers and reclaim any * allocated resources. Note, just saving the values of the * JME_TXNDA and JME_RXNDA registers before stopping the MAC * and restoring them afterwards is not sufficient to ensure * a correct MAC state, because stopping MAC operation can * take a while and the hardware might have updated the * JME_TXNDA/JME_RXNDA registers during the stop operation. */ /* Block execution of task. */ taskqueue_block(sc->jme_tq); /* Disable interrupts and stop driver. */ CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->jme_tick_ch); sc->jme_watchdog_timer = 0; /* Stop receiver/transmitter. */ jme_stop_rx(sc); jme_stop_tx(sc); /* XXX Drain all queued tasks.
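* The driver lock is dropped around taskqueue_drain() below because the interrupt task itself acquires it; draining while holding the lock could deadlock.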
*/ JME_UNLOCK(sc); taskqueue_drain(sc->jme_tq, &sc->jme_int_task); JME_LOCK(sc); if (sc->jme_cdata.jme_rxhead != NULL) m_freem(sc->jme_cdata.jme_rxhead); JME_RXCHAIN_RESET(sc); jme_txeof(sc); if (sc->jme_cdata.jme_tx_cnt != 0) { /* Remove queued packets for transmit. */ for (i = 0; i < JME_TX_RING_CNT; i++) { txd = &sc->jme_cdata.jme_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync( sc->jme_cdata.jme_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload( sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; txd->tx_ndesc = 0; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } } } /* * Reuse configured Rx descriptors and reset * producer/consumer index. */ sc->jme_cdata.jme_rx_cons = 0; sc->jme_morework = 0; jme_init_tx_ring(sc); /* Initialize shadow status block. */ jme_init_ssb(sc); /* Program MAC with resolved speed/duplex/flow-control. */ if ((sc->jme_flags & JME_FLAG_LINK) != 0) { jme_mac_config(sc); jme_stats_clear(sc); CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr); CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); /* Set Tx ring address to the hardware. */ paddr = JME_TX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); /* Set Rx ring address to the hardware. */ paddr = JME_RX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); /* Restart receiver/transmitter. */ CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB | RXCSR_RXQ_START); CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB); /* Lastly enable TX/RX clock. */ if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS); if ((sc->jme_flags & JME_FLAG_RXCLK) != 0) CSR_WRITE_4(sc, JME_GPREG1, CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS); } ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); /* Unblock execution of task. */ taskqueue_unblock(sc->jme_tq); /* Reenable interrupts. */ CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); JME_UNLOCK(sc); } static int jme_intr(void *arg) { struct jme_softc *sc; uint32_t status; sc = (struct jme_softc *)arg; status = CSR_READ_4(sc, JME_INTR_REQ_STATUS); if (status == 0 || status == 0xFFFFFFFF) return (FILTER_STRAY); /* Disable interrupts. */ CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task); return (FILTER_HANDLED); } static void jme_int_task(void *arg, int pending) { struct jme_softc *sc; struct ifnet *ifp; uint32_t status; int more; sc = (struct jme_softc *)arg; ifp = sc->jme_ifp; JME_LOCK(sc); status = CSR_READ_4(sc, JME_INTR_STATUS); if (sc->jme_morework != 0) { sc->jme_morework = 0; status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO; } if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF) goto done; /* Reset PCC counter/timer and Ack interrupts. 
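* Writing the accumulated status bits back to JME_INTR_STATUS acknowledges them; the TXQ/RXQ completion bits are written back only together with their coalescing companions, which restarts the PCC counter/timer.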
*/ status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP); if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0) status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP; if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP; CSR_WRITE_4(sc, JME_INTR_STATUS, status); more = 0; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) { more = jme_rxintr(sc, sc->jme_process_limit); if (more != 0) sc->jme_morework = 1; } if ((status & INTR_RXQ_DESC_EMPTY) != 0) { /* * Notify the hardware of the availability of new Rx * buffers. * Reading RXCSR takes a very long time under * heavy load, so cache the RXCSR value and write * the ORed value with the kick command to * the RXCSR. This saves one register access * cycle. */ CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB | RXCSR_RXQ_START); } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) jme_start_locked(ifp); } if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) { taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task); JME_UNLOCK(sc); return; } done: JME_UNLOCK(sc); /* Reenable interrupts. */ CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); } static void jme_txeof(struct jme_softc *sc) { struct ifnet *ifp; struct jme_txdesc *txd; uint32_t status; int cons, nsegs; JME_LOCK_ASSERT(sc); ifp = sc->jme_ifp; cons = sc->jme_cdata.jme_tx_cons; if (cons == sc->jme_cdata.jme_tx_prod) return; bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * Go through our Tx list and free mbufs for those * frames which have been transmitted. */ for (; cons != sc->jme_cdata.jme_tx_prod;) { txd = &sc->jme_cdata.jme_txdesc[cons]; status = le32toh(txd->tx_desc->flags); if ((status & JME_TD_OWN) == JME_TD_OWN) break; if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if ((status & JME_TD_COLLISION) != 0) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, le32toh(txd->tx_desc->buflen) & JME_TD_BUF_LEN_MASK); } /* * Only the first descriptor of a multi-descriptor * transmission is updated, so the driver has to skip the * entire chain of buffers for the transmitted frame. In * other words, the JME_TD_OWN bit is valid only in the * first descriptor of a multi-descriptor transmission. */ for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { sc->jme_rdata.jme_tx_ring[cons].flags = 0; JME_DESC_INC(cons, JME_TX_RING_CNT); } /* Reclaim transferred mbufs. */ bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", __func__)); m_freem(txd->tx_m); txd->tx_m = NULL; sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc; KASSERT(sc->jme_cdata.jme_tx_cnt >= 0, ("%s: Active Tx desc counter was garbled\n", __func__)); txd->tx_ndesc = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->jme_cdata.jme_tx_cons = cons; /* Disarm the watchdog timer when there are no pending descriptors in the queue. */ if (sc->jme_cdata.jme_tx_cnt == 0) sc->jme_watchdog_timer = 0; bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static __inline void jme_discard_rxbuf(struct jme_softc *sc, int cons) { struct jme_desc *desc; desc = &sc->jme_rdata.jme_rx_ring[cons]; desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); desc->buflen = htole32(MCLBYTES); } /* Receive a frame.
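* A frame larger than one mbuf cluster spans several descriptors; the first descriptor carries the flags/length for the whole frame, and jme_rxeof() below chains the per-descriptor clusters into a single mbuf chain.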
*/ static void jme_rxeof(struct jme_softc *sc) { struct ifnet *ifp; struct jme_desc *desc; struct jme_rxdesc *rxd; struct mbuf *mp, *m; uint32_t flags, status; int cons, count, nsegs; JME_LOCK_ASSERT(sc); ifp = sc->jme_ifp; cons = sc->jme_cdata.jme_rx_cons; desc = &sc->jme_rdata.jme_rx_ring[cons]; flags = le32toh(desc->flags); status = le32toh(desc->buflen); nsegs = JME_RX_NSEGS(status); sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES; if ((status & JME_RX_ERR_STAT) != 0) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons); #ifdef JME_SHOW_ERRORS device_printf(sc->jme_dev, "%s : receive error = 0x%b\n", __func__, JME_RX_ERR(status), JME_RX_ERR_BITS); #endif sc->jme_cdata.jme_rx_cons += nsegs; sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT; return; } for (count = 0; count < nsegs; count++, JME_DESC_INC(cons, JME_RX_RING_CNT)) { rxd = &sc->jme_cdata.jme_rxdesc[cons]; mp = rxd->rx_m; /* Add a new receive buffer to the ring. */ if (jme_newbuf(sc, rxd) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); /* Reuse buffer. */ for (; count < nsegs; count++) { jme_discard_rxbuf(sc, cons); JME_DESC_INC(cons, JME_RX_RING_CNT); } if (sc->jme_cdata.jme_rxhead != NULL) { m_freem(sc->jme_cdata.jme_rxhead); JME_RXCHAIN_RESET(sc); } break; } /* * Assume we've received a full sized frame. * The actual size is fixed when we encounter the end of * a multi-segmented frame. */ mp->m_len = MCLBYTES; /* Chain received mbufs. */ if (sc->jme_cdata.jme_rxhead == NULL) { sc->jme_cdata.jme_rxhead = mp; sc->jme_cdata.jme_rxtail = mp; } else { /* * The receive processor can receive a maximum frame * size of 65535 bytes. */ mp->m_flags &= ~M_PKTHDR; sc->jme_cdata.jme_rxtail->m_next = mp; sc->jme_cdata.jme_rxtail = mp; } if (count == nsegs - 1) { /* Last desc. for this frame. */ m = sc->jme_cdata.jme_rxhead; m->m_flags |= M_PKTHDR; m->m_pkthdr.len = sc->jme_cdata.jme_rxlen; if (nsegs > 1) { /* Set first mbuf size. */ m->m_len = MCLBYTES - JME_RX_PAD_BYTES; /* Set last mbuf size. */ mp->m_len = sc->jme_cdata.jme_rxlen - ((MCLBYTES - JME_RX_PAD_BYTES) + (MCLBYTES * (nsegs - 2))); } else m->m_len = sc->jme_cdata.jme_rxlen; m->m_pkthdr.rcvif = ifp; /* * Account for the 10 bytes of auto padding which is * used to align the IP header on a 32-bit boundary. * Also note the CRC bytes are automatically removed * by the hardware. */ m->m_data += JME_RX_PAD_BYTES; /* Set checksum information. */ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && (flags & JME_RD_IPV4) != 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((flags & JME_RD_IPCSUM) != 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (((flags & JME_RD_MORE_FRAG) == 0) && ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) == (JME_RD_TCP | JME_RD_TCPCSUM) || (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) == (JME_RD_UDP | JME_RD_UDPCSUM))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } /* Check for VLAN tagged packets. */ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (flags & JME_RD_VLAN_TAG) != 0) { m->m_pkthdr.ether_vtag = flags & JME_RD_VLAN_MASK; m->m_flags |= M_VLANTAG; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* Pass it on. */ JME_UNLOCK(sc); (*ifp->if_input)(ifp, m); JME_LOCK(sc); /* Reset mbuf chains.
*/ JME_RXCHAIN_RESET(sc); } } sc->jme_cdata.jme_rx_cons += nsegs; sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT; } static int jme_rxintr(struct jme_softc *sc, int count) { struct jme_desc *desc; int nsegs, prog, pktlen; bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag, sc->jme_cdata.jme_rx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (prog = 0; count > 0; prog++) { desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons]; if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN) break; if ((le32toh(desc->buflen) & JME_RD_VALID) == 0) break; nsegs = JME_RX_NSEGS(le32toh(desc->buflen)); /* * Check the number of segments against the received bytes. * A non-matching value would indicate that the hardware * is still trying to update Rx descriptors. I'm not * sure whether this check is needed. */ pktlen = JME_RX_BYTES(le32toh(desc->buflen)); - if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES)) + if (nsegs != howmany(pktlen, MCLBYTES)) break; prog++; /* Received a frame. */ jme_rxeof(sc); count -= nsegs; } if (prog > 0) bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag, sc->jme_cdata.jme_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (count > 0 ? 0 : EAGAIN); } static void jme_tick(void *arg) { struct jme_softc *sc; struct mii_data *mii; sc = (struct jme_softc *)arg; JME_LOCK_ASSERT(sc); mii = device_get_softc(sc->jme_miibus); mii_tick(mii); /* * Reclaim Tx buffers that have been completed. It's not * needed here but it would release allocated mbuf chains * faster and limit the maximum delay to one hz. */ jme_txeof(sc); jme_stats_update(sc); jme_watchdog(sc); callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); } static void jme_reset(struct jme_softc *sc) { uint32_t ghc, gpreg; /* Stop receiver, transmitter. */ jme_stop_rx(sc); jme_stop_tx(sc); /* Reset controller. */ CSR_WRITE_4(sc, JME_GHC, GHC_RESET); CSR_READ_4(sc, JME_GHC); DELAY(10); /* * Workaround Rx FIFO overruns seen under certain conditions. * Explicitly synchronize the TX/RX clocks. The TX/RX clocks * should be enabled only after enabling the TX/RX MACs. */ if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) { /* Disable TX clock. */ CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS); /* Disable RX clock. */ gpreg = CSR_READ_4(sc, JME_GPREG1); CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS); gpreg = CSR_READ_4(sc, JME_GPREG1); /* De-assert RESET but still disable TX clock. */ CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS); ghc = CSR_READ_4(sc, JME_GHC); /* Enable TX clock. */ CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS); /* Enable RX clock. */ CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS); CSR_READ_4(sc, JME_GPREG1); /* Disable TX/RX clock again. */ CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS); CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS); } else CSR_WRITE_4(sc, JME_GHC, 0); CSR_READ_4(sc, JME_GHC); DELAY(10); } static void jme_init(void *xsc) { struct jme_softc *sc; sc = (struct jme_softc *)xsc; JME_LOCK(sc); jme_init_locked(sc); JME_UNLOCK(sc); } static void jme_init_locked(struct jme_softc *sc) { struct ifnet *ifp; struct mii_data *mii; bus_addr_t paddr; uint32_t reg; int error; JME_LOCK_ASSERT(sc); ifp = sc->jme_ifp; mii = device_get_softc(sc->jme_miibus); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel any pending I/O. */ jme_stop(sc); /* * Reset the chip to a known state. */ jme_reset(sc); /* Init descriptors.
*/ error = jme_init_rx_ring(sc); if (error != 0) { device_printf(sc->jme_dev, "%s: initialization failed: no memory for Rx buffers.\n", __func__); jme_stop(sc); return; } jme_init_tx_ring(sc); /* Initialize shadow status block. */ jme_init_ssb(sc); /* Reprogram the station address. */ jme_set_macaddr(sc, IF_LLADDR(sc->jme_ifp)); /* * Configure Tx queue. * Tx priority queue weight value : 0 * Tx FIFO threshold for processing next packet : 16QW * Maximum Tx DMA length : 512 * Allow Tx DMA burst. */ sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; sc->jme_txcsr |= sc->jme_tx_dma_size; sc->jme_txcsr |= TXCSR_DMA_BURST; CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); /* Set Tx descriptor counter. */ CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT); /* Set Tx ring address to the hardware. */ paddr = JME_TX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); /* Configure TxMAC parameters. */ reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB; reg |= TXMAC_THRESH_1_PKT; reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB; CSR_WRITE_4(sc, JME_TXMAC, reg); /* * Configure Rx queue. * FIFO full threshold for transmitting Tx pause packet : 128T * FIFO threshold for processing next packet : 128QW * Rx queue 0 select * Max Rx DMA length : 128 * Rx descriptor retry : 32 * Rx descriptor retry time gap : 256ns * Don't receive runt/bad frame. */ sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T; /* * Since the Rx FIFO size is 4K bytes, receiving frames larger * than 4K bytes will suffer from Rx FIFO overruns. So * decrease the FIFO threshold to reduce the FIFO overruns for * frames larger than 4000 bytes. * For best performance of standard MTU sized frames use * the maximum allowable FIFO threshold, 128QW. Note these do * not hold on chip full mask revision >= 2. For these * controllers 64QW and 128QW are not valid values. */ if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; else { if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN) > JME_RX_FIFO_SIZE) sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW; else sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW; } sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0); sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT); sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK; CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr); /* Set Rx descriptor counter. */ CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT); /* Set Rx ring address to the hardware. */ paddr = JME_RX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); /* Clear receive filter. */ CSR_WRITE_4(sc, JME_RXMAC, 0); /* Set up the receive filter. */ jme_set_filter(sc); jme_set_vlan(sc); /* * Disable all WOL bits as WOL can interfere with normal Rx * operation. Also clear WOL detection status bits. */ reg = CSR_READ_4(sc, JME_PMCS); reg &= ~PMCS_WOL_ENB_MASK; CSR_WRITE_4(sc, JME_PMCS, reg); reg = CSR_READ_4(sc, JME_RXMAC); /* * Pad 10 bytes right before the received frame. This will greatly * help Rx performance on strict-alignment architectures as * it does not need to copy the frame to align the payload.
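* With the 10 byte pad the 14 byte Ethernet header ends at offset 24, so after jme_rxeof() advances m_data by JME_RX_PAD_BYTES the IP header lands on a 32-bit boundary.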
*/ reg |= RXMAC_PAD_10BYTES; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) reg |= RXMAC_CSUM_ENB; CSR_WRITE_4(sc, JME_RXMAC, reg); /* Configure general purpose reg0 */ reg = CSR_READ_4(sc, JME_GPREG0); reg &= ~GPREG0_PCC_UNIT_MASK; /* Set PCC timer resolution to micro-seconds unit. */ reg |= GPREG0_PCC_UNIT_US; /* * Disable all shadow register posting as we have to read * the JME_INTR_STATUS register in jme_int_task. Also it seems * that it's hard to synchronize interrupt status between * hardware and software with shadow posting due to * requirements of bus_dmamap_sync(9). */ reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; /* Disable posting of DW0. */ reg &= ~GPREG0_POST_DW0_ENB; /* Clear PME message. */ reg &= ~GPREG0_PME_ENB; /* Set PHY address. */ reg &= ~GPREG0_PHY_ADDR_MASK; reg |= sc->jme_phyaddr; CSR_WRITE_4(sc, JME_GPREG0, reg); /* Configure Tx queue 0 packet completion coalescing. */ reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK; reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK; reg |= PCCTX_COAL_TXQ0; CSR_WRITE_4(sc, JME_PCCTX, reg); /* Configure Rx queue 0 packet completion coalescing. */ reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) & PCCRX_COAL_TO_MASK; reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) & PCCRX_COAL_PKT_MASK; CSR_WRITE_4(sc, JME_PCCRX0, reg); /* * Configure PCD (Packet Completion Deferring). It seems PCD * generates an interrupt when the time interval between two * back-to-back incoming/outgoing packets is long enough for * it to reach its timer value 0. The arrival of new packets * after the timer has started causes the PCD timer to restart. * Unfortunately, it's not clear how PCD is useful at this * moment, so just use the same parameters as PCC. */ if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) { sc->jme_rx_pcd_to = sc->jme_rx_coal_to; if (sc->jme_rx_coal_to > PCDRX_TO_MAX) sc->jme_rx_pcd_to = PCDRX_TO_MAX; sc->jme_tx_pcd_to = sc->jme_tx_coal_to; if (sc->jme_tx_coal_to > PCDTX_TO_MAX) sc->jme_tx_pcd_to = PCDTX_TO_MAX; reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT; reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT; CSR_WRITE_4(sc, PCDRX_REG(0), reg); reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT; reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT; CSR_WRITE_4(sc, JME_PCDTX, reg); } /* Configure shadow status block but don't enable posting. */ paddr = sc->jme_rdata.jme_ssb_block_paddr; CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); /* Disable Timer 1 and Timer 2. */ CSR_WRITE_4(sc, JME_TIMER1, 0); CSR_WRITE_4(sc, JME_TIMER2, 0); /* Configure retry transmit period, retry limit value. */ CSR_WRITE_4(sc, JME_TXTRHD, ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & TXTRHD_RT_PERIOD_MASK) | ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & TXTRHD_RT_LIMIT_MASK)); /* Disable RSS. */ CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); /* Initialize the interrupt mask. */ CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); /* * Enabling Tx/Rx DMA engines and Rx queue processing is * done after detection of a valid link in jme_link_task. */ sc->jme_flags &= ~JME_FLAG_LINK; /* Set the current media.
mii_mediachg(mii); callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static void jme_stop(struct jme_softc *sc) { struct ifnet *ifp; struct jme_txdesc *txd; struct jme_rxdesc *rxd; int i; JME_LOCK_ASSERT(sc); /* * Mark the interface down and cancel the watchdog timer. */ ifp = sc->jme_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->jme_flags &= ~JME_FLAG_LINK; callout_stop(&sc->jme_tick_ch); sc->jme_watchdog_timer = 0; /* * Disable interrupts. */ CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); /* Disable updating shadow status block. */ CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB); /* Stop receiver, transmitter. */ jme_stop_rx(sc); jme_stop_tx(sc); /* Reclaim Rx/Tx buffers that have been completed. */ jme_rxintr(sc, JME_RX_RING_CNT); if (sc->jme_cdata.jme_rxhead != NULL) m_freem(sc->jme_cdata.jme_rxhead); JME_RXCHAIN_RESET(sc); jme_txeof(sc); /* * Free RX and TX mbufs still in the queues. */ for (i = 0; i < JME_RX_RING_CNT; i++) { rxd = &sc->jme_cdata.jme_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < JME_TX_RING_CNT; i++) { txd = &sc->jme_cdata.jme_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; txd->tx_ndesc = 0; } } jme_stats_update(sc); jme_stats_save(sc); } static void jme_stop_tx(struct jme_softc *sc) { uint32_t reg; int i; reg = CSR_READ_4(sc, JME_TXCSR); if ((reg & TXCSR_TX_ENB) == 0) return; reg &= ~TXCSR_TX_ENB; CSR_WRITE_4(sc, JME_TXCSR, reg); for (i = JME_TIMEOUT; i > 0; i--) { DELAY(1); if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0) break; } if (i == 0) device_printf(sc->jme_dev, "stopping transmitter timeout!\n"); } static void jme_stop_rx(struct jme_softc *sc) { uint32_t reg; int i; reg = CSR_READ_4(sc, JME_RXCSR); if ((reg & RXCSR_RX_ENB) == 0) return; reg &= ~RXCSR_RX_ENB; CSR_WRITE_4(sc, JME_RXCSR, reg); for (i = JME_TIMEOUT; i > 0; i--) { DELAY(1); if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0) break; } if (i == 0) device_printf(sc->jme_dev, "stopping receiver timeout!\n"); } static void jme_init_tx_ring(struct jme_softc *sc) { struct jme_ring_data *rd; struct jme_txdesc *txd; int i; sc->jme_cdata.jme_tx_prod = 0; sc->jme_cdata.jme_tx_cons = 0; sc->jme_cdata.jme_tx_cnt = 0; rd = &sc->jme_rdata; bzero(rd->jme_tx_ring, JME_TX_RING_SIZE); for (i = 0; i < JME_TX_RING_CNT; i++) { txd = &sc->jme_cdata.jme_txdesc[i]; txd->tx_m = NULL; txd->tx_desc = &rd->jme_tx_ring[i]; txd->tx_ndesc = 0; } bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void jme_init_ssb(struct jme_softc *sc) { struct jme_ring_data *rd; rd = &sc->jme_rdata; bzero(rd->jme_ssb_block, JME_SSB_SIZE); bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static int jme_init_rx_ring(struct jme_softc *sc) { struct jme_ring_data *rd; struct jme_rxdesc *rxd; int i; sc->jme_cdata.jme_rx_cons = 0; JME_RXCHAIN_RESET(sc); sc->jme_morework = 0; rd = &sc->jme_rdata; bzero(rd->jme_rx_ring, JME_RX_RING_SIZE); for (i = 0; i <
JME_RX_RING_CNT; i++) { rxd = &sc->jme_cdata.jme_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_desc = &rd->jme_rx_ring[i]; if (jme_newbuf(sc, rxd) != 0) return (ENOBUFS); } bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag, sc->jme_cdata.jme_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static int jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd) { struct jme_desc *desc; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); /* * JMC250 has 64bit boundary alignment limitation so jme(4) * takes advantage of 10 bytes padding feature of hardware * in order not to copy entire frame to align IP header on * 32bit boundary. */ m->m_len = m->m_pkthdr.len = MCLBYTES; if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag, sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap; sc->jme_cdata.jme_rx_sparemap = map; bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; desc = rxd->rx_desc; desc->buflen = htole32(segs[0].ds_len); desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr)); desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr)); desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); return (0); } static void jme_set_vlan(struct jme_softc *sc) { struct ifnet *ifp; uint32_t reg; JME_LOCK_ASSERT(sc); ifp = sc->jme_ifp; reg = CSR_READ_4(sc, JME_RXMAC); reg &= ~RXMAC_VLAN_ENB; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) reg |= RXMAC_VLAN_ENB; CSR_WRITE_4(sc, JME_RXMAC, reg); } static void jme_set_filter(struct jme_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; uint32_t crc; uint32_t mchash[2]; uint32_t rxcfg; JME_LOCK_ASSERT(sc); ifp = sc->jme_ifp; rxcfg = CSR_READ_4(sc, JME_RXMAC); rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST | RXMAC_ALLMULTI); /* Always accept frames destined to our station address. */ rxcfg |= RXMAC_UNICAST; if ((ifp->if_flags & IFF_BROADCAST) != 0) rxcfg |= RXMAC_BROADCAST; if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { if ((ifp->if_flags & IFF_PROMISC) != 0) rxcfg |= RXMAC_PROMISC; if ((ifp->if_flags & IFF_ALLMULTI) != 0) rxcfg |= RXMAC_ALLMULTI; CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); CSR_WRITE_4(sc, JME_RXMAC, rxcfg); return; } /* * Set up the multicast address filter by passing all multicast * addresses through a CRC generator, and then using the low-order * 6 bits as an index into the 64 bit multicast hash table. The * high order bits select the register, while the rest of the bits * select the bit within the register. */ rxcfg |= RXMAC_MULTICAST; bzero(mchash, sizeof(mchash)); if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN); /* Just want the 6 least significant bits. */ crc &= 0x3f; /* Set the corresponding bit in the hash table. 
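 *
 * One classic bitwise formulation of the big-endian Ethernet CRC
 * computed by ether_crc32_be() above (a sketch for illustration;
 * the kernel routine is the authoritative version):
 *
 *	crc = 0xffffffff;
 *	for (i = 0; i < ETHER_ADDR_LEN; i++) {
 *		c = addr[i];
 *		for (j = 0; j < 8; j++) {
 *			bit = ((crc >> 31) ^ c) & 1;
 *			crc <<= 1;
 *			c >>= 1;
 *			if (bit)
 *				crc ^= 0x04c11db7;
 *		}
 *	}
 *
 * After "crc &= 0x3f", bit (crc & 0x1f) of word (crc >> 5) is set
 * below, giving the 64-bit hash filter one bit per address.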
*/ mchash[crc >> 5] |= 1 << (crc & 0x1f); } if_maddr_runlock(ifp); CSR_WRITE_4(sc, JME_MAR0, mchash[0]); CSR_WRITE_4(sc, JME_MAR1, mchash[1]); CSR_WRITE_4(sc, JME_RXMAC, rxcfg); } static void jme_stats_clear(struct jme_softc *sc) { JME_LOCK_ASSERT(sc); if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) return; /* Disable and clear counters. */ CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF); /* Activate hw counters. */ CSR_WRITE_4(sc, JME_STATCSR, 0); CSR_READ_4(sc, JME_STATCSR); bzero(&sc->jme_stats, sizeof(struct jme_hw_stats)); } static void jme_stats_save(struct jme_softc *sc) { JME_LOCK_ASSERT(sc); if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) return; /* Save current counters. */ bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats)); /* Disable and clear counters. */ CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF); } static void jme_stats_update(struct jme_softc *sc) { struct jme_hw_stats *stat, *ostat; uint32_t reg; JME_LOCK_ASSERT(sc); if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) return; stat = &sc->jme_stats; ostat = &sc->jme_ostats; stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD); stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD); reg = CSR_READ_4(sc, JME_STAT_CRCMII); stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >> STAT_RX_CRC_ERR_SHIFT; stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >> STAT_RX_MII_ERR_SHIFT; reg = CSR_READ_4(sc, JME_STAT_RXERR); stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >> STAT_RXERR_OFLOW_SHIFT; stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >> STAT_RXERR_MPTY_SHIFT; reg = CSR_READ_4(sc, JME_STAT_FAIL); stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT; stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT; /* Account for previous counters. */ stat->rx_good_frames += ostat->rx_good_frames; stat->rx_crc_errs += ostat->rx_crc_errs; stat->rx_mii_errs += ostat->rx_mii_errs; stat->rx_fifo_oflows += ostat->rx_fifo_oflows; stat->rx_desc_empty += ostat->rx_desc_empty; stat->rx_bad_frames += ostat->rx_bad_frames; stat->tx_good_frames += ostat->tx_good_frames; stat->tx_bad_frames += ostat->tx_bad_frames; } static void jme_phy_down(struct jme_softc *sc) { uint32_t reg; jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN); if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) { reg = CSR_READ_4(sc, JME_PHYPOWDN); reg |= 0x0000000F; CSR_WRITE_4(sc, JME_PHYPOWDN, reg); reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4); reg &= ~PE1_GIGA_PDOWN_MASK; reg |= PE1_GIGA_PDOWN_D3; pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4); } } static void jme_phy_up(struct jme_softc *sc) { uint32_t reg; uint16_t bmcr; bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR); bmcr &= ~BMCR_PDOWN; jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr); if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) { reg = CSR_READ_4(sc, JME_PHYPOWDN); reg &= ~0x0000000F; CSR_WRITE_4(sc, JME_PHYPOWDN, reg); reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4); reg &= ~PE1_GIGA_PDOWN_MASK; reg |= PE1_GIGA_PDOWN_DIS; pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4); } } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (arg1 == NULL) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, PCCTX_COAL_TO_MIN, 
PCCTX_COAL_TO_MAX)); } static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX)); } static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX)); } static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX)); } static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, JME_PROC_MIN, JME_PROC_MAX)); } diff --git a/sys/dev/mcd/mcd.c b/sys/dev/mcd/mcd.c index 4d2acd145ca6..dacbb3d3e2f0 100644 --- a/sys/dev/mcd/mcd.c +++ b/sys/dev/mcd/mcd.c @@ -1,1652 +1,1652 @@ /*- * Copyright 1993 by Holger Veit (data part) * Copyright 1993 by Brian Moore (audio part) * Changes Copyright 1993 by Gary Clark II * Changes Copyright (C) 1994-1995 by Andrey A. Chernov, Moscow, Russia * * Rewrote probe routine to work on newer Mitsumi drives. * Additional changes (C) 1994 by Jordan K. Hubbard * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This software was developed by Holger Veit and Brian Moore * for use with "386BSD" and similar operating systems. * "Similar operating systems" includes mainly non-profit oriented * systems for research and education, including but not restricted to * "NetBSD", "FreeBSD", "Mach" (by CMU). * 4. Neither the name of the developer(s) nor the name "386BSD" * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER(S) BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); static const char __used COPYRIGHT[] = "mcd-driver (C)1993 by H.Veit & B.Moore"; #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MCD_TRACE(format, args...) 
\ { \ if (sc->debug) { \ device_printf(sc->dev, "status=0x%02x: ", \ sc->data.status); \ printf(format, ## args); \ } \ } #define RAW_PART 2 /* flags */ #define MCDVALID 0x0001 /* parameters loaded */ #define MCDINIT 0x0002 /* device is init'd */ #define MCDNEWMODEL 0x0004 /* device is new model */ #define MCDLABEL 0x0008 /* label is read */ #define MCDPROBING 0x0010 /* probing */ #define MCDREADRAW 0x0020 /* read raw mode (2352 bytes) */ #define MCDVOLINFO 0x0040 /* already read volinfo */ #define MCDTOC 0x0080 /* already read toc */ #define MCDMBXBSY 0x0100 /* local mbx is busy */ /* status */ #define MCDAUDIOBSY MCD_ST_AUDIOBSY /* playing audio */ #define MCDDSKCHNG MCD_ST_DSKCHNG /* sensed change of disk */ #define MCDDSKIN MCD_ST_DSKIN /* sensed disk in drive */ #define MCDDOOROPEN MCD_ST_DOOROPEN /* sensed door open */ /* These are apparently the different states a mitsumi can get up to */ #define MCDCDABSENT 0x0030 #define MCDCDPRESENT 0x0020 #define MCDSCLOSED 0x0080 #define MCDSOPEN 0x00a0 #define MCD_MD_UNKNOWN (-1) #define MCD_TYPE_UNKNOWN 0 #define MCD_TYPE_LU002S 1 #define MCD_TYPE_LU005S 2 #define MCD_TYPE_LU006S 3 #define MCD_TYPE_FX001 4 #define MCD_TYPE_FX001D 5 /* reader state machine */ #define MCD_S_BEGIN 0 #define MCD_S_BEGIN1 1 #define MCD_S_WAITSTAT 2 #define MCD_S_WAITMODE 3 #define MCD_S_WAITREAD 4 /* prototypes */ static void mcd_start(struct mcd_softc *); #ifdef NOTYET static void mcd_configure(struct mcd_softc *sc); #endif static int mcd_get(struct mcd_softc *, char *buf, int nmax); static int mcd_setflags(struct mcd_softc *); static int mcd_getstat(struct mcd_softc *,int sflg); static int mcd_send(struct mcd_softc *, int cmd,int nretrys); static void hsg2msf(int hsg, bcd_t *msf); static int msf2hsg(bcd_t *msf, int relative); static int mcd_volinfo(struct mcd_softc *); static int mcd_waitrdy(struct mcd_softc *,int dly); static void mcd_timeout(void *arg); static void mcd_doread(struct mcd_softc *, int state, struct mcd_mbx *mbxin); static void mcd_soft_reset(struct mcd_softc *); static int mcd_hard_reset(struct mcd_softc *); static int mcd_setmode(struct mcd_softc *, int mode); static int mcd_getqchan(struct mcd_softc *, struct mcd_qchninfo *q); static int mcd_subchan(struct mcd_softc *, struct ioc_read_subchannel *sc, int nocopyout); static int mcd_toc_header(struct mcd_softc *, struct ioc_toc_header *th); static int mcd_read_toc(struct mcd_softc *); static int mcd_toc_entrys(struct mcd_softc *, struct ioc_read_toc_entry *te); #if 0 static int mcd_toc_entry(struct mcd_softc *, struct ioc_read_toc_single_entry *te); #endif static int mcd_stop(struct mcd_softc *); static int mcd_eject(struct mcd_softc *); static int mcd_inject(struct mcd_softc *); static int mcd_playtracks(struct mcd_softc *, struct ioc_play_track *pt); static int mcd_play(struct mcd_softc *, struct mcd_read2 *pb); static int mcd_playmsf(struct mcd_softc *, struct ioc_play_msf *pt); static int mcd_playblocks(struct mcd_softc *, struct ioc_play_blocks *); static int mcd_pause(struct mcd_softc *); static int mcd_resume(struct mcd_softc *); static int mcd_lock_door(struct mcd_softc *, int lock); static int mcd_close_tray(struct mcd_softc *); static int mcd_size(struct cdev *dev); static d_open_t mcdopen; static d_close_t mcdclose; static d_ioctl_t mcdioctl; static d_strategy_t mcdstrategy; static struct cdevsw mcd_cdevsw = { .d_version = D_VERSION, .d_open = mcdopen, .d_close = mcdclose, .d_read = physread, .d_ioctl = mcdioctl, .d_strategy = mcdstrategy, .d_name = "mcd", .d_flags = D_DISK, }; 
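/*
 * The MCD_S_* values above drive the polled read state machine in
 * mcd_doread() below: a read walks BEGIN -> WAITSTAT -> WAITMODE ->
 * WAITREAD, re-arming a callout while the drive is busy.  A minimal
 * standalone sketch of that progression follows (illustration only;
 * the sketch_next_state() name, the ready flags, and the
 * MCD_STATE_SKETCH guard are hypothetical, not part of this driver):
 */
#ifdef MCD_STATE_SKETCH
static int
sketch_next_state(int state, int status_ready, int data_ready)
{
	switch (state) {
	case MCD_S_BEGIN:
	case MCD_S_BEGIN1:
		/* Status command has been issued; wait for the reply. */
		return (MCD_S_WAITSTAT);
	case MCD_S_WAITSTAT:
		/* Got status -> program raw/cooked mode next. */
		return (status_ready ? MCD_S_WAITMODE : MCD_S_WAITSTAT);
	case MCD_S_WAITMODE:
		/* Mode acknowledged -> issue the block read. */
		return (status_ready ? MCD_S_WAITREAD : MCD_S_WAITMODE);
	case MCD_S_WAITREAD:
		/* Block transferred, or keep polling via the callout. */
		return (data_ready ? MCD_S_BEGIN : MCD_S_WAITREAD);
	default:
		return (MCD_S_BEGIN);
	}
}
#endif /* MCD_STATE_SKETCH */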
#define MCD_RETRYS 5 #define MCD_RDRETRYS 8 #define CLOSE_TRAY_SECS 8 #define DISK_SENSE_SECS 3 #define WAIT_FRAC 4 /* several delays */ #define RDELAY_WAITSTAT 300 #define RDELAY_WAITMODE 300 #define RDELAY_WAITREAD 800 #define MIN_DELAY 15 #define DELAY_GETREPLY 5000000 int mcd_attach(struct mcd_softc *sc) { int unit; unit = device_get_unit(sc->dev); MCD_LOCK(sc); sc->data.flags |= MCDINIT; mcd_soft_reset(sc); bioq_init(&sc->data.head); #ifdef NOTYET /* wire controller for interrupts and dma */ mcd_configure(sc); #endif MCD_UNLOCK(sc); /* name filled in probe */ sc->mcd_dev_t = make_dev(&mcd_cdevsw, 8 * unit, UID_ROOT, GID_OPERATOR, 0640, "mcd%d", unit); sc->mcd_dev_t->si_drv1 = (void *)sc; callout_init_mtx(&sc->timer, &sc->mtx, 0); return (0); } static int mcdopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mcd_softc *sc; int r,retry; sc = (struct mcd_softc *)dev->si_drv1; /* invalidated in the meantime? mark all open part's invalid */ MCD_LOCK(sc); if (!(sc->data.flags & MCDVALID) && sc->data.openflags) { MCD_UNLOCK(sc); return (ENXIO); } if (mcd_getstat(sc, 1) == -1) { MCD_UNLOCK(sc); return (EIO); } if ( (sc->data.status & (MCDDSKCHNG|MCDDOOROPEN)) || !(sc->data.status & MCDDSKIN)) for (retry = 0; retry < DISK_SENSE_SECS * WAIT_FRAC; retry++) { (void) mtx_sleep(sc, &sc->mtx, PSOCK | PCATCH, "mcdsn1", hz/WAIT_FRAC); if ((r = mcd_getstat(sc, 1)) == -1) { MCD_UNLOCK(sc); return (EIO); } if (r != -2) break; } if (sc->data.status & MCDDOOROPEN) { MCD_UNLOCK(sc); device_printf(sc->dev, "door is open\n"); return (ENXIO); } if (!(sc->data.status & MCDDSKIN)) { MCD_UNLOCK(sc); device_printf(sc->dev, "no CD inside\n"); return (ENXIO); } if (sc->data.status & MCDDSKCHNG) { MCD_UNLOCK(sc); device_printf(sc->dev, "CD not sensed\n"); return (ENXIO); } if (mcd_size(dev) < 0) { MCD_UNLOCK(sc); device_printf(sc->dev, "failed to get disk size\n"); return (ENXIO); } sc->data.openflags = 1; sc->data.partflags |= MCDREADRAW; sc->data.flags |= MCDVALID; (void) mcd_lock_door(sc, MCD_LK_LOCK); if (!(sc->data.flags & MCDVALID)) { MCD_UNLOCK(sc); return (ENXIO); } r = mcd_read_toc(sc); MCD_UNLOCK(sc); return (r); } static int mcdclose(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mcd_softc *sc; sc = (struct mcd_softc *)dev->si_drv1; MCD_LOCK(sc); KASSERT(sc->data.openflags, ("device not open")); (void) mcd_lock_door(sc, MCD_LK_UNLOCK); sc->data.openflags = 0; sc->data.partflags &= ~MCDREADRAW; MCD_UNLOCK(sc); return (0); } static void mcdstrategy(struct bio *bp) { struct mcd_softc *sc; sc = (struct mcd_softc *)bp->bio_dev->si_drv1; /* if device invalidated (e.g. 
media change, door open), error */ MCD_LOCK(sc); if (!(sc->data.flags & MCDVALID)) { device_printf(sc->dev, "media changed\n"); bp->bio_error = EIO; goto bad; } /* read only */ if (!(bp->bio_cmd == BIO_READ)) { bp->bio_error = EROFS; goto bad; } /* no data to read */ if (bp->bio_bcount == 0) goto done; if (!(sc->data.flags & MCDTOC)) { bp->bio_error = EIO; goto bad; } bp->bio_resid = 0; /* queue it */ bioq_disksort(&sc->data.head, bp); /* now check whether we can perform processing */ mcd_start(sc); MCD_UNLOCK(sc); return; bad: bp->bio_flags |= BIO_ERROR; done: MCD_UNLOCK(sc); bp->bio_resid = bp->bio_bcount; biodone(bp); return; } static void mcd_start(struct mcd_softc *sc) { struct bio *bp; MCD_ASSERT_LOCKED(sc); if (sc->data.flags & MCDMBXBSY) { return; } bp = bioq_takefirst(&sc->data.head); if (bp != 0) { /* block found to process, dequeue */ /*MCD_TRACE("mcd_start: found block bp=0x%x\n",bp,0,0,0);*/ sc->data.flags |= MCDMBXBSY; } else { /* nothing to do */ return; } sc->data.mbx.retry = MCD_RETRYS; sc->data.mbx.bp = bp; mcd_doread(sc, MCD_S_BEGIN,&(sc->data.mbx)); return; } static int mcdioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) { struct mcd_softc *sc; int retry,r; sc = (struct mcd_softc *)dev->si_drv1; MCD_LOCK(sc); if (mcd_getstat(sc, 1) == -1) { /* detect disk change too */ MCD_UNLOCK(sc); return (EIO); } MCD_TRACE("ioctl called 0x%lx\n", cmd); switch (cmd) { case CDIOCSETPATCH: case CDIOCGETVOL: case CDIOCSETVOL: case CDIOCSETMONO: case CDIOCSETSTERIO: case CDIOCSETMUTE: case CDIOCSETLEFT: case CDIOCSETRIGHT: MCD_UNLOCK(sc); return (EINVAL); case CDIOCEJECT: r = mcd_eject(sc); MCD_UNLOCK(sc); return (r); case CDIOCSETDEBUG: sc->data.debug = 1; MCD_UNLOCK(sc); return (0); case CDIOCCLRDEBUG: sc->data.debug = 0; MCD_UNLOCK(sc); return (0); case CDIOCRESET: r = mcd_hard_reset(sc); MCD_UNLOCK(sc); return (r); case CDIOCALLOW: r = mcd_lock_door(sc, MCD_LK_UNLOCK); MCD_UNLOCK(sc); return (r); case CDIOCPREVENT: r = mcd_lock_door(sc, MCD_LK_LOCK); MCD_UNLOCK(sc); return (r); case CDIOCCLOSE: r = mcd_inject(sc); MCD_UNLOCK(sc); return (r); } if (!(sc->data.flags & MCDVALID)) { if ( (sc->data.status & (MCDDSKCHNG|MCDDOOROPEN)) || !(sc->data.status & MCDDSKIN)) for (retry = 0; retry < DISK_SENSE_SECS * WAIT_FRAC; retry++) { (void) mtx_sleep(sc, &sc->mtx, PSOCK | PCATCH, "mcdsn2", hz/WAIT_FRAC); if ((r = mcd_getstat(sc, 1)) == -1) { MCD_UNLOCK(sc); return (EIO); } if (r != -2) break; } if ( (sc->data.status & (MCDDOOROPEN|MCDDSKCHNG)) || !(sc->data.status & MCDDSKIN) || mcd_size(dev) < 0 ) { MCD_UNLOCK(sc); return (ENXIO); } sc->data.flags |= MCDVALID; sc->data.partflags |= MCDREADRAW; (void) mcd_lock_door(sc, MCD_LK_LOCK); if (!(sc->data.flags & MCDVALID)) { MCD_UNLOCK(sc); return (ENXIO); } } switch (cmd) { case DIOCGMEDIASIZE: *(off_t *)addr = (off_t)sc->data.disksize * sc->data.blksize; r = 0; break; case DIOCGSECTORSIZE: *(u_int *)addr = sc->data.blksize; r = 0; break; case CDIOCPLAYTRACKS: r = mcd_playtracks(sc, (struct ioc_play_track *) addr); break; case CDIOCPLAYBLOCKS: r = mcd_playblocks(sc, (struct ioc_play_blocks *) addr); break; case CDIOCPLAYMSF: r = mcd_playmsf(sc, (struct ioc_play_msf *) addr); break; case CDIOCREADSUBCHANNEL_SYSSPACE: return mcd_subchan(sc, (struct ioc_read_subchannel *) addr, 1); case CDIOCREADSUBCHANNEL: return mcd_subchan(sc, (struct ioc_read_subchannel *) addr, 0); case CDIOREADTOCHEADER: r = mcd_toc_header(sc, (struct ioc_toc_header *) addr); break; case CDIOREADTOCENTRYS: return mcd_toc_entrys(sc, (struct 
ioc_read_toc_entry *) addr); case CDIOCRESUME: r = mcd_resume(sc); break; case CDIOCPAUSE: r = mcd_pause(sc); break; case CDIOCSTART: if (mcd_setmode(sc, MCD_MD_COOKED) != 0) r = EIO; else r = 0; break; case CDIOCSTOP: r = mcd_stop(sc); break; default: r = ENOTTY; } MCD_UNLOCK(sc); return (r); } static int mcd_size(struct cdev *dev) { struct mcd_softc *sc; int size; sc = (struct mcd_softc *)dev->si_drv1; if (mcd_volinfo(sc) == 0) { sc->data.blksize = MCDBLK; size = msf2hsg(sc->data.volinfo.vol_msf, 0); sc->data.disksize = size * (MCDBLK/DEV_BSIZE); return (0); } return (-1); } /*************************************************************** * lower level of driver starts here **************************************************************/ #ifdef NOTDEF static char irqs[] = { 0x00,0x00,0x10,0x20,0x00,0x30,0x00,0x00, 0x00,0x10,0x40,0x50,0x00,0x00,0x00,0x00 }; static char drqs[] = { 0x00,0x01,0x00,0x03,0x00,0x05,0x06,0x07, }; #endif #ifdef NOT_YET static void mcd_configure(struct mcd_softc *sc) { MCD_WRITE(sc, MCD_REG_CONFIG, sc->data.config); } #endif /* Wait for non-busy - return 0 on timeout */ static int twiddle_thumbs(struct mcd_softc *sc, int count, char *whine) { int i; for (i = 0; i < count; i++) { if (!(MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL)) return (1); } if (bootverbose) device_printf(sc->dev, "timeout %s\n", whine); return (0); } /* check to see if a Mitsumi CD-ROM is attached to the ISA bus */ int mcd_probe(struct mcd_softc *sc) { int i, j; unsigned char stbytes[3]; sc->data.flags = MCDPROBING; #ifdef NOTDEF /* get irq/drq configuration word */ sc->data.config = irqs[dev->id_irq]; /* | drqs[dev->id_drq];*/ #else sc->data.config = 0; #endif /* send a reset */ MCD_WRITE(sc, MCD_FLAGS, M_RESET); /* * delay awhile by getting any pending garbage (old data) and * throwing it away. 
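 * A million polled reads of the flag port is simply a generous,
 * timing-independent way to drain the drive's reply FIFO after the
 * reset; every byte read here is discarded.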
*/ for (i = 1000000; i != 0; i--) (void)MCD_READ(sc, MCD_FLAGS); /* Get status */ MCD_WRITE(sc, MCD_DATA, MCD_CMDGETSTAT); if (!twiddle_thumbs(sc, 1000000, "getting status")) return (ENXIO); /* Timeout */ /* Get version information */ MCD_WRITE(sc, MCD_DATA, MCD_CMDCONTINFO); for (j = 0; j < 3; j++) { if (!twiddle_thumbs(sc, 3000, "getting version info")) return (ENXIO); stbytes[j] = (MCD_READ(sc, MCD_DATA) & 0xFF); } if (stbytes[1] == stbytes[2]) return (ENXIO); if (stbytes[2] >= 4 || stbytes[1] != 'M') { MCD_WRITE(sc, MCD_CTRL, M_PICKLE); sc->data.flags |= MCDNEWMODEL; } sc->data.read_command = MCD_CMDSINGLESPEEDREAD; switch (stbytes[1]) { case 'M': if (stbytes[2] <= 2) { sc->data.type = MCD_TYPE_LU002S; sc->data.name = "Mitsumi LU002S"; } else if (stbytes[2] <= 5) { sc->data.type = MCD_TYPE_LU005S; sc->data.name = "Mitsumi LU005S"; } else { sc->data.type = MCD_TYPE_LU006S; sc->data.name = "Mitsumi LU006S"; } break; case 'F': sc->data.type = MCD_TYPE_FX001; sc->data.name = "Mitsumi FX001"; break; case 'D': sc->data.type = MCD_TYPE_FX001D; sc->data.name = "Mitsumi FX001D"; sc->data.read_command = MCD_CMDDOUBLESPEEDREAD; break; default: sc->data.type = MCD_TYPE_UNKNOWN; sc->data.name = "Mitsumi ???"; break; } if (bootverbose) device_printf(sc->dev, "type %s, version info: %c %x\n", sc->data.name, stbytes[1], stbytes[2]); return (0); } static int mcd_waitrdy(struct mcd_softc *sc, int dly) { int i; /* wait until flag port senses status ready */ for (i=0; i<dly; i+=MIN_DELAY) { if (!(MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL)) return (0); DELAY(MIN_DELAY); } return (-1); } static int mcd_getreply(struct mcd_softc *sc, int dly) { MCD_ASSERT_LOCKED(sc); /* wait data to become ready */ if (mcd_waitrdy(sc, dly)<0) { device_printf(sc->dev, "timeout getreply\n"); return (-1); } /* get the data */ return (MCD_READ(sc, MCD_REG_STATUS) & 0xFF); } static int mcd_getstat(struct mcd_softc *sc, int sflg) { int i; /* get the status */ if (sflg) MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDGETSTAT); i = mcd_getreply(sc, DELAY_GETREPLY); if (i<0 || (i & MCD_ST_CMDCHECK)) { sc->data.curr_mode = MCD_MD_UNKNOWN; return (-1); } sc->data.status = i; if (mcd_setflags(sc) < 0) return (-2); return (sc->data.status); } static int mcd_setflags(struct mcd_softc *sc) { /* check flags */ if ( (sc->data.status & (MCDDSKCHNG|MCDDOOROPEN)) || !(sc->data.status & MCDDSKIN)) { MCD_TRACE("setflags: sensed DSKCHNG or DOOROPEN or !DSKIN\n"); mcd_soft_reset(sc); return (-1); } if (sc->data.status & MCDAUDIOBSY) sc->data.audio_status = CD_AS_PLAY_IN_PROGRESS; else if (sc->data.audio_status == CD_AS_PLAY_IN_PROGRESS) sc->data.audio_status = CD_AS_PLAY_COMPLETED; return (0); } static int mcd_get(struct mcd_softc *sc, char *buf, int nmax) { int i,k; for (i=0; i<nmax; i++) { /* wait for data */ if ((k = mcd_getreply(sc, DELAY_GETREPLY)) < 0) { device_printf(sc->dev, "timeout mcd_get\n"); return (-1); } buf[i] = k; } return (i); } static int mcd_send(struct mcd_softc *sc, int cmd,int nretrys) { int i,k=0; /*MCD_TRACE("mcd_send: command = 0x%02x\n",cmd,0,0,0);*/ for (i=0; i<nretrys; i++) { MCD_WRITE(sc, MCD_REG_COMMAND, cmd); if ((k=mcd_getstat(sc, 0)) != -1) break; } if (k == -2) { device_printf(sc->dev, "media changed\n"); return (-1); } if (i == nretrys) { device_printf(sc->dev, "mcd_send retry cnt exceeded\n"); return (-1); } /*MCD_TRACE("mcd_send: done\n",0,0,0,0);*/ return (0); } static void hsg2msf(int hsg, bcd_t *msf) { hsg += 150; F_msf(msf) = bin2bcd(hsg % 75); hsg /= 75; S_msf(msf) = bin2bcd(hsg % 60); hsg /= 60; M_msf(msf) = bin2bcd(hsg); } static int msf2hsg(bcd_t *msf, int relative) { return (bcd2bin(M_msf(msf)) * 60 + bcd2bin(S_msf(msf))) * 75 + bcd2bin(F_msf(msf)) - (!relative) * 150; } static int mcd_volinfo(struct mcd_softc *sc) { /* Just return if we already have it */ if (sc->data.flags & MCDVOLINFO) return (0); /*MCD_TRACE("mcd_volinfo: enter\n",0,0,0,0);*/ /* send volume info command */ if (mcd_send(sc, MCD_CMDGETVOLINFO,MCD_RETRYS) < 0) return (EIO); /* get data */ if (mcd_get(sc, (char*) &sc->data.volinfo,sizeof(struct
mcd_volinfo)) < 0) { device_printf(sc->dev, "mcd_volinfo: error read data\n"); return (EIO); } if (sc->data.volinfo.trk_low > 0 && sc->data.volinfo.trk_high >= sc->data.volinfo.trk_low ) { sc->data.flags |= MCDVOLINFO; /* volinfo is OK */ return (0); } return (EINVAL); } /* state machine to process read requests * initialize with MCD_S_BEGIN: calculate sizes, and read status * MCD_S_WAITSTAT: wait for status reply, set mode * MCD_S_WAITMODE: waits for status reply from set mode, set read command * MCD_S_WAITREAD: wait for read ready, read data */ static void mcd_timeout(void *arg) { struct mcd_softc *sc; sc = (struct mcd_softc *)arg; MCD_ASSERT_LOCKED(sc); mcd_doread(sc, sc->ch_state, sc->ch_mbxsave); } static void mcd_doread(struct mcd_softc *sc, int state, struct mcd_mbx *mbxin) { struct mcd_mbx *mbx; struct bio *bp; int rm, i, k; struct mcd_read2 rbuf; int blknum; caddr_t addr; MCD_ASSERT_LOCKED(sc); mbx = (state!=MCD_S_BEGIN) ? sc->ch_mbxsave : mbxin; bp = mbx->bp; loop: switch (state) { case MCD_S_BEGIN: mbx = sc->ch_mbxsave = mbxin; case MCD_S_BEGIN1: retry_status: /* get status */ MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDGETSTAT); mbx->count = RDELAY_WAITSTAT; sc->ch_state = MCD_S_WAITSTAT; callout_reset(&sc->timer, hz/100, mcd_timeout, sc); /* XXX */ return; case MCD_S_WAITSTAT: sc->ch_state = MCD_S_WAITSTAT; callout_stop(&sc->timer); if (mbx->count-- >= 0) { if (MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL) { sc->ch_state = MCD_S_WAITSTAT; callout_reset(&sc->timer, hz/100, mcd_timeout, sc); /* XXX */ return; } sc->data.status = MCD_READ(sc, MCD_REG_STATUS) & 0xFF; if (sc->data.status & MCD_ST_CMDCHECK) goto retry_status; if (mcd_setflags(sc) < 0) goto changed; MCD_TRACE("got WAITSTAT delay=%d\n", RDELAY_WAITSTAT-mbx->count); /* reject, if audio active */ if (sc->data.status & MCDAUDIOBSY) { device_printf(sc->dev, "audio is active\n"); goto readerr; } retry_mode: /* to check for raw/cooked mode */ if (sc->data.flags & MCDREADRAW) { rm = MCD_MD_RAW; mbx->sz = MCDRBLK; } else { rm = MCD_MD_COOKED; mbx->sz = sc->data.blksize; } if (rm == sc->data.curr_mode) goto modedone; mbx->count = RDELAY_WAITMODE; sc->data.curr_mode = MCD_MD_UNKNOWN; mbx->mode = rm; MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDSETMODE); MCD_WRITE(sc, MCD_REG_COMMAND, rm); sc->ch_state = MCD_S_WAITMODE; callout_reset(&sc->timer, hz / 100, mcd_timeout, sc); /* XXX */ return; } else { device_printf(sc->dev, "timeout getstatus\n"); goto readerr; } case MCD_S_WAITMODE: sc->ch_state = MCD_S_WAITMODE; callout_stop(&sc->timer); if (mbx->count-- < 0) { device_printf(sc->dev, "timeout set mode\n"); goto readerr; } if (MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL) { sc->ch_state = MCD_S_WAITMODE; callout_reset(&sc->timer, hz / 100, mcd_timeout, sc); return; } sc->data.status = MCD_READ(sc, MCD_REG_STATUS) & 0xFF; if (sc->data.status & MCD_ST_CMDCHECK) { sc->data.curr_mode = MCD_MD_UNKNOWN; goto retry_mode; } if (mcd_setflags(sc) < 0) goto changed; sc->data.curr_mode = mbx->mode; MCD_TRACE("got WAITMODE delay=%d\n", RDELAY_WAITMODE-mbx->count); modedone: /* for first block */ - mbx->nblk = (bp->bio_bcount + (mbx->sz-1)) / mbx->sz; + mbx->nblk = howmany(bp->bio_bcount, mbx->sz); mbx->skip = 0; nextblock: blknum = bp->bio_offset / mbx->sz + mbx->skip/mbx->sz; MCD_TRACE("mcd_doread: read blknum=%d for bp=%p\n", blknum, bp); /* build parameter block */ hsg2msf(blknum,rbuf.start_msf); retry_read: /* send the read command */ MCD_WRITE(sc, MCD_REG_COMMAND, sc->data.read_command); MCD_WRITE(sc, MCD_REG_COMMAND, rbuf.start_msf[0]); 
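		/*
		 * The remaining command bytes follow: the last two BCD
		 * address bytes (second, frame) and then what appears to
		 * be a 24-bit transfer count -- 0, 0, 1 -- i.e. a single
		 * block fetched per read command.
		 */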
MCD_WRITE(sc, MCD_REG_COMMAND, rbuf.start_msf[1]); MCD_WRITE(sc, MCD_REG_COMMAND, rbuf.start_msf[2]); MCD_WRITE(sc, MCD_REG_COMMAND, 0); MCD_WRITE(sc, MCD_REG_COMMAND, 0); MCD_WRITE(sc, MCD_REG_COMMAND, 1); /* Spin briefly (<= 2ms) to avoid missing next block */ for (i = 0; i < 20; i++) { k = MCD_READ(sc, MCD_FLAGS); if (!(k & MFL_DATA_NOT_AVAIL)) goto got_it; DELAY(100); } mbx->count = RDELAY_WAITREAD; sc->ch_state = MCD_S_WAITREAD; callout_reset(&sc->timer, hz / 100, mcd_timeout, sc); /* XXX */ return; case MCD_S_WAITREAD: sc->ch_state = MCD_S_WAITREAD; callout_stop(&sc->timer); if (mbx->count-- > 0) { k = MCD_READ(sc, MCD_FLAGS); if (!(k & MFL_DATA_NOT_AVAIL)) { /* XXX */ MCD_TRACE("got data delay=%d\n", RDELAY_WAITREAD-mbx->count); got_it: /* data is ready */ addr = bp->bio_data + mbx->skip; MCD_WRITE(sc, MCD_REG_CTL2,0x04); /* XXX */ for (i=0; i<mbx->sz; i++) *addr++ = MCD_READ(sc, MCD_REG_RDATA); MCD_WRITE(sc, MCD_REG_CTL2,0x0c); /* XXX */ k = MCD_READ(sc, MCD_FLAGS); /* If we still have some junk, read it too */ if (!(k & MFL_DATA_NOT_AVAIL)) { MCD_WRITE(sc, MCD_REG_CTL2, 0x04); /* XXX */ (void)MCD_READ(sc, MCD_REG_RDATA); (void)MCD_READ(sc, MCD_REG_RDATA); MCD_WRITE(sc, MCD_REG_CTL2, 0x0c); /* XXX */ } if (--mbx->nblk > 0) { mbx->skip += mbx->sz; goto nextblock; } /* return buffer */ bp->bio_resid = 0; biodone(bp); sc->data.flags &= ~(MCDMBXBSY|MCDREADRAW); mcd_start(sc); return; } if (!(k & MFL_STATUS_NOT_AVAIL)) { sc->data.status = MCD_READ(sc, MCD_REG_STATUS) & 0xFF; if (sc->data.status & MCD_ST_CMDCHECK) goto retry_read; if (mcd_setflags(sc) < 0) goto changed; } sc->ch_state = MCD_S_WAITREAD; callout_reset(&sc->timer, hz / 100, mcd_timeout, sc); /* XXX */ return; } else { device_printf(sc->dev, "timeout read data\n"); goto readerr; } } readerr: if (mbx->retry-- > 0) { device_printf(sc->dev, "retrying\n"); state = MCD_S_BEGIN1; goto loop; } harderr: /* invalidate the buffer */ bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; biodone(bp); sc->data.flags &= ~(MCDMBXBSY|MCDREADRAW); mcd_start(sc); return; changed: device_printf(sc->dev, "media changed\n"); goto harderr; #ifdef NOTDEF device_printf(sc->dev, "unit timeout, resetting\n"); MCD_WRITE(sc, MCD_REG_RESET, MCD_CMDRESET); DELAY(300000); (void)mcd_getstat(sc, 1); (void)mcd_getstat(sc, 1); /*sc->data.status &= ~MCDDSKCHNG; */ sc->data.debug = 1; /* preventive set debug mode */ #endif } static int mcd_lock_door(struct mcd_softc *sc, int lock) { MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDLOCKDRV); MCD_WRITE(sc, MCD_REG_COMMAND, lock); if (mcd_getstat(sc, 0) == -1) return (EIO); return (0); } static int mcd_close_tray(struct mcd_softc *sc) { int retry, r; if (mcd_getstat(sc, 1) == -1) return (EIO); if (sc->data.status & MCDDOOROPEN) { MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDCLOSETRAY); for (retry = 0; retry < CLOSE_TRAY_SECS * WAIT_FRAC; retry++) { if (MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL) (void) mtx_sleep(sc, &sc->mtx, PSOCK | PCATCH, "mcdcls", hz/WAIT_FRAC); else { if ((r = mcd_getstat(sc, 0)) == -1) return (EIO); return (0); } } return (ENXIO); } return (0); } static int mcd_eject(struct mcd_softc *sc) { int r; if (mcd_getstat(sc, 1) == -1) /* detect disk change too */ return (EIO); if (sc->data.status & MCDDOOROPEN) return (0); if ((r = mcd_stop(sc)) == EIO) return (r); MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDEJECTDISK); if (mcd_getstat(sc, 0) == -1) return (EIO); return (0); } static int mcd_inject(struct mcd_softc *sc) { if (mcd_getstat(sc, 1) == -1) /* detect disk change too */ return (EIO); if (sc->data.status &
MCDDOOROPEN) return mcd_close_tray(sc); return (0); } static int mcd_hard_reset(struct mcd_softc *sc) { MCD_WRITE(sc, MCD_REG_RESET, MCD_CMDRESET); sc->data.curr_mode = MCD_MD_UNKNOWN; sc->data.audio_status = CD_AS_AUDIO_INVALID; return (0); } static void mcd_soft_reset(struct mcd_softc *sc) { sc->data.flags &= (MCDINIT|MCDPROBING|MCDNEWMODEL); sc->data.curr_mode = MCD_MD_UNKNOWN; sc->data.partflags = 0; sc->data.audio_status = CD_AS_AUDIO_INVALID; } static int mcd_setmode(struct mcd_softc *sc, int mode) { int retry, st; if (sc->data.curr_mode == mode) return (0); if (sc->data.debug) device_printf(sc->dev, "setting mode to %d\n", mode); for(retry=0; retry<MCD_RETRYS; retry++) { sc->data.curr_mode = MCD_MD_UNKNOWN; MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDSETMODE); MCD_WRITE(sc, MCD_REG_COMMAND, mode); if ((st = mcd_getstat(sc, 0)) >= 0) { sc->data.curr_mode = mode; return (0); } if (st == -2) { device_printf(sc->dev, "media changed\n"); break; } } return (-1); } static int mcd_toc_header(struct mcd_softc *sc, struct ioc_toc_header *th) { int r; if ((r = mcd_volinfo(sc)) != 0) return (r); th->starting_track = bcd2bin(sc->data.volinfo.trk_low); th->ending_track = bcd2bin(sc->data.volinfo.trk_high); th->len = 2 * sizeof(u_char) /* start & end tracks */ + (th->ending_track + 1 - th->starting_track + 1) * sizeof(struct cd_toc_entry); return (0); } static int mcd_read_toc(struct mcd_softc *sc) { struct ioc_toc_header th; struct mcd_qchninfo q; int rc, trk, idx, retry; /* Only read TOC if needed */ if (sc->data.flags & MCDTOC) return (0); if (sc->data.debug) device_printf(sc->dev, "reading toc header\n"); if ((rc = mcd_toc_header(sc, &th)) != 0) return (rc); if (mcd_send(sc, MCD_CMDSTOPAUDIO, MCD_RETRYS) < 0) return (EIO); if (mcd_setmode(sc, MCD_MD_TOC) != 0) return (EIO); if (sc->data.debug) device_printf(sc->dev, "get_toc reading qchannel info\n"); for(trk=th.starting_track; trk<=th.ending_track; trk++) sc->data.toc[trk].idx_no = 0; trk = th.ending_track - th.starting_track + 1; for(retry=0; retry<600 && trk>0; retry++) { if (mcd_getqchan(sc, &q) < 0) break; idx = bcd2bin(q.idx_no); if (idx>=th.starting_track && idx<=th.ending_track && q.trk_no==0) { if (sc->data.toc[idx].idx_no == 0) { sc->data.toc[idx] = q; trk--; } } } if (mcd_setmode(sc, MCD_MD_COOKED) != 0) return (EIO); if (trk != 0) return (ENXIO); /* add a fake last+1 */ idx = th.ending_track + 1; sc->data.toc[idx].control = sc->data.toc[idx-1].control; sc->data.toc[idx].addr_type = sc->data.toc[idx-1].addr_type; sc->data.toc[idx].trk_no = 0; sc->data.toc[idx].idx_no = MCD_LASTPLUS1; sc->data.toc[idx].hd_pos_msf[0] = sc->data.volinfo.vol_msf[0]; sc->data.toc[idx].hd_pos_msf[1] = sc->data.volinfo.vol_msf[1]; sc->data.toc[idx].hd_pos_msf[2] = sc->data.volinfo.vol_msf[2]; if (sc->data.debug) { int i; for (i = th.starting_track; i <= idx; i++) device_printf(sc->dev, "trk %d idx %d pos %d %d %d\n", i, sc->data.toc[i].idx_no > 0x99 ?
sc->data.toc[i].idx_no : bcd2bin(sc->data.toc[i].idx_no), bcd2bin(sc->data.toc[i].hd_pos_msf[0]), bcd2bin(sc->data.toc[i].hd_pos_msf[1]), bcd2bin(sc->data.toc[i].hd_pos_msf[2])); } sc->data.flags |= MCDTOC; return (0); } #if 0 static int mcd_toc_entry(struct mcd_softc *sc, struct ioc_read_toc_single_entry *te) { struct ioc_toc_header th; int rc, trk; if (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT) return (EINVAL); /* Copy the toc header */ if ((rc = mcd_toc_header(sc, &th)) != 0) return (rc); /* verify starting track */ trk = te->track; if (trk == 0) trk = th.starting_track; else if (trk == MCD_LASTPLUS1) trk = th.ending_track + 1; else if (trk < th.starting_track || trk > th.ending_track + 1) return (EINVAL); /* Make sure we have a valid toc */ if ((rc=mcd_read_toc(sc)) != 0) return (rc); /* Copy the TOC data. */ if (sc->data.toc[trk].idx_no == 0) return (EIO); te->entry.control = sc->data.toc[trk].control; te->entry.addr_type = sc->data.toc[trk].addr_type; te->entry.track = sc->data.toc[trk].idx_no > 0x99 ? sc->data.toc[trk].idx_no : bcd2bin(sc->data.toc[trk].idx_no); switch (te->address_format) { case CD_MSF_FORMAT: te->entry.addr.msf.unused = 0; te->entry.addr.msf.minute = bcd2bin(sc->data.toc[trk].hd_pos_msf[0]); te->entry.addr.msf.second = bcd2bin(sc->data.toc[trk].hd_pos_msf[1]); te->entry.addr.msf.frame = bcd2bin(sc->data.toc[trk].hd_pos_msf[2]); break; case CD_LBA_FORMAT: te->entry.addr.lba = htonl(msf2hsg(sc->data.toc[trk].hd_pos_msf, 0)); break; } return (0); } #endif static int mcd_toc_entrys(struct mcd_softc *sc, struct ioc_read_toc_entry *te) { struct cd_toc_entry entries[MCD_MAXTOCS]; struct ioc_toc_header th; int rc, n, trk, len; if ( te->data_len < sizeof(entries[0]) || (te->data_len % sizeof(entries[0])) != 0 || (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT) ) return (EINVAL); /* Copy the toc header */ if ((rc = mcd_toc_header(sc, &th)) != 0) return (rc); /* verify starting track */ trk = te->starting_track; if (trk == 0) trk = th.starting_track; else if (trk == MCD_LASTPLUS1) trk = th.ending_track + 1; else if (trk < th.starting_track || trk > th.ending_track + 1) return (EINVAL); len = ((th.ending_track + 1 - trk) + 1) * sizeof(entries[0]); if (te->data_len < len) len = te->data_len; if (len > sizeof(entries)) return (EINVAL); /* Make sure we have a valid toc */ if ((rc=mcd_read_toc(sc)) != 0) return (rc); /* Copy the TOC data. */ for (n = 0; len > 0 && trk <= th.ending_track + 1; trk++) { if (sc->data.toc[trk].idx_no == 0) continue; entries[n].control = sc->data.toc[trk].control; entries[n].addr_type = sc->data.toc[trk].addr_type; entries[n].track = sc->data.toc[trk].idx_no > 0x99 ? 
sc->data.toc[trk].idx_no : bcd2bin(sc->data.toc[trk].idx_no); switch (te->address_format) { case CD_MSF_FORMAT: entries[n].addr.msf.unused = 0; entries[n].addr.msf.minute = bcd2bin(sc->data.toc[trk].hd_pos_msf[0]); entries[n].addr.msf.second = bcd2bin(sc->data.toc[trk].hd_pos_msf[1]); entries[n].addr.msf.frame = bcd2bin(sc->data.toc[trk].hd_pos_msf[2]); break; case CD_LBA_FORMAT: entries[n].addr.lba = htonl(msf2hsg(sc->data.toc[trk].hd_pos_msf, 0)); break; } len -= sizeof(struct cd_toc_entry); n++; } /* copy the data back */ MCD_UNLOCK(sc); return copyout(entries, te->data, n * sizeof(struct cd_toc_entry)); } static int mcd_stop(struct mcd_softc *sc) { /* Verify current status */ if (sc->data.audio_status != CD_AS_PLAY_IN_PROGRESS && sc->data.audio_status != CD_AS_PLAY_PAUSED && sc->data.audio_status != CD_AS_PLAY_COMPLETED) { if (sc->data.debug) device_printf(sc->dev, "stop attempted when not playing, audio status %d\n", sc->data.audio_status); return (EINVAL); } if (sc->data.audio_status == CD_AS_PLAY_IN_PROGRESS) if (mcd_send(sc, MCD_CMDSTOPAUDIO, MCD_RETRYS) < 0) return (EIO); sc->data.audio_status = CD_AS_PLAY_COMPLETED; return (0); } static int mcd_getqchan(struct mcd_softc *sc, struct mcd_qchninfo *q) { if (mcd_send(sc, MCD_CMDGETQCHN, MCD_RETRYS) < 0) return (-1); if (mcd_get(sc, (char *) q, sizeof(struct mcd_qchninfo)) < 0) return (-1); if (sc->data.debug) { device_printf(sc->dev, "getqchan control=0x%x addr_type=0x%x trk=%d ind=%d ttm=%d:%d.%d dtm=%d:%d.%d\n", q->control, q->addr_type, bcd2bin(q->trk_no), bcd2bin(q->idx_no), bcd2bin(q->trk_size_msf[0]), bcd2bin(q->trk_size_msf[1]), bcd2bin(q->trk_size_msf[2]), bcd2bin(q->hd_pos_msf[0]), bcd2bin(q->hd_pos_msf[1]), bcd2bin(q->hd_pos_msf[2])); } return (0); } static int mcd_subchan(struct mcd_softc *sc, struct ioc_read_subchannel *sch, int nocopyout) { struct mcd_qchninfo q; struct cd_sub_channel_info data; int lba; if (sc->data.debug) device_printf(sc->dev, "subchan af=%d, df=%d\n", sch->address_format, sch->data_format); if (sch->address_format != CD_MSF_FORMAT && sch->address_format != CD_LBA_FORMAT) return (EINVAL); if (sch->data_format != CD_CURRENT_POSITION && sch->data_format != CD_MEDIA_CATALOG) return (EINVAL); if (mcd_setmode(sc, MCD_MD_COOKED) != 0) return (EIO); if (mcd_getqchan(sc, &q) < 0) return (EIO); data.header.audio_status = sc->data.audio_status; data.what.position.data_format = sch->data_format; switch (sch->data_format) { case CD_MEDIA_CATALOG: data.what.media_catalog.mc_valid = 1; data.what.media_catalog.mc_number[0] = '\0'; break; case CD_CURRENT_POSITION: data.what.position.control = q.control; data.what.position.addr_type = q.addr_type; data.what.position.track_number = bcd2bin(q.trk_no); data.what.position.index_number = bcd2bin(q.idx_no); switch (sch->address_format) { case CD_MSF_FORMAT: data.what.position.reladdr.msf.unused = 0; data.what.position.reladdr.msf.minute = bcd2bin(q.trk_size_msf[0]); data.what.position.reladdr.msf.second = bcd2bin(q.trk_size_msf[1]); data.what.position.reladdr.msf.frame = bcd2bin(q.trk_size_msf[2]); data.what.position.absaddr.msf.unused = 0; data.what.position.absaddr.msf.minute = bcd2bin(q.hd_pos_msf[0]); data.what.position.absaddr.msf.second = bcd2bin(q.hd_pos_msf[1]); data.what.position.absaddr.msf.frame = bcd2bin(q.hd_pos_msf[2]); break; case CD_LBA_FORMAT: lba = msf2hsg(q.trk_size_msf, 1); /* * Pre-gap has index number of 0, and decreasing MSF * address. Must be converted to negative LBA, per * SCSI spec. 
*/ if (data.what.position.index_number == 0) lba = -lba; data.what.position.reladdr.lba = htonl(lba); data.what.position.absaddr.lba = htonl(msf2hsg(q.hd_pos_msf, 0)); break; } break; } MCD_UNLOCK(sc); if (nocopyout == 0) return copyout(&data, sch->data, min(sizeof(struct cd_sub_channel_info), sch->data_len)); bcopy(&data, sch->data, min(sizeof(struct cd_sub_channel_info), sch->data_len)); return (0); } static int mcd_playmsf(struct mcd_softc *sc, struct ioc_play_msf *p) { struct mcd_read2 pb; if (sc->data.debug) device_printf(sc->dev, "playmsf: from %d:%d.%d to %d:%d.%d\n", p->start_m, p->start_s, p->start_f, p->end_m, p->end_s, p->end_f); if ((p->start_m * 60 * 75 + p->start_s * 75 + p->start_f) >= (p->end_m * 60 * 75 + p->end_s * 75 + p->end_f) || (p->end_m * 60 * 75 + p->end_s * 75 + p->end_f) > M_msf(sc->data.volinfo.vol_msf) * 60 * 75 + S_msf(sc->data.volinfo.vol_msf) * 75 + F_msf(sc->data.volinfo.vol_msf)) return (EINVAL); pb.start_msf[0] = bin2bcd(p->start_m); pb.start_msf[1] = bin2bcd(p->start_s); pb.start_msf[2] = bin2bcd(p->start_f); pb.end_msf[0] = bin2bcd(p->end_m); pb.end_msf[1] = bin2bcd(p->end_s); pb.end_msf[2] = bin2bcd(p->end_f); if (mcd_setmode(sc, MCD_MD_COOKED) != 0) return (EIO); return mcd_play(sc, &pb); } static int mcd_playtracks(struct mcd_softc *sc, struct ioc_play_track *pt) { struct mcd_read2 pb; int a = pt->start_track; int z = pt->end_track; int rc, i; if ((rc = mcd_read_toc(sc)) != 0) return (rc); if (sc->data.debug) device_printf(sc->dev, "playtracks from %d:%d to %d:%d\n", a, pt->start_index, z, pt->end_index); if ( a < bcd2bin(sc->data.volinfo.trk_low) || a > bcd2bin(sc->data.volinfo.trk_high) || a > z || z < bcd2bin(sc->data.volinfo.trk_low) || z > bcd2bin(sc->data.volinfo.trk_high)) return (EINVAL); for (i = 0; i < 3; i++) { pb.start_msf[i] = sc->data.toc[a].hd_pos_msf[i]; pb.end_msf[i] = sc->data.toc[z+1].hd_pos_msf[i]; } if (mcd_setmode(sc, MCD_MD_COOKED) != 0) return (EIO); return mcd_play(sc, &pb); } static int mcd_playblocks(struct mcd_softc *sc, struct ioc_play_blocks *p) { struct mcd_read2 pb; if (sc->data.debug) device_printf(sc->dev, "playblocks: blkno %d length %d\n", p->blk, p->len); if (p->blk > sc->data.disksize || p->len > sc->data.disksize || p->blk < 0 || p->len < 0 || (p->blk + p->len) > sc->data.disksize) return (EINVAL); hsg2msf(p->blk, pb.start_msf); hsg2msf(p->blk + p->len, pb.end_msf); if (mcd_setmode(sc, MCD_MD_COOKED) != 0) return (EIO); return mcd_play(sc, &pb); } static int mcd_play(struct mcd_softc *sc, struct mcd_read2 *pb) { int retry, st = -1, status; sc->data.lastpb = *pb; for(retry=0; retrystart_msf[0]); MCD_WRITE(sc, MCD_REG_COMMAND, pb->start_msf[1]); MCD_WRITE(sc, MCD_REG_COMMAND, pb->start_msf[2]); MCD_WRITE(sc, MCD_REG_COMMAND, pb->end_msf[0]); MCD_WRITE(sc, MCD_REG_COMMAND, pb->end_msf[1]); MCD_WRITE(sc, MCD_REG_COMMAND, pb->end_msf[2]); critical_exit(); status=mcd_getstat(sc, 0); if (status == -1) continue; else if (status != -2) st = 0; break; } if (status == -2) { device_printf(sc->dev, "media changed\n"); return (ENXIO); } if (sc->data.debug) device_printf(sc->dev, "mcd_play retry=%d, status=0x%02x\n", retry, status); if (st < 0) return (ENXIO); sc->data.audio_status = CD_AS_PLAY_IN_PROGRESS; return (0); } static int mcd_pause(struct mcd_softc *sc) { struct mcd_qchninfo q; int rc; /* Verify current status */ if (sc->data.audio_status != CD_AS_PLAY_IN_PROGRESS && sc->data.audio_status != CD_AS_PLAY_PAUSED) { if (sc->data.debug) device_printf(sc->dev, "pause attempted when not playing, audio status %d\n", 
sc->data.audio_status); return (EINVAL); } /* Get the current position */ if (mcd_getqchan(sc, &q) < 0) return (EIO); /* Copy it into lastpb */ sc->data.lastpb.start_msf[0] = q.hd_pos_msf[0]; sc->data.lastpb.start_msf[1] = q.hd_pos_msf[1]; sc->data.lastpb.start_msf[2] = q.hd_pos_msf[2]; /* Stop playing */ if ((rc=mcd_stop(sc)) != 0) return (rc); /* Set the proper status and exit */ sc->data.audio_status = CD_AS_PLAY_PAUSED; return (0); } static int mcd_resume(struct mcd_softc *sc) { if (sc->data.audio_status != CD_AS_PLAY_PAUSED) return (EINVAL); return mcd_play(sc, &sc->data.lastpb); } diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c index c224186045aa..05751ba6dfa0 100644 --- a/sys/dev/mfi/mfi.c +++ b/sys/dev/mfi/mfi.c @@ -1,3805 +1,3805 @@ /*- * Copyright (c) 2006 IronPort Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2007 LSI Corp. * Copyright (c) 2007 Rajesh Prabhakaran. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_mfi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mfi_alloc_commands(struct mfi_softc *); static int mfi_comms_init(struct mfi_softc *); static int mfi_get_controller_info(struct mfi_softc *); static int mfi_get_log_state(struct mfi_softc *, struct mfi_evt_log_state **); static int mfi_parse_entries(struct mfi_softc *, int, int); static void mfi_data_cb(void *, bus_dma_segment_t *, int, int); static void mfi_startup(void *arg); static void mfi_intr(void *arg); static void mfi_ldprobe(struct mfi_softc *sc); static void mfi_syspdprobe(struct mfi_softc *sc); static void mfi_handle_evt(void *context, int pending); static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale); static void mfi_aen_complete(struct mfi_command *); static int mfi_add_ld(struct mfi_softc *sc, int); static void mfi_add_ld_complete(struct mfi_command *); static int mfi_add_sys_pd(struct mfi_softc *sc, int); static void mfi_add_sys_pd_complete(struct mfi_command *); static struct mfi_command * mfi_bio_command(struct mfi_softc *); static void mfi_bio_complete(struct mfi_command *); static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*); static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*); static int mfi_send_frame(struct mfi_softc *, struct mfi_command *); static int mfi_std_send_frame(struct mfi_softc *, struct mfi_command *); static int mfi_abort(struct mfi_softc *, struct mfi_command **); static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *); static void mfi_timeout(void *); static int mfi_user_command(struct mfi_softc *, struct mfi_ioc_passthru *); static void mfi_enable_intr_xscale(struct mfi_softc *sc); static void mfi_enable_intr_ppc(struct mfi_softc *sc); static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc); static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc); static int mfi_check_clear_intr_xscale(struct mfi_softc *sc); static int mfi_check_clear_intr_ppc(struct mfi_softc *sc); static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt); static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt); static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode); static void mfi_config_unlock(struct mfi_softc *sc, int locked); static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm); static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm); static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm); SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters"); static int mfi_event_locale = MFI_EVT_LOCALE_ALL; SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale, 0, "event message locale"); static int mfi_event_class = MFI_EVT_CLASS_INFO; SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class, 0, "event message class"); static int mfi_max_cmds = 128; SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds, 0, "Max commands limit (-1 = controller limit)"); static int mfi_detect_jbod_change = 1; SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN, &mfi_detect_jbod_change, 0, "Detect a change to a JBOD"); int mfi_polled_cmd_timeout = 
MFI_POLL_TIMEOUT_SECS; SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN, &mfi_polled_cmd_timeout, 0, "Polled command timeout - used for firmware flash etc (in seconds)"); static int mfi_cmd_timeout = MFI_CMD_TIMEOUT; SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout, 0, "Command timeout (in seconds)"); /* Management interface */ static d_open_t mfi_open; static d_close_t mfi_close; static d_ioctl_t mfi_ioctl; static d_poll_t mfi_poll; static struct cdevsw mfi_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = mfi_open, .d_close = mfi_close, .d_ioctl = mfi_ioctl, .d_poll = mfi_poll, .d_name = "mfi", }; MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver"); #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH struct mfi_skinny_dma_info mfi_skinny; static void mfi_enable_intr_xscale(struct mfi_softc *sc) { MFI_WRITE4(sc, MFI_OMSK, 0x01); } static void mfi_enable_intr_ppc(struct mfi_softc *sc) { if (sc->mfi_flags & MFI_FLAGS_1078) { MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF); MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM); } else if (sc->mfi_flags & MFI_FLAGS_GEN2) { MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF); MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM); } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) { MFI_WRITE4(sc, MFI_OMSK, ~0x00000001); } } static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc) { return MFI_READ4(sc, MFI_OMSG0); } static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc) { return MFI_READ4(sc, MFI_OSP0); } static int mfi_check_clear_intr_xscale(struct mfi_softc *sc) { int32_t status; status = MFI_READ4(sc, MFI_OSTS); if ((status & MFI_OSTS_INTR_VALID) == 0) return 1; MFI_WRITE4(sc, MFI_OSTS, status); return 0; } static int mfi_check_clear_intr_ppc(struct mfi_softc *sc) { int32_t status; status = MFI_READ4(sc, MFI_OSTS); if (sc->mfi_flags & MFI_FLAGS_1078) { if (!(status & MFI_1078_RM)) { return 1; } } else if (sc->mfi_flags & MFI_FLAGS_GEN2) { if (!(status & MFI_GEN2_RM)) { return 1; } } else if (sc->mfi_flags & MFI_FLAGS_SKINNY) { if (!(status & MFI_SKINNY_RM)) { return 1; } } if (sc->mfi_flags & MFI_FLAGS_SKINNY) MFI_WRITE4(sc, MFI_OSTS, status); else MFI_WRITE4(sc, MFI_ODCR0, status); return 0; } static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt) { MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt); } static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt) { if (sc->mfi_flags & MFI_FLAGS_SKINNY) { MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 ); MFI_WRITE4(sc, MFI_IQPH, 0x00000000); } else { MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 ); } } int mfi_transition_firmware(struct mfi_softc *sc) { uint32_t fw_state, cur_state; int max_wait, i; uint32_t cur_abs_reg_val = 0; uint32_t prev_abs_reg_val = 0; cur_abs_reg_val = sc->mfi_read_fw_status(sc); fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK; while (fw_state != MFI_FWSTATE_READY) { if (bootverbose) device_printf(sc->mfi_dev, "Waiting for firmware to " "become ready\n"); cur_state = fw_state; switch (fw_state) { case MFI_FWSTATE_FAULT: device_printf(sc->mfi_dev, "Firmware fault\n"); return (ENXIO); case MFI_FWSTATE_WAIT_HANDSHAKE: if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT) MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); else MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE); max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_OPERATIONAL: if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT) MFI_WRITE4(sc, MFI_SKINNY_IDB, 7); else 
MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY); max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_UNDEFINED: case MFI_FWSTATE_BB_INIT: max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_FW_INIT_2: max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_FW_INIT: case MFI_FWSTATE_FLUSH_CACHE: max_wait = MFI_RESET_WAIT_TIME; break; case MFI_FWSTATE_DEVICE_SCAN: max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */ prev_abs_reg_val = cur_abs_reg_val; break; case MFI_FWSTATE_BOOT_MESSAGE_PENDING: if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT) MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG); else MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG); max_wait = MFI_RESET_WAIT_TIME; break; default: device_printf(sc->mfi_dev, "Unknown firmware state %#x\n", fw_state); return (ENXIO); } for (i = 0; i < (max_wait * 10); i++) { cur_abs_reg_val = sc->mfi_read_fw_status(sc); fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK; if (fw_state == cur_state) DELAY(100000); else break; } if (fw_state == MFI_FWSTATE_DEVICE_SCAN) { /* Check the device scanning progress */ if (prev_abs_reg_val != cur_abs_reg_val) { continue; } } if (fw_state == cur_state) { device_printf(sc->mfi_dev, "Firmware stuck in state " "%#x\n", fw_state); return (ENXIO); } } return (0); } static void mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *addr; addr = arg; *addr = segs[0].ds_addr; } int mfi_attach(struct mfi_softc *sc) { uint32_t status; int error, commsz, framessz, sensesz; int frames, unit, max_fw_sge, max_fw_cmds; uint32_t tb_mem_size = 0; struct cdev *dev_t; if (sc == NULL) return EINVAL; device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n", MEGASAS_VERSION); mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF); sx_init(&sc->mfi_config_lock, "MFI config"); TAILQ_INIT(&sc->mfi_ld_tqh); TAILQ_INIT(&sc->mfi_syspd_tqh); TAILQ_INIT(&sc->mfi_ld_pend_tqh); TAILQ_INIT(&sc->mfi_syspd_pend_tqh); TAILQ_INIT(&sc->mfi_evt_queue); TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc); TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc); TAILQ_INIT(&sc->mfi_aen_pids); TAILQ_INIT(&sc->mfi_cam_ccbq); mfi_initq_free(sc); mfi_initq_ready(sc); mfi_initq_busy(sc); mfi_initq_bio(sc); sc->adpreset = 0; sc->last_seq_num = 0; sc->disableOnlineCtrlReset = 1; sc->issuepend_done = 1; sc->hw_crit_error = 0; if (sc->mfi_flags & MFI_FLAGS_1064R) { sc->mfi_enable_intr = mfi_enable_intr_xscale; sc->mfi_read_fw_status = mfi_read_fw_status_xscale; sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale; sc->mfi_issue_cmd = mfi_issue_cmd_xscale; } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) { sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc; sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc; sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc; sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc; sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc; sc->mfi_adp_reset = mfi_tbolt_adp_reset; sc->mfi_tbolt = 1; TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh); } else { sc->mfi_enable_intr = mfi_enable_intr_ppc; sc->mfi_read_fw_status = mfi_read_fw_status_ppc; sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc; sc->mfi_issue_cmd = mfi_issue_cmd_ppc; } /* Before we get too far, see if the firmware is working */ if ((error = mfi_transition_firmware(sc)) != 0) { device_printf(sc->mfi_dev, "Firmware not in READY state, " "error %d\n", error); return (ENXIO); } /* Start: LSIP200113393 */ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr 
*/ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */ 1, /* nsegments */ MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->verbuf_h_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf, BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n"); return (ENOMEM); } bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t)); bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap, sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t), mfi_addr_cb, &sc->verbuf_h_busaddr, 0); /* End: LSIP200113393 */ /* * Get information needed for sizing the contiguous memory for the * frame pool. Size down the sgl parameter since we know that * we will never need more than what's required for MAXPHYS. * It would be nice if these constants were available at runtime * instead of compile time. */ status = sc->mfi_read_fw_status(sc); max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK; if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) { device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n", max_fw_cmds, mfi_max_cmds); sc->mfi_max_fw_cmds = mfi_max_cmds; } else { sc->mfi_max_fw_cmds = max_fw_cmds; } max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16; sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1)); /* ThunderBolt support: get the contiguous memory */ if (sc->mfi_flags & MFI_FLAGS_TBOLT) { mfi_tbolt_init_globals(sc); device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, " "MaxSgl = %d, state = %#x\n", max_fw_cmds, sc->mfi_max_fw_cmds, sc->mfi_max_sge, status); tb_mem_size = mfi_tbolt_get_memory_requirement(sc); if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ tb_mem_size, /* maxsize */ 1, /* nsegments */ tb_mem_size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_tb_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool, BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); return (ENOMEM); } bzero(sc->request_message_pool, tb_mem_size); bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap, sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0); /* For ThunderBolt memory init */ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 0x100, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MFI_FRAME_SIZE, /* maxsize */ 1, /* nsegments */ MFI_FRAME_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_tb_init_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init, BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate init memory\n"); return (ENOMEM); } bzero(sc->mfi_tb_init, MFI_FRAME_SIZE); bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap, sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb, &sc->mfi_tb_init_busaddr, 0); if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool, tb_mem_size)) {
device_printf(sc->mfi_dev, "Thunderbolt pool preparation error\n"); return (ENOMEM); } /* Allocate the DMA memory mapping for the MPI2 IOC Init descriptor; we keep it separate from what we have allocated for the request and reply descriptors to avoid confusion later */ tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST); if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ tb_mem_size, /* maxsize */ 1, /* nsegments */ tb_mem_size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_tb_ioc_init_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat, (void **)&sc->mfi_tb_ioc_init_desc, BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); return (ENOMEM); } bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size); bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap, sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_ioc_init_busaddr, 0); } /* * Create the dma tag for data buffers. Used both for block I/O * and for various internal data queries. */ if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ sc->mfi_max_sge, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->mfi_io_lock, /* lockfuncarg */ &sc->mfi_buffer_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n"); return (ENOMEM); } /* * Allocate DMA memory for the comms queues. Keep it under 4GB for * efficiency. The mfi_hwcomms struct includes space for 1 reply queue * entry, so the calculated size here will be 1 more than * mfi_max_fw_cmds. This is apparently a requirement of the hardware. */ commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) + sizeof(struct mfi_hwcomms); if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ commsz, /* maxsize */ 1, /* nsegments */ commsz, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_comms_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms, BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate comms memory\n"); return (ENOMEM); } bzero(sc->mfi_comms, commsz); bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap, sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0); /* * Allocate DMA memory for the command frames. Keep them in the * lower 4GB for efficiency. Calculate the size of the commands at * the same time; each command is one 64 byte frame plus a set of * additional frames for holding sg lists or other data. * The assumption here is that the SG list will start at the second * frame and not use the unused bytes in the first frame. While this * isn't technically correct, it simplifies the calculation and allows * for command frames that might be larger than an mfi_io_frame.
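* For example, with 64-bit s/g entries (12 bytes each in this driver's * packed layout) and 33 scatter/gather segments, the calculation below * gives (12 * 33 - 1) / 64 + 2 = 8 frames, i.e. a 512 byte command slot; * the exact figures depend on the firmware's SGL limit and on MFI_MAXPHYS.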
*/ if (sizeof(bus_addr_t) == 8) { sc->mfi_sge_size = sizeof(struct mfi_sg64); sc->mfi_flags |= MFI_FLAGS_SG64; } else { sc->mfi_sge_size = sizeof(struct mfi_sg32); } if (sc->mfi_flags & MFI_FLAGS_SKINNY) sc->mfi_sge_size = sizeof(struct mfi_sg_skinny); frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2; sc->mfi_cmd_size = frames * MFI_FRAME_SIZE; framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds; if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 64, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ framessz, /* maxsize */ 1, /* nsegments */ framessz, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_frames_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames, BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate frames memory\n"); return (ENOMEM); } bzero(sc->mfi_frames, framessz); bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap, sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0); /* * Allocate DMA memory for the frame sense data. Keep them in the * lower 4GB for efficiency. */ sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN; if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 4, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sensesz, /* maxsize */ 1, /* nsegments */ sensesz, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_sense_dmat)) { device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense, BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) { device_printf(sc->mfi_dev, "Cannot allocate sense memory\n"); return (ENOMEM); } bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap, sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0); if ((error = mfi_alloc_commands(sc)) != 0) return (error); /* Before moving the FW to operational state, check whether * host memory is required by the FW or not */ /* ThunderBolt MFI_IOC2 INIT */ if (sc->mfi_flags & MFI_FLAGS_TBOLT) { sc->mfi_disable_intr(sc); mtx_lock(&sc->mfi_io_lock); if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) { device_printf(sc->mfi_dev, "TB Init has failed with error %d\n", error); mtx_unlock(&sc->mfi_io_lock); return (error); } mtx_unlock(&sc->mfi_io_lock); if ((error = mfi_tbolt_alloc_cmd(sc)) != 0) return (error); if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc, &sc->mfi_intr)) { device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); return (EINVAL); } sc->mfi_intr_ptr = mfi_intr_tbolt; sc->mfi_enable_intr(sc); } else { if ((error = mfi_comms_init(sc)) != 0) return (error); if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) { device_printf(sc->mfi_dev, "Cannot set up interrupt\n"); return (EINVAL); } sc->mfi_intr_ptr = mfi_intr; sc->mfi_enable_intr(sc); } if ((error = mfi_get_controller_info(sc)) != 0) return (error); sc->disableOnlineCtrlReset = 0; /* Register a config hook to probe the bus for arrays */ sc->mfi_ich.ich_func = mfi_startup; sc->mfi_ich.ich_arg = sc; if (config_intrhook_establish(&sc->mfi_ich) != 0) { device_printf(sc->mfi_dev, "Cannot establish configuration " "hook\n"); return (EINVAL); }
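/* * Replay any events the firmware logged across the last shutdown and * arm an asynchronous event notification (AEN) command so hot-plug and * state-change events are reported as they occur; see mfi_aen_setup() * and mfi_aen_register() below. */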
mtx_lock(&sc->mfi_io_lock); if ((error = mfi_aen_setup(sc, 0)) != 0) { mtx_unlock(&sc->mfi_io_lock); return (error); } mtx_unlock(&sc->mfi_io_lock); /* * Register a shutdown handler. */ if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown, sc, SHUTDOWN_PRI_DEFAULT)) == NULL) { device_printf(sc->mfi_dev, "Warning: shutdown event " "registration failed\n"); } /* * Create the control device for doing management. */ unit = device_get_unit(sc->mfi_dev); sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, "mfi%d", unit); if (unit == 0) make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t, sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node"); if (sc->mfi_cdev != NULL) sc->mfi_cdev->si_drv1 = sc; SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), OID_AUTO, "delete_busy_volumes", CTLFLAG_RW, &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes"); SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)), OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW, &sc->mfi_keep_deleted_volumes, 0, "Don't detach the mfid device for a busy volume that is deleted"); device_add_child(sc->mfi_dev, "mfip", -1); bus_generic_attach(sc->mfi_dev); /* Start the timeout watchdog */ callout_init(&sc->mfi_watchdog_callout, 1); callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz, mfi_timeout, sc); if (sc->mfi_flags & MFI_FLAGS_TBOLT) { mtx_lock(&sc->mfi_io_lock); mfi_tbolt_sync_map_info(sc); mtx_unlock(&sc->mfi_io_lock); } return (0); } static int mfi_alloc_commands(struct mfi_softc *sc) { struct mfi_command *cm; int i, j; /* * XXX Should we allocate all the commands up front, or allocate on * demand later like 'aac' does? */ sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) * sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO); for (i = 0; i < sc->mfi_max_fw_cmds; i++) { cm = &sc->mfi_commands[i]; cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames + sc->mfi_cmd_size * i); cm->cm_frame_busaddr = sc->mfi_frames_busaddr + sc->mfi_cmd_size * i; cm->cm_frame->header.context = i; cm->cm_sense = &sc->mfi_sense[i]; cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i; cm->cm_sc = sc; cm->cm_index = i; if (bus_dmamap_create(sc->mfi_buffer_dmat, 0, &cm->cm_dmamap) == 0) { mtx_lock(&sc->mfi_io_lock); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); } else { device_printf(sc->mfi_dev, "Failed to allocate %d " "command blocks, only allocated %d\n", sc->mfi_max_fw_cmds, i); for (j = 0; j < i; j++) { cm = &sc->mfi_commands[j]; bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap); } free(sc->mfi_commands, M_MFIBUF); sc->mfi_commands = NULL; return (ENOMEM); } } return (0); } void mfi_release_command(struct mfi_command *cm) { struct mfi_frame_header *hdr; uint32_t *hdr_data; mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED); /* * Zero out the important fields of the frame, but make sure the * context field is preserved. For efficiency, handle the fields * as 32 bit words. Clear out the first S/G entry too for safety. */ hdr = &cm->cm_frame->header; if (cm->cm_data != NULL && hdr->sg_count) { cm->cm_sg->sg32[0].len = 0; cm->cm_sg->sg32[0].addr = 0; } /* * Command may be on other queues e.g.
busy queue depending on the * flow of a previous call to mfi_mapcmd, so ensure it's dequeued * properly */ if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) mfi_remove_busy(cm); if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0) mfi_remove_ready(cm); /* We're not expecting it to be on any other queue, but check */ if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) { panic("Command %p is still on another queue, flags = %#x", cm, cm->cm_flags); } /* tbolt cleanup */ if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) { mfi_tbolt_return_cmd(cm->cm_sc, cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1], cm); } hdr_data = (uint32_t *)cm->cm_frame; hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */ hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */ hdr_data[4] = 0; /* flags, timeout */ hdr_data[5] = 0; /* data_len */ cm->cm_extra_frames = 0; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_private = NULL; cm->cm_data = NULL; cm->cm_sg = NULL; cm->cm_total_frame_size = 0; cm->retry_for_fw_reset = 0; mfi_enqueue_free(cm); } int mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode, void **bufp, size_t bufsize) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd; void *buf = NULL; uint32_t context = 0; mtx_assert(&sc->mfi_io_lock, MA_OWNED); cm = mfi_dequeue_free(sc); if (cm == NULL) return (EBUSY); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; if ((bufsize > 0) && (bufp != NULL)) { if (*bufp == NULL) { buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO); if (buf == NULL) { mfi_release_command(cm); return (ENOMEM); } *bufp = buf; } else { buf = *bufp; } } dcmd = &cm->cm_frame->dcmd; bzero(dcmd->mbox, MFI_MBOX_SIZE); dcmd->header.cmd = MFI_CMD_DCMD; dcmd->header.timeout = 0; dcmd->header.flags = 0; dcmd->header.data_len = bufsize; dcmd->header.scsi_status = 0; dcmd->opcode = opcode; cm->cm_sg = &dcmd->sgl; cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; cm->cm_flags = 0; cm->cm_data = buf; cm->cm_private = buf; cm->cm_len = bufsize; *cmp = cm; if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) *bufp = buf; return (0); } static int mfi_comms_init(struct mfi_softc *sc) { struct mfi_command *cm; struct mfi_init_frame *init; struct mfi_init_qinfo *qinfo; int error; uint32_t context = 0; mtx_lock(&sc->mfi_io_lock); if ((cm = mfi_dequeue_free(sc)) == NULL) { mtx_unlock(&sc->mfi_io_lock); return (EBUSY); } /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; /* * Abuse the SG list area of the frame to hold the init_qinfo * object. */ init = &cm->cm_frame->init; qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE); bzero(qinfo, sizeof(struct mfi_init_qinfo)); qinfo->rq_entries = sc->mfi_max_fw_cmds + 1; qinfo->rq_addr_lo = sc->mfi_comms_busaddr + offsetof(struct mfi_hwcomms, hw_reply_q); qinfo->pi_addr_lo = sc->mfi_comms_busaddr + offsetof(struct mfi_hwcomms, hw_pi); qinfo->ci_addr_lo = sc->mfi_comms_busaddr + offsetof(struct mfi_hwcomms, hw_ci); init->header.cmd = MFI_CMD_INIT; init->header.data_len = sizeof(struct mfi_init_qinfo); init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE; cm->cm_data = NULL; cm->cm_flags = MFI_CMD_POLLED; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "failed to send init command\n"); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); return (error); } static int mfi_get_controller_info(struct
mfi_softc *sc) { struct mfi_command *cm = NULL; struct mfi_ctrl_info *ci = NULL; uint32_t max_sectors_1, max_sectors_2; int error; mtx_lock(&sc->mfi_io_lock); error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO, (void **)&ci, sizeof(*ci)); if (error) goto out; cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; if ((error = mfi_mapcmd(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Failed to get controller info\n"); sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE / MFI_SECTOR_LEN; error = 0; goto out; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io; max_sectors_2 = ci->max_request_size; sc->mfi_max_io = min(max_sectors_1, max_sectors_2); sc->disableOnlineCtrlReset = ci->properties.OnOffProperties.disableOnlineCtrlReset; out: if (ci) free(ci, M_MFIBUF); if (cm) mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); return (error); } static int mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state) { struct mfi_command *cm = NULL; int error; mtx_assert(&sc->mfi_io_lock, MA_OWNED); error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO, (void **)log_state, sizeof(**log_state)); if (error) goto out; cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; if ((error = mfi_mapcmd(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Failed to get log state\n"); goto out; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); out: if (cm) mfi_release_command(cm); return (error); } int mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start) { struct mfi_evt_log_state *log_state = NULL; union mfi_evt class_locale; int error = 0; uint32_t seq; mtx_assert(&sc->mfi_io_lock, MA_OWNED); class_locale.members.reserved = 0; class_locale.members.locale = mfi_event_locale; class_locale.members.evt_class = mfi_event_class; if (seq_start == 0) { if ((error = mfi_get_log_state(sc, &log_state)) != 0) goto out; sc->mfi_boot_seq_num = log_state->boot_seq_num; /* * Walk through any events that fired since the last * shutdown. */ if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num, log_state->newest_seq_num)) != 0) goto out; seq = log_state->newest_seq_num; } else seq = seq_start; error = mfi_aen_register(sc, seq, class_locale.word); out: free(log_state, M_MFIBUF); return (error); } int mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm) { mtx_assert(&sc->mfi_io_lock, MA_OWNED); cm->cm_complete = NULL; /* * MegaCli can issue a DCMD of 0. 
In this case do nothing * and return 0 to it as status */ if (cm->cm_frame->dcmd.opcode == 0) { cm->cm_frame->header.cmd_status = MFI_STAT_OK; cm->cm_error = 0; return (cm->cm_error); } mfi_enqueue_ready(cm); mfi_startio(sc); if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0) msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0); return (cm->cm_error); } void mfi_free(struct mfi_softc *sc) { struct mfi_command *cm; int i; callout_drain(&sc->mfi_watchdog_callout); if (sc->mfi_cdev != NULL) destroy_dev(sc->mfi_cdev); if (sc->mfi_commands != NULL) { for (i = 0; i < sc->mfi_max_fw_cmds; i++) { cm = &sc->mfi_commands[i]; bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap); } free(sc->mfi_commands, M_MFIBUF); sc->mfi_commands = NULL; } if (sc->mfi_intr) bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr); if (sc->mfi_irq != NULL) bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid, sc->mfi_irq); if (sc->mfi_sense_busaddr != 0) bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap); if (sc->mfi_sense != NULL) bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense, sc->mfi_sense_dmamap); if (sc->mfi_sense_dmat != NULL) bus_dma_tag_destroy(sc->mfi_sense_dmat); if (sc->mfi_frames_busaddr != 0) bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap); if (sc->mfi_frames != NULL) bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames, sc->mfi_frames_dmamap); if (sc->mfi_frames_dmat != NULL) bus_dma_tag_destroy(sc->mfi_frames_dmat); if (sc->mfi_comms_busaddr != 0) bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap); if (sc->mfi_comms != NULL) bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms, sc->mfi_comms_dmamap); if (sc->mfi_comms_dmat != NULL) bus_dma_tag_destroy(sc->mfi_comms_dmat); /* ThunderBolt contiguous memory free here */ if (sc->mfi_flags & MFI_FLAGS_TBOLT) { if (sc->mfi_tb_busaddr != 0) bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap); if (sc->request_message_pool != NULL) bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool, sc->mfi_tb_dmamap); if (sc->mfi_tb_dmat != NULL) bus_dma_tag_destroy(sc->mfi_tb_dmat); /* Version buffer memory free */ /* Start LSIP200113393 */ if (sc->verbuf_h_busaddr != 0) bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap); if (sc->verbuf != NULL) bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf, sc->verbuf_h_dmamap); if (sc->verbuf_h_dmat != NULL) bus_dma_tag_destroy(sc->verbuf_h_dmat); /* End LSIP200113393 */ /* ThunderBolt INIT packet memory Free */ if (sc->mfi_tb_init_busaddr != 0) bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap); if (sc->mfi_tb_init != NULL) bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init, sc->mfi_tb_init_dmamap); if (sc->mfi_tb_init_dmat != NULL) bus_dma_tag_destroy(sc->mfi_tb_init_dmat); /* ThunderBolt IOC Init Desc memory free here */ if (sc->mfi_tb_ioc_init_busaddr != 0) bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap); if (sc->mfi_tb_ioc_init_desc != NULL) bus_dmamem_free(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_desc, sc->mfi_tb_ioc_init_dmamap); if (sc->mfi_tb_ioc_init_dmat != NULL) bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat); if (sc->mfi_cmd_pool_tbolt != NULL) { for (int i = 0; i < sc->mfi_max_fw_cmds; i++) { if (sc->mfi_cmd_pool_tbolt[i] != NULL) { free(sc->mfi_cmd_pool_tbolt[i], M_MFIBUF); sc->mfi_cmd_pool_tbolt[i] = NULL; } } free(sc->mfi_cmd_pool_tbolt, M_MFIBUF); sc->mfi_cmd_pool_tbolt = NULL; } if (sc->request_desc_pool != NULL) { free(sc->request_desc_pool, M_MFIBUF); sc->request_desc_pool = NULL; } } if (sc->mfi_buffer_dmat != NULL) 
bus_dma_tag_destroy(sc->mfi_buffer_dmat); if (sc->mfi_parent_dmat != NULL) bus_dma_tag_destroy(sc->mfi_parent_dmat); if (mtx_initialized(&sc->mfi_io_lock)) { mtx_destroy(&sc->mfi_io_lock); sx_destroy(&sc->mfi_config_lock); } return; } static void mfi_startup(void *arg) { struct mfi_softc *sc; sc = (struct mfi_softc *)arg; config_intrhook_disestablish(&sc->mfi_ich); sc->mfi_enable_intr(sc); sx_xlock(&sc->mfi_config_lock); mtx_lock(&sc->mfi_io_lock); mfi_ldprobe(sc); if (sc->mfi_flags & MFI_FLAGS_SKINNY) mfi_syspdprobe(sc); mtx_unlock(&sc->mfi_io_lock); sx_xunlock(&sc->mfi_config_lock); } static void mfi_intr(void *arg) { struct mfi_softc *sc; struct mfi_command *cm; uint32_t pi, ci, context; sc = (struct mfi_softc *)arg; if (sc->mfi_check_clear_intr(sc)) return; restart: pi = sc->mfi_comms->hw_pi; ci = sc->mfi_comms->hw_ci; mtx_lock(&sc->mfi_io_lock); while (ci != pi) { context = sc->mfi_comms->hw_reply_q[ci]; if (context < sc->mfi_max_fw_cmds) { cm = &sc->mfi_commands[context]; mfi_remove_busy(cm); cm->cm_error = 0; mfi_complete(sc, cm); } if (++ci == (sc->mfi_max_fw_cmds + 1)) ci = 0; } sc->mfi_comms->hw_ci = ci; /* Give deferred I/O a chance to run */ sc->mfi_flags &= ~MFI_FLAGS_QFRZN; mfi_startio(sc); mtx_unlock(&sc->mfi_io_lock); /* * Dummy read to flush the bus; this ensures that the indexes are up * to date. Restart processing if more commands have come in. */ (void)sc->mfi_read_fw_status(sc); if (pi != sc->mfi_comms->hw_pi) goto restart; return; } int mfi_shutdown(struct mfi_softc *sc) { struct mfi_dcmd_frame *dcmd; struct mfi_command *cm; int error; if (sc->mfi_aen_cm != NULL) { sc->cm_aen_abort = 1; mfi_abort(sc, &sc->mfi_aen_cm); } if (sc->mfi_map_sync_cm != NULL) { sc->cm_map_abort = 1; mfi_abort(sc, &sc->mfi_map_sync_cm); } mtx_lock(&sc->mfi_io_lock); error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0); if (error) { mtx_unlock(&sc->mfi_io_lock); return (error); } dcmd = &cm->cm_frame->dcmd; dcmd->header.flags = MFI_FRAME_DIR_NONE; cm->cm_flags = MFI_CMD_POLLED; cm->cm_data = NULL; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "Failed to shut down controller\n"); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); return (error); } static void mfi_syspdprobe(struct mfi_softc *sc) { struct mfi_frame_header *hdr; struct mfi_command *cm = NULL; struct mfi_pd_list *pdlist = NULL; struct mfi_system_pd *syspd, *tmp; struct mfi_system_pending *syspd_pend; int error, i, found; sx_assert(&sc->mfi_config_lock, SA_XLOCKED); mtx_assert(&sc->mfi_io_lock, MA_OWNED); /* Add SYSTEM PD's */ error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY, (void **)&pdlist, sizeof(*pdlist)); if (error) { device_printf(sc->mfi_dev, "Error while forming SYSTEM PD list\n"); goto out; } cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; cm->cm_frame->dcmd.mbox[1] = 0; if (mfi_mapcmd(sc, cm) != 0) { device_printf(sc->mfi_dev, "Failed to get syspd device listing\n"); goto out; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); hdr = &cm->cm_frame->header; if (hdr->cmd_status != MFI_STAT_OK) { device_printf(sc->mfi_dev, "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status); goto out; } /* Get each PD and add it to the system */ for (i = 0; i < pdlist->count; i++) { if (pdlist->addr[i].device_id == pdlist->addr[i].encl_device_id) continue; found = 0; TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { if (syspd->pd_id ==
pdlist->addr[i].device_id) found = 1; } TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) { if (syspd_pend->pd_id == pdlist->addr[i].device_id) found = 1; } if (found == 0) mfi_add_sys_pd(sc, pdlist->addr[i].device_id); } /* Delete SYSPD's whose state has been changed */ TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) { found = 0; for (i = 0; i < pdlist->count; i++) { if (syspd->pd_id == pdlist->addr[i].device_id) { found = 1; break; } } if (found == 0) { printf("DELETE\n"); mtx_unlock(&sc->mfi_io_lock); mtx_lock(&Giant); device_delete_child(sc->mfi_dev, syspd->pd_dev); mtx_unlock(&Giant); mtx_lock(&sc->mfi_io_lock); } } out: if (pdlist) free(pdlist, M_MFIBUF); if (cm) mfi_release_command(cm); return; } static void mfi_ldprobe(struct mfi_softc *sc) { struct mfi_frame_header *hdr; struct mfi_command *cm = NULL; struct mfi_ld_list *list = NULL; struct mfi_disk *ld; struct mfi_disk_pending *ld_pend; int error, i; sx_assert(&sc->mfi_config_lock, SA_XLOCKED); mtx_assert(&sc->mfi_io_lock, MA_OWNED); error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST, (void **)&list, sizeof(*list)); if (error) goto out; cm->cm_flags = MFI_CMD_DATAIN; if (mfi_wait_command(sc, cm) != 0) { device_printf(sc->mfi_dev, "Failed to get device listing\n"); goto out; } hdr = &cm->cm_frame->header; if (hdr->cmd_status != MFI_STAT_OK) { device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n", hdr->cmd_status); goto out; } for (i = 0; i < list->ld_count; i++) { TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { if (ld->ld_id == list->ld_list[i].ld.v.target_id) goto skip_add; } TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) { if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id) goto skip_add; } mfi_add_ld(sc, list->ld_list[i].ld.v.target_id); skip_add:; } out: if (list) free(list, M_MFIBUF); if (cm) mfi_release_command(cm); return; } /* * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If * the bits in 24-31 are all set, then it is the number of seconds since * boot. 
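* For example, 0xff00003c is formatted as "boot + 60s" below, while a * plain 60 is formatted as "60s".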
*/ static const char * format_timestamp(uint32_t timestamp) { static char buffer[32]; if ((timestamp & 0xff000000) == 0xff000000) snprintf(buffer, sizeof(buffer), "boot + %us", timestamp & 0x00ffffff); else snprintf(buffer, sizeof(buffer), "%us", timestamp); return (buffer); } static const char * format_class(int8_t class) { static char buffer[6]; switch (class) { case MFI_EVT_CLASS_DEBUG: return ("debug"); case MFI_EVT_CLASS_PROGRESS: return ("progress"); case MFI_EVT_CLASS_INFO: return ("info"); case MFI_EVT_CLASS_WARNING: return ("WARN"); case MFI_EVT_CLASS_CRITICAL: return ("CRIT"); case MFI_EVT_CLASS_FATAL: return ("FATAL"); case MFI_EVT_CLASS_DEAD: return ("DEAD"); default: snprintf(buffer, sizeof(buffer), "%d", class); return (buffer); } } static void mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail) { struct mfi_system_pd *syspd = NULL; device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq, format_timestamp(detail->time), detail->evt_class.members.locale, format_class(detail->evt_class.members.evt_class), detail->description); /* Don't act on old AEN's or while shutting down */ if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching) return; switch (detail->arg_type) { case MR_EVT_ARGS_NONE: if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) { device_printf(sc->mfi_dev, "HostBus scan raised\n"); if (mfi_detect_jbod_change) { /* * Probe for new SYSPD's and delete * invalid SYSPD's */ sx_xlock(&sc->mfi_config_lock); mtx_lock(&sc->mfi_io_lock); mfi_syspdprobe(sc); mtx_unlock(&sc->mfi_io_lock); sx_xunlock(&sc->mfi_config_lock); } } break; case MR_EVT_ARGS_LD_STATE: /* During load time the driver reads all the events starting * from the one that was logged after shutdown. Avoid * these old events. */ if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) { /* Remove the LD */ struct mfi_disk *ld; TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { if (ld->ld_id == detail->args.ld_state.ld.target_id) break; } /* Fix: for kernel panics when SSCD is removed KASSERT(ld != NULL, ("volume disappeared")); */ if (ld != NULL) { mtx_lock(&Giant); device_delete_child(sc->mfi_dev, ld->ld_dev); mtx_unlock(&Giant); } } break; case MR_EVT_ARGS_PD: if (detail->code == MR_EVT_PD_REMOVED) { if (mfi_detect_jbod_change) { /* * If the removed device is a SYSPD then * delete it */ TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { if (syspd->pd_id == detail->args.pd.device_id) { mtx_lock(&Giant); device_delete_child( sc->mfi_dev, syspd->pd_dev); mtx_unlock(&Giant); break; } } } } if (detail->code == MR_EVT_PD_INSERTED) { if (mfi_detect_jbod_change) { /* Probe for new SYSPD's */ sx_xlock(&sc->mfi_config_lock); mtx_lock(&sc->mfi_io_lock); mfi_syspdprobe(sc); mtx_unlock(&sc->mfi_io_lock); sx_xunlock(&sc->mfi_config_lock); } } if (sc->mfi_cam_rescan_cb != NULL && (detail->code == MR_EVT_PD_INSERTED || detail->code == MR_EVT_PD_REMOVED)) { sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id); } break; } } static void mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail) { struct mfi_evt_queue_elm *elm; mtx_assert(&sc->mfi_io_lock, MA_OWNED); elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO); if (elm == NULL) return; memcpy(&elm->detail, detail, sizeof(*detail)); TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link); taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task); } static void mfi_handle_evt(void *context, int pending) { TAILQ_HEAD(,mfi_evt_queue_elm) queue; struct mfi_softc *sc; struct mfi_evt_queue_elm *elm; sc = context; TAILQ_INIT(&queue);
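/* * Splice the pending events onto a local queue under the I/O lock, then * decode them after dropping it; mfi_decode_evt() may need Giant and the * config lock to attach or detach child devices. */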
mtx_lock(&sc->mfi_io_lock); TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link); mtx_unlock(&sc->mfi_io_lock); while ((elm = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, elm, link); mfi_decode_evt(sc, &elm->detail); free(elm, M_MFIBUF); } } static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd; union mfi_evt current_aen, prior_aen; struct mfi_evt_detail *ed = NULL; int error = 0; mtx_assert(&sc->mfi_io_lock, MA_OWNED); current_aen.word = locale; if (sc->mfi_aen_cm != NULL) { prior_aen.word = ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1]; if (prior_aen.members.evt_class <= current_aen.members.evt_class && !((prior_aen.members.locale & current_aen.members.locale) ^current_aen.members.locale)) { return (0); } else { prior_aen.members.locale |= current_aen.members.locale; if (prior_aen.members.evt_class < current_aen.members.evt_class) current_aen.members.evt_class = prior_aen.members.evt_class; mfi_abort(sc, &sc->mfi_aen_cm); } } error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT, (void **)&ed, sizeof(*ed)); if (error) goto out; dcmd = &cm->cm_frame->dcmd; ((uint32_t *)&dcmd->mbox)[0] = seq; ((uint32_t *)&dcmd->mbox)[1] = locale; cm->cm_flags = MFI_CMD_DATAIN; cm->cm_complete = mfi_aen_complete; sc->last_seq_num = seq; sc->mfi_aen_cm = cm; mfi_enqueue_ready(cm); mfi_startio(sc); out: return (error); } static void mfi_aen_complete(struct mfi_command *cm) { struct mfi_frame_header *hdr; struct mfi_softc *sc; struct mfi_evt_detail *detail; struct mfi_aen *mfi_aen_entry, *tmp; int seq = 0, aborted = 0; sc = cm->cm_sc; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if (sc->mfi_aen_cm == NULL) return; hdr = &cm->cm_frame->header; if (sc->cm_aen_abort || hdr->cmd_status == MFI_STAT_INVALID_STATUS) { sc->cm_aen_abort = 0; aborted = 1; } else { sc->mfi_aen_triggered = 1; if (sc->mfi_poll_waiting) { sc->mfi_poll_waiting = 0; selwakeup(&sc->mfi_select); } detail = cm->cm_data; mfi_queue_evt(sc, detail); seq = detail->seq + 1; TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, aen_link); PROC_LOCK(mfi_aen_entry->p); kern_psignal(mfi_aen_entry->p, SIGIO); PROC_UNLOCK(mfi_aen_entry->p); free(mfi_aen_entry, M_MFIBUF); } } free(cm->cm_data, M_MFIBUF); wakeup(&sc->mfi_aen_cm); sc->mfi_aen_cm = NULL; mfi_release_command(cm); /* set it up again so the driver can catch more events */ if (!aborted) mfi_aen_setup(sc, seq); } #define MAX_EVENTS 15 static int mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd; struct mfi_evt_list *el; union mfi_evt class_locale; int error, i, seq, size; mtx_assert(&sc->mfi_io_lock, MA_OWNED); class_locale.members.reserved = 0; class_locale.members.locale = mfi_event_locale; class_locale.members.evt_class = mfi_event_class; size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail) * (MAX_EVENTS - 1); el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO); if (el == NULL) return (ENOMEM); for (seq = start_seq;;) { if ((cm = mfi_dequeue_free(sc)) == NULL) { free(el, M_MFIBUF); return (EBUSY); } dcmd = &cm->cm_frame->dcmd; bzero(dcmd->mbox, MFI_MBOX_SIZE); dcmd->header.cmd = MFI_CMD_DCMD; dcmd->header.timeout = 0; dcmd->header.data_len = size; dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET; ((uint32_t *)&dcmd->mbox)[0] = seq; ((uint32_t *)&dcmd->mbox)[1] = class_locale.word; cm->cm_sg = &dcmd->sgl; cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; cm->cm_flags = MFI_CMD_DATAIN | 
MFI_CMD_POLLED; cm->cm_data = el; cm->cm_len = size; if ((error = mfi_mapcmd(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Failed to get controller entries\n"); mfi_release_command(cm); break; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) { mfi_release_command(cm); break; } if (dcmd->header.cmd_status != MFI_STAT_OK) { device_printf(sc->mfi_dev, "Error %d fetching controller entries\n", dcmd->header.cmd_status); mfi_release_command(cm); error = EIO; break; } mfi_release_command(cm); for (i = 0; i < el->count; i++) { /* * If this event is newer than 'stop_seq' then * break out of the loop. Note that the log * is a circular buffer so we have to handle * the case that our stop point is earlier in * the buffer than our start point. */ if (el->event[i].seq >= stop_seq) { if (start_seq <= stop_seq) break; else if (el->event[i].seq < start_seq) break; } mfi_queue_evt(sc, &el->event[i]); } seq = el->event[el->count - 1].seq + 1; } free(el, M_MFIBUF); return (error); } static int mfi_add_ld(struct mfi_softc *sc, int id) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd = NULL; struct mfi_ld_info *ld_info = NULL; struct mfi_disk_pending *ld_pend; int error; mtx_assert(&sc->mfi_io_lock, MA_OWNED); ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO); if (ld_pend != NULL) { ld_pend->ld_id = id; TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link); } error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO, (void **)&ld_info, sizeof(*ld_info)); if (error) { device_printf(sc->mfi_dev, "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error); if (ld_info) free(ld_info, M_MFIBUF); return (error); } cm->cm_flags = MFI_CMD_DATAIN; dcmd = &cm->cm_frame->dcmd; dcmd->mbox[0] = id; if (mfi_wait_command(sc, cm) != 0) { device_printf(sc->mfi_dev, "Failed to get logical drive: %d\n", id); free(ld_info, M_MFIBUF); return (0); } if (ld_info->ld_config.params.isSSCD != 1) mfi_add_ld_complete(cm); else { mfi_release_command(cm); if (ld_info) /* SSCD drives ld_info free here */ free(ld_info, M_MFIBUF); } return (0); } static void mfi_add_ld_complete(struct mfi_command *cm) { struct mfi_frame_header *hdr; struct mfi_ld_info *ld_info; struct mfi_softc *sc; device_t child; sc = cm->cm_sc; hdr = &cm->cm_frame->header; ld_info = cm->cm_private; if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) { free(ld_info, M_MFIBUF); wakeup(&sc->mfi_map_sync_cm); mfi_release_command(cm); return; } wakeup(&sc->mfi_map_sync_cm); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); mtx_lock(&Giant); if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) { device_printf(sc->mfi_dev, "Failed to add logical disk\n"); free(ld_info, M_MFIBUF); mtx_unlock(&Giant); mtx_lock(&sc->mfi_io_lock); return; } device_set_ivars(child, ld_info); device_set_desc(child, "MFI Logical Disk"); bus_generic_attach(sc->mfi_dev); mtx_unlock(&Giant); mtx_lock(&sc->mfi_io_lock); } static int mfi_add_sys_pd(struct mfi_softc *sc, int id) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd = NULL; struct mfi_pd_info *pd_info = NULL; struct mfi_system_pending *syspd_pend; int error; mtx_assert(&sc->mfi_io_lock, MA_OWNED); syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO); if (syspd_pend != NULL) { syspd_pend->pd_id = id; TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link); } error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO, (void **)&pd_info, sizeof(*pd_info)); 
if (error) { device_printf(sc->mfi_dev, "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n", error); if (pd_info) free(pd_info, M_MFIBUF); return (error); } cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED; dcmd = &cm->cm_frame->dcmd; dcmd->mbox[0] = id; dcmd->header.scsi_status = 0; dcmd->header.pad0 = 0; if ((error = mfi_mapcmd(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Failed to get physical drive info %d\n", id); free(pd_info, M_MFIBUF); mfi_release_command(cm); return (error); } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); mfi_add_sys_pd_complete(cm); return (0); } static void mfi_add_sys_pd_complete(struct mfi_command *cm) { struct mfi_frame_header *hdr; struct mfi_pd_info *pd_info; struct mfi_softc *sc; device_t child; sc = cm->cm_sc; hdr = &cm->cm_frame->header; pd_info = cm->cm_private; if (hdr->cmd_status != MFI_STAT_OK) { free(pd_info, M_MFIBUF); mfi_release_command(cm); return; } if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) { device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n", pd_info->ref.v.device_id); free(pd_info, M_MFIBUF); mfi_release_command(cm); return; } mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); mtx_lock(&Giant); if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) { device_printf(sc->mfi_dev, "Failed to add system pd\n"); free(pd_info, M_MFIBUF); mtx_unlock(&Giant); mtx_lock(&sc->mfi_io_lock); return; } device_set_ivars(child, pd_info); device_set_desc(child, "MFI System PD"); bus_generic_attach(sc->mfi_dev); mtx_unlock(&Giant); mtx_lock(&sc->mfi_io_lock); } static struct mfi_command * mfi_bio_command(struct mfi_softc *sc) { struct bio *bio; struct mfi_command *cm = NULL; /* Reserve two commands to avoid starvation of ioctls */ if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) { return (NULL); } if ((bio = mfi_dequeue_bio(sc)) == NULL) { return (NULL); } if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) { cm = mfi_build_ldio(sc, bio); } else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) { cm = mfi_build_syspdio(sc, bio); } if (!cm) mfi_enqueue_bio(sc, bio); return (cm); } /* * mostly copied from cam/scsi/scsi_all.c:scsi_read_write */ int mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb) { int cdb_len; if (((lba & 0x1fffff) == lba) && ((block_count & 0xff) == block_count) && (byte2 == 0)) { /* We can fit in a 6 byte cdb */ struct scsi_rw_6 *scsi_cmd; scsi_cmd = (struct scsi_rw_6 *)cdb; scsi_cmd->opcode = readop ? READ_6 : WRITE_6; scsi_ulto3b(lba, scsi_cmd->addr); scsi_cmd->length = block_count & 0xff; scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* Need a 10 byte CDB */ struct scsi_rw_10 *scsi_cmd; scsi_cmd = (struct scsi_rw_10 *)cdb; scsi_cmd->opcode = readop ? READ_10 : WRITE_10; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto2b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } else if (((block_count & 0xffffffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* Block count is too big for a 10 byte CDB; use a 12 byte CDB */ struct scsi_rw_12 *scsi_cmd; scsi_cmd = (struct scsi_rw_12 *)cdb; scsi_cmd->opcode = readop ? READ_12 : WRITE_12; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } else { /* * 16 byte CDB.
We'll only get here if the LBA is larger * than 2^32 */ struct scsi_rw_16 *scsi_cmd; scsi_cmd = (struct scsi_rw_16 *)cdb; scsi_cmd->opcode = readop ? READ_16 : WRITE_16; scsi_cmd->byte2 = byte2; scsi_u64to8b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } return cdb_len; } extern char *unmapped_buf; static struct mfi_command * mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio) { struct mfi_command *cm; struct mfi_pass_frame *pass; uint32_t context = 0; int flags = 0, blkcount = 0, readop; uint8_t cdb_len; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if ((cm = mfi_dequeue_free(sc)) == NULL) return (NULL); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; pass = &cm->cm_frame->pass; bzero(pass->cdb, 16); pass->header.cmd = MFI_CMD_PD_SCSI_IO; switch (bio->bio_cmd) { case BIO_READ: flags = MFI_CMD_DATAIN | MFI_CMD_BIO; readop = 1; break; case BIO_WRITE: flags = MFI_CMD_DATAOUT | MFI_CMD_BIO; readop = 0; break; default: /* TODO: what about BIO_DELETE??? */ panic("Unsupported bio command %x\n", bio->bio_cmd); } /* Cheat with the sector length to avoid a non-constant division */ - blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; + blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN); /* Fill the LBA and Transfer length in CDB */ cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount, pass->cdb); pass->header.target_id = (uintptr_t)bio->bio_driver1; pass->header.lun_id = 0; pass->header.timeout = 0; pass->header.flags = 0; pass->header.scsi_status = 0; pass->header.sense_len = MFI_SENSE_LEN; pass->header.data_len = bio->bio_bcount; pass->header.cdb_len = cdb_len; pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); cm->cm_complete = mfi_bio_complete; cm->cm_private = bio; cm->cm_data = unmapped_buf; cm->cm_len = bio->bio_bcount; cm->cm_sg = &pass->sgl; cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; cm->cm_flags = flags; return (cm); } static struct mfi_command * mfi_build_ldio(struct mfi_softc *sc, struct bio *bio) { struct mfi_io_frame *io; struct mfi_command *cm; int flags; uint32_t blkcount; uint32_t context = 0; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if ((cm = mfi_dequeue_free(sc)) == NULL) return (NULL); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; io = &cm->cm_frame->io; switch (bio->bio_cmd) { case BIO_READ: io->header.cmd = MFI_CMD_LD_READ; flags = MFI_CMD_DATAIN | MFI_CMD_BIO; break; case BIO_WRITE: io->header.cmd = MFI_CMD_LD_WRITE; flags = MFI_CMD_DATAOUT | MFI_CMD_BIO; break; default: /* TODO: what about BIO_DELETE??? 
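* mfid does not advertise BIO_DELETE support, so such requests should * never reach this switch; the panic below enforces that assumption.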
*/ panic("Unsupported bio command %x\n", bio->bio_cmd); } /* Cheat with the sector length to avoid a non-constant division */ - blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; + blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN); io->header.target_id = (uintptr_t)bio->bio_driver1; io->header.timeout = 0; io->header.flags = 0; io->header.scsi_status = 0; io->header.sense_len = MFI_SENSE_LEN; io->header.data_len = blkcount; io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32; io->lba_lo = bio->bio_pblkno & 0xffffffff; cm->cm_complete = mfi_bio_complete; cm->cm_private = bio; cm->cm_data = unmapped_buf; cm->cm_len = bio->bio_bcount; cm->cm_sg = &io->sgl; cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; cm->cm_flags = flags; return (cm); } static void mfi_bio_complete(struct mfi_command *cm) { struct bio *bio; struct mfi_frame_header *hdr; struct mfi_softc *sc; bio = cm->cm_private; hdr = &cm->cm_frame->header; sc = cm->cm_sc; if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) { bio->bio_flags |= BIO_ERROR; bio->bio_error = EIO; device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, " "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status); mfi_print_sense(cm->cm_sc, cm->cm_sense); } else if (cm->cm_error != 0) { bio->bio_flags |= BIO_ERROR; bio->bio_error = cm->cm_error; device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n", cm, cm->cm_error); } mfi_release_command(cm); mfi_disk_complete(bio); } void mfi_startio(struct mfi_softc *sc) { struct mfi_command *cm; struct ccb_hdr *ccbh; for (;;) { /* Don't bother if we're short on resources */ if (sc->mfi_flags & MFI_FLAGS_QFRZN) break; /* Try a command that has already been prepared */ cm = mfi_dequeue_ready(sc); if (cm == NULL) { if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL) cm = sc->mfi_cam_start(ccbh); } /* Nope, so look for work on the bioq */ if (cm == NULL) cm = mfi_bio_command(sc); /* No work available, so exit */ if (cm == NULL) break; /* Send the command to the controller */ if (mfi_mapcmd(sc, cm) != 0) { device_printf(sc->mfi_dev, "Failed to startio\n"); mfi_requeue_ready(cm); break; } } } int mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm) { int error, polled; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) { polled = (cm->cm_flags & MFI_CMD_POLLED) ? 
BUS_DMA_NOWAIT : 0; if (cm->cm_flags & MFI_CMD_CCB) error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat, cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm, polled); else if (cm->cm_flags & MFI_CMD_BIO) error = bus_dmamap_load_bio(sc->mfi_buffer_dmat, cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm, polled); else error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap, cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled); if (error == EINPROGRESS) { sc->mfi_flags |= MFI_FLAGS_QFRZN; return (0); } } else { error = mfi_send_frame(sc, cm); } return (error); } static void mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct mfi_frame_header *hdr; struct mfi_command *cm; union mfi_sgl *sgl; struct mfi_softc *sc; int i, j, first, dir; int sge_size, locked; cm = (struct mfi_command *)arg; sc = cm->cm_sc; hdr = &cm->cm_frame->header; sgl = cm->cm_sg; /* * We need to check if we have the lock as this is an async * callback, so even though our caller mfi_mapcmd asserts * it has the lock, there is no guarantee that it hasn't been * dropped if bus_dmamap_load returned prior to our * completion. */ if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0) mtx_lock(&sc->mfi_io_lock); if (error) { printf("error %d in callback\n", error); cm->cm_error = error; mfi_complete(sc, cm); goto out; } /* Use IEEE sgl only for IO's on a SKINNY controller. * For other commands on a SKINNY controller use either * sg32 or sg64 based on the sizeof(bus_addr_t). * Also calculate the total frame size based on the type * of SGL used. */ if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) || (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) || (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) && (sc->mfi_flags & MFI_FLAGS_SKINNY)) { for (i = 0; i < nsegs; i++) { sgl->sg_skinny[i].addr = segs[i].ds_addr; sgl->sg_skinny[i].len = segs[i].ds_len; sgl->sg_skinny[i].flag = 0; } hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64; sge_size = sizeof(struct mfi_sg_skinny); hdr->sg_count = nsegs; } else { j = 0; if (cm->cm_frame->header.cmd == MFI_CMD_STP) { first = cm->cm_stp_len; if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { sgl->sg32[j].addr = segs[0].ds_addr; sgl->sg32[j++].len = first; } else { sgl->sg64[j].addr = segs[0].ds_addr; sgl->sg64[j++].len = first; } } else first = 0; if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) { for (i = 0; i < nsegs; i++) { sgl->sg32[j].addr = segs[i].ds_addr + first; sgl->sg32[j++].len = segs[i].ds_len - first; first = 0; } } else { for (i = 0; i < nsegs; i++) { sgl->sg64[j].addr = segs[i].ds_addr + first; sgl->sg64[j++].len = segs[i].ds_len - first; first = 0; } hdr->flags |= MFI_FRAME_SGL64; } hdr->sg_count = j; sge_size = sc->mfi_sge_size; } dir = 0; if (cm->cm_flags & MFI_CMD_DATAIN) { dir |= BUS_DMASYNC_PREREAD; hdr->flags |= MFI_FRAME_DIR_READ; } if (cm->cm_flags & MFI_CMD_DATAOUT) { dir |= BUS_DMASYNC_PREWRITE; hdr->flags |= MFI_FRAME_DIR_WRITE; } bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); cm->cm_flags |= MFI_CMD_MAPPED; /* * Instead of calculating the total number of frames in the * compound frame, it's already assumed that there will be at * least 1 frame, so don't compensate for the modulo of the * following division.
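* For example, a cm_total_frame_size of 320 bytes yields * (320 - 1) / 64 = 4 extra frames beyond the first.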
*/ cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs); cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE; if ((error = mfi_send_frame(sc, cm)) != 0) { printf("error %d in callback from mfi_send_frame\n", error); cm->cm_error = error; mfi_complete(sc, cm); goto out; } out: /* leave the lock in the state we found it */ if (locked == 0) mtx_unlock(&sc->mfi_io_lock); return; } static int mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm) { int error; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if (sc->MFA_enabled) error = mfi_tbolt_send_frame(sc, cm); else error = mfi_std_send_frame(sc, cm); if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) mfi_remove_busy(cm); return (error); } static int mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm) { struct mfi_frame_header *hdr; int tm = mfi_polled_cmd_timeout * 1000; hdr = &cm->cm_frame->header; if ((cm->cm_flags & MFI_CMD_POLLED) == 0) { cm->cm_timestamp = time_uptime; mfi_enqueue_busy(cm); } else { hdr->cmd_status = MFI_STAT_INVALID_STATUS; hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; } /* * The bus address of the command is aligned on a 64 byte boundary, * leaving the least 6 bits as zero. For whatever reason, the * hardware wants the address shifted right by three, leaving just * 3 zero bits. These three bits are then used as a prefetching * hint for the hardware to predict how many frames need to be * fetched across the bus. If a command has more than 8 frames * then the 3 bits are set to 0x7 and the firmware uses other * information in the command to determine the total amount to fetch. * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames * is enough for both 32bit and 64bit systems. */ if (cm->cm_extra_frames > 7) cm->cm_extra_frames = 7; sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames); if ((cm->cm_flags & MFI_CMD_POLLED) == 0) return (0); /* This is a polled command, so busy-wait for it to complete. 
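* Each DELAY(1000) below burns one millisecond, so tm (initialized * above to mfi_polled_cmd_timeout * 1000) counts the remaining timeout * in milliseconds.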
*/ while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { DELAY(1000); tm -= 1; if (tm <= 0) break; } if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) { device_printf(sc->mfi_dev, "Frame %p timed out " "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode); return (ETIMEDOUT); } return (0); } void mfi_complete(struct mfi_softc *sc, struct mfi_command *cm) { int dir; mtx_assert(&sc->mfi_io_lock, MA_OWNED); if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) { dir = 0; if ((cm->cm_flags & MFI_CMD_DATAIN) || (cm->cm_frame->header.cmd == MFI_CMD_STP)) dir |= BUS_DMASYNC_POSTREAD; if (cm->cm_flags & MFI_CMD_DATAOUT) dir |= BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); cm->cm_flags &= ~MFI_CMD_MAPPED; } cm->cm_flags |= MFI_CMD_COMPLETED; if (cm->cm_complete != NULL) cm->cm_complete(cm); else wakeup(cm); } static int mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort) { struct mfi_command *cm; struct mfi_abort_frame *abort; int i = 0, error; uint32_t context = 0; mtx_lock(&sc->mfi_io_lock); if ((cm = mfi_dequeue_free(sc)) == NULL) { mtx_unlock(&sc->mfi_io_lock); return (EBUSY); } /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; abort = &cm->cm_frame->abort; abort->header.cmd = MFI_CMD_ABORT; abort->header.flags = 0; abort->header.scsi_status = 0; abort->abort_context = (*cm_abort)->cm_frame->header.context; abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr; abort->abort_mfi_addr_hi = (uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32); cm->cm_data = NULL; cm->cm_flags = MFI_CMD_POLLED; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "failed to abort command\n"); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); while (i < 5 && *cm_abort != NULL) { tsleep(cm_abort, 0, "mfiabort", 5 * hz); i++; } if (*cm_abort != NULL) { /* Force a complete if command didn't abort */ mtx_lock(&sc->mfi_io_lock); (*cm_abort)->cm_complete(*cm_abort); mtx_unlock(&sc->mfi_io_lock); } return (error); } int mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len) { struct mfi_command *cm; struct mfi_io_frame *io; int error; uint32_t context = 0; if ((cm = mfi_dequeue_free(sc)) == NULL) return (EBUSY); /* Zero out the MFI frame */ context = cm->cm_frame->header.context; bzero(cm->cm_frame, sizeof(union mfi_frame)); cm->cm_frame->header.context = context; io = &cm->cm_frame->io; io->header.cmd = MFI_CMD_LD_WRITE; io->header.target_id = id; io->header.timeout = 0; io->header.flags = 0; io->header.scsi_status = 0; io->header.sense_len = MFI_SENSE_LEN; - io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; + io->header.data_len = howmany(len, MFI_SECTOR_LEN); io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); io->lba_hi = (lba & 0xffffffff00000000) >> 32; io->lba_lo = lba & 0xffffffff; cm->cm_data = virt; cm->cm_len = len; cm->cm_sg = &io->sgl; cm->cm_total_frame_size = MFI_IO_FRAME_SIZE; cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "failed dump blocks\n"); bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); mfi_release_command(cm); return (error); } int mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len) { 
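    /*
     * Like mfi_dump_blocks() above, but the data goes to a bare
     * (system) physical disk via a SCSI pass-through frame: the
     * transfer is described by a CDB built in mfi_build_cdb() rather
     * than by an LD I/O frame.  blkcount = howmany(len, MFI_SECTOR_LEN)
     * rounds the byte count up to whole sectors, e.g. with 512-byte
     * sectors a 513-byte request becomes 2 blocks.
     */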
struct mfi_command *cm; struct mfi_pass_frame *pass; int error, readop, cdb_len; uint32_t blkcount; if ((cm = mfi_dequeue_free(sc)) == NULL) return (EBUSY); pass = &cm->cm_frame->pass; bzero(pass->cdb, 16); pass->header.cmd = MFI_CMD_PD_SCSI_IO; readop = 0; - blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN; + blkcount = howmany(len, MFI_SECTOR_LEN); cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb); pass->header.target_id = id; pass->header.timeout = 0; pass->header.flags = 0; pass->header.scsi_status = 0; pass->header.sense_len = MFI_SENSE_LEN; pass->header.data_len = len; pass->header.cdb_len = cdb_len; pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); cm->cm_data = virt; cm->cm_len = len; cm->cm_sg = &pass->sgl; cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE; cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI; if ((error = mfi_mapcmd(sc, cm)) != 0) device_printf(sc->mfi_dev, "failed dump blocks\n"); bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap); mfi_release_command(cm); return (error); } static int mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mfi_softc *sc; int error; sc = dev->si_drv1; mtx_lock(&sc->mfi_io_lock); if (sc->mfi_detaching) error = ENXIO; else { sc->mfi_flags |= MFI_FLAGS_OPEN; error = 0; } mtx_unlock(&sc->mfi_io_lock); return (error); } static int mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mfi_softc *sc; struct mfi_aen *mfi_aen_entry, *tmp; sc = dev->si_drv1; mtx_lock(&sc->mfi_io_lock); sc->mfi_flags &= ~MFI_FLAGS_OPEN; TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) { if (mfi_aen_entry->p == curproc) { TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, aen_link); free(mfi_aen_entry, M_MFIBUF); } } mtx_unlock(&sc->mfi_io_lock); return (0); } static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode) { switch (opcode) { case MFI_DCMD_LD_DELETE: case MFI_DCMD_CFG_ADD: case MFI_DCMD_CFG_CLEAR: case MFI_DCMD_CFG_FOREIGN_IMPORT: sx_xlock(&sc->mfi_config_lock); return (1); default: return (0); } } static void mfi_config_unlock(struct mfi_softc *sc, int locked) { if (locked) sx_xunlock(&sc->mfi_config_lock); } /* * Perform pre-issue checks on commands from userland and possibly veto * them. */ static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm) { struct mfi_disk *ld, *ld2; int error; struct mfi_system_pd *syspd = NULL; uint16_t syspd_id; uint16_t *mbox; mtx_assert(&sc->mfi_io_lock, MA_OWNED); error = 0; switch (cm->cm_frame->dcmd.opcode) { case MFI_DCMD_LD_DELETE: TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { if (ld->ld_id == cm->cm_frame->dcmd.mbox[0]) break; } if (ld == NULL) error = ENOENT; else error = mfi_disk_disable(ld); break; case MFI_DCMD_CFG_CLEAR: TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { error = mfi_disk_disable(ld); if (error) break; } if (error) { TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) { if (ld2 == ld) break; mfi_disk_enable(ld2); } } break; case MFI_DCMD_PD_STATE_SET: mbox = (uint16_t *) cm->cm_frame->dcmd.mbox; syspd_id = mbox[0]; if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) { TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) { if (syspd->pd_id == syspd_id) break; } } else break; if (syspd) error = mfi_syspd_disable(syspd); break; default: break; } return (error); } /* Perform post-issue checks on commands from userland. 
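 * This is the counterpart of mfi_check_command_pre(): a disk that was
 * disabled there is either torn down here (the command succeeded, so
 * its child device is deleted under Giant) or re-enabled (the command
 * failed and the volume still exists).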
 */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
    struct mfi_disk *ld, *ldn;
    struct mfi_system_pd *syspd = NULL;
    uint16_t syspd_id;
    uint16_t *mbox;

    switch (cm->cm_frame->dcmd.opcode) {
    case MFI_DCMD_LD_DELETE:
        TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
            if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
                break;
        }
        KASSERT(ld != NULL, ("volume disappeared"));
        if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
            mtx_unlock(&sc->mfi_io_lock);
            mtx_lock(&Giant);
            device_delete_child(sc->mfi_dev, ld->ld_dev);
            mtx_unlock(&Giant);
            mtx_lock(&sc->mfi_io_lock);
        } else
            mfi_disk_enable(ld);
        break;
    case MFI_DCMD_CFG_CLEAR:
        if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
            mtx_unlock(&sc->mfi_io_lock);
            mtx_lock(&Giant);
            TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
                device_delete_child(sc->mfi_dev, ld->ld_dev);
            }
            mtx_unlock(&Giant);
            mtx_lock(&sc->mfi_io_lock);
        } else {
            TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
                mfi_disk_enable(ld);
        }
        break;
    case MFI_DCMD_CFG_ADD:
        mfi_ldprobe(sc);
        break;
    case MFI_DCMD_CFG_FOREIGN_IMPORT:
        mfi_ldprobe(sc);
        break;
    case MFI_DCMD_PD_STATE_SET:
        mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
        syspd_id = mbox[0];
        if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
            TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
                if (syspd->pd_id == syspd_id)
                    break;
            }
        } else
            break;
        /* If the transition failed, enable the syspd again. */
        if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
            mfi_syspd_enable(syspd);
        break;
    }
}

static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
{
    struct mfi_config_data *conf_data;
    struct mfi_command *ld_cm = NULL;
    struct mfi_ld_info *ld_info = NULL;
    struct mfi_ld_config *ld;
    char *p;
    int error = 0;

    conf_data = (struct mfi_config_data *)cm->cm_data;

    if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
        p = (char *)conf_data->array;
        p += conf_data->array_size * conf_data->array_count;
        ld = (struct mfi_ld_config *)p;
        if (ld->params.isSSCD == 1)
            error = 1;
    } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
        error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
            (void **)&ld_info, sizeof(*ld_info));
        if (error) {
            device_printf(sc->mfi_dev, "Failed to allocate "
                "MFI_DCMD_LD_GET_INFO %d", error);
            if (ld_info)
                free(ld_info, M_MFIBUF);
            return 0;
        }
        ld_cm->cm_flags = MFI_CMD_DATAIN;
        ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
        ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
        if (mfi_wait_command(sc, ld_cm) != 0) {
            device_printf(sc->mfi_dev, "failed to get log drv\n");
            mfi_release_command(ld_cm);
            free(ld_info, M_MFIBUF);
            return 0;
        }

        if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
            free(ld_info, M_MFIBUF);
            mfi_release_command(ld_cm);
            return 0;
        } else
            ld_info = (struct mfi_ld_info *)ld_cm->cm_private;

        if (ld_info->ld_config.params.isSSCD == 1)
            error = 1;

        mfi_release_command(ld_cm);
        free(ld_info, M_MFIBUF);
    }
    return error;
}

static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
{
    uint8_t i;
    struct mfi_ioc_packet *ioc;
    struct megasas_sge *kern_sge;
    int sge_size, error;

    ioc = (struct mfi_ioc_packet *)arg;
    memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
    kern_sge = (struct megasas_sge *)
        ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);

    cm->cm_frame->header.sg_count = ioc->mfi_sge_count;

    if (sizeof(bus_addr_t) == 8) {
        cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
        cm->cm_extra_frames = 2;
        sge_size = sizeof(struct mfi_sg64);
    } else {
        cm->cm_extra_frames = (cm->cm_total_frame_size - 1) /
            MFI_FRAME_SIZE;
        sge_size = sizeof(struct mfi_sg32);
    }
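    /*
     * The compound frame grows by one SG entry per user iovec; the
     * per-iovec kernel buffers allocated below are what the controller
     * actually DMAs to/from, seeded from the user addresses in
     * ioc->mfi_sgl[] by copyin().
     */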
cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count); for (i = 0; i < ioc->mfi_sge_count; i++) { if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ioc->mfi_sgl[i].iov_len,/* maxsize */ 2, /* nsegments */ ioc->mfi_sgl[i].iov_len,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_kbuff_arr_dmat[i])) { device_printf(sc->mfi_dev, "Cannot allocate mfi_kbuff_arr_dmat tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i], (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT, &sc->mfi_kbuff_arr_dmamap[i])) { device_printf(sc->mfi_dev, "Cannot allocate mfi_kbuff_arr_dmamap memory\n"); return (ENOMEM); } bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i], sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i], ioc->mfi_sgl[i].iov_len, mfi_addr_cb, &sc->mfi_kbuff_arr_busaddr[i], 0); if (!sc->kbuff_arr[i]) { device_printf(sc->mfi_dev, "Could not allocate memory for kbuff_arr info\n"); return -1; } kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i]; kern_sge[i].length = ioc->mfi_sgl[i].iov_len; if (sizeof(bus_addr_t) == 8) { cm->cm_frame->stp.sgl.sg64[i].addr = kern_sge[i].phys_addr; cm->cm_frame->stp.sgl.sg64[i].len = ioc->mfi_sgl[i].iov_len; } else { cm->cm_frame->stp.sgl.sg32[i].addr = kern_sge[i].phys_addr; cm->cm_frame->stp.sgl.sg32[i].len = ioc->mfi_sgl[i].iov_len; } error = copyin(ioc->mfi_sgl[i].iov_base, sc->kbuff_arr[i], ioc->mfi_sgl[i].iov_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy in failed\n"); return error; } } cm->cm_flags |=MFI_CMD_MAPPED; return 0; } static int mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc) { struct mfi_command *cm; struct mfi_dcmd_frame *dcmd; void *ioc_buf = NULL; uint32_t context; int error = 0, locked; if (ioc->buf_size > 0) { if (ioc->buf_size > 1024 * 1024) return (ENOMEM); ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK); error = copyin(ioc->buf, ioc_buf, ioc->buf_size); if (error) { device_printf(sc->mfi_dev, "failed to copyin\n"); free(ioc_buf, M_MFIBUF); return (error); } } locked = mfi_config_lock(sc, ioc->ioc_frame.opcode); mtx_lock(&sc->mfi_io_lock); while ((cm = mfi_dequeue_free(sc)) == NULL) msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz); /* Save context for later */ context = cm->cm_frame->header.context; dcmd = &cm->cm_frame->dcmd; bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame)); cm->cm_sg = &dcmd->sgl; cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE; cm->cm_data = ioc_buf; cm->cm_len = ioc->buf_size; /* restore context */ cm->cm_frame->header.context = context; /* Cheat since we don't know if we're writing or reading */ cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT; error = mfi_check_command_pre(sc, cm); if (error) goto out; error = mfi_wait_command(sc, cm); if (error) { device_printf(sc->mfi_dev, "ioctl failed %d\n", error); goto out; } bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame)); mfi_check_command_post(sc, cm); out: mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); mfi_config_unlock(sc, locked); if (ioc->buf_size > 0) error = copyout(ioc_buf, ioc->buf, ioc->buf_size); if (ioc_buf) free(ioc_buf, M_MFIBUF); return (error); } #define PTRIN(p) ((void *)(uintptr_t)(p)) static int mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct mfi_softc *sc; union mfi_statrequest *ms; struct mfi_ioc_packet *ioc; #ifdef COMPAT_FREEBSD32 struct mfi_ioc_packet32 
*ioc32; #endif struct mfi_ioc_aen *aen; struct mfi_command *cm = NULL; uint32_t context = 0; union mfi_sense_ptr sense_ptr; uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0; size_t len; int i, res; struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg; #ifdef COMPAT_FREEBSD32 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg; struct mfi_ioc_passthru iop_swab; #endif int error, locked; union mfi_sgl *sgl; sc = dev->si_drv1; error = 0; if (sc->adpreset) return EBUSY; if (sc->hw_crit_error) return EBUSY; if (sc->issuepend_done == 0) return EBUSY; switch (cmd) { case MFIIO_STATS: ms = (union mfi_statrequest *)arg; switch (ms->ms_item) { case MFIQ_FREE: case MFIQ_BIO: case MFIQ_READY: case MFIQ_BUSY: bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat, sizeof(struct mfi_qstat)); break; default: error = ENOIOCTL; break; } break; case MFIIO_QUERY_DISK: { struct mfi_query_disk *qd; struct mfi_disk *ld; qd = (struct mfi_query_disk *)arg; mtx_lock(&sc->mfi_io_lock); TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) { if (ld->ld_id == qd->array_id) break; } if (ld == NULL) { qd->present = 0; mtx_unlock(&sc->mfi_io_lock); return (0); } qd->present = 1; if (ld->ld_flags & MFI_DISK_FLAGS_OPEN) qd->open = 1; bzero(qd->devname, SPECNAMELEN + 1); snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit); mtx_unlock(&sc->mfi_io_lock); break; } case MFI_CMD: #ifdef COMPAT_FREEBSD32 case MFI_CMD32: #endif { devclass_t devclass; ioc = (struct mfi_ioc_packet *)arg; int adapter; adapter = ioc->mfi_adapter_no; if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) { devclass = devclass_find("mfi"); sc = devclass_get_softc(devclass, adapter); } mtx_lock(&sc->mfi_io_lock); if ((cm = mfi_dequeue_free(sc)) == NULL) { mtx_unlock(&sc->mfi_io_lock); return (EBUSY); } mtx_unlock(&sc->mfi_io_lock); locked = 0; /* * save off original context since copying from user * will clobber some data */ context = cm->cm_frame->header.context; cm->cm_frame->header.context = cm->cm_index; bcopy(ioc->mfi_frame.raw, cm->cm_frame, 2 * MEGAMFI_FRAME_SIZE); cm->cm_total_frame_size = (sizeof(union mfi_sgl) * ioc->mfi_sge_count) + ioc->mfi_sgl_off; cm->cm_frame->header.scsi_status = 0; cm->cm_frame->header.pad0 = 0; if (ioc->mfi_sge_count) { cm->cm_sg = (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off]; } sgl = cm->cm_sg; cm->cm_flags = 0; if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) cm->cm_flags |= MFI_CMD_DATAIN; if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) cm->cm_flags |= MFI_CMD_DATAOUT; /* Legacy app shim */ if (cm->cm_flags == 0) cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT; cm->cm_len = cm->cm_frame->header.data_len; if (cm->cm_frame->header.cmd == MFI_CMD_STP) { #ifdef COMPAT_FREEBSD32 if (cmd == MFI_CMD) { #endif /* Native */ cm->cm_stp_len = ioc->mfi_sgl[0].iov_len; #ifdef COMPAT_FREEBSD32 } else { /* 32bit on 64bit */ ioc32 = (struct mfi_ioc_packet32 *)ioc; cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len; } #endif cm->cm_len += cm->cm_stp_len; } if (cm->cm_len && (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, M_WAITOK | M_ZERO); if (cm->cm_data == NULL) { device_printf(sc->mfi_dev, "Malloc failed\n"); goto out; } } else { cm->cm_data = 0; } /* restore header context */ cm->cm_frame->header.context = context; if (cm->cm_frame->header.cmd == MFI_CMD_STP) { res = mfi_stp_cmd(sc, cm, arg); if (res != 0) goto out; } else { temp = data; if ((cm->cm_flags & MFI_CMD_DATAOUT) || (cm->cm_frame->header.cmd == MFI_CMD_STP)) { for (i = 0; i < 
ioc->mfi_sge_count; i++) { #ifdef COMPAT_FREEBSD32 if (cmd == MFI_CMD) { #endif /* Native */ addr = ioc->mfi_sgl[i].iov_base; len = ioc->mfi_sgl[i].iov_len; #ifdef COMPAT_FREEBSD32 } else { /* 32bit on 64bit */ ioc32 = (struct mfi_ioc_packet32 *)ioc; addr = PTRIN(ioc32->mfi_sgl[i].iov_base); len = ioc32->mfi_sgl[i].iov_len; } #endif error = copyin(addr, temp, len); if (error != 0) { device_printf(sc->mfi_dev, "Copy in failed\n"); goto out; } temp = &temp[len]; } } } if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { cm->cm_frame->pass.sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; cm->cm_frame->pass.sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); } mtx_lock(&sc->mfi_io_lock); skip_pre_post = mfi_check_for_sscd (sc, cm); if (!skip_pre_post) { error = mfi_check_command_pre(sc, cm); if (error) { mtx_unlock(&sc->mfi_io_lock); goto out; } } if ((error = mfi_wait_command(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Controller polled failed\n"); mtx_unlock(&sc->mfi_io_lock); goto out; } if (!skip_pre_post) { mfi_check_command_post(sc, cm); } mtx_unlock(&sc->mfi_io_lock); if (cm->cm_frame->header.cmd != MFI_CMD_STP) { temp = data; if ((cm->cm_flags & MFI_CMD_DATAIN) || (cm->cm_frame->header.cmd == MFI_CMD_STP)) { for (i = 0; i < ioc->mfi_sge_count; i++) { #ifdef COMPAT_FREEBSD32 if (cmd == MFI_CMD) { #endif /* Native */ addr = ioc->mfi_sgl[i].iov_base; len = ioc->mfi_sgl[i].iov_len; #ifdef COMPAT_FREEBSD32 } else { /* 32bit on 64bit */ ioc32 = (struct mfi_ioc_packet32 *)ioc; addr = PTRIN(ioc32->mfi_sgl[i].iov_base); len = ioc32->mfi_sgl[i].iov_len; } #endif error = copyout(temp, addr, len); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } temp = &temp[len]; } } } if (ioc->mfi_sense_len) { /* get user-space sense ptr then copy out sense */ bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off], &sense_ptr.sense_ptr_data[0], sizeof(sense_ptr.sense_ptr_data)); #ifdef COMPAT_FREEBSD32 if (cmd != MFI_CMD) { /* * not 64bit native so zero out any address * over 32bit */ sense_ptr.addr.high = 0; } #endif error = copyout(cm->cm_sense, sense_ptr.user_space, ioc->mfi_sense_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } } ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status; out: mfi_config_unlock(sc, locked); if (data) free(data, M_MFIBUF); if (cm->cm_frame->header.cmd == MFI_CMD_STP) { for (i = 0; i < 2; i++) { if (sc->kbuff_arr[i]) { if (sc->mfi_kbuff_arr_busaddr != 0) bus_dmamap_unload( sc->mfi_kbuff_arr_dmat[i], sc->mfi_kbuff_arr_dmamap[i] ); if (sc->kbuff_arr[i] != NULL) bus_dmamem_free( sc->mfi_kbuff_arr_dmat[i], sc->kbuff_arr[i], sc->mfi_kbuff_arr_dmamap[i] ); if (sc->mfi_kbuff_arr_dmat[i] != NULL) bus_dma_tag_destroy( sc->mfi_kbuff_arr_dmat[i]); } } } if (cm) { mtx_lock(&sc->mfi_io_lock); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); } break; } case MFI_SET_AEN: aen = (struct mfi_ioc_aen *)arg; mtx_lock(&sc->mfi_io_lock); error = mfi_aen_register(sc, aen->aen_seq_num, aen->aen_class_locale); mtx_unlock(&sc->mfi_io_lock); break; case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ { devclass_t devclass; struct mfi_linux_ioc_packet l_ioc; int adapter; devclass = devclass_find("mfi"); if (devclass == NULL) return (ENOENT); error = copyin(arg, &l_ioc, sizeof(l_ioc)); if (error) return (error); adapter = l_ioc.lioc_adapter_no; sc = devclass_get_softc(devclass, adapter); if (sc == NULL) return (ENOENT); 
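        /*
         * Re-dispatch on the adapter named in the Linux ioctl
         * payload: mfi_linux_ioctl_int() below performs the actual
         * frame handling for the shim.
         */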
return (mfi_linux_ioctl_int(sc->mfi_cdev, cmd, arg, flag, td)); break; } case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ { devclass_t devclass; struct mfi_linux_ioc_aen l_aen; int adapter; devclass = devclass_find("mfi"); if (devclass == NULL) return (ENOENT); error = copyin(arg, &l_aen, sizeof(l_aen)); if (error) return (error); adapter = l_aen.laen_adapter_no; sc = devclass_get_softc(devclass, adapter); if (sc == NULL) return (ENOENT); return (mfi_linux_ioctl_int(sc->mfi_cdev, cmd, arg, flag, td)); break; } #ifdef COMPAT_FREEBSD32 case MFIIO_PASSTHRU32: if (!SV_CURPROC_FLAG(SV_ILP32)) { error = ENOTTY; break; } iop_swab.ioc_frame = iop32->ioc_frame; iop_swab.buf_size = iop32->buf_size; iop_swab.buf = PTRIN(iop32->buf); iop = &iop_swab; /* FALLTHROUGH */ #endif case MFIIO_PASSTHRU: error = mfi_user_command(sc, iop); #ifdef COMPAT_FREEBSD32 if (cmd == MFIIO_PASSTHRU32) iop32->ioc_frame = iop_swab.ioc_frame; #endif break; default: device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); error = ENOTTY; break; } return (error); } static int mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct mfi_softc *sc; struct mfi_linux_ioc_packet l_ioc; struct mfi_linux_ioc_aen l_aen; struct mfi_command *cm = NULL; struct mfi_aen *mfi_aen_entry; union mfi_sense_ptr sense_ptr; uint32_t context = 0; uint8_t *data = NULL, *temp; int i; int error, locked; sc = dev->si_drv1; error = 0; switch (cmd) { case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */ error = copyin(arg, &l_ioc, sizeof(l_ioc)); if (error != 0) return (error); if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) { return (EINVAL); } mtx_lock(&sc->mfi_io_lock); if ((cm = mfi_dequeue_free(sc)) == NULL) { mtx_unlock(&sc->mfi_io_lock); return (EBUSY); } mtx_unlock(&sc->mfi_io_lock); locked = 0; /* * save off original context since copying from user * will clobber some data */ context = cm->cm_frame->header.context; bcopy(l_ioc.lioc_frame.raw, cm->cm_frame, 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */ cm->cm_total_frame_size = (sizeof(union mfi_sgl) * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off; cm->cm_frame->header.scsi_status = 0; cm->cm_frame->header.pad0 = 0; if (l_ioc.lioc_sge_count) cm->cm_sg = (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off]; cm->cm_flags = 0; if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN) cm->cm_flags |= MFI_CMD_DATAIN; if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT) cm->cm_flags |= MFI_CMD_DATAOUT; cm->cm_len = cm->cm_frame->header.data_len; if (cm->cm_len && (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) { cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF, M_WAITOK | M_ZERO); if (cm->cm_data == NULL) { device_printf(sc->mfi_dev, "Malloc failed\n"); goto out; } } else { cm->cm_data = 0; } /* restore header context */ cm->cm_frame->header.context = context; temp = data; if (cm->cm_flags & MFI_CMD_DATAOUT) { for (i = 0; i < l_ioc.lioc_sge_count; i++) { error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base), temp, l_ioc.lioc_sgl[i].iov_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy in failed\n"); goto out; } temp = &temp[l_ioc.lioc_sgl[i].iov_len]; } } if (cm->cm_frame->header.cmd == MFI_CMD_DCMD) locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode); if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) { cm->cm_frame->pass.sense_addr_lo = (uint32_t)cm->cm_sense_busaddr; cm->cm_frame->pass.sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32); } mtx_lock(&sc->mfi_io_lock); error = mfi_check_command_pre(sc, cm); if (error) { 
mtx_unlock(&sc->mfi_io_lock); goto out; } if ((error = mfi_wait_command(sc, cm)) != 0) { device_printf(sc->mfi_dev, "Controller polled failed\n"); mtx_unlock(&sc->mfi_io_lock); goto out; } mfi_check_command_post(sc, cm); mtx_unlock(&sc->mfi_io_lock); temp = data; if (cm->cm_flags & MFI_CMD_DATAIN) { for (i = 0; i < l_ioc.lioc_sge_count; i++) { error = copyout(temp, PTRIN(l_ioc.lioc_sgl[i].iov_base), l_ioc.lioc_sgl[i].iov_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } temp = &temp[l_ioc.lioc_sgl[i].iov_len]; } } if (l_ioc.lioc_sense_len) { /* get user-space sense ptr then copy out sense */ bcopy(&((struct mfi_linux_ioc_packet*)arg) ->lioc_frame.raw[l_ioc.lioc_sense_off], &sense_ptr.sense_ptr_data[0], sizeof(sense_ptr.sense_ptr_data)); #ifdef __amd64__ /* * only 32bit Linux support so zero out any * address over 32bit */ sense_ptr.addr.high = 0; #endif error = copyout(cm->cm_sense, sense_ptr.user_space, l_ioc.lioc_sense_len); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } } error = copyout(&cm->cm_frame->header.cmd_status, &((struct mfi_linux_ioc_packet*)arg) ->lioc_frame.hdr.cmd_status, 1); if (error != 0) { device_printf(sc->mfi_dev, "Copy out failed\n"); goto out; } out: mfi_config_unlock(sc, locked); if (data) free(data, M_MFIBUF); if (cm) { mtx_lock(&sc->mfi_io_lock); mfi_release_command(cm); mtx_unlock(&sc->mfi_io_lock); } return (error); case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */ error = copyin(arg, &l_aen, sizeof(l_aen)); if (error != 0) return (error); printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid); mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF, M_WAITOK); mtx_lock(&sc->mfi_io_lock); if (mfi_aen_entry != NULL) { mfi_aen_entry->p = curproc; TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, aen_link); } error = mfi_aen_register(sc, l_aen.laen_seq_num, l_aen.laen_class_locale); if (error != 0) { TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry, aen_link); free(mfi_aen_entry, M_MFIBUF); } mtx_unlock(&sc->mfi_io_lock); return (error); default: device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd); error = ENOENT; break; } return (error); } static int mfi_poll(struct cdev *dev, int poll_events, struct thread *td) { struct mfi_softc *sc; int revents = 0; sc = dev->si_drv1; if (poll_events & (POLLIN | POLLRDNORM)) { if (sc->mfi_aen_triggered != 0) { revents |= poll_events & (POLLIN | POLLRDNORM); sc->mfi_aen_triggered = 0; } if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) { revents |= POLLERR; } } if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) { sc->mfi_poll_waiting = 1; selrecord(td, &sc->mfi_select); } } return revents; } static void mfi_dump_all(void) { struct mfi_softc *sc; struct mfi_command *cm; devclass_t dc; time_t deadline; int timedout; int i; dc = devclass_find("mfi"); if (dc == NULL) { printf("No mfi dev class\n"); return; } for (i = 0; ; i++) { sc = devclass_get_softc(dc, i); if (sc == NULL) break; device_printf(sc->mfi_dev, "Dumping\n\n"); timedout = 0; deadline = time_uptime - mfi_cmd_timeout; mtx_lock(&sc->mfi_io_lock); TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) { if (cm->cm_timestamp <= deadline) { device_printf(sc->mfi_dev, "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, (int)(time_uptime - cm->cm_timestamp)); MFI_PRINT_CMD(cm); timedout++; } } #if 0 if (timedout) MFI_DUMP_CMDS(sc); #endif mtx_unlock(&sc->mfi_io_lock); } return; } static void mfi_timeout(void *data) { struct mfi_softc *sc = (struct mfi_softc *)data; struct mfi_command *cm, *tmp; 
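    /*
     * Any busy command stamped earlier than (time_uptime -
     * mfi_cmd_timeout) has been outstanding for longer than the
     * timeout; the AEN and map-sync commands are exempt since they
     * are expected to stay outstanding indefinitely.
     */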
    time_t deadline;
    int timedout = 0;

    deadline = time_uptime - mfi_cmd_timeout;
    if (sc->adpreset == 0) {
        if (!mfi_tbolt_reset(sc)) {
            callout_reset(&sc->mfi_watchdog_callout,
                mfi_cmd_timeout * hz, mfi_timeout, sc);
            return;
        }
    }
    mtx_lock(&sc->mfi_io_lock);
    TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
        if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
            continue;
        if (cm->cm_timestamp <= deadline) {
            if (sc->adpreset != 0 && sc->issuepend_done == 0) {
                cm->cm_timestamp = time_uptime;
            } else {
                device_printf(sc->mfi_dev,
                    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
                    cm, (int)(time_uptime - cm->cm_timestamp));
                MFI_PRINT_CMD(cm);
                MFI_VALIDATE_CMD(sc, cm);
                /*
                 * While commands can get stuck forever, we do not
                 * fail them, as there is no way to tell whether the
                 * controller has actually processed them or not.
                 *
                 * In addition, it's very likely that force-failing a
                 * command here would cause a panic, e.g. in UFS.
                 */
                timedout++;
            }
        }
    }
#if 0
    if (timedout)
        MFI_DUMP_CMDS(sc);
#endif
    mtx_unlock(&sc->mfi_io_lock);

    callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
        mfi_timeout, sc);

    if (0)
        mfi_dump_all();
    return;
}
diff --git a/sys/dev/mlx/mlx.c b/sys/dev/mlx/mlx.c
index 6d9c49b925f2..970053c1ec71 100644
--- a/sys/dev/mlx/mlx.c
+++ b/sys/dev/mlx/mlx.c
@@ -1,3074 +1,3074 @@
/*-
 * Copyright (c) 1999 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Driver for the Mylex DAC960 family of RAID controllers.
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct cdevsw mlx_cdevsw = { .d_version = D_VERSION, .d_open = mlx_open, .d_close = mlx_close, .d_ioctl = mlx_ioctl, .d_name = "mlx", }; devclass_t mlx_devclass; /* * Per-interface accessor methods */ static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc); static int mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status); static void mlx_v3_intaction(struct mlx_softc *sc, int action); static int mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first); static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc); static int mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status); static void mlx_v4_intaction(struct mlx_softc *sc, int action); static int mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first); static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc); static int mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status); static void mlx_v5_intaction(struct mlx_softc *sc, int action); static int mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first); /* * Status monitoring */ static void mlx_periodic(void *data); static void mlx_periodic_enquiry(struct mlx_command *mc); static void mlx_periodic_eventlog_poll(struct mlx_softc *sc); static void mlx_periodic_eventlog_respond(struct mlx_command *mc); static void mlx_periodic_rebuild(struct mlx_command *mc); /* * Channel Pause */ static void mlx_pause_action(struct mlx_softc *sc); static void mlx_pause_done(struct mlx_command *mc); /* * Command submission. */ static void *mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize, void (*complete)(struct mlx_command *mc)); static int mlx_flush(struct mlx_softc *sc); static int mlx_check(struct mlx_softc *sc, int drive); static int mlx_rebuild(struct mlx_softc *sc, int channel, int target); static int mlx_wait_command(struct mlx_command *mc); static int mlx_poll_command(struct mlx_command *mc); void mlx_startio_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error); static void mlx_startio(struct mlx_softc *sc); static void mlx_completeio(struct mlx_command *mc); static int mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu); void mlx_user_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error); /* * Command buffer allocation. */ static struct mlx_command *mlx_alloccmd(struct mlx_softc *sc); static void mlx_releasecmd(struct mlx_command *mc); static void mlx_freecmd(struct mlx_command *mc); /* * Command management. */ static int mlx_getslot(struct mlx_command *mc); static void mlx_setup_dmamap(struct mlx_command *mc, bus_dma_segment_t *segs, int nsegments, int error); static void mlx_unmapcmd(struct mlx_command *mc); static int mlx_shutdown_locked(struct mlx_softc *sc); static int mlx_start(struct mlx_command *mc); static int mlx_done(struct mlx_softc *sc, int startio); static void mlx_complete(struct mlx_softc *sc); /* * Debugging. */ static char *mlx_diagnose_command(struct mlx_command *mc); static void mlx_describe_controller(struct mlx_softc *sc); static int mlx_fw_message(struct mlx_softc *sc, int status, int param1, int param2); /* * Utility functions. 
*/ static struct mlx_sysdrive *mlx_findunit(struct mlx_softc *sc, int unit); /******************************************************************************** ******************************************************************************** Public Interfaces ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Free all of the resources associated with (sc) * * Should not be called if the controller is active. */ void mlx_free(struct mlx_softc *sc) { struct mlx_command *mc; debug_called(1); /* destroy control device */ if (sc->mlx_dev_t != NULL) destroy_dev(sc->mlx_dev_t); if (sc->mlx_intr) bus_teardown_intr(sc->mlx_dev, sc->mlx_irq, sc->mlx_intr); /* cancel status timeout */ MLX_IO_LOCK(sc); callout_stop(&sc->mlx_timeout); /* throw away any command buffers */ while ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) { TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link); mlx_freecmd(mc); } MLX_IO_UNLOCK(sc); callout_drain(&sc->mlx_timeout); /* destroy data-transfer DMA tag */ if (sc->mlx_buffer_dmat) bus_dma_tag_destroy(sc->mlx_buffer_dmat); /* free and destroy DMA memory and tag for s/g lists */ if (sc->mlx_sgbusaddr) bus_dmamap_unload(sc->mlx_sg_dmat, sc->mlx_sg_dmamap); if (sc->mlx_sgtable) bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap); if (sc->mlx_sg_dmat) bus_dma_tag_destroy(sc->mlx_sg_dmat); /* disconnect the interrupt handler */ if (sc->mlx_irq != NULL) bus_release_resource(sc->mlx_dev, SYS_RES_IRQ, 0, sc->mlx_irq); /* destroy the parent DMA tag */ if (sc->mlx_parent_dmat) bus_dma_tag_destroy(sc->mlx_parent_dmat); /* release the register window mapping */ if (sc->mlx_mem != NULL) bus_release_resource(sc->mlx_dev, sc->mlx_mem_type, sc->mlx_mem_rid, sc->mlx_mem); /* free controller enquiry data */ if (sc->mlx_enq2 != NULL) free(sc->mlx_enq2, M_DEVBUF); sx_destroy(&sc->mlx_config_lock); mtx_destroy(&sc->mlx_io_lock); } /******************************************************************************** * Map the scatter/gather table into bus space */ static void mlx_dma_map_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mlx_softc *sc = (struct mlx_softc *)arg; debug_called(1); /* save base of s/g table's address in bus space */ sc->mlx_sgbusaddr = segs->ds_addr; } static int mlx_sglist_map(struct mlx_softc *sc) { size_t segsize; int error, ncmd; debug_called(1); /* destroy any existing mappings */ if (sc->mlx_sgbusaddr) bus_dmamap_unload(sc->mlx_sg_dmat, sc->mlx_sg_dmamap); if (sc->mlx_sgtable) bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap); if (sc->mlx_sg_dmat) bus_dma_tag_destroy(sc->mlx_sg_dmat); sc->mlx_sgbusaddr = 0; sc->mlx_sgtable = NULL; sc->mlx_sg_dmat = NULL; /* * Create a single tag describing a region large enough to hold all of * the s/g lists we will need. If we're called early on, we don't know how * many commands we're going to be asked to support, so only allocate enough * for a couple. 
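 * As an illustrative sizing example (made-up figures, not the
 * driver's actual constants): with 8-byte s/g entries, MLX_NSEG = 17
 * and a controller reporting me_max_commands = 64, segsize would be
 * 8 * 17 * 64 = 8704 bytes.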
*/ if (sc->mlx_enq2 == NULL) { ncmd = 2; } else { ncmd = sc->mlx_enq2->me_max_commands; } segsize = sizeof(struct mlx_sgentry) * MLX_NSEG * ncmd; error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ segsize, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mlx_sg_dmat); if (error != 0) { device_printf(sc->mlx_dev, "can't allocate scatter/gather DMA tag\n"); return(ENOMEM); } /* * Allocate enough s/g maps for all commands and permanently map them into * controller-visible space. * * XXX this assumes we can get enough space for all the s/g maps in one * contiguous slab. We may need to switch to a more complex arrangement * where we allocate in smaller chunks and keep a lookup table from slot * to bus address. */ error = bus_dmamem_alloc(sc->mlx_sg_dmat, (void **)&sc->mlx_sgtable, BUS_DMA_NOWAIT, &sc->mlx_sg_dmamap); if (error) { device_printf(sc->mlx_dev, "can't allocate s/g table\n"); return(ENOMEM); } (void)bus_dmamap_load(sc->mlx_sg_dmat, sc->mlx_sg_dmamap, sc->mlx_sgtable, segsize, mlx_dma_map_sg, sc, 0); return(0); } /******************************************************************************** * Initialise the controller and softc */ int mlx_attach(struct mlx_softc *sc) { struct mlx_enquiry_old *meo; int rid, error, fwminor, hscode, hserror, hsparam1, hsparam2, hsmsg; debug_called(1); /* * Initialise per-controller queues. */ TAILQ_INIT(&sc->mlx_work); TAILQ_INIT(&sc->mlx_freecmds); bioq_init(&sc->mlx_bioq); /* * Select accessor methods based on controller interface type. */ switch(sc->mlx_iftype) { case MLX_IFTYPE_2: case MLX_IFTYPE_3: sc->mlx_tryqueue = mlx_v3_tryqueue; sc->mlx_findcomplete = mlx_v3_findcomplete; sc->mlx_intaction = mlx_v3_intaction; sc->mlx_fw_handshake = mlx_v3_fw_handshake; break; case MLX_IFTYPE_4: sc->mlx_tryqueue = mlx_v4_tryqueue; sc->mlx_findcomplete = mlx_v4_findcomplete; sc->mlx_intaction = mlx_v4_intaction; sc->mlx_fw_handshake = mlx_v4_fw_handshake; break; case MLX_IFTYPE_5: sc->mlx_tryqueue = mlx_v5_tryqueue; sc->mlx_findcomplete = mlx_v5_findcomplete; sc->mlx_intaction = mlx_v5_intaction; sc->mlx_fw_handshake = mlx_v5_fw_handshake; break; default: return(ENXIO); /* should never happen */ } /* disable interrupts before we start talking to the controller */ MLX_IO_LOCK(sc); sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); MLX_IO_UNLOCK(sc); /* * Wait for the controller to come ready, handshake with the firmware if required. * This is typically only necessary on platforms where the controller BIOS does not * run. */ hsmsg = 0; DELAY(1000); while ((hscode = sc->mlx_fw_handshake(sc, &hserror, &hsparam1, &hsparam2, hsmsg == 0)) != 0) { /* report first time around... */ if (hsmsg == 0) { device_printf(sc->mlx_dev, "controller initialisation in progress...\n"); hsmsg = 1; } /* did we get a real message? */ if (hscode == 2) { hscode = mlx_fw_message(sc, hserror, hsparam1, hsparam2); /* fatal initialisation error? */ if (hscode != 0) { return(ENXIO); } } } if (hsmsg == 1) device_printf(sc->mlx_dev, "initialisation complete.\n"); /* * Allocate and connect our interrupt. 
*/ rid = 0; sc->mlx_irq = bus_alloc_resource_any(sc->mlx_dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->mlx_irq == NULL) { device_printf(sc->mlx_dev, "can't allocate interrupt\n"); return(ENXIO); } error = bus_setup_intr(sc->mlx_dev, sc->mlx_irq, INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE, NULL, mlx_intr, sc, &sc->mlx_intr); if (error) { device_printf(sc->mlx_dev, "can't set up interrupt\n"); return(ENXIO); } /* * Create DMA tag for mapping buffers into controller-addressable space. */ error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */ 1, 0, /* align, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MLX_MAXPHYS, /* maxsize */ MLX_NSEG, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->mlx_io_lock, /* lockarg */ &sc->mlx_buffer_dmat); if (error != 0) { device_printf(sc->mlx_dev, "can't allocate buffer DMA tag\n"); return(ENOMEM); } /* * Create some initial scatter/gather mappings so we can run the probe * commands. */ error = mlx_sglist_map(sc); if (error != 0) { device_printf(sc->mlx_dev, "can't make initial s/g list mapping\n"); return(error); } /* * We don't (yet) know where the event log is up to. */ sc->mlx_currevent = -1; /* * Obtain controller feature information */ MLX_IO_LOCK(sc); if ((sc->mlx_enq2 = mlx_enquire(sc, MLX_CMD_ENQUIRY2, sizeof(struct mlx_enquiry2), NULL)) == NULL) { MLX_IO_UNLOCK(sc); device_printf(sc->mlx_dev, "ENQUIRY2 failed\n"); return(ENXIO); } /* * Do quirk/feature related things. */ fwminor = (sc->mlx_enq2->me_firmware_id >> 8) & 0xff; switch(sc->mlx_iftype) { case MLX_IFTYPE_2: /* These controllers don't report the firmware version in the ENQUIRY2 response */ if ((meo = mlx_enquire(sc, MLX_CMD_ENQUIRY_OLD, sizeof(struct mlx_enquiry_old), NULL)) == NULL) { MLX_IO_UNLOCK(sc); device_printf(sc->mlx_dev, "ENQUIRY_OLD failed\n"); return(ENXIO); } sc->mlx_enq2->me_firmware_id = ('0' << 24) | (0 << 16) | (meo->me_fwminor << 8) | meo->me_fwmajor; /* XXX require 2.42 or better (PCI) or 2.14 or better (EISA) */ if (meo->me_fwminor < 42) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 2.42 or later\n"); } free(meo, M_DEVBUF); break; case MLX_IFTYPE_3: /* XXX certify 3.52? */ if (fwminor < 51) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 3.51 or later\n"); } break; case MLX_IFTYPE_4: /* XXX certify firmware versions? */ if (fwminor < 6) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 4.06 or later\n"); } break; case MLX_IFTYPE_5: if (fwminor < 7) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 5.07 or later\n"); } break; default: MLX_IO_UNLOCK(sc); return(ENXIO); /* should never happen */ } MLX_IO_UNLOCK(sc); /* * Create the final scatter/gather mappings now that we have characterised the controller. */ error = mlx_sglist_map(sc); if (error != 0) { device_printf(sc->mlx_dev, "can't make final s/g list mapping\n"); return(error); } /* * No user-requested background operation is in progress. */ sc->mlx_background = 0; sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE; /* * Create the control device. 
*/ sc->mlx_dev_t = make_dev(&mlx_cdevsw, 0, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "mlx%d", device_get_unit(sc->mlx_dev)); sc->mlx_dev_t->si_drv1 = sc; /* * Start the timeout routine. */ callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc); /* print a little information about the controller */ mlx_describe_controller(sc); return(0); } /******************************************************************************** * Locate disk resources and attach children to them. */ void mlx_startup(struct mlx_softc *sc) { struct mlx_enq_sys_drive *mes; struct mlx_sysdrive *dr; int i, error; debug_called(1); /* * Scan all the system drives and attach children for those that * don't currently have them. */ MLX_IO_LOCK(sc); mes = mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(*mes) * MLX_MAXDRIVES, NULL); MLX_IO_UNLOCK(sc); if (mes == NULL) { device_printf(sc->mlx_dev, "error fetching drive status\n"); return; } /* iterate over drives returned */ MLX_CONFIG_LOCK(sc); for (i = 0, dr = &sc->mlx_sysdrive[0]; (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff); i++, dr++) { /* are we already attached to this drive? */ if (dr->ms_disk == 0) { /* pick up drive information */ dr->ms_size = mes[i].sd_size; dr->ms_raidlevel = mes[i].sd_raidlevel & 0xf; dr->ms_state = mes[i].sd_state; /* generate geometry information */ if (sc->mlx_geom == MLX_GEOM_128_32) { dr->ms_heads = 128; dr->ms_sectors = 32; dr->ms_cylinders = dr->ms_size / (128 * 32); } else { /* MLX_GEOM_255/63 */ dr->ms_heads = 255; dr->ms_sectors = 63; dr->ms_cylinders = dr->ms_size / (255 * 63); } dr->ms_disk = device_add_child(sc->mlx_dev, /*"mlxd"*/NULL, -1); if (dr->ms_disk == 0) device_printf(sc->mlx_dev, "device_add_child failed\n"); device_set_ivars(dr->ms_disk, dr); } } free(mes, M_DEVBUF); if ((error = bus_generic_attach(sc->mlx_dev)) != 0) device_printf(sc->mlx_dev, "bus_generic_attach returned %d", error); /* mark controller back up */ MLX_IO_LOCK(sc); sc->mlx_state &= ~MLX_STATE_SHUTDOWN; /* enable interrupts */ sc->mlx_intaction(sc, MLX_INTACTION_ENABLE); MLX_IO_UNLOCK(sc); MLX_CONFIG_UNLOCK(sc); } /******************************************************************************** * Disconnect from the controller completely, in preparation for unload. */ int mlx_detach(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); struct mlxd_softc *mlxd; int i, error; debug_called(1); error = EBUSY; MLX_CONFIG_LOCK(sc); if (sc->mlx_state & MLX_STATE_OPEN) goto out; for (i = 0; i < MLX_MAXDRIVES; i++) { if (sc->mlx_sysdrive[i].ms_disk != 0) { mlxd = device_get_softc(sc->mlx_sysdrive[i].ms_disk); if (mlxd->mlxd_flags & MLXD_OPEN) { /* drive is mounted, abort detach */ device_printf(sc->mlx_sysdrive[i].ms_disk, "still open, can't detach\n"); goto out; } } } if ((error = mlx_shutdown(dev))) goto out; MLX_CONFIG_UNLOCK(sc); mlx_free(sc); return (0); out: MLX_CONFIG_UNLOCK(sc); return(error); } /******************************************************************************** * Bring the controller down to a dormant state and detach all child devices. * * This function is called before detach, system shutdown, or before performing * an operation which may add or delete system disks. (Call mlx_startup to * resume normal operation.) * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. 
*/ int mlx_shutdown(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); int error; MLX_CONFIG_LOCK(sc); error = mlx_shutdown_locked(sc); MLX_CONFIG_UNLOCK(sc); return (error); } static int mlx_shutdown_locked(struct mlx_softc *sc) { int i, error; debug_called(1); MLX_CONFIG_ASSERT_LOCKED(sc); MLX_IO_LOCK(sc); sc->mlx_state |= MLX_STATE_SHUTDOWN; sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); /* flush controller */ device_printf(sc->mlx_dev, "flushing cache..."); if (mlx_flush(sc)) { printf("failed\n"); } else { printf("done\n"); } MLX_IO_UNLOCK(sc); /* delete all our child devices */ for (i = 0; i < MLX_MAXDRIVES; i++) { if (sc->mlx_sysdrive[i].ms_disk != 0) { if ((error = device_delete_child(sc->mlx_dev, sc->mlx_sysdrive[i].ms_disk)) != 0) return (error); sc->mlx_sysdrive[i].ms_disk = 0; } } return (0); } /******************************************************************************** * Bring the controller to a quiescent state, ready for system suspend. */ int mlx_suspend(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); debug_called(1); MLX_IO_LOCK(sc); sc->mlx_state |= MLX_STATE_SUSPEND; /* flush controller */ device_printf(sc->mlx_dev, "flushing cache..."); printf("%s\n", mlx_flush(sc) ? "failed" : "done"); sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); MLX_IO_UNLOCK(sc); return(0); } /******************************************************************************** * Bring the controller back to a state ready for operation. */ int mlx_resume(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); debug_called(1); MLX_IO_LOCK(sc); sc->mlx_state &= ~MLX_STATE_SUSPEND; sc->mlx_intaction(sc, MLX_INTACTION_ENABLE); MLX_IO_UNLOCK(sc); return(0); } /******************************************************************************* * Take an interrupt, or be poked by other code to look for interrupt-worthy * status. */ void mlx_intr(void *arg) { struct mlx_softc *sc = (struct mlx_softc *)arg; debug_called(1); /* collect finished commands, queue anything waiting */ MLX_IO_LOCK(sc); mlx_done(sc, 1); MLX_IO_UNLOCK(sc); }; /******************************************************************************* * Receive a buf structure from a child device and queue it on a particular * disk resource, then poke the disk resource to start as much work as it can. */ int mlx_submit_buf(struct mlx_softc *sc, struct bio *bp) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); bioq_insert_tail(&sc->mlx_bioq, bp); sc->mlx_waitbufs++; mlx_startio(sc); return(0); } /******************************************************************************** * Accept an open operation on the control device. */ int mlx_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mlx_softc *sc = dev->si_drv1; MLX_CONFIG_LOCK(sc); MLX_IO_LOCK(sc); sc->mlx_state |= MLX_STATE_OPEN; MLX_IO_UNLOCK(sc); MLX_CONFIG_UNLOCK(sc); return(0); } /******************************************************************************** * Accept the last close on the control device. */ int mlx_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mlx_softc *sc = dev->si_drv1; MLX_CONFIG_LOCK(sc); MLX_IO_LOCK(sc); sc->mlx_state &= ~MLX_STATE_OPEN; MLX_IO_UNLOCK(sc); MLX_CONFIG_UNLOCK(sc); return (0); } /******************************************************************************** * Handle controller-specific control operations. 
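 *
 * For example, a management tool can enumerate the attached system
 * drives with the MLX_NEXT_CHILD ioctl (illustrative userland usage):
 *
 *	int u = -1;
 *	while (ioctl(fd, MLX_NEXT_CHILD, &u) == 0)
 *		printf("system drive: mlxd%d\n", u);
 *
 * starting at -1 and feeding each returned unit number back in until
 * the call fails with ENOENT.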
*/ int mlx_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct mlx_softc *sc = dev->si_drv1; struct mlx_rebuild_request *rb = (struct mlx_rebuild_request *)addr; struct mlx_rebuild_status *rs = (struct mlx_rebuild_status *)addr; int *arg = (int *)addr; struct mlx_pause *mp; struct mlx_sysdrive *dr; struct mlxd_softc *mlxd; int i, error; switch(cmd) { /* * Enumerate connected system drives; returns the first system drive's * unit number if *arg is -1, or the next unit after *arg if it's * a valid unit on this controller. */ case MLX_NEXT_CHILD: /* search system drives */ MLX_CONFIG_LOCK(sc); for (i = 0; i < MLX_MAXDRIVES; i++) { /* is this one attached? */ if (sc->mlx_sysdrive[i].ms_disk != 0) { /* looking for the next one we come across? */ if (*arg == -1) { *arg = device_get_unit(sc->mlx_sysdrive[i].ms_disk); MLX_CONFIG_UNLOCK(sc); return(0); } /* we want the one after this one */ if (*arg == device_get_unit(sc->mlx_sysdrive[i].ms_disk)) *arg = -1; } } MLX_CONFIG_UNLOCK(sc); return(ENOENT); /* * Scan the controller to see whether new drives have appeared. */ case MLX_RESCAN_DRIVES: mtx_lock(&Giant); mlx_startup(sc); mtx_unlock(&Giant); return(0); /* * Disconnect from the specified drive; it may be about to go * away. */ case MLX_DETACH_DRIVE: /* detach one drive */ MLX_CONFIG_LOCK(sc); if (((dr = mlx_findunit(sc, *arg)) == NULL) || ((mlxd = device_get_softc(dr->ms_disk)) == NULL)) { MLX_CONFIG_UNLOCK(sc); return(ENOENT); } device_printf(dr->ms_disk, "detaching..."); error = 0; if (mlxd->mlxd_flags & MLXD_OPEN) { error = EBUSY; goto detach_out; } /* flush controller */ MLX_IO_LOCK(sc); if (mlx_flush(sc)) { MLX_IO_UNLOCK(sc); error = EBUSY; goto detach_out; } MLX_IO_UNLOCK(sc); /* nuke drive */ if ((error = device_delete_child(sc->mlx_dev, dr->ms_disk)) != 0) goto detach_out; dr->ms_disk = 0; detach_out: MLX_CONFIG_UNLOCK(sc); if (error) { printf("failed\n"); } else { printf("done\n"); } return(error); /* * Pause one or more SCSI channels for a period of time, to assist * in the process of hot-swapping devices. * * Note that at least the 3.51 firmware on the DAC960PL doesn't seem * to do this right. */ case MLX_PAUSE_CHANNEL: /* schedule a channel pause */ /* Does this command work on this firmware? */ if (!(sc->mlx_feature & MLX_FEAT_PAUSEWORKS)) return(EOPNOTSUPP); /* check time values */ mp = (struct mlx_pause *)addr; if ((mp->mp_when < 0) || (mp->mp_when > 3600)) return(EINVAL); if ((mp->mp_howlong < 1) || (mp->mp_howlong > (0xf * 30))) return(EINVAL); MLX_IO_LOCK(sc); if ((mp->mp_which == MLX_PAUSE_CANCEL) && (sc->mlx_pause.mp_when != 0)) { /* cancel a pending pause operation */ sc->mlx_pause.mp_which = 0; } else { /* fix for legal channels */ mp->mp_which &= ((1 << sc->mlx_enq2->me_actual_channels) -1); /* check for a pause currently running */ if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when == 0)) { MLX_IO_UNLOCK(sc); return(EBUSY); } /* looks ok, go with it */ sc->mlx_pause.mp_which = mp->mp_which; sc->mlx_pause.mp_when = time_second + mp->mp_when; sc->mlx_pause.mp_howlong = sc->mlx_pause.mp_when + mp->mp_howlong; } MLX_IO_UNLOCK(sc); return(0); /* * Accept a command passthrough-style. 
*/ case MLX_COMMAND: return(mlx_user_command(sc, (struct mlx_usercommand *)addr)); /* * Start a rebuild on a given SCSI disk */ case MLX_REBUILDASYNC: MLX_IO_LOCK(sc); if (sc->mlx_background != 0) { MLX_IO_UNLOCK(sc); rb->rr_status = 0x0106; return(EBUSY); } rb->rr_status = mlx_rebuild(sc, rb->rr_channel, rb->rr_target); switch (rb->rr_status) { case 0: error = 0; break; case 0x10000: error = ENOMEM; /* couldn't set up the command */ break; case 0x0002: error = EBUSY; break; case 0x0104: error = EIO; break; case 0x0105: error = ERANGE; break; case 0x0106: error = EBUSY; break; default: error = EINVAL; break; } if (error == 0) sc->mlx_background = MLX_BACKGROUND_REBUILD; MLX_IO_UNLOCK(sc); return(error); /* * Get the status of the current rebuild or consistency check. */ case MLX_REBUILDSTAT: MLX_IO_LOCK(sc); *rs = sc->mlx_rebuildstat; MLX_IO_UNLOCK(sc); return(0); /* * Return the per-controller system drive number matching the * disk device number in (arg), if it happens to belong to us. */ case MLX_GET_SYSDRIVE: error = ENOENT; MLX_CONFIG_LOCK(sc); mtx_lock(&Giant); mlxd = (struct mlxd_softc *)devclass_get_softc(mlxd_devclass, *arg); mtx_unlock(&Giant); if ((mlxd != NULL) && (mlxd->mlxd_drive >= sc->mlx_sysdrive) && (mlxd->mlxd_drive < (sc->mlx_sysdrive + MLX_MAXDRIVES))) { error = 0; *arg = mlxd->mlxd_drive - sc->mlx_sysdrive; } MLX_CONFIG_UNLOCK(sc); return(error); default: return(ENOTTY); } } /******************************************************************************** * Handle operations requested by a System Drive connected to this controller. */ int mlx_submit_ioctl(struct mlx_softc *sc, struct mlx_sysdrive *drive, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { int *arg = (int *)addr; int error, result; switch(cmd) { /* * Return the current status of this drive. */ case MLXD_STATUS: MLX_IO_LOCK(sc); *arg = drive->ms_state; MLX_IO_UNLOCK(sc); return(0); /* * Start a background consistency check on this drive. */ case MLXD_CHECKASYNC: /* start a background consistency check */ MLX_IO_LOCK(sc); if (sc->mlx_background != 0) { MLX_IO_UNLOCK(sc); *arg = 0x0106; return(EBUSY); } result = mlx_check(sc, drive - &sc->mlx_sysdrive[0]); switch (result) { case 0: error = 0; break; case 0x10000: error = ENOMEM; /* couldn't set up the command */ break; case 0x0002: error = EIO; break; case 0x0105: error = ERANGE; break; case 0x0106: error = EBUSY; break; default: error = EINVAL; break; } if (error == 0) sc->mlx_background = MLX_BACKGROUND_CHECK; MLX_IO_UNLOCK(sc); *arg = result; return(error); } return(ENOIOCTL); } /******************************************************************************** ******************************************************************************** Status Monitoring ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Fire off commands to periodically check the status of connected drives. */ static void mlx_periodic(void *data) { struct mlx_softc *sc = (struct mlx_softc *)data; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* * Run a bus pause? */ if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when > 0) && (time_second >= sc->mlx_pause.mp_when)){ mlx_pause_action(sc); /* pause is running */ sc->mlx_pause.mp_when = 0; sysbeep(500, hz); /* * Bus pause still running? 
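     * (The pause state is encoded in sc->mlx_pause: mp_when != 0 means
     * a pause is scheduled for that absolute time; mp_when == 0 with
     * mp_which != 0 means the pause is in progress and ends at
     * mp_howlong, which was converted to an absolute time when the
     * request was accepted in mlx_ioctl().)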
*/ } else if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when == 0)) { /* time to stop bus pause? */ if (time_second >= sc->mlx_pause.mp_howlong) { mlx_pause_action(sc); sc->mlx_pause.mp_which = 0; /* pause is complete */ sysbeep(500, hz); } else { sysbeep((time_second % 5) * 100 + 500, hz/8); } /* * Run normal periodic activities? */ } else if (time_second > (sc->mlx_lastpoll + 10)) { sc->mlx_lastpoll = time_second; /* * Check controller status. * * XXX Note that this may not actually launch a command in situations of high load. */ mlx_enquire(sc, (sc->mlx_iftype == MLX_IFTYPE_2) ? MLX_CMD_ENQUIRY_OLD : MLX_CMD_ENQUIRY, imax(sizeof(struct mlx_enquiry), sizeof(struct mlx_enquiry_old)), mlx_periodic_enquiry); /* * Check system drive status. * * XXX This might be better left to event-driven detection, eg. I/O to an offline * drive will detect it's offline, rebuilds etc. should detect the drive is back * online. */ mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(struct mlx_enq_sys_drive) * MLX_MAXDRIVES, mlx_periodic_enquiry); } /* get drive rebuild/check status */ /* XXX should check sc->mlx_background if this is only valid while in progress */ mlx_enquire(sc, MLX_CMD_REBUILDSTAT, sizeof(struct mlx_rebuild_stat), mlx_periodic_rebuild); /* deal with possibly-missed interrupts and timed-out commands */ mlx_done(sc, 1); /* reschedule another poll next second or so */ callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc); } /******************************************************************************** * Handle the result of an ENQUIRY command instigated by periodic status polling. */ static void mlx_periodic_enquiry(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* Command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "periodic enquiry failed - %s\n", mlx_diagnose_command(mc)); goto out; } /* respond to command */ switch(mc->mc_mailbox[0]) { /* * This is currently a bit fruitless, as we don't know how to extract the eventlog * pointer yet. */ case MLX_CMD_ENQUIRY_OLD: { struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data; struct mlx_enquiry_old *meo = (struct mlx_enquiry_old *)mc->mc_data; int i; /* convert data in-place to new format */ for (i = (sizeof(me->me_dead) / sizeof(me->me_dead[0])) - 1; i >= 0; i--) { me->me_dead[i].dd_chan = meo->me_dead[i].dd_chan; me->me_dead[i].dd_targ = meo->me_dead[i].dd_targ; } me->me_misc_flags = 0; me->me_rebuild_count = meo->me_rebuild_count; me->me_dead_count = meo->me_dead_count; me->me_critical_sd_count = meo->me_critical_sd_count; me->me_event_log_seq_num = 0; me->me_offline_sd_count = meo->me_offline_sd_count; me->me_max_commands = meo->me_max_commands; me->me_rebuild_flag = meo->me_rebuild_flag; me->me_fwmajor = meo->me_fwmajor; me->me_fwminor = meo->me_fwminor; me->me_status_flags = meo->me_status_flags; me->me_flash_age = meo->me_flash_age; for (i = (sizeof(me->me_drvsize) / sizeof(me->me_drvsize[0])) - 1; i >= 0; i--) { if (i > ((sizeof(meo->me_drvsize) / sizeof(meo->me_drvsize[0])) - 1)) { me->me_drvsize[i] = 0; /* drive beyond supported range */ } else { me->me_drvsize[i] = meo->me_drvsize[i]; } } me->me_num_sys_drvs = meo->me_num_sys_drvs; } /* FALLTHROUGH */ /* * Generic controller status update. We could do more with this than just * checking the event log. 
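 *
 * A note on the in-place conversion above: the old- and new-format
 * views alias the same buffer, so the array copies run from the
 * highest index downwards, presumably so overlapping old-format
 * fields are read before the widened new-format layout overwrites
 * them.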
*/ case MLX_CMD_ENQUIRY: { struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data; if (sc->mlx_currevent == -1) { /* initialise our view of the event log */ sc->mlx_currevent = sc->mlx_lastevent = me->me_event_log_seq_num; } else if ((me->me_event_log_seq_num != sc->mlx_lastevent) && !(sc->mlx_flags & MLX_EVENTLOG_BUSY)) { /* record where current events are up to */ sc->mlx_currevent = me->me_event_log_seq_num; debug(1, "event log pointer was %d, now %d\n", sc->mlx_lastevent, sc->mlx_currevent); /* mark the event log as busy */ sc->mlx_flags |= MLX_EVENTLOG_BUSY; /* drain new eventlog entries */ mlx_periodic_eventlog_poll(sc); } break; } case MLX_CMD_ENQSYSDRIVE: { struct mlx_enq_sys_drive *mes = (struct mlx_enq_sys_drive *)mc->mc_data; struct mlx_sysdrive *dr; int i; for (i = 0, dr = &sc->mlx_sysdrive[0]; (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff); i++) { /* has state been changed by controller? */ if (dr->ms_state != mes[i].sd_state) { switch(mes[i].sd_state) { case MLX_SYSD_OFFLINE: device_printf(dr->ms_disk, "drive offline\n"); break; case MLX_SYSD_ONLINE: device_printf(dr->ms_disk, "drive online\n"); break; case MLX_SYSD_CRITICAL: device_printf(dr->ms_disk, "drive critical\n"); break; } /* save new state */ dr->ms_state = mes[i].sd_state; } } break; } default: device_printf(sc->mlx_dev, "%s: unknown command 0x%x", __func__, mc->mc_mailbox[0]); break; } out: free(mc->mc_data, M_DEVBUF); mlx_releasecmd(mc); } static void mlx_eventlog_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_command *mc; mc = (struct mlx_command *)arg; mlx_setup_dmamap(mc, segs, nsegments, error); /* build the command to get one entry */ mlx_make_type3(mc, MLX_CMD_LOGOP, MLX_LOGOP_GET, 1, mc->mc_sc->mlx_lastevent, 0, 0, mc->mc_dataphys, 0); mc->mc_complete = mlx_periodic_eventlog_respond; mc->mc_private = mc; /* start the command */ if (mlx_start(mc) != 0) { mlx_releasecmd(mc); free(mc->mc_data, M_DEVBUF); mc->mc_data = NULL; } } /******************************************************************************** * Instigate a poll for one event log message on (sc). * We only poll for one message at a time, to keep our command usage down. */ static void mlx_periodic_eventlog_poll(struct mlx_softc *sc) { struct mlx_command *mc; void *result = NULL; int error = 0; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 1; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* allocate the response structure */ if ((result = malloc(/*sizeof(struct mlx_eventlog_entry)*/1024, M_DEVBUF, M_NOWAIT)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* map the command so the controller can see it */ mc->mc_data = result; mc->mc_length = /*sizeof(struct mlx_eventlog_entry)*/1024; error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_eventlog_cb, mc, BUS_DMA_NOWAIT); out: if (error != 0) { if (mc != NULL) mlx_releasecmd(mc); if ((result != NULL) && (mc->mc_data != NULL)) free(result, M_DEVBUF); } } /******************************************************************************** * Handle the result of polling for a log message, generate diagnostic output. * If this wasn't the last message waiting for us, we'll go collect another. 
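 *
 * The drain protocol, for reference: mlx_lastevent chases
 * mlx_currevent one entry per command, MLX_EVENTLOG_BUSY keeps a
 * single drain in flight, and a read error resynchronises by jumping
 * mlx_lastevent straight to mlx_currevent so we stop chasing entries
 * we may never see.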
*/ static char *mlx_sense_messages[] = { "because write recovery failed", "because of SCSI bus reset failure", "because of double check condition", "because it was removed", "because of gross error on SCSI chip", "because of bad tag returned from drive", "because of timeout on SCSI command", "because of reset SCSI command issued from system", "because busy or parity error count exceeded limit", "because of 'kill drive' command from system", "because of selection timeout", "due to SCSI phase sequence error", "due to unknown status" }; static void mlx_periodic_eventlog_respond(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; struct mlx_eventlog_entry *el = (struct mlx_eventlog_entry *)mc->mc_data; char *reason; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); sc->mlx_lastevent++; /* next message... */ if (mc->mc_status == 0) { /* handle event log message */ switch(el->el_type) { /* * This is the only sort of message we understand at the moment. * The tests here are probably incomplete. */ case MLX_LOGMSG_SENSE: /* sense data */ /* Mylex vendor-specific message indicating a drive was killed? */ if ((el->el_sensekey == 9) && (el->el_asc == 0x80)) { if (el->el_asq < nitems(mlx_sense_messages)) { reason = mlx_sense_messages[el->el_asq]; } else { reason = "for unknown reason"; } device_printf(sc->mlx_dev, "physical drive %d:%d killed %s\n", el->el_channel, el->el_target, reason); } /* SCSI drive was reset? */ if ((el->el_sensekey == 6) && (el->el_asc == 0x29)) { device_printf(sc->mlx_dev, "physical drive %d:%d reset\n", el->el_channel, el->el_target); } /* SCSI drive error? */ if (!((el->el_sensekey == 0) || ((el->el_sensekey == 2) && (el->el_asc == 0x04) && ((el->el_asq == 0x01) || (el->el_asq == 0x02))))) { device_printf(sc->mlx_dev, "physical drive %d:%d error log: sense = %d asc = %x asq = %x\n", el->el_channel, el->el_target, el->el_sensekey, el->el_asc, el->el_asq); device_printf(sc->mlx_dev, " info %4D csi %4D\n", el->el_information, ":", el->el_csi, ":"); } break; default: device_printf(sc->mlx_dev, "unknown log message type 0x%x\n", el->el_type); break; } } else { device_printf(sc->mlx_dev, "error reading message log - %s\n", mlx_diagnose_command(mc)); /* give up on all the outstanding messages, as we may have come unsynched */ sc->mlx_lastevent = sc->mlx_currevent; } /* dispose of command and data */ free(mc->mc_data, M_DEVBUF); mlx_releasecmd(mc); /* is there another message to obtain? */ if (sc->mlx_lastevent != sc->mlx_currevent) { mlx_periodic_eventlog_poll(sc); } else { /* clear log-busy status */ sc->mlx_flags &= ~MLX_EVENTLOG_BUSY; } } /******************************************************************************** * Handle check/rebuild operations in progress. */ static void mlx_periodic_rebuild(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; struct mlx_rebuild_status *mr = (struct mlx_rebuild_status *)mc->mc_data; MLX_IO_ASSERT_LOCKED(sc); switch(mc->mc_status) { case 0: /* operation running, update stats */ sc->mlx_rebuildstat = *mr; /* spontaneous rebuild/check? */ if (sc->mlx_background == 0) { sc->mlx_background = MLX_BACKGROUND_SPONTANEOUS; device_printf(sc->mlx_dev, "background check/rebuild operation started\n"); } break; case 0x0105: /* nothing running, finalise stats and report */ switch(sc->mlx_background) { case MLX_BACKGROUND_CHECK: device_printf(sc->mlx_dev, "consistency check completed\n"); /* XXX print drive? */ break; case MLX_BACKGROUND_REBUILD: device_printf(sc->mlx_dev, "drive rebuild completed\n"); /* XXX print channel/target? 
*/ break; case MLX_BACKGROUND_SPONTANEOUS: default: /* if we have previously been non-idle, report the transition */ if (sc->mlx_rebuildstat.rs_code != MLX_REBUILDSTAT_IDLE) { device_printf(sc->mlx_dev, "background check/rebuild operation completed\n"); } } sc->mlx_background = 0; sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE; break; } free(mc->mc_data, M_DEVBUF); mlx_releasecmd(mc); } /******************************************************************************** ******************************************************************************** Channel Pause ******************************************************************************** ********************************************************************************/ /******************************************************************************** * It's time to perform a channel pause action for (sc), either start or stop * the pause. */ static void mlx_pause_action(struct mlx_softc *sc) { struct mlx_command *mc; int failsafe, i, command; MLX_IO_ASSERT_LOCKED(sc); /* What are we doing here? */ if (sc->mlx_pause.mp_when == 0) { command = MLX_CMD_STARTCHANNEL; failsafe = 0; } else { command = MLX_CMD_STOPCHANNEL; /* * Channels will always start again after the failsafe period, * which is specified in multiples of 30 seconds. * This constrains us to a maximum pause of 450 seconds. */ failsafe = ((sc->mlx_pause.mp_howlong - time_second) + 5) / 30; if (failsafe > 0xf) { failsafe = 0xf; sc->mlx_pause.mp_howlong = time_second + (0xf * 30) - 5; } } /* build commands for every channel requested */ for (i = 0; i < sc->mlx_enq2->me_actual_channels; i++) { if ((1 << i) & sc->mlx_pause.mp_which) { /* get ourselves a command buffer */ if ((mc = mlx_alloccmd(sc)) == NULL) goto fail; /* get a command slot */ mc->mc_flags |= MLX_CMD_PRIORITY; if (mlx_getslot(mc)) goto fail; /* build the command */ mlx_make_type2(mc, command, (failsafe << 4) | i, 0, 0, 0, 0, 0, 0, 0); mc->mc_complete = mlx_pause_done; mc->mc_private = sc; /* XXX not needed */ if (mlx_start(mc)) goto fail; /* command submitted OK */ return; fail: device_printf(sc->mlx_dev, "%s failed for channel %d\n", command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", i); if (mc != NULL) mlx_releasecmd(mc); } } } static void mlx_pause_done(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int command = mc->mc_mailbox[0]; int channel = mc->mc_mailbox[2] & 0xf; MLX_IO_ASSERT_LOCKED(sc); if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "%s command failed - %s\n", command == MLX_CMD_STOPCHANNEL ? 
"pause" : "resume", mlx_diagnose_command(mc)); } else if (command == MLX_CMD_STOPCHANNEL) { device_printf(sc->mlx_dev, "channel %d pausing for %ld seconds\n", channel, (long)(sc->mlx_pause.mp_howlong - time_second)); } else { device_printf(sc->mlx_dev, "channel %d resuming\n", channel); } mlx_releasecmd(mc); } /******************************************************************************** ******************************************************************************** Command Submission ******************************************************************************** ********************************************************************************/ static void mlx_enquire_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_softc *sc; struct mlx_command *mc; mc = (struct mlx_command *)arg; if (error) return; mlx_setup_dmamap(mc, segs, nsegments, error); /* build an enquiry command */ sc = mc->mc_sc; mlx_make_type2(mc, mc->mc_command, 0, 0, 0, 0, 0, 0, mc->mc_dataphys, 0); /* do we want a completion callback? */ if (mc->mc_complete != NULL) { if ((error = mlx_start(mc)) != 0) return; } else { /* run the command in either polled or wait mode */ if ((sc->mlx_state & MLX_STATE_INTEN) ? mlx_wait_command(mc) : mlx_poll_command(mc)) return; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "ENQUIRY failed - %s\n", mlx_diagnose_command(mc)); return; } } } /******************************************************************************** * Perform an Enquiry command using a type-3 command buffer and a return a single * linear result buffer. If the completion function is specified, it will * be called with the completed command (and the result response will not be * valid until that point). Otherwise, the command will either be busy-waited * for (interrupts not enabled), or slept for. */ static void * mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize, void (* complete)(struct mlx_command *mc)) { struct mlx_command *mc; void *result; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 1; result = NULL; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* allocate the response structure */ if ((result = malloc(bufsize, M_DEVBUF, M_NOWAIT)) == NULL) goto out; /* get a command slot */ mc->mc_flags |= MLX_CMD_PRIORITY | MLX_CMD_DATAOUT; if (mlx_getslot(mc)) goto out; /* map the command so the controller can see it */ mc->mc_data = result; mc->mc_length = bufsize; mc->mc_command = command; if (complete != NULL) { mc->mc_complete = complete; mc->mc_private = mc; } error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_enquire_cb, mc, BUS_DMA_NOWAIT); out: /* we got a command, but nobody else will free it */ if ((mc != NULL) && (mc->mc_complete == NULL)) mlx_releasecmd(mc); /* we got an error, and we allocated a result */ if ((error != 0) && (result != NULL)) { free(result, M_DEVBUF); result = NULL; } return(result); } /******************************************************************************** * Perform a Flush command on the nominated controller. * * May be called with interrupts enabled or disabled; will not return until * the flush operation completes or fails. 
*/ static int mlx_flush(struct mlx_softc *sc) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 1; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* build a flush command */ mlx_make_type2(mc, MLX_CMD_FLUSH, 0, 0, 0, 0, 0, 0, 0, 0); /* can't assume that interrupts are going to work here, so play it safe */ if (mlx_poll_command(mc)) goto out; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "FLUSH failed - %s\n", mlx_diagnose_command(mc)); goto out; } error = 0; /* success */ out: if (mc != NULL) mlx_releasecmd(mc); return(error); } /******************************************************************************** * Start a background consistency check on (drive). * * May be called with interrupts enabled or disabled; will return as soon as the * operation has started or been refused. */ static int mlx_check(struct mlx_softc *sc, int drive) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 0x10000; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* build a checkasync command, set the "fix it" flag */ mlx_make_type2(mc, MLX_CMD_CHECKASYNC, 0, 0, 0, 0, 0, drive | 0x80, 0, 0); /* start the command and wait for it to be returned */ if (mlx_wait_command(mc)) goto out; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "CHECK ASYNC failed - %s\n", mlx_diagnose_command(mc)); } else { device_printf(sc->mlx_sysdrive[drive].ms_disk, "consistency check started"); } error = mc->mc_status; out: if (mc != NULL) mlx_releasecmd(mc); return(error); } /******************************************************************************** * Start a background rebuild of the physical drive at (channel),(target). * * May be called with interrupts enabled or disabled; will return as soon as the * operation has started or been refused. */ static int mlx_rebuild(struct mlx_softc *sc, int channel, int target) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 0x10000; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* build a checkasync command, set the "fix it" flag */ mlx_make_type2(mc, MLX_CMD_REBUILDASYNC, channel, target, 0, 0, 0, 0, 0, 0); /* start the command and wait for it to be returned */ if (mlx_wait_command(mc)) goto out; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "REBUILD ASYNC failed - %s\n", mlx_diagnose_command(mc)); } else { device_printf(sc->mlx_dev, "drive rebuild started for %d:%d\n", channel, target); } error = mc->mc_status; out: if (mc != NULL) mlx_releasecmd(mc); return(error); } /******************************************************************************** * Run the command (mc) and return when it completes. * * Interrupts need to be enabled; returns nonzero on error. */ static int mlx_wait_command(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int error, count; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); mc->mc_complete = NULL; mc->mc_private = mc; /* wake us when you're done */ if ((error = mlx_start(mc)) != 0) return(error); count = 0; /* XXX better timeout? 
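 *
 * Note that `count' is initialised but never advanced in the loop
 * below, so the (count < 30) bound is currently inert and the return
 * value of the PCATCH'd mtx_sleep() is ignored: the loop sleeps in
 * one-second slices until the command leaves MLX_STATUS_BUSY.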
*/ while ((mc->mc_status == MLX_STATUS_BUSY) && (count < 30)) { mtx_sleep(mc->mc_private, &sc->mlx_io_lock, PRIBIO | PCATCH, "mlxwcmd", hz); } if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc)); return(EIO); } return(0); } /******************************************************************************** * Start the command (mc) and busy-wait for it to complete. * * Should only be used when interrupts can't be relied upon. Returns 0 on * success, nonzero on error. * Successfully completed commands are dequeued. */ static int mlx_poll_command(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int error, count; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); mc->mc_complete = NULL; mc->mc_private = NULL; /* we will poll for it */ if ((error = mlx_start(mc)) != 0) return(error); count = 0; do { /* poll for completion */ mlx_done(mc->mc_sc, 1); } while ((mc->mc_status == MLX_STATUS_BUSY) && (count++ < 15000000)); if (mc->mc_status != MLX_STATUS_BUSY) { TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); return(0); } device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc)); return(EIO); } void mlx_startio_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_command *mc; struct mlxd_softc *mlxd; struct mlx_softc *sc; struct bio *bp; int blkcount; int driveno; int cmd; mc = (struct mlx_command *)arg; mlx_setup_dmamap(mc, segs, nsegments, error); sc = mc->mc_sc; bp = mc->mc_private; if (bp->bio_cmd == BIO_READ) { mc->mc_flags |= MLX_CMD_DATAIN; cmd = MLX_CMD_READSG; } else { mc->mc_flags |= MLX_CMD_DATAOUT; cmd = MLX_CMD_WRITESG; } /* build a suitable I/O command (assumes 512-byte rounded transfers) */ mlxd = bp->bio_disk->d_drv1; driveno = mlxd->mlxd_drive - sc->mlx_sysdrive; - blkcount = (bp->bio_bcount + MLX_BLKSIZE - 1) / MLX_BLKSIZE; + blkcount = howmany(bp->bio_bcount, MLX_BLKSIZE); if ((bp->bio_pblkno + blkcount) > sc->mlx_sysdrive[driveno].ms_size) device_printf(sc->mlx_dev, "I/O beyond end of unit (%lld,%d > %lu)\n", (long long)bp->bio_pblkno, blkcount, (u_long)sc->mlx_sysdrive[driveno].ms_size); /* * Build the I/O command. Note that the SG list type bits are set to zero, * denoting the format of SG list that we are using. */ if (sc->mlx_iftype == MLX_IFTYPE_2) { mlx_make_type1(mc, (cmd == MLX_CMD_WRITESG) ? MLX_CMD_WRITESG_OLD : MLX_CMD_READSG_OLD, blkcount & 0xff, /* xfer length low byte */ bp->bio_pblkno, /* physical block number */ driveno, /* target drive number */ mc->mc_sgphys, /* location of SG list */ mc->mc_nsgent & 0x3f); /* size of SG list */ } else { mlx_make_type5(mc, cmd, blkcount & 0xff, /* xfer length low byte */ (driveno << 3) | ((blkcount >> 8) & 0x07), /* target+length high 3 bits */ bp->bio_pblkno, /* physical block number */ mc->mc_sgphys, /* location of SG list */ mc->mc_nsgent & 0x3f); /* size of SG list */ } /* try to give command to controller */ if (mlx_start(mc) != 0) { /* fail the command */ mc->mc_status = MLX_STATUS_WEDGED; mlx_completeio(mc); } sc->mlx_state &= ~MLX_STATE_QFROZEN; } /******************************************************************************** * Pull as much work off the softc's work queue as possible and give it to the * controller. Leave a couple of slots free for emergencies. 
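 *
 * On the howmany() substitution in mlx_startio_cb above: howmany(x, y)
 * from <sys/param.h> is the standard round-up division macro,
 * (((x) + ((y) - 1)) / (y)), so it computes exactly the same block
 * count as the open-coded form it replaces; the DIV_ROUND_UP change in
 * dwmmc below is the same substitution.  With MLX_BLKSIZE at its usual
 * 512 bytes:
 *
 *	howmany(1536, 512) == 3		(exact multiple)
 *	howmany(1537, 512) == 4		(partial block rounds up)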
*/ static void mlx_startio(struct mlx_softc *sc) { struct mlx_command *mc; struct bio *bp; int error; MLX_IO_ASSERT_LOCKED(sc); /* spin until something prevents us from doing any work */ for (;;) { if (sc->mlx_state & MLX_STATE_QFROZEN) break; /* see if there's work to be done */ if ((bp = bioq_first(&sc->mlx_bioq)) == NULL) break; /* get a command */ if ((mc = mlx_alloccmd(sc)) == NULL) break; /* get a slot for the command */ if (mlx_getslot(mc) != 0) { mlx_releasecmd(mc); break; } /* get the buf containing our work */ bioq_remove(&sc->mlx_bioq, bp); sc->mlx_waitbufs--; /* connect the buf to the command */ mc->mc_complete = mlx_completeio; mc->mc_private = bp; mc->mc_data = bp->bio_data; mc->mc_length = bp->bio_bcount; /* map the command so the controller can work with it */ error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_startio_cb, mc, 0); if (error == EINPROGRESS) { sc->mlx_state |= MLX_STATE_QFROZEN; break; } } } /******************************************************************************** * Handle completion of an I/O command. */ static void mlx_completeio(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; struct bio *bp = mc->mc_private; struct mlxd_softc *mlxd = bp->bio_disk->d_drv1; MLX_IO_ASSERT_LOCKED(sc); if (mc->mc_status != MLX_STATUS_OK) { /* could be more verbose here? */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; switch(mc->mc_status) { case MLX_STATUS_RDWROFFLINE: /* system drive has gone offline */ device_printf(mlxd->mlxd_dev, "drive offline\n"); /* should signal this with a return code */ mlxd->mlxd_drive->ms_state = MLX_SYSD_OFFLINE; break; default: /* other I/O error */ device_printf(sc->mlx_dev, "I/O error - %s\n", mlx_diagnose_command(mc)); #if 0 device_printf(sc->mlx_dev, " b_bcount %ld blkcount %ld b_pblkno %d\n", bp->bio_bcount, bp->bio_bcount / MLX_BLKSIZE, bp->bio_pblkno); device_printf(sc->mlx_dev, " %13D\n", mc->mc_mailbox, " "); #endif break; } } mlx_releasecmd(mc); mlxd_intr(bp); } void mlx_user_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_usercommand *mu; struct mlx_command *mc; struct mlx_dcdb *dcdb; mc = (struct mlx_command *)arg; if (error) return; mlx_setup_dmamap(mc, segs, nsegments, error); mu = (struct mlx_usercommand *)mc->mc_private; dcdb = NULL; /* * If this is a passthrough SCSI command, the DCDB is packed at the * beginning of the data area. Fix up the DCDB to point to the correct * physical address and override any bufptr supplied by the caller since * we know what it's meant to be. */ if (mc->mc_mailbox[0] == MLX_CMD_DIRECT_CDB) { dcdb = (struct mlx_dcdb *)mc->mc_data; dcdb->dcdb_physaddr = mc->mc_dataphys + sizeof(*dcdb); mu->mu_bufptr = 8; } /* * If there's a data buffer, fix up the command's buffer pointer. */ if (mu->mu_datasize > 0) { mc->mc_mailbox[mu->mu_bufptr ] = mc->mc_dataphys & 0xff; mc->mc_mailbox[mu->mu_bufptr + 1] = (mc->mc_dataphys >> 8) & 0xff; mc->mc_mailbox[mu->mu_bufptr + 2] = (mc->mc_dataphys >> 16) & 0xff; mc->mc_mailbox[mu->mu_bufptr + 3] = (mc->mc_dataphys >> 24) & 0xff; } debug(0, "command fixup"); /* submit the command and wait */ if (mlx_wait_command(mc) != 0) return; } /******************************************************************************** * Take a command from user-space and try to run it. * * XXX Note that this can't perform very much in the way of error checking, and * as such, applications _must_ be considered trustworthy. * XXX Commands using S/G for data are not supported. 
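 *
 * Data flow, for reference: the caller's buffer is bounced through a
 * kernel allocation (copyin before the command runs, copyout after),
 * and mlx_user_cb patches the buffer's bus address into the mailbox
 * at the caller-supplied mu_bufptr offset once the DMA load completes.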
*/ static int mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu) { struct mlx_command *mc; void *kbuf; int error; debug_called(0); kbuf = NULL; mc = NULL; error = ENOMEM; /* get ourselves a command and copy in from user space */ MLX_IO_LOCK(sc); if ((mc = mlx_alloccmd(sc)) == NULL) { MLX_IO_UNLOCK(sc); return(error); } bcopy(mu->mu_command, mc->mc_mailbox, sizeof(mc->mc_mailbox)); debug(0, "got command buffer"); /* * if we need a buffer for data transfer, allocate one and copy in its * initial contents */ if (mu->mu_datasize > 0) { if (mu->mu_datasize > MLX_MAXPHYS) { error = EINVAL; goto out; } MLX_IO_UNLOCK(sc); if (((kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK)) == NULL) || (error = copyin(mu->mu_buf, kbuf, mu->mu_datasize))) { MLX_IO_LOCK(sc); goto out; } MLX_IO_LOCK(sc); debug(0, "got kernel buffer"); } /* get a command slot */ if (mlx_getslot(mc)) goto out; debug(0, "got a slot"); if (mu->mu_datasize > 0) { /* range check the pointer to physical buffer address */ if ((mu->mu_bufptr < 0) || (mu->mu_bufptr > (sizeof(mu->mu_command) - sizeof(u_int32_t)))) { error = EINVAL; goto out; } } /* map the command so the controller can see it */ mc->mc_data = kbuf; mc->mc_length = mu->mu_datasize; mc->mc_private = mu; error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_user_cb, mc, BUS_DMA_NOWAIT); if (error) goto out; /* copy out status and data */ mu->mu_status = mc->mc_status; if (mu->mu_datasize > 0) { MLX_IO_UNLOCK(sc); error = copyout(kbuf, mu->mu_buf, mu->mu_datasize); MLX_IO_LOCK(sc); } out: mlx_releasecmd(mc); MLX_IO_UNLOCK(sc); if (kbuf != NULL) free(kbuf, M_DEVBUF); return(error); } /******************************************************************************** ******************************************************************************** Command I/O to Controller ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Find a free command slot for (mc). * * Don't hand out a slot to a normal-priority command unless there are at least * 4 slots free for priority commands. */ static int mlx_getslot(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int slot, limit; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* * Enforce slot-usage limit, if we have the required information. */ if (sc->mlx_enq2 != NULL) { limit = sc->mlx_enq2->me_max_commands; } else { limit = 2; } if (sc->mlx_busycmds >= ((mc->mc_flags & MLX_CMD_PRIORITY) ? limit : limit - 4)) return(EBUSY); /* * Allocate an outstanding command slot * * XXX linear search is slow */ for (slot = 0; slot < limit; slot++) { debug(2, "try slot %d", slot); if (sc->mlx_busycmd[slot] == NULL) break; } if (slot < limit) { sc->mlx_busycmd[slot] = mc; sc->mlx_busycmds++; } /* out of slots? */ if (slot >= limit) return(EBUSY); debug(2, "got slot %d", slot); mc->mc_slot = slot; return(0); } /******************************************************************************** * Map/unmap (mc)'s data in the controller's addressable space. 
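 *
 * Layout reminder: the scatter/gather table is carved up per command
 * slot, so slot N owns MLX_NSEG entries starting at
 * mlx_sgtable + (N * MLX_NSEG), and the matching bus address,
 * mlx_sgbusaddr + (N * MLX_NSEG * sizeof(struct mlx_sgentry)), is
 * what gets handed to the controller in mc_sgphys.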
*/ static void mlx_setup_dmamap(struct mlx_command *mc, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_softc *sc = mc->mc_sc; struct mlx_sgentry *sg; int i; debug_called(1); /* XXX should be unnecessary */ if (sc->mlx_enq2 && (nsegments > sc->mlx_enq2->me_max_sg)) panic("MLX: too many s/g segments (%d, max %d)", nsegments, sc->mlx_enq2->me_max_sg); /* get base address of s/g table */ sg = sc->mlx_sgtable + (mc->mc_slot * MLX_NSEG); /* save s/g table information in command */ mc->mc_nsgent = nsegments; mc->mc_sgphys = sc->mlx_sgbusaddr + (mc->mc_slot * MLX_NSEG * sizeof(struct mlx_sgentry)); mc->mc_dataphys = segs[0].ds_addr; /* populate s/g table */ for (i = 0; i < nsegments; i++, sg++) { sg->sg_addr = segs[i].ds_addr; sg->sg_count = segs[i].ds_len; } /* Make sure the buffers are visible on the bus. */ if (mc->mc_flags & MLX_CMD_DATAIN) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_PREREAD); if (mc->mc_flags & MLX_CMD_DATAOUT) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_PREWRITE); } static void mlx_unmapcmd(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; debug_called(1); /* if the command involved data at all */ if (mc->mc_data != NULL) { if (mc->mc_flags & MLX_CMD_DATAIN) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTREAD); if (mc->mc_flags & MLX_CMD_DATAOUT) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mlx_buffer_dmat, mc->mc_dmamap); } } /******************************************************************************** * Try to deliver (mc) to the controller. * * Can be called at any interrupt level, with or without interrupts enabled. */ static int mlx_start(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int i; debug_called(1); /* save the slot number as ident so we can handle this command when complete */ mc->mc_mailbox[0x1] = mc->mc_slot; /* mark the command as currently being processed */ mc->mc_status = MLX_STATUS_BUSY; /* set a default 60-second timeout XXX tunable? XXX not currently used */ mc->mc_timeout = time_second + 60; /* spin waiting for the mailbox */ for (i = 100000; i > 0; i--) { if (sc->mlx_tryqueue(sc, mc)) { /* move command to work queue */ TAILQ_INSERT_TAIL(&sc->mlx_work, mc, mc_link); return (0); } else if (i > 1) mlx_done(sc, 0); } /* * We couldn't get the controller to take the command. Revoke the slot * that the command was given and return it with a bad status. */ sc->mlx_busycmd[mc->mc_slot] = NULL; device_printf(sc->mlx_dev, "controller wedged (not taking commands)\n"); mc->mc_status = MLX_STATUS_WEDGED; mlx_complete(sc); return(EIO); } /******************************************************************************** * Poll the controller (sc) for completed commands. * Update command status and free slots for reuse. If any slots were freed, * new commands may be posted. * * Returns nonzero if one or more commands were completed. 
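 *
 * Completion is two-phase: this routine only harvests (slot, status)
 * pairs via mlx_findcomplete and releases the slots; the per-command
 * work (unmapping, completion handlers, sleeper wakeups) is done by
 * mlx_complete(), which scans the work queue afterwards.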
*/ static int mlx_done(struct mlx_softc *sc, int startio) { struct mlx_command *mc; int result; u_int8_t slot; u_int16_t status; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); result = 0; /* loop collecting completed commands */ for (;;) { /* poll for a completed command's identifier and status */ if (sc->mlx_findcomplete(sc, &slot, &status)) { result = 1; mc = sc->mlx_busycmd[slot]; /* find command */ if (mc != NULL) { /* paranoia */ if (mc->mc_status == MLX_STATUS_BUSY) { mc->mc_status = status; /* save status */ /* free slot for reuse */ sc->mlx_busycmd[slot] = NULL; sc->mlx_busycmds--; } else { device_printf(sc->mlx_dev, "duplicate done event for slot %d\n", slot); } } else { device_printf(sc->mlx_dev, "done event for nonbusy slot %d\n", slot); } } else { break; } } /* if we've completed any commands, try posting some more */ if (result && startio) mlx_startio(sc); /* handle completion and timeouts */ mlx_complete(sc); return(result); } /******************************************************************************** * Perform post-completion processing for commands on (sc). */ static void mlx_complete(struct mlx_softc *sc) { struct mlx_command *mc, *nc; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* scan the list of busy/done commands */ mc = TAILQ_FIRST(&sc->mlx_work); while (mc != NULL) { nc = TAILQ_NEXT(mc, mc_link); /* Command has been completed in some fashion */ if (mc->mc_status != MLX_STATUS_BUSY) { /* unmap the command's data buffer */ mlx_unmapcmd(mc); /* * Does the command have a completion handler? */ if (mc->mc_complete != NULL) { /* remove from list and give to handler */ TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); mc->mc_complete(mc); /* * Is there a sleeper waiting on this command? */ } else if (mc->mc_private != NULL) { /* sleeping caller wants to know about it */ /* remove from list and wake up sleeper */ TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); wakeup_one(mc->mc_private); /* * Leave the command for a caller that's polling for it. */ } else { } } mc = nc; } } /******************************************************************************** ******************************************************************************** Command Buffer Management ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Get a new command buffer. * * This may return NULL in low-memory cases. * * Note that using malloc() is expensive (the command buffer is << 1 page) but * necessary if we are to be a loadable module before the zone allocator is fixed. * * If possible, we recycle a command buffer that's been used before. * * XXX Note that command buffers are not cleaned out - it is the caller's * responsibility to ensure that all required fields are filled in before * using a buffer. */ static struct mlx_command * mlx_alloccmd(struct mlx_softc *sc) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); if ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link); /* allocate a new command buffer? 
*/ if (mc == NULL) { mc = (struct mlx_command *)malloc(sizeof(*mc), M_DEVBUF, M_NOWAIT | M_ZERO); if (mc != NULL) { mc->mc_sc = sc; error = bus_dmamap_create(sc->mlx_buffer_dmat, 0, &mc->mc_dmamap); if (error) { free(mc, M_DEVBUF); return(NULL); } } } return(mc); } /******************************************************************************** * Release a command buffer for recycling. * * XXX It might be a good idea to limit the number of commands we save for reuse * if it's shown that this list bloats out massively. */ static void mlx_releasecmd(struct mlx_command *mc) { debug_called(1); MLX_IO_ASSERT_LOCKED(mc->mc_sc); TAILQ_INSERT_HEAD(&mc->mc_sc->mlx_freecmds, mc, mc_link); } /******************************************************************************** * Permanently discard a command buffer. */ static void mlx_freecmd(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; debug_called(1); bus_dmamap_destroy(sc->mlx_buffer_dmat, mc->mc_dmamap); free(mc, M_DEVBUF); } /******************************************************************************** ******************************************************************************** Type 3 interface accessor methods ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure * (the controller is not ready to take a command). */ static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) { int i; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* ready for our command? */ if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_FULL)) { /* copy mailbox data to window */ for (i = 0; i < 13; i++) MLX_V3_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); /* post command */ MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_FULL); return(1); } return(0); } /******************************************************************************** * See if a command has been completed, if so acknowledge its completion * and recover the slot number and status code. */ static int mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status) { debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* status available? */ if (MLX_V3_GET_ODBR(sc) & MLX_V3_ODB_SAVAIL) { *slot = MLX_V3_GET_STATUS_IDENT(sc); /* get command identifier */ *status = MLX_V3_GET_STATUS(sc); /* get status */ /* acknowledge completion */ MLX_V3_PUT_ODBR(sc, MLX_V3_ODB_SAVAIL); MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK); return(1); } return(0); } /******************************************************************************** * Enable/disable interrupts as requested. (No acknowledge required) */ static void mlx_v3_intaction(struct mlx_softc *sc, int action) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); switch(action) { case MLX_INTACTION_DISABLE: MLX_V3_PUT_IER(sc, 0); sc->mlx_state &= ~MLX_STATE_INTEN; break; case MLX_INTACTION_ENABLE: MLX_V3_PUT_IER(sc, 1); sc->mlx_state |= MLX_STATE_INTEN; break; } } /******************************************************************************** * Poll for firmware error codes during controller initialisation. * Returns 0 if initialisation is complete, 1 if still in progress but no * error has been fetched, 2 if an error has been retrieved. 
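 *
 * A minimal caller sketch (an assumption for illustration; the
 * attach-time loop is not part of this hunk): prime with first=1,
 * then poll until 0, feeding any fetched error to mlx_fw_message(),
 * whose nonzero return means the error is fatal:
 *
 *	int err, p1, p2, rv, first = 1;
 *	while ((rv = mlx_v3_fw_handshake(sc, &err, &p1, &p2, first)) != 0) {
 *		first = 0;
 *		if (rv == 2 && mlx_fw_message(sc, err, p1, p2))
 *			return (ENXIO);		(fatal: give up)
 *		DELAY(1000);
 *	}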
*/ static int mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first) { u_int8_t fwerror; debug_called(2); /* first time around, clear any hardware completion status */ if (first) { MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK); DELAY(1000); } /* init in progress? */ if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_INIT_BUSY)) return(0); /* test error value */ fwerror = MLX_V3_GET_FWERROR(sc); if (!(fwerror & MLX_V3_FWERROR_PEND)) return(1); /* mask status pending bit, fetch status */ *error = fwerror & ~MLX_V3_FWERROR_PEND; *param1 = MLX_V3_GET_FWERROR_PARAM1(sc); *param2 = MLX_V3_GET_FWERROR_PARAM2(sc); /* acknowledge */ MLX_V3_PUT_FWERROR(sc, 0); return(2); } /******************************************************************************** ******************************************************************************** Type 4 interface accessor methods ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure * (the controller is not ready to take a command). */ static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) { int i; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* ready for our command? */ if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_FULL)) { /* copy mailbox data to window */ for (i = 0; i < 13; i++) MLX_V4_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); /* memory-mapped controller, so issue a write barrier to ensure the mailbox is filled */ bus_barrier(sc->mlx_mem, MLX_V4_MAILBOX, MLX_V4_MAILBOX_LENGTH, BUS_SPACE_BARRIER_WRITE); /* post command */ MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_HWMBOX_CMD); return(1); } return(0); } /******************************************************************************** * See if a command has been completed, if so acknowledge its completion * and recover the slot number and status code. */ static int mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status) { debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* status available? */ if (MLX_V4_GET_ODBR(sc) & MLX_V4_ODB_HWSAVAIL) { *slot = MLX_V4_GET_STATUS_IDENT(sc); /* get command identifier */ *status = MLX_V4_GET_STATUS(sc); /* get status */ /* acknowledge completion */ MLX_V4_PUT_ODBR(sc, MLX_V4_ODB_HWMBOX_ACK); MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK); return(1); } return(0); } /******************************************************************************** * Enable/disable interrupts as requested. */ static void mlx_v4_intaction(struct mlx_softc *sc, int action) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); switch(action) { case MLX_INTACTION_DISABLE: MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK | MLX_V4_IER_DISINT); sc->mlx_state &= ~MLX_STATE_INTEN; break; case MLX_INTACTION_ENABLE: MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK & ~MLX_V4_IER_DISINT); sc->mlx_state |= MLX_STATE_INTEN; break; } } /******************************************************************************** * Poll for firmware error codes during controller initialisation. * Returns 0 if initialisation is complete, 1 if still in progress but no * error has been fetched, 2 if an error has been retrieved. 
*/ static int mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first) { u_int8_t fwerror; debug_called(2); /* first time around, clear any hardware completion status */ if (first) { MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK); DELAY(1000); } /* init in progress? */ if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_INIT_BUSY)) return(0); /* test error value */ fwerror = MLX_V4_GET_FWERROR(sc); if (!(fwerror & MLX_V4_FWERROR_PEND)) return(1); /* mask status pending bit, fetch status */ *error = fwerror & ~MLX_V4_FWERROR_PEND; *param1 = MLX_V4_GET_FWERROR_PARAM1(sc); *param2 = MLX_V4_GET_FWERROR_PARAM2(sc); /* acknowledge */ MLX_V4_PUT_FWERROR(sc, 0); return(2); } /******************************************************************************** ******************************************************************************** Type 5 interface accessor methods ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure * (the controller is not ready to take a command). */ static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) { int i; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* ready for our command? */ if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_EMPTY) { /* copy mailbox data to window */ for (i = 0; i < 13; i++) MLX_V5_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); /* post command */ MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_HWMBOX_CMD); return(1); } return(0); } /******************************************************************************** * See if a command has been completed, if so acknowledge its completion * and recover the slot number and status code. */ static int mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status) { debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* status available? */ if (MLX_V5_GET_ODBR(sc) & MLX_V5_ODB_HWSAVAIL) { *slot = MLX_V5_GET_STATUS_IDENT(sc); /* get command identifier */ *status = MLX_V5_GET_STATUS(sc); /* get status */ /* acknowledge completion */ MLX_V5_PUT_ODBR(sc, MLX_V5_ODB_HWMBOX_ACK); MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK); return(1); } return(0); } /******************************************************************************** * Enable/disable interrupts as requested. */ static void mlx_v5_intaction(struct mlx_softc *sc, int action) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); switch(action) { case MLX_INTACTION_DISABLE: MLX_V5_PUT_IER(sc, 0xff & MLX_V5_IER_DISINT); sc->mlx_state &= ~MLX_STATE_INTEN; break; case MLX_INTACTION_ENABLE: MLX_V5_PUT_IER(sc, 0xff & ~MLX_V5_IER_DISINT); sc->mlx_state |= MLX_STATE_INTEN; break; } } /******************************************************************************** * Poll for firmware error codes during controller initialisation. * Returns 0 if initialisation is complete, 1 if still in progress but no * error has been fetched, 2 if an error has been retrieved. */ static int mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first) { u_int8_t fwerror; debug_called(2); /* first time around, clear any hardware completion status */ if (first) { MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK); DELAY(1000); } /* init in progress? 
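 *
 * Note the inverted sense relative to the v3/v4 handshakes: those test
 * an INIT_BUSY bit and return 0 once it clears, whereas the v5
 * interface sets MLX_V5_IDB_INIT_DONE when initialisation completes
 * (and its acknowledge below writes 0xff where v3/v4 write 0).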
*/ if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_INIT_DONE) return(0); /* test for error value */ fwerror = MLX_V5_GET_FWERROR(sc); if (!(fwerror & MLX_V5_FWERROR_PEND)) return(1); /* mask status pending bit, fetch status */ *error = fwerror & ~MLX_V5_FWERROR_PEND; *param1 = MLX_V5_GET_FWERROR_PARAM1(sc); *param2 = MLX_V5_GET_FWERROR_PARAM2(sc); /* acknowledge */ MLX_V5_PUT_FWERROR(sc, 0xff); return(2); } /******************************************************************************** ******************************************************************************** Debugging ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Return a status message describing (mc) */ static char *mlx_status_messages[] = { "normal completion", /* 00 */ "irrecoverable data error", /* 01 */ "drive does not exist, or is offline", /* 02 */ "attempt to write beyond end of drive", /* 03 */ "bad data encountered", /* 04 */ "invalid log entry request", /* 05 */ "attempt to rebuild online drive", /* 06 */ "new disk failed during rebuild", /* 07 */ "invalid channel/target", /* 08 */ "rebuild/check already in progress", /* 09 */ "one or more disks are dead", /* 10 */ "invalid or non-redundant drive", /* 11 */ "channel is busy", /* 12 */ "channel is not stopped", /* 13 */ "rebuild successfully terminated", /* 14 */ "unsupported command", /* 15 */ "check condition received", /* 16 */ "device is busy", /* 17 */ "selection or command timeout", /* 18 */ "command terminated abnormally", /* 19 */ "" }; static struct { int command; u_int16_t status; int msg; } mlx_messages[] = { {MLX_CMD_READSG, 0x0001, 1}, {MLX_CMD_READSG, 0x0002, 1}, {MLX_CMD_READSG, 0x0105, 3}, {MLX_CMD_READSG, 0x010c, 4}, {MLX_CMD_WRITESG, 0x0001, 1}, {MLX_CMD_WRITESG, 0x0002, 1}, {MLX_CMD_WRITESG, 0x0105, 3}, {MLX_CMD_READSG_OLD, 0x0001, 1}, {MLX_CMD_READSG_OLD, 0x0002, 1}, {MLX_CMD_READSG_OLD, 0x0105, 3}, {MLX_CMD_WRITESG_OLD, 0x0001, 1}, {MLX_CMD_WRITESG_OLD, 0x0002, 1}, {MLX_CMD_WRITESG_OLD, 0x0105, 3}, {MLX_CMD_LOGOP, 0x0105, 5}, {MLX_CMD_REBUILDASYNC, 0x0002, 6}, {MLX_CMD_REBUILDASYNC, 0x0004, 7}, {MLX_CMD_REBUILDASYNC, 0x0105, 8}, {MLX_CMD_REBUILDASYNC, 0x0106, 9}, {MLX_CMD_REBUILDASYNC, 0x0107, 14}, {MLX_CMD_CHECKASYNC, 0x0002, 10}, {MLX_CMD_CHECKASYNC, 0x0105, 11}, {MLX_CMD_CHECKASYNC, 0x0106, 9}, {MLX_CMD_STOPCHANNEL, 0x0106, 12}, {MLX_CMD_STOPCHANNEL, 0x0105, 8}, {MLX_CMD_STARTCHANNEL, 0x0005, 13}, {MLX_CMD_STARTCHANNEL, 0x0105, 8}, {MLX_CMD_DIRECT_CDB, 0x0002, 16}, {MLX_CMD_DIRECT_CDB, 0x0008, 17}, {MLX_CMD_DIRECT_CDB, 0x000e, 18}, {MLX_CMD_DIRECT_CDB, 0x000f, 19}, {MLX_CMD_DIRECT_CDB, 0x0105, 8}, {0, 0x0104, 14}, {-1, 0, 0} }; static char * mlx_diagnose_command(struct mlx_command *mc) { static char unkmsg[80]; int i; /* look up message in table */ for (i = 0; mlx_messages[i].command != -1; i++) if (((mc->mc_mailbox[0] == mlx_messages[i].command) || (mlx_messages[i].command == 0)) && (mc->mc_status == mlx_messages[i].status)) return(mlx_status_messages[mlx_messages[i].msg]); sprintf(unkmsg, "unknown response 0x%x for command 0x%x", (int)mc->mc_status, (int)mc->mc_mailbox[0]); return(unkmsg); } /******************************************************************************* * Print a string describing the controller (sc) */ static struct { int hwid; char *name; } mlx_controller_names[] = { {0x01, "960P/PD"}, {0x02, "960PL"}, {0x10, "960PG"}, {0x11, "960PJ"}, 
{0x12, "960PR"}, {0x13, "960PT"}, {0x14, "960PTL0"}, {0x15, "960PRL"}, {0x16, "960PTL1"}, {0x20, "1164PVX"}, {-1, NULL} }; static void mlx_describe_controller(struct mlx_softc *sc) { static char buf[80]; char *model; int i; for (i = 0, model = NULL; mlx_controller_names[i].name != NULL; i++) { if ((sc->mlx_enq2->me_hardware_id & 0xff) == mlx_controller_names[i].hwid) { model = mlx_controller_names[i].name; break; } } if (model == NULL) { sprintf(buf, " model 0x%x", sc->mlx_enq2->me_hardware_id & 0xff); model = buf; } device_printf(sc->mlx_dev, "DAC%s, %d channel%s, firmware %d.%02d-%c-%02d, %dMB RAM\n", model, sc->mlx_enq2->me_actual_channels, sc->mlx_enq2->me_actual_channels > 1 ? "s" : "", sc->mlx_enq2->me_firmware_id & 0xff, (sc->mlx_enq2->me_firmware_id >> 8) & 0xff, (sc->mlx_enq2->me_firmware_id >> 24) & 0xff, (sc->mlx_enq2->me_firmware_id >> 16) & 0xff, sc->mlx_enq2->me_mem_size / (1024 * 1024)); if (bootverbose) { device_printf(sc->mlx_dev, " Hardware ID 0x%08x\n", sc->mlx_enq2->me_hardware_id); device_printf(sc->mlx_dev, " Firmware ID 0x%08x\n", sc->mlx_enq2->me_firmware_id); device_printf(sc->mlx_dev, " Configured/Actual channels %d/%d\n", sc->mlx_enq2->me_configured_channels, sc->mlx_enq2->me_actual_channels); device_printf(sc->mlx_dev, " Max Targets %d\n", sc->mlx_enq2->me_max_targets); device_printf(sc->mlx_dev, " Max Tags %d\n", sc->mlx_enq2->me_max_tags); device_printf(sc->mlx_dev, " Max System Drives %d\n", sc->mlx_enq2->me_max_sys_drives); device_printf(sc->mlx_dev, " Max Arms %d\n", sc->mlx_enq2->me_max_arms); device_printf(sc->mlx_dev, " Max Spans %d\n", sc->mlx_enq2->me_max_spans); device_printf(sc->mlx_dev, " DRAM/cache/flash/NVRAM size %d/%d/%d/%d\n", sc->mlx_enq2->me_mem_size, sc->mlx_enq2->me_cache_size, sc->mlx_enq2->me_flash_size, sc->mlx_enq2->me_nvram_size); device_printf(sc->mlx_dev, " DRAM type %d\n", sc->mlx_enq2->me_mem_type); device_printf(sc->mlx_dev, " Clock Speed %dns\n", sc->mlx_enq2->me_clock_speed); device_printf(sc->mlx_dev, " Hardware Speed %dns\n", sc->mlx_enq2->me_hardware_speed); device_printf(sc->mlx_dev, " Max Commands %d\n", sc->mlx_enq2->me_max_commands); device_printf(sc->mlx_dev, " Max SG Entries %d\n", sc->mlx_enq2->me_max_sg); device_printf(sc->mlx_dev, " Max DP %d\n", sc->mlx_enq2->me_max_dp); device_printf(sc->mlx_dev, " Max IOD %d\n", sc->mlx_enq2->me_max_iod); device_printf(sc->mlx_dev, " Max Comb %d\n", sc->mlx_enq2->me_max_comb); device_printf(sc->mlx_dev, " Latency %ds\n", sc->mlx_enq2->me_latency); device_printf(sc->mlx_dev, " SCSI Timeout %ds\n", sc->mlx_enq2->me_scsi_timeout); device_printf(sc->mlx_dev, " Min Free Lines %d\n", sc->mlx_enq2->me_min_freelines); device_printf(sc->mlx_dev, " Rate Constant %d\n", sc->mlx_enq2->me_rate_const); device_printf(sc->mlx_dev, " MAXBLK %d\n", sc->mlx_enq2->me_maxblk); device_printf(sc->mlx_dev, " Blocking Factor %d sectors\n", sc->mlx_enq2->me_blocking_factor); device_printf(sc->mlx_dev, " Cache Line Size %d blocks\n", sc->mlx_enq2->me_cacheline); device_printf(sc->mlx_dev, " SCSI Capability %s%dMHz, %d bit\n", sc->mlx_enq2->me_scsi_cap & (1<<4) ? 
"differential " : "", (1 << ((sc->mlx_enq2->me_scsi_cap >> 2) & 3)) * 10, 8 << (sc->mlx_enq2->me_scsi_cap & 0x3)); device_printf(sc->mlx_dev, " Firmware Build Number %d\n", sc->mlx_enq2->me_firmware_build); device_printf(sc->mlx_dev, " Fault Management Type %d\n", sc->mlx_enq2->me_fault_mgmt_type); device_printf(sc->mlx_dev, " Features %b\n", sc->mlx_enq2->me_firmware_features, "\20\4Background Init\3Read Ahead\2MORE\1Cluster\n"); } } /******************************************************************************* * Emit a string describing the firmware handshake status code, and return a flag * indicating whether the code represents a fatal error. * * Error code interpretations are from the Linux driver, and don't directly match * the messages printed by Mylex's BIOS. This may change if documentation on the * codes is forthcoming. */ static int mlx_fw_message(struct mlx_softc *sc, int error, int param1, int param2) { switch(error) { case 0x00: device_printf(sc->mlx_dev, "physical drive %d:%d not responding\n", param2, param1); break; case 0x08: /* we could be neater about this and give some indication when we receive more of them */ if (!(sc->mlx_flags & MLX_SPINUP_REPORTED)) { device_printf(sc->mlx_dev, "spinning up drives...\n"); sc->mlx_flags |= MLX_SPINUP_REPORTED; } break; case 0x30: device_printf(sc->mlx_dev, "configuration checksum error\n"); break; case 0x60: device_printf(sc->mlx_dev, "mirror race recovery failed\n"); break; case 0x70: device_printf(sc->mlx_dev, "mirror race recovery in progress\n"); break; case 0x90: device_printf(sc->mlx_dev, "physical drive %d:%d COD mismatch\n", param2, param1); break; case 0xa0: device_printf(sc->mlx_dev, "logical drive installation aborted\n"); break; case 0xb0: device_printf(sc->mlx_dev, "mirror race on a critical system drive\n"); break; case 0xd0: device_printf(sc->mlx_dev, "new controller configuration found\n"); break; case 0xf0: device_printf(sc->mlx_dev, "FATAL MEMORY PARITY ERROR\n"); return(1); default: device_printf(sc->mlx_dev, "unknown firmware initialisation error %02x:%02x:%02x\n", error, param1, param2); break; } return(0); } /******************************************************************************** ******************************************************************************** Utility Functions ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Find the disk whose unit number is (unit) on this controller */ static struct mlx_sysdrive * mlx_findunit(struct mlx_softc *sc, int unit) { int i; /* search system drives */ MLX_CONFIG_ASSERT_LOCKED(sc); for (i = 0; i < MLX_MAXDRIVES; i++) { /* is this one attached? */ if (sc->mlx_sysdrive[i].ms_disk != 0) { /* is this the one? */ if (unit == device_get_unit(sc->mlx_sysdrive[i].ms_disk)) return(&sc->mlx_sysdrive[i]); } } return(NULL); } diff --git a/sys/dev/mmc/host/dwmmc.c b/sys/dev/mmc/host/dwmmc.c index f55d3e6075d0..f96eff6c6eaa 100644 --- a/sys/dev/mmc/host/dwmmc.c +++ b/sys/dev/mmc/host/dwmmc.c @@ -1,1181 +1,1181 @@ /*- * Copyright (c) 2014 Ruslan Bukin * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Synopsys DesignWare Mobile Storage Host Controller * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22) */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mmcbr_if.h" #define dprintf(x, arg...) #define READ4(_sc, _reg) \ bus_read_4((_sc)->res[0], _reg) #define WRITE4(_sc, _reg, _val) \ bus_write_4((_sc)->res[0], _reg, _val) -#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define DIV_ROUND_UP(n, d) howmany(n, d) #define DWMMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define DWMMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define DWMMC_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ "dwmmc", MTX_DEF) #define DWMMC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define DWMMC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define DWMMC_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); #define PENDING_CMD 0x01 #define PENDING_STOP 0x02 #define CARD_INIT_DONE 0x04 #define DWMMC_DATA_ERR_FLAGS (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \ |SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \ |SDMMC_INTMASK_EBE) #define DWMMC_CMD_ERR_FLAGS (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \ |SDMMC_INTMASK_RE) #define DWMMC_ERR_FLAGS (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \ |SDMMC_INTMASK_HLE) #define DES0_DIC (1 << 1) #define DES0_LD (1 << 2) #define DES0_FS (1 << 3) #define DES0_CH (1 << 4) #define DES0_ER (1 << 5) #define DES0_CES (1 << 30) #define DES0_OWN (1 << 31) #define DES1_BS1_MASK 0xfff #define DES1_BS1_SHIFT 0 struct idmac_desc { uint32_t des0; /* control */ uint32_t des1; /* bufsize */ uint32_t des2; /* buf1 phys addr */ uint32_t des3; /* buf2 phys addr or next descr */ }; #define DESC_MAX 256 #define DESC_SIZE (sizeof(struct idmac_desc) * DESC_MAX) #define DEF_MSIZE 0x2 /* Burst size of multiple transaction */ static void dwmmc_next_operation(struct dwmmc_softc *); static int dwmmc_setup_bus(struct dwmmc_softc *, int); static int dma_done(struct dwmmc_softc *, struct mmc_command *); static int dma_stop(struct dwmmc_softc *); static void pio_read(struct dwmmc_softc *, struct mmc_command *); static void pio_write(struct dwmmc_softc *, 
struct mmc_command *); static struct resource_spec dwmmc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; #define HWTYPE_MASK (0x0000ffff) #define HWFLAG_MASK (0xffff << 16) static struct ofw_compat_data compat_data[] = { {"altr,socfpga-dw-mshc", HWTYPE_ALTERA}, {"samsung,exynos5420-dw-mshc", HWTYPE_EXYNOS}, {"rockchip,rk2928-dw-mshc", HWTYPE_ROCKCHIP}, {NULL, HWTYPE_NONE}, }; static void dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs[0].ds_addr; } static void dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct dwmmc_softc *sc; int idx; if (error != 0) return; sc = arg; dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len); for (idx = 0; idx < nsegs; idx++) { sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH); sc->desc_ring[idx].des1 = segs[idx].ds_len; sc->desc_ring[idx].des2 = segs[idx].ds_addr; if (idx == 0) sc->desc_ring[idx].des0 |= DES0_FS; if (idx == (nsegs - 1)) { sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH); sc->desc_ring[idx].des0 |= DES0_LD; } } } static int dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits) { int reg; int i; reg = READ4(sc, SDMMC_CTRL); reg |= (reset_bits); WRITE4(sc, SDMMC_CTRL, reg); /* Wait reset done */ for (i = 0; i < 100; i++) { if (!(READ4(sc, SDMMC_CTRL) & reset_bits)) return (0); DELAY(10); } device_printf(sc->dev, "Reset failed\n"); return (1); } static int dma_setup(struct dwmmc_softc *sc) { int error; int nidx; int idx; /* * Set up TX descriptor ring, descriptors, and dma maps. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. */ 4096, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ DESC_SIZE, 1, /* maxsize, nsegments */ DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->desc_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->desc_map); if (error != 0) { device_printf(sc->dev, "could not allocate descriptor ring.\n"); return (1); } error = bus_dmamap_load(sc->desc_tag, sc->desc_map, sc->desc_ring, DESC_SIZE, dwmmc_get1paddr, &sc->desc_ring_paddr, 0); if (error != 0) { device_printf(sc->dev, "could not load descriptor ring map.\n"); return (1); } for (idx = 0; idx < sc->desc_count; idx++) { sc->desc_ring[idx].des0 = DES0_CH; sc->desc_ring[idx].des1 = 0; nidx = (idx + 1) % sc->desc_count; sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \ (nidx * sizeof(struct idmac_desc)); } error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* Parent tag. 
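*/
	/*
	 * Note on the tag below: it describes the data buffer itself, with
	 * maxsize = desc_count sectors, nsegments = desc_count and
	 * maxsegsize = MMC_SECTOR_SIZE, so a single bus_dmamap_load() can
	 * never produce more S/G segments than the IDMAC descriptor chain
	 * built by dwmmc_ring_setup() can describe.
	 */
	/*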
*/ 4096, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */ sc->desc_count, /* nsegments */ MMC_SECTOR_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->buf_tag); if (error != 0) { device_printf(sc->dev, "could not create ring DMA tag.\n"); return (1); } error = bus_dmamap_create(sc->buf_tag, 0, &sc->buf_map); if (error != 0) { device_printf(sc->dev, "could not create TX buffer DMA map.\n"); return (1); } return (0); } static void dwmmc_cmd_done(struct dwmmc_softc *sc) { struct mmc_command *cmd; cmd = sc->curcmd; if (cmd == NULL) return; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { cmd->resp[3] = READ4(sc, SDMMC_RESP0); cmd->resp[2] = READ4(sc, SDMMC_RESP1); cmd->resp[1] = READ4(sc, SDMMC_RESP2); cmd->resp[0] = READ4(sc, SDMMC_RESP3); } else { cmd->resp[3] = 0; cmd->resp[2] = 0; cmd->resp[1] = 0; cmd->resp[0] = READ4(sc, SDMMC_RESP0); } } } static void dwmmc_tasklet(struct dwmmc_softc *sc) { struct mmc_command *cmd; cmd = sc->curcmd; if (cmd == NULL) return; if (!sc->cmd_done) return; if (cmd->error != MMC_ERR_NONE || !cmd->data) { dwmmc_next_operation(sc); } else if (cmd->data && sc->dto_rcvd) { if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || cmd->opcode == MMC_READ_MULTIPLE_BLOCK) && sc->use_auto_stop) { if (sc->acd_rcvd) dwmmc_next_operation(sc); } else { dwmmc_next_operation(sc); } } } static void dwmmc_intr(void *arg) { struct mmc_command *cmd; struct dwmmc_softc *sc; uint32_t reg; sc = arg; DWMMC_LOCK(sc); cmd = sc->curcmd; /* First handle SDMMC controller interrupts */ reg = READ4(sc, SDMMC_MINTSTS); if (reg) { dprintf("%s 0x%08x\n", __func__, reg); if (reg & DWMMC_CMD_ERR_FLAGS) { WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS); dprintf("cmd err 0x%08x cmd 0x%08x\n", reg, cmd->opcode); cmd->error = MMC_ERR_TIMEOUT; } if (reg & DWMMC_DATA_ERR_FLAGS) { WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS); dprintf("data err 0x%08x cmd 0x%08x\n", reg, cmd->opcode); cmd->error = MMC_ERR_FAILED; if (!sc->use_pio) { dma_done(sc, cmd); dma_stop(sc); } } if (reg & SDMMC_INTMASK_CMD_DONE) { dwmmc_cmd_done(sc); sc->cmd_done = 1; WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE); } if (reg & SDMMC_INTMASK_ACD) { sc->acd_rcvd = 1; WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD); } if (reg & SDMMC_INTMASK_DTO) { sc->dto_rcvd = 1; WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO); } if (reg & SDMMC_INTMASK_CD) { /* XXX: Handle card detect */ WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD); } } if (sc->use_pio) { if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) { pio_read(sc, cmd); } if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) { pio_write(sc, cmd); } } else { /* Now handle DMA interrupts */ reg = READ4(sc, SDMMC_IDSTS); if (reg) { dprintf("dma intr 0x%08x\n", reg); if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) { WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)); WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI); dma_done(sc, cmd); } } } dwmmc_tasklet(sc); DWMMC_UNLOCK(sc); } static int parse_fdt(struct dwmmc_softc *sc) { pcell_t dts_value[3]; phandle_t node; int len; if ((node = ofw_bus_get_node(sc->dev)) == -1) return (ENXIO); /* fifo-depth */ if ((len = OF_getproplen(node, "fifo-depth")) > 0) { OF_getencprop(node, "fifo-depth", dts_value, len); sc->fifo_depth = dts_value[0]; } /* num-slots */ sc->num_slots = 1; if ((len = OF_getproplen(node, "num-slots")) > 0) { OF_getencprop(node, "num-slots", dts_value, len); 
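		/*
		 * FDT cells are stored big-endian; OF_getencprop() (rather
		 * than OF_getprop()) is used so dts_value[] arrives already
		 * converted to host byte order.
		 */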
sc->num_slots = dts_value[0]; } /* * We need some platform-specific code to know * what clock is supplied to our device. * For now rely on the value specified in FDT. */ if (sc->bus_hz == 0) { if ((len = OF_getproplen(node, "bus-frequency")) <= 0) return (ENXIO); OF_getencprop(node, "bus-frequency", dts_value, len); sc->bus_hz = dts_value[0]; } /* * Platform-specific stuff * XXX: Move to separate file */ if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS) return (0); if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0) return (ENXIO); OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len); sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT); sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT); if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0) return (ENXIO); OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len); sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) | (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT)); if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0) return (ENXIO); OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len); sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) | (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT)); return (0); } static int dwmmc_probe(device_t dev) { uintptr_t hwtype; if (!ofw_bus_status_okay(dev)) return (ENXIO); hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (hwtype == HWTYPE_NONE) return (ENXIO); device_set_desc(dev, "Synopsys DesignWare Mobile " "Storage Host Controller"); return (BUS_PROBE_DEFAULT); } int dwmmc_attach(device_t dev) { struct dwmmc_softc *sc; int error; int slot; sc = device_get_softc(dev); sc->dev = dev; if (sc->hwtype == HWTYPE_NONE) { sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data; } /* Why not use Auto Stop? It saves hundreds of irqs per second. */ sc->use_auto_stop = 1; error = parse_fdt(sc); if (error != 0) { device_printf(dev, "Can't get FDT property.\n"); return (ENXIO); } DWMMC_LOCK_INIT(sc); if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Set up the interrupt handler.
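*/
	/*
	 * The NULL filter argument below registers dwmmc_intr() as a
	 * threaded handler, and INTR_MPSAFE tells the kernel it needs no
	 * Giant protection (the handler takes the driver mutex itself).
	 */
	/*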
*/ error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, dwmmc_intr, sc, &sc->intr_cookie); if (error != 0) { device_printf(dev, "could not set up interrupt handler.\n"); return (ENXIO); } device_printf(dev, "Hardware version ID is %04x\n", READ4(sc, SDMMC_VERID) & 0xffff); if (sc->desc_count == 0) sc->desc_count = DESC_MAX; if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) { sc->use_pio = 1; sc->pwren_inverted = 1; } else if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) { WRITE4(sc, EMMCP_MPSBEGIN0, 0); WRITE4(sc, EMMCP_SEND0, 0); WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT | MPSCTRL_SECURE_WRITE_BIT | MPSCTRL_NON_SECURE_READ_BIT | MPSCTRL_NON_SECURE_WRITE_BIT | MPSCTRL_VALID)); } /* XXX: we support operation for slot index 0 only */ slot = 0; if (sc->pwren_inverted) { WRITE4(sc, SDMMC_PWREN, (0 << slot)); } else { WRITE4(sc, SDMMC_PWREN, (1 << slot)); } /* Reset all */ if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET))) return (ENXIO); dwmmc_setup_bus(sc, sc->host.f_min); if (sc->fifo_depth == 0) { sc->fifo_depth = 1 + ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff); device_printf(dev, "No fifo-depth, using FIFOTH %x\n", sc->fifo_depth); } if (!sc->use_pio) { if (dma_setup(sc)) return (ENXIO); /* Install desc base */ WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr); /* Enable DMA interrupts */ WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK); WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI | SDMMC_IDINTEN_RI | SDMMC_IDINTEN_TI)); } /* Clear and disable interrupts for a while */ WRITE4(sc, SDMMC_RINTSTS, 0xffffffff); WRITE4(sc, SDMMC_INTMASK, 0); /* Maximum timeout */ WRITE4(sc, SDMMC_TMOUT, 0xffffffff); /* Enable interrupts */ WRITE4(sc, SDMMC_RINTSTS, 0xffffffff); WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE | SDMMC_INTMASK_DTO | SDMMC_INTMASK_ACD | SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR | DWMMC_ERR_FLAGS | SDMMC_INTMASK_CD)); WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE); sc->host.f_min = 400000; sc->host.f_max = min(200000000, sc->bus_hz); sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; sc->host.caps = MMC_CAP_4_BIT_DATA; device_add_child(dev, "mmc", -1); return (bus_generic_attach(dev)); } static int dwmmc_setup_bus(struct dwmmc_softc *sc, int freq) { int tout; int div; if (freq == 0) { WRITE4(sc, SDMMC_CLKENA, 0); WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START)); tout = 1000; do { if (tout-- < 0) { device_printf(sc->dev, "Failed to update clk\n"); return (1); } } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); return (0); } WRITE4(sc, SDMMC_CLKENA, 0); WRITE4(sc, SDMMC_CLKSRC, 0); div = (sc->bus_hz != freq) ?
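	    /*
	     * SDMMC_CLKDIV divides the controller clock by 2 * n (0 means
	     * bypass), hence the round-up by howmany() keeps the card clock
	     * at or below the requested rate: e.g. with bus_hz = 50 MHz and
	     * freq = 400 kHz, howmany(50000000, 800000) = 63, giving
	     * 50 MHz / 126 ~= 397 kHz.
	     */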
DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0; WRITE4(sc, SDMMC_CLKDIV, div); WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START)); tout = 1000; do { if (tout-- < 0) { device_printf(sc->dev, "Failed to update clk"); return (1); } } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP)); WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START); tout = 1000; do { if (tout-- < 0) { device_printf(sc->dev, "Failed to enable clk\n"); return (1); } } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); return (0); } static int dwmmc_update_ios(device_t brdev, device_t reqdev) { struct dwmmc_softc *sc; struct mmc_ios *ios; sc = device_get_softc(brdev); ios = &sc->host.ios; dprintf("Setting up clk %u bus_width %d\n", ios->clock, ios->bus_width); dwmmc_setup_bus(sc, ios->clock); if (ios->bus_width == bus_width_8) WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT); else if (ios->bus_width == bus_width_4) WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT); else WRITE4(sc, SDMMC_CTYPE, 0); if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) { /* XXX: take care about DDR or SDR use here */ WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing); } /* * XXX: take care about DDR bit * * reg = READ4(sc, SDMMC_UHS_REG); * reg |= (SDMMC_UHS_REG_DDR); * WRITE4(sc, SDMMC_UHS_REG, reg); */ return (0); } static int dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; data = cmd->data; if (data->flags & MMC_DATA_WRITE) bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_POSTWRITE); else bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->buf_tag, sc->buf_map); return (0); } static int dma_stop(struct dwmmc_softc *sc) { int reg; reg = READ4(sc, SDMMC_CTRL); reg &= ~(SDMMC_CTRL_USE_IDMAC); reg |= (SDMMC_CTRL_DMA_RESET); WRITE4(sc, SDMMC_CTRL, reg); reg = READ4(sc, SDMMC_BMOD); reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB); reg |= (SDMMC_BMOD_SWR); WRITE4(sc, SDMMC_BMOD, reg); return (0); } static int dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; int len; int err; int reg; data = cmd->data; len = data->len; reg = READ4(sc, SDMMC_INTMASK); reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR); WRITE4(sc, SDMMC_INTMASK, reg); err = bus_dmamap_load(sc->buf_tag, sc->buf_map, data->data, data->len, dwmmc_ring_setup, sc, BUS_DMA_NOWAIT); if (err != 0) panic("dmamap_load failed\n"); if (data->flags & MMC_DATA_WRITE) bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_PREWRITE); else bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_PREREAD); reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S); reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S; reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S; WRITE4(sc, SDMMC_FIFOTH, reg); wmb(); reg = READ4(sc, SDMMC_CTRL); reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE); WRITE4(sc, SDMMC_CTRL, reg); wmb(); reg = READ4(sc, SDMMC_BMOD); reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB); WRITE4(sc, SDMMC_BMOD, reg); /* Start */ WRITE4(sc, SDMMC_PLDMND, 1); return (0); } static int pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; int reg; data = cmd->data; data->xfer_len = 0; reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S); reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S; reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S; WRITE4(sc, SDMMC_FIFOTH, reg); wmb(); return (0); } static void pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data 
*data; uint32_t *p, status; if (cmd == NULL || cmd->data == NULL) return; data = cmd->data; if ((data->flags & MMC_DATA_READ) == 0) return; KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned")); p = (uint32_t *)data->data + (data->xfer_len >> 2); while (data->xfer_len < data->len) { status = READ4(sc, SDMMC_STATUS); if (status & SDMMC_STATUS_FIFO_EMPTY) break; *p++ = READ4(sc, SDMMC_DATA); data->xfer_len += 4; } WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR); } static void pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; uint32_t *p, status; if (cmd == NULL || cmd->data == NULL) return; data = cmd->data; if ((data->flags & MMC_DATA_WRITE) == 0) return; KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned")); p = (uint32_t *)data->data + (data->xfer_len >> 2); while (data->xfer_len < data->len) { status = READ4(sc, SDMMC_STATUS); if (status & SDMMC_STATUS_FIFO_FULL) break; WRITE4(sc, SDMMC_DATA, *p++); data->xfer_len += 4; } WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR); } static void dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd) { struct mmc_data *data; uint32_t blksz; uint32_t cmdr; sc->curcmd = cmd; data = cmd->data; if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) dwmmc_setup_bus(sc, sc->host.ios.clock); /* XXX Upper layers don't always set this */ cmd->mrq = sc->req; /* Begin setting up command register. */ cmdr = cmd->opcode; dprintf("cmd->opcode 0x%08x\n", cmd->opcode); if (cmd->opcode == MMC_STOP_TRANSMISSION || cmd->opcode == MMC_GO_IDLE_STATE || cmd->opcode == MMC_GO_INACTIVE_STATE) cmdr |= SDMMC_CMD_STOP_ABORT; else if (cmd->opcode != MMC_SEND_STATUS && data) cmdr |= SDMMC_CMD_WAIT_PRVDATA; /* Set up response handling. */ if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) { cmdr |= SDMMC_CMD_RESP_EXP; if (cmd->flags & MMC_RSP_136) cmdr |= SDMMC_CMD_RESP_LONG; } if (cmd->flags & MMC_RSP_CRC) cmdr |= SDMMC_CMD_RESP_CRC; /* * XXX: Not all platforms want this. */ cmdr |= SDMMC_CMD_USE_HOLD_REG; if ((sc->flags & CARD_INIT_DONE) == 0) { sc->flags |= (CARD_INIT_DONE); cmdr |= SDMMC_CMD_SEND_INIT; } if (data) { if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || cmd->opcode == MMC_READ_MULTIPLE_BLOCK) && sc->use_auto_stop) cmdr |= SDMMC_CMD_SEND_ASTOP; cmdr |= SDMMC_CMD_DATA_EXP; if (data->flags & MMC_DATA_STREAM) cmdr |= SDMMC_CMD_MODE_STREAM; if (data->flags & MMC_DATA_WRITE) cmdr |= SDMMC_CMD_DATA_WRITE; WRITE4(sc, SDMMC_TMOUT, 0xffffffff); WRITE4(sc, SDMMC_BYTCNT, data->len); blksz = (data->len < MMC_SECTOR_SIZE) ? \ data->len : MMC_SECTOR_SIZE; WRITE4(sc, SDMMC_BLKSIZ, blksz); if (sc->use_pio) { pio_prepare(sc, cmd); } else { dma_prepare(sc, cmd); } wmb(); } dprintf("cmdr 0x%08x\n", cmdr); WRITE4(sc, SDMMC_CMDARG, cmd->arg); wmb(); WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START); }; static void dwmmc_next_operation(struct dwmmc_softc *sc) { struct mmc_request *req; req = sc->req; if (req == NULL) return; sc->acd_rcvd = 0; sc->dto_rcvd = 0; sc->cmd_done = 0; /* * XXX: Wait until card is still busy. * We do need this to prevent data timeouts, * mostly caused by multi-block write command * followed by single-read. 
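*/
	/*
	 * The wait below spins unbounded on the card-busy bit. A bounded
	 * variant (sketch only, not what this driver does) would look like:
	 *
	 *	int tout = 1000000;
	 *	while (READ4(sc, SDMMC_STATUS) & SDMMC_STATUS_DATA_BUSY)
	 *		if (tout-- < 0)
	 *			break;
	 */
	/*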
*/ while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY)) continue; if (sc->flags & PENDING_CMD) { sc->flags &= ~PENDING_CMD; dwmmc_start_cmd(sc, req->cmd); return; } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) { sc->flags &= ~PENDING_STOP; dwmmc_start_cmd(sc, req->stop); return; } sc->req = NULL; sc->curcmd = NULL; req->done(req); } static int dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req) { struct dwmmc_softc *sc; sc = device_get_softc(brdev); dprintf("%s\n", __func__); DWMMC_LOCK(sc); if (sc->req != NULL) { DWMMC_UNLOCK(sc); return (EBUSY); } sc->req = req; sc->flags |= PENDING_CMD; if (sc->req->stop) sc->flags |= PENDING_STOP; dwmmc_next_operation(sc); DWMMC_UNLOCK(sc); return (0); } static int dwmmc_get_ro(device_t brdev, device_t reqdev) { dprintf("%s\n", __func__); return (0); } static int dwmmc_acquire_host(device_t brdev, device_t reqdev) { struct dwmmc_softc *sc; sc = device_get_softc(brdev); DWMMC_LOCK(sc); while (sc->bus_busy) msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5); sc->bus_busy++; DWMMC_UNLOCK(sc); return (0); } static int dwmmc_release_host(device_t brdev, device_t reqdev) { struct dwmmc_softc *sc; sc = device_get_softc(brdev); DWMMC_LOCK(sc); sc->bus_busy--; wakeup(sc); DWMMC_UNLOCK(sc); return (0); } static int dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct dwmmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->host.ios.vdd; break; case MMCBR_IVAR_CAPS: sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; *(int *)result = sc->host.caps; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = sc->desc_count; } return (0); } static int dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct dwmmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->host.mode = value; break; case MMCBR_IVAR_OCR: sc->host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->host.ios.vdd = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static device_method_t dwmmc_methods[] = { DEVMETHOD(device_probe, dwmmc_probe), DEVMETHOD(device_attach, dwmmc_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, dwmmc_read_ivar), DEVMETHOD(bus_write_ivar, dwmmc_write_ivar), /* mmcbr_if */ DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios), 
DEVMETHOD(mmcbr_request, dwmmc_request), DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro), DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host), DEVMETHOD(mmcbr_release_host, dwmmc_release_host), DEVMETHOD_END }; driver_t dwmmc_driver = { "dwmmc", dwmmc_methods, sizeof(struct dwmmc_softc), }; static devclass_t dwmmc_devclass; DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, 0, 0); DRIVER_MODULE(dwmmc, ofwbus, dwmmc_driver, dwmmc_devclass, 0, 0); DRIVER_MODULE(mmc, dwmmc, mmc_driver, mmc_devclass, NULL, NULL); MODULE_DEPEND(dwmmc, mmc, 1, 1, 1); diff --git a/sys/dev/ncr/ncr.c b/sys/dev/ncr/ncr.c index 7a1c1d2bd2b8..8f02ef02a865 100644 --- a/sys/dev/ncr/ncr.c +++ b/sys/dev/ncr/ncr.c @@ -1,7119 +1,7119 @@ /************************************************************************** ** ** ** Device driver for the NCR 53C8XX PCI-SCSI-Controller Family. ** **------------------------------------------------------------------------- ** ** Written for 386bsd and FreeBSD by ** Wolfgang Stanglmeier ** Stefan Esser ** **------------------------------------------------------------------------- */ /*- ** Copyright (c) 1994 Wolfgang Stanglmeier. All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** *************************************************************************** */ #include __FBSDID("$FreeBSD$"); #define NCR_GETCC_WITHMSG #if defined (__FreeBSD__) && defined(_KERNEL) #include "opt_ncr.h" #endif /*========================================================== ** ** Configuration and Debugging ** ** May be overwritten in ** **========================================================== */ /* ** SCSI address of this device. ** The boot routines should have set it. ** If not, use this. */ #ifndef SCSI_NCR_MYADDR #define SCSI_NCR_MYADDR (7) #endif /* SCSI_NCR_MYADDR */ /* ** The default synchronous period factor ** (0=asynchronous) ** If maximum synchronous frequency is defined, use it instead. 
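** (The factor encodes the transfer period in 4 ns units under this
** formula, so a maximum sync frequency given in kHz maps to
** 250000 / freq: e.g. 20000 kHz -> 250000 / 20000 = 12, a 48 ns period.)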
*/ #ifndef SCSI_NCR_MAX_SYNC #ifndef SCSI_NCR_DFLT_SYNC #define SCSI_NCR_DFLT_SYNC (12) #endif /* SCSI_NCR_DFLT_SYNC */ #else #if SCSI_NCR_MAX_SYNC == 0 #define SCSI_NCR_DFLT_SYNC 0 #else #define SCSI_NCR_DFLT_SYNC (250000 / SCSI_NCR_MAX_SYNC) #endif #endif /* ** The minimal asynchronous pre-scaler period (ns) ** Shall be 40. */ #ifndef SCSI_NCR_MIN_ASYNC #define SCSI_NCR_MIN_ASYNC (40) #endif /* SCSI_NCR_MIN_ASYNC */ /* ** The maximal bus width (in log2 bytes) ** (0=8 bit, 1=16 bit) */ #ifndef SCSI_NCR_MAX_WIDE #define SCSI_NCR_MAX_WIDE (1) #endif /* SCSI_NCR_MAX_WIDE */ /*========================================================== ** ** Configuration and Debugging ** **========================================================== */ /* ** Number of targets supported by the driver. ** n permits target numbers 0..n-1. ** The default of 16 permits targets #0..#15. ** #7 (SCSI_NCR_MYADDR) is myself. */ #define MAX_TARGET (16) /* ** Number of logical units supported by the driver. ** n enables logical unit numbers 0..n-1. ** The common SCSI devices require only ** a few luns, so take 8 as the default. */ #ifndef MAX_LUN #define MAX_LUN (8) #endif /* MAX_LUN */ /* ** The maximum number of jobs scheduled for starting. ** There should be one slot per target, and one slot ** for each tag of each target in use. */ #define MAX_START (256) /* ** The maximum number of segments a transfer is split into. */ #define MAX_SCATTER (33) /* ** The maximum transfer length (should be >= 64k). ** MUST NOT be greater than (MAX_SCATTER-1) * PAGE_SIZE. */ #define MAX_SIZE ((MAX_SCATTER-1) * (long) PAGE_SIZE) /* ** other */ #define NCR_SNOOP_TIMEOUT (1000000) /*========================================================== ** ** Include files ** **========================================================== */ #include #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include /*========================================================== ** ** Debugging tags ** **========================================================== */ #define DEBUG_ALLOC (0x0001) #define DEBUG_PHASE (0x0002) #define DEBUG_POLL (0x0004) #define DEBUG_QUEUE (0x0008) #define DEBUG_RESULT (0x0010) #define DEBUG_SCATTER (0x0020) #define DEBUG_SCRIPT (0x0040) #define DEBUG_TINY (0x0080) #define DEBUG_TIMING (0x0100) #define DEBUG_NEGO (0x0200) #define DEBUG_TAGS (0x0400) #define DEBUG_FREEZE (0x0800) #define DEBUG_RESTART (0x1000) /* ** Enable/Disable debug messages. ** Can be changed at runtime too. */ #ifdef SCSI_NCR_DEBUG #define DEBUG_FLAGS ncr_debug #else /* SCSI_NCR_DEBUG */ #define SCSI_NCR_DEBUG 0 #define DEBUG_FLAGS 0 #endif /* SCSI_NCR_DEBUG */ /*========================================================== ** ** assert () ** **========================================================== ** ** modified copy from 386bsd:/usr/include/sys/assert.h ** **---------------------------------------------------------- */ #ifdef DIAGNOSTIC #define assert(expression) { \ KASSERT(expression, ("%s", #expression)); \ } #else #define assert(expression) { \ if (!(expression)) { \ (void)printf("assertion \"%s\" failed: " \ "file \"%s\", line %d\n", \ #expression, __FILE__, __LINE__); \ } \ } #endif /*========================================================== ** ** Access to the controller chip.
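** (The INB/OUTB family below resolves a register name to its byte
** offset at compile time via offsetof(struct ncr_reg, r), then goes
** through bus_read_N/bus_write_N, so the same code serves memory- and
** I/O-mapped chips.)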
** **========================================================== */ #define INB(r) bus_read_1(np->reg_res, offsetof(struct ncr_reg, r)) #define INW(r) bus_read_2(np->reg_res, offsetof(struct ncr_reg, r)) #define INL(r) bus_read_4(np->reg_res, offsetof(struct ncr_reg, r)) #define OUTB(r, val) bus_write_1(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTW(r, val) bus_write_2(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL(r, val) bus_write_4(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL_OFF(o, val) bus_write_4(np->reg_res, o, val) #define INB_OFF(o) bus_read_1(np->reg_res, o) #define INW_OFF(o) bus_read_2(np->reg_res, o) #define INL_OFF(o) bus_read_4(np->reg_res, o) #define READSCRIPT_OFF(base, off) \ (base ? *((volatile u_int32_t *)((volatile char *)base + (off))) : \ bus_read_4(np->sram_res, off)) #define WRITESCRIPT_OFF(base, off, val) \ do { \ if (base) \ *((volatile u_int32_t *) \ ((volatile char *)base + (off))) = (val); \ else \ bus_write_4(np->sram_res, off, val); \ } while (0) #define READSCRIPT(r) \ READSCRIPT_OFF(np->script, offsetof(struct script, r)) #define WRITESCRIPT(r, val) \ WRITESCRIPT_OFF(np->script, offsetof(struct script, r), val) /* ** Set bit field ON, OFF */ #define OUTONB(r, m) OUTB(r, INB(r) | (m)) #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) #define OUTONW(r, m) OUTW(r, INW(r) | (m)) #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) #define OUTONL(r, m) OUTL(r, INL(r) | (m)) #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) /*========================================================== ** ** Command control block states. ** **========================================================== */ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_COMPLETE (4) #define HS_SEL_TIMEOUT (5) /* Selection timeout */ #define HS_RESET (6) /* SCSI reset */ #define HS_ABORTED (7) /* Transfer aborted */ #define HS_TIMEOUT (8) /* Software timeout */ #define HS_FAIL (9) /* SCSI or PCI bus errors */ #define HS_UNEXPECTED (10) /* Unexpected disconnect */ #define HS_STALL (11) /* QUEUE FULL or BUSY */ #define HS_DONEMASK (0xfc) /*========================================================== ** ** Software Interrupt Codes ** **========================================================== */ #define SIR_SENSE_RESTART (1) #define SIR_SENSE_FAILED (2) #define SIR_STALL_RESTART (3) #define SIR_STALL_QUEUE (4) #define SIR_NEGO_SYNC (5) #define SIR_NEGO_WIDE (6) #define SIR_NEGO_FAILED (7) #define SIR_NEGO_PROTO (8) #define SIR_REJECT_RECEIVED (9) #define SIR_REJECT_SENT (10) #define SIR_IGN_RESIDUE (11) #define SIR_MISSING_SAVE (12) #define SIR_MAX (12) /*========================================================== ** ** Extended error codes. ** xerr_status field of struct nccb. ** **========================================================== */ #define XE_OK (0) #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (2) /* illegal phase (4/5) */ /*========================================================== ** ** Negotiation status. ** nego_status field of struct nccb. ** **========================================================== */ #define NS_SYNC (1) #define NS_WIDE (2) /*========================================================== ** ** XXX These are no longer used. Remove once the ** script is updated. ** "Special features" of targets. ** quirks field of struct tcb. ** actualquirks field of struct nccb. 
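** (A note on HS_DONEMASK above: host-status codes 0..3 are the
** in-progress states, so any bit of 0xfc being set means the command
** has reached a final status; the script tests exactly that mask.)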
** **========================================================== */ #define QUIRK_AUTOSAVE (0x01) #define QUIRK_NOMSG (0x02) #define QUIRK_NOSYNC (0x10) #define QUIRK_NOWIDE16 (0x20) #define QUIRK_NOTAGS (0x40) #define QUIRK_UPDATE (0x80) /*========================================================== ** ** Misc. ** **========================================================== */ #define CCB_MAGIC (0xf2691ad2) #define MAX_TAGS (32) /* hard limit */ /*========================================================== ** ** OS dependencies. ** **========================================================== */ #define PRINT_ADDR(ccb) xpt_print_path((ccb)->ccb_h.path) /*========================================================== ** ** Declaration of structs. ** **========================================================== */ struct tcb; struct lcb; struct nccb; struct ncb; struct script; typedef struct ncb * ncb_p; typedef struct tcb * tcb_p; typedef struct lcb * lcb_p; typedef struct nccb * nccb_p; struct link { ncrcmd l_cmd; ncrcmd l_paddr; }; struct usrcmd { u_long target; u_long lun; u_long data; u_long cmd; }; #define UC_SETSYNC 10 #define UC_SETTAGS 11 #define UC_SETDEBUG 12 #define UC_SETORDER 13 #define UC_SETWIDE 14 #define UC_SETFLAG 15 #define UF_TRACE (0x01) /*--------------------------------------- ** ** Timestamps for profiling ** **--------------------------------------- */ /* Type of the kernel variable `ticks'. XXX should be declared with the var. */ typedef int ticks_t; struct tstamp { ticks_t start; ticks_t end; ticks_t select; ticks_t command; ticks_t data; ticks_t status; ticks_t disconnect; }; /* ** profiling data (per device) */ struct profile { u_long num_trans; u_long num_bytes; u_long num_disc; u_long num_break; u_long num_int; u_long num_fly; u_long ms_setup; u_long ms_data; u_long ms_disc; u_long ms_post; }; /*========================================================== ** ** Declaration of structs: target control block ** **========================================================== */ #define NCR_TRANS_CUR 0x01 /* Modify current neogtiation status */ #define NCR_TRANS_ACTIVE 0x03 /* Assume this is the active target */ #define NCR_TRANS_GOAL 0x04 /* Modify negotiation goal */ #define NCR_TRANS_USER 0x08 /* Modify user negotiation settings */ struct ncr_transinfo { u_int8_t width; u_int8_t period; u_int8_t offset; }; struct ncr_target_tinfo { /* Hardware version of our sync settings */ u_int8_t disc_tag; #define NCR_CUR_DISCENB 0x01 #define NCR_CUR_TAGENB 0x02 #define NCR_USR_DISCENB 0x04 #define NCR_USR_TAGENB 0x08 u_int8_t sval; struct ncr_transinfo current; struct ncr_transinfo goal; struct ncr_transinfo user; /* Hardware version of our wide settings */ u_int8_t wval; }; struct tcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the encoded target number ** with bit 7 set. ** if it's not this target, jump to the next. ** ** JUMP IF (SFBR != #target#) ** @(next tcb) */ struct link jump_tcb; /* ** load the actual values for the sxfer and the scntl3 ** register (sync/wide mode). ** ** SCR_COPY (1); ** @(sval field of this tcb) ** @(sxfer register) ** SCR_COPY (1); ** @(wval field of this tcb) ** @(scntl3 register) */ ncrcmd getscr[6]; /* ** if next message is "identify" ** then load the message to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_lun; /* ** now look for the right lun. 
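** (Reselection therefore runs entirely in the script processor: the
** jump_tcb chain matches the target id, call_lun fetches the IDENTIFY
** lun, the jump_lcb chain matches it, and the lcb's jump_nccb chain
** finally matches the tag, without waking the host CPU.)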
** ** JUMP ** @(first nccb of this lun) */ struct link jump_lcb; /* ** pointer to interrupted getcc nccb */ nccb_p hold_cp; /* ** pointer to nccb used for negotiating. ** Avoid to start a nego for all queued commands ** when tagged command queuing is enabled. */ nccb_p nego_cp; /* ** statistical data */ u_long transfers; u_long bytes; /* ** user settable limits for sync transfer ** and tagged commands. */ struct ncr_target_tinfo tinfo; /* ** the lcb's of this tcb */ lcb_p lp[MAX_LUN]; }; /*========================================================== ** ** Declaration of structs: lun control block ** **========================================================== */ struct lcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the "Identify" message. ** if it's not this lun, jump to the next. ** ** JUMP IF (SFBR != #lun#) ** @(next lcb of this target) */ struct link jump_lcb; /* ** if next message is "simple tag", ** then load the tag to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_tag; /* ** now look for the right nccb. ** ** JUMP ** @(first nccb of this lun) */ struct link jump_nccb; /* ** start of the nccb chain */ nccb_p next_nccb; /* ** Control of tagged queueing */ u_char reqnccbs; u_char reqlink; u_char actlink; u_char usetags; u_char lasttag; }; /*========================================================== ** ** Declaration of structs: COMMAND control block ** **========================================================== ** ** This substructure is copied from the nccb to a ** global address after selection (or reselection) ** and copied back before disconnect. ** ** These fields are accessible to the script processor. ** **---------------------------------------------------------- */ struct head { /* ** Execution of a nccb starts at this point. ** It's a jump to the "SELECT" label ** of the script. ** ** After successful selection the script ** processor overwrites it with a jump to ** the IDLE label of the script. */ struct link launch; /* ** Saved data pointer. ** Points to the position in the script ** responsible for the actual transfer ** of data. ** It's written after reception of a ** "SAVE_DATA_POINTER" message. ** The goalpointer points after ** the last transfer command. */ u_int32_t savep; u_int32_t lastp; u_int32_t goalp; /* ** The virtual address of the nccb ** containing this header. */ nccb_p cp; /* ** space for some timestamps to gather ** profiling data about devices and this driver. */ struct tstamp stamp; /* ** status fields. */ u_char status[8]; }; /* ** The status bytes are used by the host and the script processor. ** ** The first four byte are copied to the scratchb register ** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect, ** and copied back just after disconnecting. ** Inside the script the XX_REG are used. ** ** The last four bytes are used inside the script by "COPY" commands. ** Because source and destination must have the same alignment ** in a longword, the fields HAVE to be at the choosen offsets. 
** xerr_st (4) 0 (0x34) scratcha ** sync_st (5) 1 (0x05) sxfer ** wide_st (7) 3 (0x03) scntl3 */ /* ** First four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define PS_REG scr3 /* ** First four bytes (host) */ #define actualquirks phys.header.status[0] #define host_status phys.header.status[1] #define s_status phys.header.status[2] #define parity_status phys.header.status[3] /* ** Last four bytes (script) */ #define xerr_st header.status[4] /* MUST be ==0 mod 4 */ #define sync_st header.status[5] /* MUST be ==1 mod 4 */ #define nego_st header.status[6] #define wide_st header.status[7] /* MUST be ==3 mod 4 */ /* ** Last four bytes (host) */ #define xerr_status phys.xerr_st #define sync_status phys.sync_st #define nego_status phys.nego_st #define wide_status phys.wide_st /*========================================================== ** ** Declaration of structs: Data structure block ** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb. ** This substructure contains the header with ** the script-processor-changable data and ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct dsb { /* ** Header. ** Has to be the first entry, ** because it's jumped to by the ** script processor */ struct head header; /* ** Table data for Script */ struct scr_tblsel select; struct scr_tblmove smsg ; struct scr_tblmove smsg2 ; struct scr_tblmove cmd ; struct scr_tblmove scmd ; struct scr_tblmove sense ; struct scr_tblmove data [MAX_SCATTER]; }; /*========================================================== ** ** Declaration of structs: Command control block. ** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb. ** This substructure contains the header with ** the script-processor-changable data and then ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct nccb { /* ** This filler ensures that the global header is ** cache line size aligned. */ ncrcmd filler[4]; /* ** during reselection the ncr jumps to this point. ** If a "SIMPLE_TAG" message was received, ** then SFBR is set to the tag. ** else SFBR is set to 0 ** If looking for another tag, jump to the next nccb. ** ** JUMP IF (SFBR != #TAG#) ** @(next nccb of this lun) */ struct link jump_nccb; /* ** After execution of this call, the return address ** (in the TEMP register) points to the following ** data structure block. ** So copy it to the DSA register, and start ** processing of this data structure. ** ** CALL ** */ struct link call_tmp; /* ** This is the data structure which is ** to be executed by the script processor. */ struct dsb phys; /* ** If a data transfer phase is terminated too early ** (after reception of a message (i.e. DISCONNECT)), ** we have to prepare a mini script to transfer ** the rest of the data. */ ncrcmd patch[8]; /* ** The general SCSI driver provides a ** pointer to a control block. */ union ccb *ccb; /* ** We prepare a message to be sent after selection, ** and a second one to be sent after getcc selection. ** Contents are IDENTIFY and SIMPLE_TAG. ** While negotiating sync or wide transfer, ** a SDTM or WDTM message is appended. 
*/ u_char scsi_smsg [8]; u_char scsi_smsg2[8]; /* ** Lock this nccb. ** Flag is used while looking for a free nccb. */ u_long magic; /* ** Physical address of this instance of nccb */ u_long p_nccb; /* ** Completion time out for this job. ** It's set to time of start + allowed number of seconds. */ time_t tlimit; /* ** All nccbs of one hostadapter are chained. */ nccb_p link_nccb; /* ** All nccbs of one target/lun are chained. */ nccb_p next_nccb; /* ** Sense command */ u_char sensecmd[6]; /* ** Tag for this transfer. ** It's patched into jump_nccb. ** If it's not zero, a SIMPLE_TAG ** message is included in smsg. */ u_char tag; }; #define CCB_PHYS(cp,lbl) (cp->p_nccb + offsetof(struct nccb, lbl)) /*========================================================== ** ** Declaration of structs: NCR device descriptor ** **========================================================== */ struct ncb { /* ** The global header. ** Accessible to both the host and the ** script-processor. ** We assume it is cache line size aligned. */ struct head header; device_t dev; /*----------------------------------------------- ** Scripts .. **----------------------------------------------- ** ** During reselection the ncr jumps to this point. ** The SFBR register is loaded with the encoded target id. ** ** Jump to the first target. ** ** JUMP ** @(next tcb) */ struct link jump_tcb; /*----------------------------------------------- ** Configuration .. **----------------------------------------------- ** ** virtual and physical addresses ** of the 53c810 chip. */ int reg_rid; struct resource *reg_res; int sram_rid; struct resource *sram_res; struct resource *irq_res; void *irq_handle; /* ** Scripts instance virtual address. */ struct script *script; struct scripth *scripth; /* ** Scripts instance physical address. */ u_long p_script; u_long p_scripth; /* ** The SCSI address of the host adapter. */ u_char myaddr; /* ** timing parameters */ u_char minsync; /* Minimum sync period factor */ u_char maxsync; /* Maximum sync period factor */ u_char maxoffs; /* Max scsi offset */ u_char clock_divn; /* Number of clock divisors */ u_long clock_khz; /* SCSI clock frequency in KHz */ u_long features; /* Chip features map */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char maxburst; /* log base 2 of dwords burst */ /* ** BIOS supplied PCI bus options */ u_char rv_scntl3; u_char rv_dcntl; u_char rv_dmode; u_char rv_ctest3; u_char rv_ctest4; u_char rv_ctest5; u_char rv_gpcntl; u_char rv_stest2; /*----------------------------------------------- ** CAM SIM information for this instance **----------------------------------------------- */ struct cam_sim *sim; struct cam_path *path; /*----------------------------------------------- ** Job control **----------------------------------------------- ** ** Commands from user */ struct usrcmd user; /* ** Target data */ struct tcb target[MAX_TARGET]; /* ** Start queue. */ u_int32_t squeue [MAX_START]; u_short squeueput; /* ** Timeout handler */ time_t heartbeat; u_short ticks; u_short latetime; time_t lasttime; struct callout timer; /*----------------------------------------------- ** Debug and profiling **----------------------------------------------- ** ** register dump */ struct ncr_reg regdump; time_t regtime; /* ** Profiling data */ struct profile profile; u_long disc_phys; u_long disc_ref; /* ** Head of list of all nccbs for this controller. */ nccb_p link_nccb; /* ** message buffers. ** Should be longword aligned, ** because they're written with a ** COPY script command. 
*/ u_char msgout[8]; u_char msgin [8]; u_int32_t lastmsg; /* ** Buffer for STATUS_IN phase. */ u_char scratch; /* ** controller chip dependent maximal transfer width. */ u_char maxwide; struct mtx lock; #ifdef NCR_IOMAPPED /* ** address of the ncr control registers in io space */ pci_port_t port; #endif }; #define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl)) #define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl)) /*========================================================== ** ** ** Script for NCR-Processor. ** ** Use ncr_script_fill() to create the variable parts. ** Use ncr_script_copy_and_bind() to make a copy and ** bind to physical addresses. ** ** **========================================================== ** ** We have to know the offsets of all labels before ** we reach them (for forward jumps). ** Therefore we declare a struct here. ** If you make changes inside the script, ** DONT FORGET TO CHANGE THE LENGTHS HERE! ** **---------------------------------------------------------- */ /* ** Script fragments which are loaded into the on-board RAM ** of 825A, 875 and 895 chips. */ struct script { ncrcmd start [ 7]; ncrcmd start0 [ 2]; ncrcmd start1 [ 3]; ncrcmd startpos [ 1]; ncrcmd trysel [ 8]; ncrcmd skip [ 8]; ncrcmd skip2 [ 3]; ncrcmd idle [ 2]; ncrcmd select [ 18]; ncrcmd prepare [ 4]; ncrcmd loadpos [ 14]; ncrcmd prepare2 [ 24]; ncrcmd setmsg [ 5]; ncrcmd clrack [ 2]; ncrcmd dispatch [ 33]; ncrcmd no_data [ 17]; ncrcmd checkatn [ 10]; ncrcmd command [ 15]; ncrcmd status [ 27]; ncrcmd msg_in [ 26]; ncrcmd msg_bad [ 6]; ncrcmd complete [ 13]; ncrcmd cleanup [ 12]; ncrcmd cleanup0 [ 9]; ncrcmd signal [ 12]; ncrcmd save_dp [ 5]; ncrcmd restore_dp [ 5]; ncrcmd disconnect [ 12]; ncrcmd disconnect0 [ 5]; ncrcmd disconnect1 [ 23]; ncrcmd msg_out [ 9]; ncrcmd msg_out_done [ 7]; ncrcmd badgetcc [ 6]; ncrcmd reselect [ 8]; ncrcmd reselect1 [ 8]; ncrcmd reselect2 [ 8]; ncrcmd resel_tmp [ 5]; ncrcmd resel_lun [ 18]; ncrcmd resel_tag [ 24]; ncrcmd data_in [MAX_SCATTER * 4 + 7]; ncrcmd data_out [MAX_SCATTER * 4 + 7]; }; /* ** Script fragments which stay in main memory for all chips. */ struct scripth { ncrcmd tryloop [MAX_START*5+2]; ncrcmd msg_parity [ 6]; ncrcmd msg_reject [ 8]; ncrcmd msg_ign_residue [ 32]; ncrcmd msg_extended [ 18]; ncrcmd msg_ext_2 [ 18]; ncrcmd msg_wdtr [ 27]; ncrcmd msg_ext_3 [ 18]; ncrcmd msg_sdtr [ 27]; ncrcmd msg_out_abort [ 10]; ncrcmd getcc [ 4]; ncrcmd getcc1 [ 5]; #ifdef NCR_GETCC_WITHMSG ncrcmd getcc2 [ 29]; #else ncrcmd getcc2 [ 14]; #endif ncrcmd getcc3 [ 6]; ncrcmd aborttag [ 4]; ncrcmd abort [ 22]; ncrcmd snooptest [ 9]; ncrcmd snoopend [ 2]; }; /*========================================================== ** ** ** Function headers. 
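*/
/*
 * The fragments in struct script are copied into the chips' on-board
 * SCRIPTS RAM (assumed here to be the usual 4 KB window on the parts
 * named above). A compile-time guard in this style (a sketch, not part
 * of the driver) would catch the structure outgrowing that window by
 * giving the dummy array a negative size:
 */
#define NCR_SCRIPT_FITS_RAM_CHECK					\
	extern char ncr_script_fits_ram[				\
	    (sizeof(struct script) <= 4096) ? 1 : -1]
/*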
** ** **========================================================== */ #ifdef _KERNEL static nccb_p ncr_alloc_nccb(ncb_p np, u_long target, u_long lun); static void ncr_complete(ncb_p np, nccb_p cp); static int ncr_delta(int * from, int * to); static void ncr_exception(ncb_p np); static void ncr_free_nccb(ncb_p np, nccb_p cp); static void ncr_freeze_devq(ncb_p np, struct cam_path *path); static void ncr_selectclock(ncb_p np, u_char scntl3); static void ncr_getclock(ncb_p np, u_char multiplier); static nccb_p ncr_get_nccb(ncb_p np, u_long t,u_long l); #if 0 static u_int32_t ncr_info(int unit); #endif static void ncr_init(ncb_p np, char * msg, u_long code); static void ncr_intr(void *vnp); static void ncr_intr_locked(ncb_p np); static void ncr_int_ma(ncb_p np, u_char dstat); static void ncr_int_sir(ncb_p np); static void ncr_int_sto(ncb_p np); #if 0 static void ncr_min_phys(struct buf *bp); #endif static void ncr_poll(struct cam_sim *sim); static void ncb_profile(ncb_p np, nccb_p cp); static void ncr_script_copy_and_bind(ncb_p np, ncrcmd *src, ncrcmd *dst, int len); static void ncr_script_fill(struct script * scr, struct scripth *scrh); static int ncr_scatter(struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen); static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p); static void ncr_setsync(ncb_p np, nccb_p cp,u_char scntl3,u_char sxfer, u_char period); static void ncr_setwide(ncb_p np, nccb_p cp, u_char wide, u_char ack); static int ncr_show_msg(u_char * msg); static int ncr_snooptest(ncb_p np); static void ncr_action(struct cam_sim *sim, union ccb *ccb); static void ncr_timeout(void *arg); static void ncr_wakeup(ncb_p np, u_long code); static int ncr_probe(device_t dev); static int ncr_attach(device_t dev); #endif /* _KERNEL */ /*========================================================== ** ** ** Global static data. ** ** **========================================================== */ #ifdef _KERNEL static int ncr_debug = SCSI_NCR_DEBUG; SYSCTL_INT(_debug, OID_AUTO, ncr_debug, CTLFLAG_RW, &ncr_debug, 0, ""); static int ncr_cache; /* to be aligned _NOT_ static */ /*========================================================== ** ** ** Global static data: auto configure ** ** **========================================================== */ #define NCR_810_ID (0x00011000ul) #define NCR_815_ID (0x00041000ul) #define NCR_820_ID (0x00021000ul) #define NCR_825_ID (0x00031000ul) #define NCR_860_ID (0x00061000ul) #define NCR_875_ID (0x000f1000ul) #define NCR_875_ID2 (0x008f1000ul) #define NCR_885_ID (0x000d1000ul) #define NCR_895_ID (0x000c1000ul) #define NCR_896_ID (0x000b1000ul) #define NCR_895A_ID (0x00121000ul) #define NCR_1510D_ID (0x000a1000ul) /*========================================================== ** ** ** Scripts for NCR-Processor. ** ** Use ncr_script_bind for binding to physical addresses. ** ** **========================================================== ** ** NADDR generates a reference to a field of the controller data. ** PADDR generates a reference to another part of the script. ** RADDR generates a reference to a script processor register. ** FADDR generates a reference to a script processor register ** with offset. 
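** (Binding sketch: each tagged operand keeps its kind in the top
** nibble and an offset in the low bits, so the copy-and-bind pass can
** resolve it roughly as
**
**	switch (v & RELOC_MASK) {
**	case RELOC_SOFTC: p = np_paddr + (v & ~RELOC_MASK); break;
**	case RELOC_LABEL: p = np->p_script + (v & ~RELOC_MASK); break;
**	... and so on for REGISTER, KVAR and LABELH ...
**	}
**
** where np_paddr is a stand-in name; the real handling lives in
** ncr_script_copy_and_bind(), this is only the shape.)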
** **---------------------------------------------------------- */ #define RELOC_SOFTC 0x40000000 #define RELOC_LABEL 0x50000000 #define RELOC_REGISTER 0x60000000 #define RELOC_KVAR 0x70000000 #define RELOC_LABELH 0x80000000 #define RELOC_MASK 0xf0000000 #define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label)) #define PADDR(label) (RELOC_LABEL | offsetof(struct script, label)) #define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label)) #define RADDR(label) (RELOC_REGISTER | REG(label)) #define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs))) #define KVAR(which) (RELOC_KVAR | (which)) #define KVAR_SECOND (0) #define KVAR_TICKS (1) #define KVAR_NCR_CACHE (2) #define SCRIPT_KVAR_FIRST (0) #define SCRIPT_KVAR_LAST (3) /* * Kernel variables referenced in the scripts. * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY. */ static volatile void *script_kvars[] = { &time_second, &ticks, &ncr_cache }; static struct script script0 = { /*--------------------------< START >-----------------------*/ { /* ** Claim to be still alive ... */ SCR_COPY (sizeof (((struct ncb *)0)->heartbeat)), KVAR (KVAR_SECOND), NADDR (heartbeat), /* ** Make data structure address invalid. ** clear SIGP. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_FROM_REG (ctest2), 0, }/*-------------------------< START0 >----------------------*/,{ /* ** Hook for interrupted GetConditionCode. ** Will be patched to ... IFTRUE by ** the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_SENSE_RESTART, }/*-------------------------< START1 >----------------------*/,{ /* ** Hook for stalled start queue. ** Will be patched to IFTRUE by the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_STALL_RESTART, /* ** Then jump to a certain point in tryloop. ** Due to the lack of indirect addressing the code ** is self modifying here. */ SCR_JUMP, }/*-------------------------< STARTPOS >--------------------*/,{ PADDRH(tryloop), }/*-------------------------< TRYSEL >----------------------*/,{ /* ** Now: ** DSA: Address of a Data Structure ** or Address of the IDLE-Label. ** ** TEMP: Address of a script, which tries to ** start the NEXT entry. ** ** Save the TEMP register into the SCRATCHA register. ** Then copy the DSA to TEMP and RETURN. ** This is kind of an indirect jump. ** (The script processor has NO stack, so the ** CALL is actually a jump and link, and the ** RETURN is an indirect jump.) ** ** If the slot was empty, DSA contains the address ** of the IDLE part of this script. The processor ** jumps to IDLE and waits for a reselect. ** It will wake up and try the same slot again ** after the SIGP bit becomes set by the host. ** ** If the slot was not empty, DSA contains ** the address of the phys-part of a nccb. ** The processor jumps to this address. ** phys starts with head, ** head starts with launch, ** so actually the processor jumps to ** the lauch part. ** If the entry is scheduled for execution, ** then launch contains a jump to SELECT. ** If it's not scheduled, it contains a jump to IDLE. */ SCR_COPY (4), RADDR (temp), RADDR (scratcha), SCR_COPY (4), RADDR (dsa), RADDR (temp), SCR_RETURN, 0 }/*-------------------------< SKIP >------------------------*/,{ /* ** This entry has been canceled. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), /* ** patch the launch field. ** should look like an idle process. 
*/ SCR_COPY_F (4), RADDR (dsa), PADDR (skip2), SCR_COPY (8), PADDR (idle), }/*-------------------------< SKIP2 >-----------------------*/,{ 0, SCR_JUMP, PADDR(start), }/*-------------------------< IDLE >------------------------*/,{ /* ** Nothing to do? ** Wait for reselect. */ SCR_JUMP, PADDR(reselect), }/*-------------------------< SELECT >----------------------*/,{ /* ** DSA contains the address of a scheduled ** data structure. ** ** SCRATCHA contains the address of the script, ** which starts the next entry. ** ** Set Initiator mode. ** ** (Target mode is left as an exercise for the reader) */ SCR_CLR (SCR_TRG), 0, SCR_LOAD_REG (HS_REG, 0xff), 0, /* ** And try to select this target. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR (reselect), /* ** Now there are 4 possibilities: ** ** (1) The ncr loses arbitration. ** This is ok, because it will try again, ** when the bus becomes idle. ** (But beware of the timeout function!) ** ** (2) The ncr is reselected. ** Then the script processor takes the jump ** to the RESELECT label. ** ** (3) The ncr completes the selection. ** Then it will execute the next statement. ** ** (4) There is a selection timeout. ** Then the ncr should interrupt the host and stop. ** Unfortunately, it seems to continue execution ** of the script. But it will fail with an ** IID-interrupt on the next WHEN. */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)), 0, /* ** Send the IDENTIFY and SIMPLE_TAG messages ** (and the MSG_EXT_SDTR message) */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg), #ifdef undef /* XXX better fail than try to deal with this ... */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_OUT)), -16, #endif SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** Selection complete. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), }/*-------------------------< PREPARE >----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can access it. ** ** We patch the address part of a ** COPY command with the DSA-register. */ SCR_COPY_F (4), RADDR (dsa), PADDR (loadpos), /* ** then we do the actual copy. */ SCR_COPY (sizeof (struct head)), /* ** continued after the next label ... */ }/*-------------------------< LOADPOS >---------------------*/,{ 0, NADDR (header), /* ** Mark this nccb as not scheduled. */ SCR_COPY (8), PADDR (idle), NADDR (header.launch), /* ** Set a time stamp for this selection */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.select), /* ** load the savep (saved pointer) into ** the TEMP register (actual pointer) */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< PREPARE2 >---------------------*/,{ /* ** Load the synchronous mode register */ SCR_COPY (1), NADDR (sync_st), RADDR (sxfer), /* ** Load the wide mode and timing register */ SCR_COPY (1), NADDR (wide_st), RADDR (scntl3), /* ** Initialize the msgout buffer with a NOOP message. */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_COPY (1), RADDR (scratcha), NADDR (msgin), /* ** Message in phase ? */ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** Extended or reject message ? 
*/ SCR_FROM_REG (sbdl), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), /* ** normal processing */ SCR_JUMP, PADDR (dispatch), }/*-------------------------< SETMSG >----------------------*/,{ SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, }/*-------------------------< CLRACK >----------------------*/,{ /* ** Terminate possible pending message phase. */ SCR_CLR (SCR_ACK), 0, }/*-----------------------< DISPATCH >----------------------*/,{ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** remove bogus output signals */ SCR_REG_REG (socl, SCR_AND, CACK|CATN), 0, SCR_RETURN ^ IFTRUE (WHEN (SCR_DATA_OUT)), 0, SCR_RETURN ^ IFTRUE (IF (SCR_DATA_IN)), 0, SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), PADDR (msg_out), SCR_JUMP ^ IFTRUE (IF (SCR_MSG_IN)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), PADDR (command), SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), PADDR (status), /* ** Discard one illegal phase byte, if required. */ SCR_LOAD_REG (scratcha, XE_BAD_PHASE), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_IN, NADDR (scratch), SCR_JUMP, PADDR (dispatch), }/*-------------------------< NO_DATA >--------------------*/,{ /* ** The target wants to transfer too much data ** or in the wrong direction. ** Remember that in extended error. */ SCR_LOAD_REG (scratcha, XE_EXTRA_DATA), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), /* ** Discard one data byte, if required. */ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_IN, NADDR (scratch), /* ** .. and repeat as required. */ SCR_CALL, PADDR (dispatch), SCR_JUMP, PADDR (no_data), }/*-------------------------< CHECKATN >--------------------*/,{ /* ** If AAP (bit 1 of scntl0 register) is set ** and a parity error is detected, ** the script processor asserts ATN. ** ** The target should switch to a MSG_OUT phase ** to get the message. */ SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFFALSE (MASK (CATN, CATN)), PADDR (dispatch), /* ** count it */ SCR_REG_REG (PS_REG, SCR_ADD, 1), 0, /* ** Prepare a MSG_INITIATOR_DET_ERR message ** (initiator detected error). ** The target should retry the transfer. */ SCR_LOAD_REG (scratcha, MSG_INITIATOR_DET_ERR), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMMAND >--------------------*/,{ /* ** If this is not a GETCC transfer ... */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), 28, /* ** ... set a timestamp ... */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.command), /* ** ... and send the command */ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, cmd), SCR_JUMP, PADDR (dispatch), /* ** Send the GETCC command */ /*>>>*/ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, scmd), SCR_JUMP, PADDR (dispatch), }/*-------------------------< STATUS >--------------------*/,{ /* ** set the timestamp. */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.status), /* ** If this is a GETCC transfer, */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (SCSI_STATUS_CHECK_COND)), 40, /* ** get the status */ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), /* ** Save status to scsi_status. ** Mark as complete.
** And wait for disconnect. */ SCR_TO_REG (SS_REG), 0, SCR_REG_REG (SS_REG, SCR_OR, SCSI_STATUS_SENSE), 0, SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), /* ** If it was not a GETCC transfer, ** save the status to scsi_status. */ /*>>>*/ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), SCR_TO_REG (SS_REG), 0, /* ** if it was not a check condition ... */ SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDR (checkatn), /* ** ... mark as complete. */ SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), }/*-------------------------< MSG_IN >--------------------*/,{ /* ** Get the first byte of the message ** and save it to SCRATCHA. ** ** The script processor doesn't negate the ** ACK signal after this transfer. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[0]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Parity was ok, handle this message. */ SCR_JUMP ^ IFTRUE (DATA (MSG_CMDCOMPLETE)), PADDR (complete), SCR_JUMP ^ IFTRUE (DATA (MSG_SAVEDATAPOINTER)), PADDR (save_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_RESTOREPOINTERS)), PADDR (restore_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_DISCONNECT)), PADDR (disconnect), SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDRH (msg_extended), SCR_JUMP ^ IFTRUE (DATA (MSG_NOOP)), PADDR (clrack), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), SCR_JUMP ^ IFTRUE (DATA (MSG_IGN_WIDE_RESIDUE)), PADDRH (msg_ign_residue), /* ** Rest of the messages left as ** an exercise ... ** ** Unimplemented messages: ** fall through to MSG_BAD. */ }/*-------------------------< MSG_BAD >------------------*/,{ /* ** unimplemented message - reject it. */ SCR_INT, SIR_REJECT_SENT, SCR_LOAD_REG (scratcha, MSG_MESSAGE_REJECT), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMPLETE >-----------------*/,{ /* ** Complete message. ** ** If it's not the get condition code, ** copy TEMP register to LASTP in header. */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (MASK (SCSI_STATUS_SENSE, SCSI_STATUS_SENSE)), 12, SCR_COPY (4), RADDR (temp), NADDR (header.lastp), /*>>>*/ /* ** When we terminate the cycle by clearing ACK, ** the target may disconnect immediately. ** ** We don't want to be told of an ** "unexpected disconnect", ** so we disable this feature. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, /* ** Terminate cycle ... */ SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** ... and wait for the disconnect. */ SCR_WAIT_DISC, 0, }/*-------------------------< CLEANUP >-------------------*/,{ /* ** dsa: Pointer to nccb ** or xxxxxxFF (no nccb) ** ** HS_REG: Host-Status (<>0!) */ SCR_FROM_REG (dsa), 0, SCR_JUMP ^ IFTRUE (DATA (0xff)), PADDR (signal), /* ** dsa is valid. ** save the status registers */ SCR_COPY (4), RADDR (scr0), NADDR (header.status), /* ** and copy back the header to the nccb. */ SCR_COPY_F (4), RADDR (dsa), PADDR (cleanup0), SCR_COPY (sizeof (struct head)), NADDR (header), }/*-------------------------< CLEANUP0 >--------------------*/,{ 0, /* ** If command resulted in "check condition" ** status and is not yet completed, ** try to get the condition code. */ SCR_FROM_REG (HS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)), 16, SCR_FROM_REG (SS_REG), 0, SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDRH(getcc2), }/*-------------------------< SIGNAL >----------------------*/,{ /* ** if status = queue full, ** reinsert in startqueue and stall queue.
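** ** (Illustrative note, not in the original source: on a ** SCSI_STATUS_QUEUE_FULL status the SIGNAL fragment below raises ** SIR_STALL_QUEUE, so the host can requeue the job and stall the ** start queue until the target drains; ncr_complete() later maps ** the resulting HS_STALL host status to CAM_REQUEUE_REQ, and the ** START1 hook above is patched to restart the stalled queue.)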
*/ /*>>>*/ SCR_FROM_REG (SS_REG), 0, SCR_INT ^ IFTRUE (DATA (SCSI_STATUS_QUEUE_FULL)), SIR_STALL_QUEUE, /* ** And make the DSA register invalid. */ SCR_LOAD_REG (dsa, 0xff), /* invalid */ 0, /* ** if job completed ... */ SCR_FROM_REG (HS_REG), 0, /* ** ... signal completion to the host */ SCR_INT_FLY ^ IFFALSE (MASK (0, HS_DONEMASK)), 0, /* ** On to new misdeeds! ("Auf zu neuen Schandtaten!") */ SCR_JUMP, PADDR(start), }/*-------------------------< SAVE_DP >------------------*/,{ /* ** SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), SCR_JUMP, PADDR (clrack), }/*-------------------------< RESTORE_DP >---------------*/,{ /* ** RESTORE_DP message: ** Copy SAVEP in header to TEMP register. */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), SCR_JUMP, PADDR (clrack), }/*-------------------------< DISCONNECT >---------------*/,{ /* ** If QUIRK_AUTOSAVE is set, ** do a "save pointer" operation. */ SCR_FROM_REG (QU_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)), 12, /* ** like SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /*>>>*/ /* ** Check if temp==savep or temp==goalp: ** if not, log a missing save pointer message. ** In fact, it's a comparison mod 256. ** ** Hmmm, I hadn't thought that I would be urged to ** write this kind of ugly self modifying code. ** ** It's unbelievable, but the ncr53c8xx isn't able ** to subtract one register from another. */ SCR_FROM_REG (temp), 0, /* ** You are not expected to understand this .. ** ** CAUTION: only little endian architectures supported! XXX */ SCR_COPY_F (1), NADDR (header.savep), PADDR (disconnect0), }/*-------------------------< DISCONNECT0 >--------------*/,{ /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (1)), 20, /* ** neither this */ SCR_COPY_F (1), NADDR (header.goalp), PADDR (disconnect1), }/*-------------------------< DISCONNECT1 >--------------*/,{ SCR_INT ^ IFFALSE (DATA (1)), SIR_MISSING_SAVE, /*>>>*/ /* ** DISCONNECTing ... ** ** disable the "unexpected disconnect" feature, ** and remove the ACK signal. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** Wait for the disconnect. */ SCR_WAIT_DISC, 0, /* ** Profiling: ** Set a time stamp, ** and count the disconnects. */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.disconnect), SCR_COPY (4), NADDR (disc_phys), RADDR (temp), SCR_REG_REG (temp, SCR_ADD, 0x01), 0, SCR_COPY (4), RADDR (temp), NADDR (disc_phys), /* ** Status is: DISCONNECTED. */ SCR_LOAD_REG (HS_REG, HS_DISCONNECT), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< MSG_OUT >-------------------*/,{ /* ** The target requests a message. */ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** If it was not an ABORT message ... */ SCR_JUMP ^ IFTRUE (DATA (MSG_ABORT)), PADDRH (msg_out_abort), /* ** ... wait for the next phase ** if it's a message out, send it again, ... */ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), PADDR (msg_out), }/*-------------------------< MSG_OUT_DONE >--------------*/,{ /* ** ... else clear the message ... */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (4), RADDR (scratcha), NADDR (msgout), /* ** ... and process the next phase */ SCR_JUMP, PADDR (dispatch), }/*------------------------< BADGETCC >---------------------*/,{ /* ** If SIGP was set, clear it and try again.
*/ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDRH (getcc2), SCR_INT, SIR_SENSE_FAILED, }/*-------------------------< RESELECT >--------------------*/,{ /* ** This NOP will be patched with LED OFF ** SCR_REG_REG (gpreg, SCR_OR, 0x01) */ SCR_NO_OP, 0, /* ** make the DSA invalid. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_CLR (SCR_TRG), 0, /* ** Sleep waiting for a reselection. ** If SIGP is set, special treatment. ** ** Ready for anything .. ("Zu allem bereit ..") */ SCR_WAIT_RESEL, PADDR(reselect2), }/*-------------------------< RESELECT1 >--------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** ... but good for nothing? ("... zu nichts zu gebrauchen ?") ** ** load the target id into the SFBR ** and jump to the control block. ** ** Look at the declarations of ** - struct ncb ** - struct tcb ** - struct lcb ** - struct nccb ** to understand what's going on. */ SCR_REG_SFBR (ssid, SCR_AND, 0x8F), 0, SCR_TO_REG (sdid), 0, SCR_JUMP, NADDR (jump_tcb), }/*-------------------------< RESELECT2 >-------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** If it's not connected :( ** -> interrupted by SIGP bit. ** Jump to start. */ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDR (start), SCR_JUMP, PADDR (reselect), }/*-------------------------< RESEL_TMP >-------------------*/,{ /* ** The return address in TEMP ** is in fact the data structure address, ** so copy it to the DSA register. */ SCR_COPY (4), RADDR (temp), RADDR (dsa), SCR_JUMP, PADDR (prepare), }/*-------------------------< RESEL_LUN >-------------------*/,{ /* ** come back to this point ** to get an IDENTIFY message ** Wait for a msg_in phase. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 48, /* ** message phase ** It's not a sony, it's a trick: ** read the data without acknowledging it. */ SCR_FROM_REG (sbdl), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (MSG_IDENTIFYFLAG, 0x98)), 32, /* ** It WAS an Identify message. ** get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK), 0, /* ** Mask out the lun. */ SCR_REG_REG (sfbr, SCR_AND, 0x07), 0, SCR_RETURN, 0, /* ** No message phase or no IDENTIFY message: ** return 0. */ /*>>>*/ SCR_LOAD_SFBR (0), 0, SCR_RETURN, 0, }/*-------------------------< RESEL_TAG >-------------------*/,{ /* ** come back to this point ** to get a SIMPLE_TAG message ** Wait for a MSG_IN phase. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 64, /* ** message phase ** It's a trick - read the data ** without acknowledging it. */ SCR_FROM_REG (sbdl), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (MSG_SIMPLE_Q_TAG)), 48, /* ** It WAS a SIMPLE_TAG message. ** get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK), 0, /* ** Wait for the second byte (the tag) */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 24, /* ** Get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK|SCR_CARRY), 0, SCR_RETURN, 0, /* ** No message phase or no SIMPLE_TAG message ** or no second byte: return 0. */ /*>>>*/ SCR_LOAD_SFBR (0), 0, SCR_SET (SCR_CARRY), 0, SCR_RETURN, 0, }/*-------------------------< DATA_IN >--------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTER parameter, ** it is filled in at runtime.
** ** SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), ** PADDR (no_data), ** SCR_COPY (sizeof (ticks)), ** KVAR (KVAR_TICKS), ** NADDR (header.stamp.data), ** SCR_MOVE_TBL ^ SCR_DATA_IN, ** offsetof (struct dsb, data[ 0]), ** ** ##===========< i=1; i<MAX_SCATTER >========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), ** || PADDR (checkatn), ** || SCR_MOVE_TBL ^ SCR_DATA_IN, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** ** SCR_CALL, ** PADDR (checkatn), ** SCR_JUMP, ** PADDR (no_data), */ 0 }/*-------------------------< DATA_OUT >-------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTER parameter, ** it is filled in at runtime. ** ** SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** PADDR (no_data), ** SCR_COPY (sizeof (ticks)), ** KVAR (KVAR_TICKS), ** NADDR (header.stamp.data), ** SCR_MOVE_TBL ^ SCR_DATA_OUT, ** offsetof (struct dsb, data[ 0]), ** ** ##===========< i=1; i<MAX_SCATTER >========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** || PADDR (dispatch), ** || SCR_MOVE_TBL ^ SCR_DATA_OUT, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** ** SCR_CALL, ** PADDR (dispatch), ** SCR_JUMP, ** PADDR (no_data), ** **--------------------------------------------------------- */ (u_long)0 }/*--------------------------------------------------------*/ }; static struct scripth scripth0 = { /*-------------------------< TRYLOOP >---------------------*/{ /* ** Load an entry of the start queue into dsa ** and try to start it by jumping to TRYSEL. ** ** Because the size depends on the ** #define MAX_START parameter, it is filled ** in at runtime. ** **----------------------------------------------------------- ** ** ##===========< I=0; i<MAX_START >=========== ** || SCR_COPY (4), ** || NADDR (squeue[i]), ** || RADDR (dsa), ** || SCR_CALL, ** || PADDR (trysel), ** ##========================================== ** ** SCR_JUMP, ** PADDRH(tryloop), ** **----------------------------------------------------------- */ 0 }/*-------------------------< MSG_PARITY >---------------*/,{ /* ** count it */ SCR_REG_REG (PS_REG, SCR_ADD, 0x01), 0, /* ** send a "message parity error" message. */ SCR_LOAD_REG (scratcha, MSG_PARITY_ERROR), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< MSG_MESSAGE_REJECT >---------------*/,{ /* ** If a negotiation was in progress, ** negotiation failed. */ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** else make host log this message */ SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)), SIR_REJECT_RECEIVED, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_IGN_RESIDUE >----------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get residue size. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Size is 0 .. ignore message. */ SCR_JUMP ^ IFTRUE (DATA (0)), PADDR (clrack), /* ** Size is not 1 .. have to interrupt. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (1)), 40, /* ** Check for residue byte in swide register */ SCR_FROM_REG (scntl2), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)), 16, /* ** There IS data in the swide register. ** Discard it. */ SCR_REG_REG (scntl2, SCR_OR, WSR), 0, SCR_JUMP, PADDR (clrack), /* ** Load again the size to the sfbr register.
*/ /*>>>*/ SCR_FROM_REG (scratcha), 0, /*>>>*/ SCR_INT, SIR_IGN_RESIDUE, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_EXTENDED >-------------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get length. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* */ SCR_JUMP ^ IFTRUE (DATA (3)), PADDRH (msg_ext_3), SCR_JUMP ^ IFFALSE (DATA (2)), PADDR (msg_bad), }/*-------------------------< MSG_EXT_2 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_WDTR)), PADDRH (msg_wdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_WDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get data bus width */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_WIDE, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_WDTR */ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_EXT_3 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_SDTR)), PADDRH (msg_sdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_SDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get period and offset */ SCR_MOVE_ABS (2) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_SYNC, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_SDTR */ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_OUT_ABORT >-------------*/,{ /* ** After ABORT message, ** ** expect an immediate disconnect, ... */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, /* ** ... and set the status to "ABORTED" */ SCR_LOAD_REG (HS_REG, HS_ABORTED), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< GETCC >-----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. 
So we have to ** copy part of the control block to a ** fixed place, where we can modify it. ** ** We patch the address part of a COPY command ** with the address of the dsa register ... */ SCR_COPY_F (4), RADDR (dsa), PADDRH (getcc1), /* ** ... then we do the actual copy. */ SCR_COPY (sizeof (struct head)), }/*-------------------------< GETCC1 >----------------------*/,{ 0, NADDR (header), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< GETCC2 >----------------------*/,{ /* ** Get the condition code from a target. ** ** DSA points to a data structure. ** Set TEMP to the script location ** that receives the condition code. ** ** Because there is no script command ** to load a longword into a register, ** we use a CALL command. */ /*<<<*/ SCR_CALLR, 24, /* ** Get the condition code. */ SCR_MOVE_TBL ^ SCR_DATA_IN, offsetof (struct dsb, sense), /* ** No data phase may follow! */ SCR_CALL, PADDR (checkatn), SCR_JUMP, PADDR (no_data), /*>>>*/ /* ** The CALL jumps to this point. ** Prepare for a RESTORE_POINTER message. ** Save the TEMP register into the saved pointer. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /* ** Load scratcha, because in case of a selection timeout, ** the host will expect a new value for startpos in ** the scratcha register. */ SCR_COPY (4), PADDR (startpos), RADDR (scratcha), #ifdef NCR_GETCC_WITHMSG /* ** If QUIRK_NOMSG is set, select without ATN. ** and don't send a message. */ SCR_FROM_REG (QU_REG), 0, SCR_JUMP ^ IFTRUE (MASK (QUIRK_NOMSG, QUIRK_NOMSG)), PADDRH(getcc3), /* ** Then try to connect to the target. ** If we are reselected, special treatment ** of the current job is required before ** accepting the reselection. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR(badgetcc), /* ** Send the IDENTIFY message. ** In case of short transfer, remove ATN. */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg2), SCR_CLR (SCR_ATN), 0, /* ** save the first byte of the message. */ SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (prepare2), #endif }/*-------------------------< GETCC3 >----------------------*/,{ /* ** Try to connect to the target. ** If we are reselected, special treatment ** of the current job is required before ** accepting the reselection. ** ** Silly target won't accept a message. ** Select without ATN. */ SCR_SEL_TBL ^ offsetof (struct dsb, select), PADDR(badgetcc), /* ** Force error if selection timeout */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)), 0, /* ** don't negotiate. */ SCR_JUMP, PADDR (prepare2), }/*-------------------------< ABORTTAG >-------------------*/,{ /* ** Abort a bad reselection. ** Set the message to ABORT vs. ABORT_TAG */ SCR_LOAD_REG (scratcha, MSG_ABORT_TAG), 0, SCR_JUMPR ^ IFFALSE (CARRYSET), 8, }/*-------------------------< ABORT >----------------------*/,{ SCR_LOAD_REG (scratcha, MSG_ABORT), 0, SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, /* ** and send it. ** we expect an immediate disconnect */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, SCR_JUMP, PADDR (start), }/*-------------------------< SNOOPTEST >-------------------*/,{ /* ** Read the variable. */ SCR_COPY (4), KVAR (KVAR_NCR_CACHE), RADDR (scratcha), /* ** Write the variable. */ SCR_COPY (4), RADDR (temp), KVAR (KVAR_NCR_CACHE), /* ** Read back the variable. 
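** ** (Illustrative note, not in the original source: the snoop test makes ** the host and the chip exchange a value through the ncr_cache kernel ** variable -- the host writes it, the chip copies it through its own ** bus interface, and the host reads it back. If the values disagree, ** host caches and chip DMA are not coherent; ncr_attach() below runs ** ncr_snooptest() and refuses to attach with "CACHE INCORRECTLY ** CONFIGURED" in that case.)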
*/ SCR_COPY (4), KVAR (KVAR_NCR_CACHE), RADDR (temp), }/*-------------------------< SNOOPEND >-------------------*/,{ /* ** And stop. */ SCR_INT, 99, }/*--------------------------------------------------------*/ }; /*========================================================== ** ** ** Fill in #define dependent parts of the script ** ** **========================================================== */ static void ncr_script_fill (struct script * scr, struct scripth * scrh) { int i; ncrcmd *p; p = scrh->tryloop; for (i=0; i<MAX_START; i++) { *p++ =SCR_COPY (4); *p++ =NADDR (squeue[i]); *p++ =RADDR (dsa); *p++ =SCR_CALL; *p++ =PADDR (trysel); }; *p++ =SCR_JUMP; *p++ =PADDRH(tryloop); assert ((u_long)p == (u_long)&scrh->tryloop + sizeof (scrh->tryloop)); p = scr->data_in; *p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)); *p++ =PADDR (no_data); *p++ =SCR_COPY (sizeof (ticks)); *p++ =(ncrcmd) KVAR (KVAR_TICKS); *p++ =NADDR (header.stamp.data); *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN; *p++ =offsetof (struct dsb, data[ 0]); for (i=1; i<MAX_SCATTER; i++) { *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)); *p++ =PADDR (checkatn); *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN; *p++ =offsetof (struct dsb, data[ i]); }; *p++ =SCR_CALL; *p++ =PADDR (checkatn); *p++ =SCR_JUMP; *p++ =PADDR (no_data); assert ((u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in)); p = scr->data_out; *p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT)); *p++ =PADDR (no_data); *p++ =SCR_COPY (sizeof (ticks)); *p++ =(ncrcmd) KVAR (KVAR_TICKS); *p++ =NADDR (header.stamp.data); *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT; *p++ =offsetof (struct dsb, data[ 0]); for (i=1; i<MAX_SCATTER; i++) { *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)); *p++ =PADDR (dispatch); *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT; *p++ =offsetof (struct dsb, data[ i]); }; *p++ =SCR_CALL; *p++ =PADDR (dispatch); *p++ =SCR_JUMP; *p++ =PADDR (no_data); assert ((u_long)p == (u_long)&scr->data_out + sizeof (scr->data_out)); } /*========================================================== ** ** ** Copy and rebind a script. ** ** **========================================================== */ static void ncr_script_copy_and_bind (ncb_p np, ncrcmd *src, ncrcmd *dst, int len) { ncrcmd opcode, new, old, tmp1, tmp2; ncrcmd *start, *end; int relocs, offset; start = src; end = src + len/4; offset = 0; while (src < end) { opcode = *src++; WRITESCRIPT_OFF(dst, offset, opcode); offset += 4; /* ** If we forget to change the length ** in struct script, a field will be ** padded with 0. This is an illegal ** command. */ if (opcode == 0) { device_printf(np->dev, "ERROR0 IN SCRIPT at %d.\n", (int)(src - start - 1)); DELAY (1000000); } if (DEBUG_FLAGS & DEBUG_SCRIPT) printf ("%p: <%x>\n", (src-1), (unsigned)opcode); /* ** We don't have to decode ALL commands */ switch (opcode >> 28) { case 0xc: /* ** COPY has TWO arguments. */ relocs = 2; tmp1 = src[0]; if ((tmp1 & RELOC_MASK) == RELOC_KVAR) tmp1 = 0; tmp2 = src[1]; if ((tmp2 & RELOC_MASK) == RELOC_KVAR) tmp2 = 0; if ((tmp1 ^ tmp2) & 3) { device_printf(np->dev, "ERROR1 IN SCRIPT at %d.\n", (int)(src - start - 1)); DELAY (1000000); } /* ** If PREFETCH feature not enabled, remove ** the NO FLUSH bit if present. */ if ((opcode & SCR_NO_FLUSH) && !(np->features&FE_PFEN)) WRITESCRIPT_OFF(dst, offset - 4, (opcode & ~SCR_NO_FLUSH)); break; case 0x0: /* ** MOVE (absolute address) */ relocs = 1; break; case 0x8: /* ** JUMP / CALL ** don't relocate if relative :-) */ if (opcode & 0x00800000) relocs = 0; else relocs = 1; break; case 0x4: case 0x5: case 0x6: case 0x7: relocs = 1; break; default: relocs = 0; break; } if (relocs) { while (relocs--) { old = *src++; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + rman_get_start(np->reg_res); break; case RELOC_LABEL: new = (old & ~RELOC_MASK) + np->p_script; break; case RELOC_LABELH: new = (old & ~RELOC_MASK) + np->p_scripth; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + vtophys(np); break; case RELOC_KVAR: if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) || ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST)) panic("ncr KVAR out of range"); new = vtophys(script_kvars[old & ~RELOC_MASK]); break; case 0: /* Don't relocate a 0 address.
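(Illustrative note, not in the original source: a relocation token carries its type in the top nibble -- RELOC_SOFTC, RELOC_LABEL, RELOC_LABELH, RELOC_REGISTER or RELOC_KVAR -- and an offset in the low 28 bits, so each case above computes, in effect, new = (old & ~RELOC_MASK) + base, where base is the softc physical address, the script or scripth physical address, the chip register base, or a kernel variable address, respectively.)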
*/ if (old == 0) { new = old; break; } /* FALLTHROUGH */ default: panic("ncr_script_copy_and_bind: weird relocation %x @ %d\n", old, (int)(src - start)); break; } WRITESCRIPT_OFF(dst, offset, new); offset += 4; } } else { WRITESCRIPT_OFF(dst, offset, *src++); offset += 4; } } } /*========================================================== ** ** ** Auto configuration. ** ** **========================================================== */ #if 0 /*---------------------------------------------------------- ** ** Reduce the transfer length to the max value ** we can transfer safely. ** ** Reading a block greater than MAX_SIZE from the ** raw (character) device exercises a memory leak ** in the vm subsystem. This is common to ALL devices. ** We have submitted a description of this bug to ** <FreeBSD-bugs@freefall.cdrom.com>. ** It should be fixed in the current release. ** **---------------------------------------------------------- */ void ncr_min_phys (struct buf *bp) { if ((unsigned long)bp->b_bcount > MAX_SIZE) bp->b_bcount = MAX_SIZE; } #endif #if 0 /*---------------------------------------------------------- ** ** Maximal number of outstanding requests per target. ** **---------------------------------------------------------- */ u_int32_t ncr_info (int unit) { return (1); /* may be changed later */ } #endif /*---------------------------------------------------------- ** ** NCR chip devices table and chip look up function. ** Feature bits are defined in ncrreg.h. Is this the ** right place? ** **---------------------------------------------------------- */ typedef struct { unsigned long device_id; unsigned short minrevid; char *name; unsigned char maxburst; unsigned char maxoffs; unsigned char clock_divn; unsigned int features; } ncr_chip; static ncr_chip ncr_chip_table[] = { {NCR_810_ID, 0x00, "ncr 53c810 fast10 scsi", 4, 8, 4, FE_ERL} , {NCR_810_ID, 0x10, "ncr 53c810a fast10 scsi", 4, 8, 4, FE_ERL|FE_LDSTR|FE_PFEN|FE_BOF} , {NCR_815_ID, 0x00, "ncr 53c815 fast10 scsi", 4, 8, 4, FE_ERL|FE_BOF} , {NCR_820_ID, 0x00, "ncr 53c820 fast10 wide scsi", 4, 8, 4, FE_WIDE|FE_ERL} , {NCR_825_ID, 0x00, "ncr 53c825 fast10 wide scsi", 4, 8, 4, FE_WIDE|FE_ERL|FE_BOF} , {NCR_825_ID, 0x10, "ncr 53c825a fast10 wide scsi", 7, 8, 4, FE_WIDE|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_860_ID, 0x00, "ncr 53c860 fast20 scsi", 4, 8, 5, FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_LDSTR|FE_PFEN} , {NCR_875_ID, 0x00, "ncr 53c875 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_875_ID, 0x02, "ncr 53c875 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_875_ID2, 0x00, "ncr 53c875j fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_885_ID, 0x00, "ncr 53c885 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_895_ID, 0x00, "ncr 53c895 fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_896_ID, 0x00, "ncr 53c896 fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_895A_ID, 0x00, "ncr 53c895a fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_1510D_ID, 0x00, "ncr 53c1510d fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} }; static int ncr_chip_lookup(u_long device_id, u_char revision_id) { int i, found; found = -1; for (i = 0; i < 
nitems(ncr_chip_table); i++) { if (device_id == ncr_chip_table[i].device_id && ncr_chip_table[i].minrevid <= revision_id) { if (found < 0 || ncr_chip_table[found].minrevid < ncr_chip_table[i].minrevid) { found = i; } } } return found; } /*---------------------------------------------------------- ** ** Probe the host adapter. ** **---------------------------------------------------------- */ static int ncr_probe (device_t dev) { int i; i = ncr_chip_lookup(pci_get_devid(dev), pci_get_revid(dev)); if (i >= 0) { device_set_desc(dev, ncr_chip_table[i].name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /*========================================================== ** ** NCR chip clock divisor table. ** Divisors are multiplied by 10,000,000 in order to make ** the calculations simpler. ** **========================================================== */ #define _5M 5000000 static u_long div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /*=============================================================== ** ** NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128 ** transfers. 32, 64 and 128 are only supported by 875 and 895 chips. ** We use log base 2 (burst length) as internal code, with ** value 0 meaning "burst disabled". ** **=============================================================== */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. */ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static void ncr_init_burst(ncb_p np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /*========================================================== ** ** ** Auto configuration: attach and init a host adapter. ** ** **========================================================== */ static int ncr_attach (device_t dev) { ncb_p np = (struct ncb*) device_get_softc(dev); u_char rev = 0; u_long period; int i, rid; u_int8_t usrsync; u_int8_t usrwide; struct cam_devq *devq; /* ** allocate and initialize structures. */ np->dev = dev; mtx_init(&np->lock, "ncr", NULL, MTX_DEF); callout_init_mtx(&np->timer, &np->lock, 0); /* ** Try to map the controller chip to ** virtual and physical memory. */ np->reg_rid = PCIR_BAR(1); np->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &np->reg_rid, RF_ACTIVE); if (!np->reg_res) { device_printf(dev, "could not map memory\n"); return ENXIO; } /* ** Now the INB INW INL OUTB OUTW OUTL macros ** can be used safely. */ #ifdef NCR_IOMAPPED /* ** Try to map the controller chip into iospace. */ if (!pci_map_port (config_id, 0x10, &np->port)) return; #endif /* ** Save some controller register default values */ np->rv_scntl3 = INB(nc_scntl3) & 0x77; np->rv_dmode = INB(nc_dmode) & 0xce; np->rv_dcntl = INB(nc_dcntl) & 0xa9; np->rv_ctest3 = INB(nc_ctest3) & 0x01; np->rv_ctest4 = INB(nc_ctest4) & 0x88; np->rv_ctest5 = INB(nc_ctest5) & 0x24; np->rv_gpcntl = INB(nc_gpcntl); np->rv_stest2 = INB(nc_stest2) & 0x20; if (bootverbose >= 2) { printf ("\tBIOS values: SCNTL3:%02x DMODE:%02x DCNTL:%02x\n", np->rv_scntl3, np->rv_dmode, np->rv_dcntl); printf ("\t CTEST3:%02x CTEST4:%02x CTEST5:%02x\n", np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } np->rv_dcntl |= NOCOM; /* ** Do chip dependent initialization.
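** ** (Illustrative note, not in the original source: ncr_chip_lookup() ** above returns the table entry with a matching device id and the ** largest minrevid that is still <= the chip's revision, so newer ** steppings pick up the richer feature set -- e.g. an NCR_875_ID part ** at revision 0x02 or later matches the FE_DBLR entry of the table.)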
*/ rev = pci_get_revid(dev); /* ** Get chip features from chips table. */ i = ncr_chip_lookup(pci_get_devid(dev), rev); if (i >= 0) { np->maxburst = ncr_chip_table[i].maxburst; np->maxoffs = ncr_chip_table[i].maxoffs; np->clock_divn = ncr_chip_table[i].clock_divn; np->features = ncr_chip_table[i].features; } else { /* Shouldn't happen if probe() is ok */ np->maxburst = 4; np->maxoffs = 8; np->clock_divn = 4; np->features = FE_ERL; } np->maxwide = np->features & FE_WIDE ? 1 : 0; np->clock_khz = np->features & FE_CLK80 ? 80000 : 40000; if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; /* ** Get the frequency of the chip's clock. ** Find the right value for scntl3. */ if (np->features & (FE_ULTRA|FE_ULTRA2)) ncr_getclock(np, np->multiplier); #ifdef NCR_TEKRAM_EEPROM if (bootverbose) { device_printf(dev, "Tekram EEPROM read %s\n", read_tekram_eeprom (np, NULL) ? "succeeded" : "failed"); } #endif /* NCR_TEKRAM_EEPROM */ /* * If scntl3 != 0, we assume BIOS is present. */ if (np->rv_scntl3) np->features |= FE_BIOS; /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. */ - period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; + period = howmany(4 * div_10M[0], np->clock_khz); if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; - else np->minsync = (period + 40 - 1) / 40; + else np->minsync = howmany(period, 40); /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & FE_ULTRA2)) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * Now, some features available with Symbios compatible boards. * LED support through GPIO0 and DIFF support. */ #ifdef SCSI_NCR_SYMBIOS_COMPAT if (!(np->rv_gpcntl & 0x01)) np->features |= FE_LED0; #if 0 /* Not safe enough without NVRAM support or user settable option */ if (!(INB(nc_gpreg) & 0x08)) np->features |= FE_DIFF; #endif #endif /* SCSI_NCR_SYMBIOS_COMPAT */ /* * Prepare initial IO registers settings. * Trust BIOS only if we believe we have one and if we want to.
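* * (Illustrative worked example for the minsync computation above, not * in the original source: with a 40 MHz clock, clock_khz = 40000 and * div_10M[0] = 10000000, so period = howmany(4 * 10000000, 40000) = * 1000 tenths of ns; that is above 500, so minsync = howmany(1000, 40) * = 25, i.e. Fast-10. With an 80 MHz clock (FE_CLK80), period = 500 * and minsync = 12, i.e. Fast-20.)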
*/ #ifdef SCSI_NCR_TRUST_BIOS if (!(np->features & FE_BIOS)) { #else if (1) { #endif np->rv_dmode = 0; np->rv_dcntl = NOCOM; np->rv_ctest3 = 0; np->rv_ctest4 = MPEE; np->rv_ctest5 = 0; np->rv_stest2 = 0; if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_PFEN) np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ if (np->features & FE_DIFF) np->rv_stest2 |= 0x20; /* Differential mode */ ncr_init_burst(np, np->maxburst); /* Max dwords burst length */ } else { np->maxburst = burst_code(np->rv_dmode, np->rv_ctest4, np->rv_ctest5); } /* ** Get on-chip SRAM address, if supported */ if ((np->features & FE_RAM) && sizeof(struct script) <= 4096) { np->sram_rid = PCIR_BAR(2); np->sram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &np->sram_rid, RF_ACTIVE); } /* ** Allocate structure for script relocation. */ if (np->sram_res != NULL) { np->script = NULL; np->p_script = rman_get_start(np->sram_res); } else if (sizeof (struct script) > PAGE_SIZE) { np->script = (struct script*) contigmalloc (round_page(sizeof (struct script)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->script = (struct script *) malloc (sizeof (struct script), M_DEVBUF, M_WAITOK); } if (sizeof (struct scripth) > PAGE_SIZE) { np->scripth = (struct scripth*) contigmalloc (round_page(sizeof (struct scripth)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->scripth = (struct scripth *) malloc (sizeof (struct scripth), M_DEVBUF, M_WAITOK); } #ifdef SCSI_NCR_PCI_CONFIG_FIXUP /* ** If cache line size is enabled, check PCI config space and ** try to fix it up if necessary. */ #ifdef PCIR_CACHELNSZ /* To be sure that new PCI stuff is present */ { u_char cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); u_short command = pci_read_config(dev, PCIR_COMMAND, 2); if (!cachelnsz) { cachelnsz = 8; device_printf(dev, "setting PCI cache line size register to %d.\n", (int)cachelnsz); pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1); } if (!(command & PCIM_CMD_MWRICEN)) { command |= PCIM_CMD_MWRICEN; device_printf(dev, "setting PCI command write and invalidate.\n"); pci_write_config(dev, PCIR_COMMAND, command, 2); } } #endif /* PCIR_CACHELNSZ */ #endif /* SCSI_NCR_PCI_CONFIG_FIXUP */ /* Initialize per-target user settings */ usrsync = 0; if (SCSI_NCR_DFLT_SYNC) { usrsync = SCSI_NCR_DFLT_SYNC; if (usrsync > np->maxsync) usrsync = np->maxsync; if (usrsync < np->minsync) usrsync = np->minsync; } usrwide = (SCSI_NCR_MAX_WIDE); if (usrwide > np->maxwide) usrwide=np->maxwide; for (i=0;i<MAX_TARGET;i++) { tcb_p tp = &np->target[i]; tp->tinfo.user.period = usrsync; tp->tinfo.user.offset = usrsync != 0 ? np->maxoffs : 0; tp->tinfo.user.width = usrwide; tp->tinfo.disc_tag = NCR_CUR_DISCENB | NCR_CUR_TAGENB | NCR_USR_DISCENB | NCR_USR_TAGENB; } /* ** Bells and whistles ;-) */ if (bootverbose) device_printf(dev, "minsync=%d, maxsync=%d, maxoffs=%d, %d dwords burst, %s dma fifo\n", np->minsync, np->maxsync, np->maxoffs, burst_length(np->maxburst), (np->rv_ctest5 & DFS) ? "large" : "normal"); /* ** Print some complementary information that can be helpful. */ if (bootverbose) device_printf(dev, "%s, %s IRQ driver%s\n", np->rv_stest2 & 0x20 ? 
"differential" : "single-ended", np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->sram_res ? ", using on-chip SRAM" : ""); /* ** Patch scripts to physical addresses */ ncr_script_fill (&script0, &scripth0); if (np->script) np->p_script = vtophys(np->script); np->p_scripth = vtophys(np->scripth); ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script, sizeof(struct script)); ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth, sizeof(struct scripth)); /* ** Patch the script for LED support. */ if (np->features & FE_LED0) { WRITESCRIPT(reselect[0], SCR_REG_REG(gpreg, SCR_OR, 0x01)); WRITESCRIPT(reselect1[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe)); WRITESCRIPT(reselect2[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe)); } /* ** init data structure */ np->jump_tcb.l_cmd = SCR_JUMP; np->jump_tcb.l_paddr = NCB_SCRIPTH_PHYS (np, abort); /* ** Get SCSI addr of host adapter (set by bios?). */ np->myaddr = INB(nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SCSI_NCR_MYADDR; #ifdef NCR_DUMP_REG /* ** Log the initial register contents */ { int reg; for (reg=0; reg<256; reg+=4) { if (reg%16==0) printf ("reg[%2x]", reg); printf (" %08x", (int)pci_conf_read (config_id, reg)); if (reg%16==12) printf ("\n"); } } #endif /* NCR_DUMP_REG */ /* ** Reset chip. */ OUTB (nc_istat, SRST); DELAY (1000); OUTB (nc_istat, 0 ); /* ** Now check the cache handling of the pci chipset. */ if (ncr_snooptest (np)) { printf ("CACHE INCORRECTLY CONFIGURED.\n"); return EINVAL; } /* ** Install the interrupt handler. */ rid = 0; np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (np->irq_res == NULL) { device_printf(dev, "interruptless mode: reduced performance.\n"); } else { bus_setup_intr(dev, np->irq_res, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, ncr_intr, np, &np->irq_handle); } /* ** Create the device queue. We only allow MAX_START-1 concurrent ** transactions so we can be sure to have one element free in our ** start queue to reset to the idle loop. */ devq = cam_simq_alloc(MAX_START - 1); if (devq == NULL) return ENOMEM; /* ** Now tell the generic SCSI layer ** about our bus. */ np->sim = cam_sim_alloc(ncr_action, ncr_poll, "ncr", np, device_get_unit(dev), &np->lock, 1, MAX_TAGS, devq); if (np->sim == NULL) { cam_simq_free(devq); return ENOMEM; } mtx_lock(&np->lock); if (xpt_bus_register(np->sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(np->sim, /*free_devq*/ TRUE); mtx_unlock(&np->lock); return ENOMEM; } if (xpt_create_path(&np->path, /*periph*/NULL, cam_sim_path(np->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(np->sim)); cam_sim_free(np->sim, /*free_devq*/TRUE); mtx_unlock(&np->lock); return ENOMEM; } /* ** start the timeout daemon */ ncr_timeout (np); np->lasttime=0; mtx_unlock(&np->lock); return 0; } /*========================================================== ** ** ** Process pending device interrupts. ** ** **========================================================== */ static void ncr_intr(vnp) void *vnp; { ncb_p np = vnp; mtx_lock(&np->lock); ncr_intr_locked(np); mtx_unlock(&np->lock); } static void ncr_intr_locked(ncb_p np) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Repeat until no outstanding ints */ do { ncr_exception (np); } while (INB(nc_istat) & (INTF|SIP|DIP)); np->ticks = 100; } if (DEBUG_FLAGS & DEBUG_TINY) printf ("]\n"); } /*========================================================== ** ** ** Start execution of a SCSI command. 
** This is called from the generic SCSI driver. ** ** **========================================================== */ static void ncr_action (struct cam_sim *sim, union ccb *ccb) { ncb_p np; np = (ncb_p) cam_sim_softc(sim); mtx_assert(&np->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { nccb_p cp; lcb_p lp; tcb_p tp; struct ccb_scsiio *csio; u_int8_t *msgptr; u_int msglen; u_int msglen2; int segments; u_int8_t nego; u_int8_t idmsg; int qidx; tp = &np->target[ccb->ccb_h.target_id]; csio = &ccb->csio; /* * Make sure we support this request. We can't do * PHYS pointers. */ if (ccb->ccb_h.flags & CAM_CDB_PHYS) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } /* * Last time we need to check if this CCB needs to * be aborted. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(ccb); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; /*--------------------------------------------------- ** ** Assign an nccb / bind ccb ** **---------------------------------------------------- */ cp = ncr_get_nccb (np, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (cp == NULL) { /* XXX JGibbs - Freeze SIMQ */ ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } cp->ccb = ccb; /*--------------------------------------------------- ** ** timestamp ** **---------------------------------------------------- */ /* ** XXX JGibbs - Isn't this expensive ** enough to be conditionalized?? */ bzero (&cp->phys.header.stamp, sizeof (struct tstamp)); cp->phys.header.stamp.start = ticks; nego = 0; if (tp->nego_cp == NULL) { if (tp->tinfo.current.width != tp->tinfo.goal.width) { tp->nego_cp = cp; nego = NS_WIDE; } else if ((tp->tinfo.current.period != tp->tinfo.goal.period) || (tp->tinfo.current.offset != tp->tinfo.goal.offset)) { tp->nego_cp = cp; nego = NS_SYNC; } } /*--------------------------------------------------- ** ** choose a new tag ... 
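** ** (Illustrative note, not in the original source: lp->lasttag cycles ** through 1..255; the inner scan below walks the lun's nccb list and, ** if some active nccb already owns the candidate tag, the outer loop ** advances to the next candidate, so every outstanding command on the ** lun ends up with a unique queue tag.)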
** **---------------------------------------------------- */ lp = tp->lp[ccb->ccb_h.target_lun]; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 && (ccb->csio.tag_action != CAM_TAG_ACTION_NONE) && (nego == 0)) { /* ** assign a tag to this nccb */ while (!cp->tag) { nccb_p cp2 = lp->next_nccb; lp->lasttag = lp->lasttag % 255 + 1; while (cp2 && cp2->tag != lp->lasttag) cp2 = cp2->next_nccb; if (cp2) continue; cp->tag=lp->lasttag; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_ADDR(ccb); printf ("using tag #%d.\n", cp->tag); } } } else { cp->tag=0; } /*---------------------------------------------------- ** ** Build the identify / tag / sdtr message ** **---------------------------------------------------- */ idmsg = MSG_IDENTIFYFLAG | ccb->ccb_h.target_lun; if (tp->tinfo.disc_tag & NCR_CUR_DISCENB) idmsg |= MSG_IDENTIFY_DISCFLAG; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; if (cp->tag) { msgptr[msglen++] = ccb->csio.tag_action; msgptr[msglen++] = cp->tag; } switch (nego) { case NS_SYNC: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_SDTR_LEN; msgptr[msglen++] = MSG_EXT_SDTR; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = tp->tinfo.goal.offset; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("sync msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-5]); printf (".\n"); }; break; case NS_WIDE: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_WDTR_LEN; msgptr[msglen++] = MSG_EXT_WDTR; msgptr[msglen++] = tp->tinfo.goal.width; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("wide msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-4]); printf (".\n"); }; break; } /*---------------------------------------------------- ** ** Build the identify message for getcc. ** **---------------------------------------------------- */ cp->scsi_smsg2 [0] = idmsg; msglen2 = 1; /*---------------------------------------------------- ** ** Build the data descriptors ** **---------------------------------------------------- */ /* XXX JGibbs - Handle other types of I/O */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { segments = ncr_scatter(&cp->phys, (vm_offset_t)csio->data_ptr, (vm_size_t)csio->dxfer_len); if (segments < 0) { ccb->ccb_h.status = CAM_REQ_TOO_BIG; ncr_free_nccb(np, cp); xpt_done(ccb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_in); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } else { /* CAM_DIR_OUT */ cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_out); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } } else { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, no_data); cp->phys.header.goalp = cp->phys.header.savep; } cp->phys.header.lastp = cp->phys.header.savep; /*---------------------------------------------------- ** ** fill in nccb ** **---------------------------------------------------- ** ** ** physical -> virtual backlink ** Generic SCSI command */ cp->phys.header.cp = cp; /* ** Startqueue */ cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, select); cp->phys.header.launch.l_cmd = SCR_JUMP; /* ** select */ cp->phys.select.sel_id = ccb->ccb_h.target_id; cp->phys.select.sel_scntl3 = tp->tinfo.wval; cp->phys.select.sel_sxfer = tp->tinfo.sval; /* ** message */ cp->phys.smsg.addr = CCB_PHYS (cp, scsi_smsg); cp->phys.smsg.size = msglen; cp->phys.smsg2.addr = CCB_PHYS (cp, scsi_smsg2); cp->phys.smsg2.size = msglen2; /* ** command */ cp->phys.cmd.addr = vtophys (scsiio_cdb_ptr(csio)); cp->phys.cmd.size = csio->cdb_len; /* 
** sense command */ cp->phys.scmd.addr = CCB_PHYS (cp, sensecmd); cp->phys.scmd.size = 6; /* ** patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = ccb->ccb_h.target_lun << 5; cp->sensecmd[4] = csio->sense_len; /* ** sense data */ cp->phys.sense.addr = vtophys (&csio->sense_data); cp->phys.sense.size = csio->sense_len; /* ** status */ cp->actualquirks = QUIRK_NOMSG; cp->host_status = nego ? HS_NEGOTIATE : HS_BUSY; cp->s_status = SCSI_STATUS_ILLEGAL; cp->parity_status = 0; cp->xerr_status = XE_OK; cp->sync_status = tp->tinfo.sval; cp->nego_status = nego; cp->wide_status = tp->tinfo.wval; /*---------------------------------------------------- ** ** Critical region: start this job. ** **---------------------------------------------------- */ /* ** reselect pattern and activate this job. */ cp->jump_nccb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (cp->tag))); cp->tlimit = time_second + ccb->ccb_h.timeout / 1000 + 2; cp->magic = CCB_MAGIC; /* ** insert into start queue. */ qidx = np->squeueput + 1; if (qidx >= MAX_START) qidx = 0; np->squeue [qidx ] = NCB_SCRIPT_PHYS (np, idle); np->squeue [np->squeueput] = CCB_PHYS (cp, phys); np->squeueput = qidx; if(DEBUG_FLAGS & DEBUG_QUEUE) device_printf(np->dev, "queuepos=%d tryoffset=%d.\n", np->squeueput, (unsigned)(READSCRIPT(startpos[0]) - (NCB_SCRIPTH_PHYS (np, tryloop)))); /* ** Script processor may be waiting for reselect. ** Wake it up. */ OUTB (nc_istat, SIGP); break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; tcb_p tp; u_int update_type; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; update_type = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) update_type |= NCR_TRANS_GOAL; if (cts->type == CTS_TYPE_USER_SETTINGS) update_type |= NCR_TRANS_USER; tp = &np->target[ccb->ccb_h.target_id]; /* Tag and disc enables */ if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_CUR_DISCENB; } if (update_type & NCR_TRANS_USER) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_USR_DISCENB; } } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_CUR_TAGENB; } if (update_type & NCR_TRANS_USER) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_USR_TAGENB; } } /* Filter bus width and sync negotiation settings */ if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { if (spi->bus_width > np->maxwide) spi->bus_width = np->maxwide; } if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { if (spi->sync_period != 0 && (spi->sync_period < np->minsync)) spi->sync_period = np->minsync; } if 
((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { if (spi->sync_offset == 0) spi->sync_period = 0; if (spi->sync_offset > np->maxoffs) spi->sync_offset = np->maxoffs; } } if ((update_type & NCR_TRANS_USER) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.user.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.user.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.user.width = spi->bus_width; } if ((update_type & NCR_TRANS_GOAL) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.goal.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.goal.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.goal.width = spi->bus_width; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ncr_transinfo *tinfo; tcb_p tp = &np->target[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tinfo = &tp->tinfo.current; if (tp->tinfo.disc_tag & NCR_CUR_DISCENB) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_CUR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } else { tinfo = &tp->tinfo.user; if (tp->tinfo.disc_tag & NCR_USR_DISCENB) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_USR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } spi->sync_period = tinfo->period; spi->sync_offset = tinfo->offset; spi->bus_width = tinfo->width; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { /* XXX JGibbs - I'm sure the NCR uses a different strategy, * but it should be able to deal with Adaptec * geometry too. */ cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { OUTB (nc_scntl1, CRST); ccb->ccb_h.status = CAM_REQ_CMP; DELAY(10000); /* Wait until our interrupt handler sees it */ xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; if ((np->features & FE_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = (np->features & FE_WIDE) ? 
15 : 7; cpi->max_lun = MAX_LUN - 1; cpi->initiator_id = np->myaddr; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /*========================================================== ** ** ** Complete execution of a SCSI command. ** Signal completion to the generic SCSI driver. ** ** **========================================================== */ static void ncr_complete (ncb_p np, nccb_p cp) { union ccb *ccb; tcb_p tp; /* ** Sanity check */ if (!cp || (cp->magic!=CCB_MAGIC) || !cp->ccb) return; cp->magic = 1; cp->tlimit= 0; /* ** No Reselect anymore. */ cp->jump_nccb.l_cmd = (SCR_JUMP); /* ** No starting. */ cp->phys.header.launch.l_paddr= NCB_SCRIPT_PHYS (np, idle); /* ** timestamp */ ncb_profile (np, cp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("CCB=%x STAT=%x/%x\n", (int)(intptr_t)cp & 0xfff, cp->host_status,cp->s_status); ccb = cp->ccb; cp->ccb = NULL; tp = &np->target[ccb->ccb_h.target_id]; /* ** We do not queue more than 1 nccb per target ** with negotiation at any time. If this nccb was ** used for negotiation, clear this info in the tcb. */ if (cp == tp->nego_cp) tp->nego_cp = NULL; /* ** Check for parity errors. */ /* XXX JGibbs - What about reporting them??? */ if (cp->parity_status) { PRINT_ADDR(ccb); printf ("%d parity error(s), fallback.\n", cp->parity_status); /* ** fallback to asynch transfer. */ tp->tinfo.goal.period = 0; tp->tinfo.goal.offset = 0; } /* ** Check for extended errors. */ if (cp->xerr_status != XE_OK) { PRINT_ADDR(ccb); switch (cp->xerr_status) { case XE_EXTRA_DATA: printf ("extraneous data discarded.\n"); break; case XE_BAD_PHASE: printf ("illegal scsi phase (4/5).\n"); break; default: printf ("extended error %d.\n", cp->xerr_status); break; } if (cp->host_status==HS_COMPLETE) cp->host_status = HS_FAIL; } /* ** Check the status. */ if (cp->host_status == HS_COMPLETE) { if (cp->s_status == SCSI_STATUS_OK) { /* ** All went well. */ /* XXX JGibbs - Properly calculate residual */ tp->bytes += ccb->csio.dxfer_len; tp->transfers ++; ccb->ccb_h.status = CAM_REQ_CMP; } else if ((cp->s_status & SCSI_STATUS_SENSE) != 0) { /* * XXX Could be TERMIO too. Should record * original status. 
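* * (Illustrative note, not in the original source: SCSI_STATUS_SENSE is * the driver's private marker, ORed into SS_REG by the GETCC script * path, meaning the sense data has already been fetched; it is stripped * below, and if the remaining status is clean the result is reported as * CAM_AUTOSNS_VALID, otherwise as CAM_AUTOSENSE_FAIL.)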
*/ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; cp->s_status &= ~SCSI_STATUS_SENSE; if (cp->s_status == SCSI_STATUS_OK) { ccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR; } else { ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; } } else { ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = cp->s_status; } } else if (cp->host_status == HS_SEL_TIMEOUT) { /* ** Device failed selection */ ccb->ccb_h.status = CAM_SEL_TIMEOUT; } else if (cp->host_status == HS_TIMEOUT) { /* ** No response */ ccb->ccb_h.status = CAM_CMD_TIMEOUT; } else if (cp->host_status == HS_STALL) { ccb->ccb_h.status = CAM_REQUEUE_REQ; } else { /* ** Other protocol messes */ PRINT_ADDR(ccb); printf ("COMMAND FAILED (%x %x) @%p.\n", cp->host_status, cp->s_status, cp); ccb->ccb_h.status = CAM_CMD_TIMEOUT; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* ** Free this nccb */ ncr_free_nccb (np, cp); /* ** signal completion to generic driver. */ xpt_done (ccb); } /*========================================================== ** ** ** Signal all (or one) control block done. ** ** **========================================================== */ static void ncr_wakeup (ncb_p np, u_long code) { /* ** Starting at the default nccb and following ** the links, complete all jobs with a ** host_status greater than "disconnect". ** ** If the "code" parameter is not zero, ** complete all jobs that are not IDLE. */ nccb_p cp = np->link_nccb; while (cp) { switch (cp->host_status) { case HS_IDLE: break; case HS_DISCONNECT: if(DEBUG_FLAGS & DEBUG_TINY) printf ("D"); /* FALLTHROUGH */ case HS_BUSY: case HS_NEGOTIATE: if (!code) break; cp->host_status = code; /* FALLTHROUGH */ default: ncr_complete (np, cp); break; } cp = cp -> link_nccb; } } static void ncr_freeze_devq (ncb_p np, struct cam_path *path) { nccb_p cp; int i; int count; int firstskip; /* ** Starting at the first nccb and following ** the links, complete all jobs that match ** the passed in path and are in the start queue. */ cp = np->link_nccb; count = 0; firstskip = 0; while (cp) { switch (cp->host_status) { case HS_BUSY: case HS_NEGOTIATE: if ((cp->phys.header.launch.l_paddr == NCB_SCRIPT_PHYS (np, select)) && (xpt_path_comp(path, cp->ccb->ccb_h.path) >= 0)) { /* Mark for removal from the start queue */ for (i = 1; i < MAX_START; i++) { int idx; idx = np->squeueput - i; if (idx < 0) idx = MAX_START + idx; if (np->squeue[idx] == CCB_PHYS(cp, phys)) { np->squeue[idx] = NCB_SCRIPT_PHYS (np, skip); if (i > firstskip) firstskip = i; break; } } cp->host_status=HS_STALL; ncr_complete (np, cp); count++; } break; default: break; } cp = cp->link_nccb; } if (count > 0) { int j; int bidx; /* Compress the start queue */ j = 0; bidx = np->squeueput; i = np->squeueput - firstskip; if (i < 0) i = MAX_START + i; for (;;) { bidx = i - j; if (bidx < 0) bidx = MAX_START + bidx; if (np->squeue[i] == NCB_SCRIPT_PHYS (np, skip)) { j++; } else if (j != 0) { np->squeue[bidx] = np->squeue[i]; if (np->squeue[bidx] == NCB_SCRIPT_PHYS(np, idle)) break; } i = (i + 1) % MAX_START; } np->squeueput = bidx; } } /*========================================================== ** ** ** Start NCR chip. ** ** **========================================================== */ static void ncr_init(ncb_p np, char * msg, u_long code) { int i; /* ** Reset chip. */ OUTB (nc_istat, SRST); DELAY (1000); OUTB (nc_istat, 0); /* ** Message. 
*/ if (msg) device_printf(np->dev, "restart (%s).\n", msg); /* ** Clear Start Queue */ for (i=0;i<MAX_START;i++) np->squeue [i] = NCB_SCRIPT_PHYS (np, idle); /* ** Start at first entry. */ np->squeueput = 0; WRITESCRIPT(startpos[0], NCB_SCRIPTH_PHYS (np, tryloop)); WRITESCRIPT(start0 [0], SCR_INT ^ IFFALSE (0)); /* ** Wakeup all pending jobs. */ ncr_wakeup (np, code); /* ** Init chip. */ OUTB (nc_istat, 0x00 ); /* Remove Reset, abort ... */ OUTB (nc_scntl0, 0xca ); /* full arb., ena parity, par->ATN */ OUTB (nc_scntl1, 0x00 ); /* odd parity, and remove CRST!! */ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ OUTB (nc_scid , RRE|np->myaddr);/* host adapter SCSI address */ OUTW (nc_respid, 1ul<<np->myaddr);/* id to respond to */ OUTB (nc_istat , SIGP ); /* Signal Process */ OUTB (nc_dmode , np->rv_dmode); /* XXX modify burstlen ??? */ OUTB (nc_dcntl , np->rv_dcntl); OUTB (nc_ctest3, np->rv_ctest3); OUTB (nc_ctest5, np->rv_ctest5); OUTB (nc_ctest4, np->rv_ctest4);/* enable master parity checking */ OUTB (nc_stest2, np->rv_stest2|EXT); /* Extended Sreq/Sack filtering */ OUTB (nc_stest3, TE ); /* TolerANT enable */ OUTB (nc_stime0, 0x0b ); /* HTH = disabled, STO = 0.1 sec. */ if (bootverbose >= 2) { printf ("\tACTUAL values:SCNTL3:%02x DMODE:%02x DCNTL:%02x\n", np->rv_scntl3, np->rv_dmode, np->rv_dcntl); printf ("\t CTEST3:%02x CTEST4:%02x CTEST5:%02x\n", np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } /* ** Enable GPIO0 pin for writing if LED support. */ if (np->features & FE_LED0) { OUTOFFB (nc_gpcntl, 0x01); } /* ** Fill in target structure. */ for (i=0;i<MAX_TARGET;i++) { tcb_p tp = &np->target[i]; tp->tinfo.sval = 0; tp->tinfo.wval = np->rv_scntl3; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; tp->tinfo.current.width = MSG_EXT_WDTR_BUS_8_BIT; } /* ** enable ints */ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST); OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID); /* ** Start script processor. */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start)); /* * Notify the XPT of the event */ if (code == HS_RESET) xpt_async(AC_BUS_RESET, np->path, NULL); } static void ncr_poll(struct cam_sim *sim) { ncr_intr_locked(cam_sim_softc(sim)); } /*========================================================== ** ** Get clock factor and sync divisor for a given ** synchronous factor period. ** Returns the clock factor (in sxfer) and scntl3 ** synchronous divisor field. ** **========================================================== */ static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p) { u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */ u_long fak; /* Sync factor in sxfer */ u_long per; /* Period in tenths of ns */ u_long kpc; /* (per * clk) */ /* ** Compute the synchronous period in tenths of nano-seconds */ if (sfac <= 10) per = 250; else if (sfac == 11) per = 303; else if (sfac == 12) per = 500; else per = 40 * sfac; /* ** Look for the greatest clock divisor that allows an ** input speed faster than the period. */ kpc = per * clk; while (--div >= 0) if (kpc >= (div_10M[div] * 4)) break; /* ** Calculate the lowest clock factor that allows an output ** speed not faster than the period. */ fak = (kpc - 1) / div_10M[div] + 1; #if 0 /* You can #if 1 if you think this optimization is useful */ per = (fak * div_10M[div]) / clk; /* ** Why not try the immediately lower divisor and choose ** the one that allows the fastest output speed ? ** We don't want input speed too much greater than output speed.
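** A worked example with illustrative numbers (this assumes the stock div_10M[] table, where div_10M[0] == 10000000): with a 40 MHz clock (clk == 40000) and sfac 25, i.e. a 100 ns Fast-10 request, per = 40 * 25 = 1000 and kpc = 40000000; the divisor scan above stops at div 0 because kpc >= div_10M[0] * 4, fak = (kpc - 1) / div_10M[0] + 1 = 4, so *fakp becomes 0 and the chip clocks one transfer every (0 + 4) cycles of the undivided 25 ns clock, i.e. every 100 ns.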
*/ if (div >= 1 && fak < 6) { u_long fak2, per2; fak2 = (kpc - 1) / div_10M[div-1] + 1; per2 = (fak2 * div_10M[div-1]) / clk; if (per2 < per && fak2 <= 6) { fak = fak2; per = per2; --div; } } #endif if (fak < 4) fak = 4; /* Should never happen, too bad ... */ /* ** Compute and return sync parameters for the ncr */ *fakp = fak - 4; *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0); } /*========================================================== ** ** Switch sync mode for current job and its target ** **========================================================== */ static void ncr_setsync(ncb_p np, nccb_p cp, u_char scntl3, u_char sxfer, u_char period) { union ccb *ccb; struct ccb_trans_settings neg; tcb_p tp; int div; u_int target = INB (nc_sdid) & 0x0f; u_int period_10ns; assert (cp); if (!cp) return; ccb = cp->ccb; assert (ccb); if (!ccb) return; assert (target == ccb->ccb_h.target_id); tp = &np->target[target]; if (!scntl3 || !(sxfer & 0x1f)) scntl3 = np->rv_scntl3; scntl3 = (scntl3 & 0xf0) | (tp->tinfo.wval & EWS) | (np->rv_scntl3 & 0x07); /* ** Deduce the value of controller sync period from scntl3. ** period is in tenths of nano-seconds. */ div = ((scntl3 >> 4) & 0x7); if ((sxfer & 0x1f) && div) period_10ns = (((sxfer>>5)+4)*div_10M[div-1])/np->clock_khz; else period_10ns = 0; tp->tinfo.goal.period = period; tp->tinfo.goal.offset = sxfer & 0x1f; tp->tinfo.current.period = period; tp->tinfo.current.offset = sxfer & 0x1f; /* ** Stop there if sync parameters are unchanged */ if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return; tp->tinfo.sval = sxfer; tp->tinfo.wval = scntl3; if (sxfer & 0x1f) { /* ** Disable extended Sreq/Sack filtering */ if (period_10ns <= 2000) OUTOFFB (nc_stest2, EXT); } /* ** Tell the SCSI layer about the ** new transfer parameters. */ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.sync_period = period; neg.xport_specific.spi.sync_offset = sxfer & 0x1f; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; } } /*========================================================== ** ** Switch wide mode for current job and its target ** SCSI specs say: a SCSI device that accepts a WDTR ** message shall reset the synchronous agreement to ** asynchronous mode. ** **========================================================== */ static void ncr_setwide (ncb_p np, nccb_p cp, u_char wide, u_char ack) { union ccb *ccb; struct ccb_trans_settings neg; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp; u_char scntl3; u_char sxfer; assert (cp); if (!cp) return; ccb = cp->ccb; assert (ccb); if (!ccb) return; assert (target == ccb->ccb_h.target_id); tp = &np->target[target]; tp->tinfo.current.width = wide; tp->tinfo.goal.width = wide; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; scntl3 = (tp->tinfo.wval & (~EWS)) | (wide ? EWS : 0); sxfer = ack ? 
0 : tp->tinfo.sval; /* ** Stop there if sync/wide parameters are unchanged */ if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return; tp->tinfo.sval = sxfer; tp->tinfo.wval = scntl3; /* Tell the SCSI layer about the new transfer params */ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.bus_width = (scntl3 & EWS) ? MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; neg.xport_specific.spi.sync_period = 0; neg.xport_specific.spi.sync_offset = 0; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; } } /*========================================================== ** ** ** ncr timeout handler. ** ** **========================================================== ** ** Misused to keep the driver running when ** interrupts are not configured correctly. ** **---------------------------------------------------------- */ static void ncr_timeout (void *arg) { ncb_p np = arg; time_t thistime = time_second; ticks_t step = np->ticks; u_long count = 0; long signed t; nccb_p cp; mtx_assert(&np->lock, MA_OWNED); if (np->lasttime != thistime) { np->lasttime = thistime; /*---------------------------------------------------- ** ** handle ncr chip timeouts ** ** Assumption: ** We have a chance to arbitrate for the ** SCSI bus at least every 10 seconds. ** **---------------------------------------------------- */ t = thistime - np->heartbeat; if (t<2) np->latetime=0; else np->latetime++; if (np->latetime>2) { /* ** If there are no requests, the script ** processor will sleep on SEL_WAIT_RESEL. ** But we have to check whether it died. ** Let's try to wake it up. */ OUTB (nc_istat, SIGP); } /*---------------------------------------------------- ** ** handle nccb timeouts ** **---------------------------------------------------- */ for (cp=np->link_nccb; cp; cp=cp->link_nccb) { /* ** look for timed out nccbs. */ if (!cp->host_status) continue; count++; if (cp->tlimit > thistime) continue; /* ** Disable reselect. ** Remove it from startqueue. */ cp->jump_nccb.l_cmd = (SCR_JUMP); if (cp->phys.header.launch.l_paddr == NCB_SCRIPT_PHYS (np, select)) { device_printf(np->dev, "timeout nccb=%p (skip)\n", cp); cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, skip); } switch (cp->host_status) { case HS_BUSY: case HS_NEGOTIATE: /* FALLTHROUGH */ case HS_DISCONNECT: cp->host_status=HS_TIMEOUT; } cp->tag = 0; /* ** wakeup this nccb. */ ncr_complete (np, cp); } } callout_reset(&np->timer, step ? step : 1, ncr_timeout, np); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Process pending interrupts. */ if (DEBUG_FLAGS & DEBUG_TINY) printf ("{"); ncr_exception (np); if (DEBUG_FLAGS & DEBUG_TINY) printf ("}"); } } /*========================================================== ** ** log message for real hard errors ** ** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)." ** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf." 
** ** exception register: ** ds: dstat ** si: sist ** ** SCSI bus lines: ** so: control lines as driver by NCR. ** si: control lines as seen by NCR. ** sd: scsi data lines as seen by NCR. ** ** wide/fastmode: ** sxfer: (see the manual) ** scntl3: (see the manual) ** ** current script command: ** dsp: script address (relative to start of script). ** dbc: first word of script command. ** ** First 16 register of the chip: ** r0..rf ** **========================================================== */ static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat) { u_int32_t dsp; int script_ofs; int script_size; char *script_name; u_char *script_base; int i; dsp = INL (nc_dsp); if (np->p_script < dsp && dsp <= np->p_script + sizeof(struct script)) { script_ofs = dsp - np->p_script; script_size = sizeof(struct script); script_base = (u_char *) np->script; script_name = "script"; } else if (np->p_scripth < dsp && dsp <= np->p_scripth + sizeof(struct scripth)) { script_ofs = dsp - np->p_scripth; script_size = sizeof(struct scripth); script_base = (u_char *) np->scripth; script_name = "scripth"; } else { script_ofs = dsp; script_size = 0; script_base = 0; script_name = "mem"; } device_printf(np->dev, "%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", (unsigned)INB (nc_sdid)&0x0f, dstat, sist, (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs, (unsigned)INL (nc_dbc)); if (((script_ofs & 3) == 0) && (unsigned)script_ofs < script_size) { device_printf(np->dev, "script cmd = %08x\n", (int)READSCRIPT_OFF(script_base, script_ofs)); } device_printf(np->dev, "regdump:"); for (i=0; i<16;i++) printf (" %02x", (unsigned)INB_OFF(i)); printf (".\n"); } /*========================================================== ** ** ** ncr chip exception handler. ** ** **========================================================== */ static void ncr_exception (ncb_p np) { u_char istat, dstat; u_short sist; /* ** interrupt on the fly ? */ while ((istat = INB (nc_istat)) & INTF) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); OUTB (nc_istat, INTF); np->profile.num_fly++; ncr_wakeup (np, 0); } if (!(istat & (SIP|DIP))) { return; } /* ** Steinbach's Guideline for Systems Programming: ** Never test for an error condition you don't know how to handle. */ sist = (istat & SIP) ? INW (nc_sist) : 0; dstat = (istat & DIP) ? INB (nc_dstat) : 0; np->profile.num_int++; if (DEBUG_FLAGS & DEBUG_TINY) printf ("<%d|%x:%x|%x:%x>", INB(nc_scr0), dstat,sist, (unsigned)INL(nc_dsp), (unsigned)INL(nc_dbc)); if ((dstat==DFE) && (sist==PAR)) return; /*========================================================== ** ** First the normal cases. ** **========================================================== */ /*------------------------------------------- ** SCSI reset **------------------------------------------- */ if (sist & RST) { ncr_init (np, bootverbose ? "scsi reset" : NULL, HS_RESET); return; } /*------------------------------------------- ** selection timeout ** ** IID excluded from dstat mask! ** (chip bug) **------------------------------------------- */ if ((sist & STO) && !(sist & (GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR))) { ncr_int_sto (np); return; } /*------------------------------------------- ** Phase mismatch. 
**------------------------------------------- */ if ((sist & MA) && !(sist & (STO|GEN|HTH|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { ncr_int_ma (np, dstat); return; } /*---------------------------------------- ** move command with length 0 **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_MOVE_TBL)) { /* ** Target wants more data than available. ** The "no_data" script will do it. */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, no_data)); return; } /*------------------------------------------- ** Programmed interrupt **------------------------------------------- */ if ((dstat & SIR) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|IID)) && (INB(nc_dsps) <= SIR_MAX)) { ncr_int_sir (np); return; } /*======================================== ** log message for real hard errors **======================================== */ ncr_log_hard_error(np, sist, dstat); /*======================================== ** do the register dump **======================================== */ if (time_second - np->regtime > 10) { int i; np->regtime = time_second; for (i=0; i<sizeof(np->regdump); i++) ((volatile char*)&np->regdump)[i] = INB_OFF(i); np->regdump.nc_dstat = dstat; np->regdump.nc_sist = sist; } /*---------------------------------------- ** clean up the dma fifo **---------------------------------------- */ if ( (INB(nc_sstat0) & (ILF|ORF|OLF) ) || (INB(nc_sstat1) & (FF3210) ) || (INB(nc_sstat2) & (ILF1|ORF1|OLF1)) || /* wide .. */ !(dstat & DFE)) { device_printf(np->dev, "have to clear fifos.\n"); OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ } /*---------------------------------------- ** handshake timeout **---------------------------------------- */ if (sist & HTH) { device_printf(np->dev, "handshake timeout\n"); OUTB (nc_scntl1, CRST); DELAY (1000); OUTB (nc_scntl1, 0x00); OUTB (nc_scr0, HS_FAIL); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; } /*---------------------------------------- ** unexpected disconnect **---------------------------------------- */ if ((sist & UDC) && !(sist & (STO|GEN|HTH|MA|SGE|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_scr0, HS_UNEXPECTED); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; } /*---------------------------------------- ** cannot disconnect **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_WAIT_DISC)) { /* ** Unexpected data cycle while waiting for disconnect. */ if (INB(nc_sstat2) & LDSC) { /* ** It's an early reconnect. ** Let's continue ... */ OUTB (nc_dcntl, np->rv_dcntl | STD); /* ** info message */ device_printf(np->dev, "INFO: LDSC while IID.\n"); return; } device_printf(np->dev, "target %d doesn't release the bus.\n", INB (nc_sdid)&0x0f); /* ** return without restarting the NCR. ** timeout will do the real work. */ return; } /*---------------------------------------- ** single step **---------------------------------------- */ if ((dstat & SSI) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_dcntl, np->rv_dcntl | STD); return; } /* ** @RECOVER@ HTH, SGE, ABRT. ** ** We should try to recover from these interrupts. ** They may occur if there are problems with synch transfers, or ** if targets are switched on or off while the driver is running.
*/ if (sist & SGE) { /* clear scsi offsets */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); } /* ** Freeze controller to be able to read the messages. */ if (DEBUG_FLAGS & DEBUG_FREEZE) { int i; unsigned char val; for (i=0; i<0x60; i++) { switch (i%16) { case 0: device_printf(np->dev, "reg[%d0]: ", i / 16); break; case 4: case 8: case 12: printf (" "); break; } val = bus_read_1(np->reg_res, i); printf (" %x%x", val/16, val%16); if (i%16==15) printf (".\n"); } callout_stop(&np->timer); device_printf(np->dev, "halted!\n"); /* ** don't restart controller ... */ OUTB (nc_istat, SRST); return; } #ifdef NCR_FREEZE /* ** Freeze system to be able to read the messages. */ printf ("ncr: fatal error: system halted - press reset to reboot ..."); for (;;); #endif /* ** sorry, have to kill ALL jobs ... */ ncr_init (np, "fatal error", HS_FAIL); } /*========================================================== ** ** ncr chip exception handler for selection timeout ** **========================================================== ** ** There seems to be a bug in the 53c810. ** Although a STO-Interrupt is pending, ** it continues executing script commands. ** But it will fail and interrupt (IID) on ** the next instruction where it's looking ** for a valid phase. ** **---------------------------------------------------------- */ static void ncr_int_sto (ncb_p np) { u_long dsa, scratcha, diff; nccb_p cp; if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); /* ** look for nccb and set the status. */ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (cp) { cp-> host_status = HS_SEL_TIMEOUT; ncr_complete (np, cp); } /* ** repair start queue */ scratcha = INL (nc_scratcha); diff = scratcha - NCB_SCRIPTH_PHYS (np, tryloop); /* assert ((diff <= MAX_START * 20) && !(diff % 20));*/ if ((diff <= MAX_START * 20) && !(diff % 20)) { WRITESCRIPT(startpos[0], scratcha); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start)); return; } ncr_init (np, "selection timeout", HS_FAIL); } /*========================================================== ** ** ** ncr chip exception handler for phase errors. ** ** **========================================================== ** ** We have to construct a new transfer descriptor, ** to transfer the rest of the current block. ** **---------------------------------------------------------- */ static void ncr_int_ma (ncb_p np, u_char dstat) { u_int32_t dbc; u_int32_t rest; u_int32_t dsa; u_int32_t dsp; u_int32_t nxtdsp; volatile void *vdsp_base; size_t vdsp_off; u_int32_t oadr, olen; u_int32_t *tblp, *newcmd; u_char cmd, sbcl, ss0, ss2, ctest5; u_short delta; nccb_p cp; dsp = INL (nc_dsp); dsa = INL (nc_dsa); dbc = INL (nc_dbc); ss0 = INB (nc_sstat0); ss2 = INB (nc_sstat2); sbcl= INB (nc_sbcl); cmd = dbc >> 24; rest= dbc & 0xffffff; ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0; if (ctest5 & DFS) delta=(((ctest5<<8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff; else delta=(INB (nc_dfifo) - rest) & 0x7f; /* ** The data in the dma fifo has not been transfered to ** the target -> add the amount to the rest ** and clear the data. ** Check the sstat2 register in case of wide transfer. 
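** (An illustrative example of the correction below, with made-up numbers: if the interrupted move still reports rest = 100 and three bytes sit in the dma fifo (delta == 3) while DFE is clear, the residue becomes 103, plus one more byte for each latched OLF/ORF bit.)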
*/ if (!(dstat & DFE)) rest += delta; if (ss0 & OLF) rest++; if (ss0 & ORF) rest++; if (INB(nc_scntl3) & EWS) { if (ss2 & OLF1) rest++; if (ss2 & ORF1) rest++; } OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* ** locate matching cp */ cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (!cp) { device_printf(np->dev, "SCSI phase error fixup: CCB already dequeued (%p)\n", (void *)np->header.cp); return; } if (cp != np->header.cp) { device_printf(np->dev, "SCSI phase error fixup: CCB address mismatch " "(%p != %p) np->nccb = %p\n", (void *)cp, (void *)np->header.cp, (void *)np->link_nccb); /* return;*/ } /* ** find the interrupted script command, ** and the address at which to continue. */ if (dsp == vtophys (&cp->patch[2])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[0]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp == vtophys (&cp->patch[6])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[4]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) { vdsp_base = np->script; vdsp_off = dsp - np->p_script - 8; nxtdsp = dsp; } else { vdsp_base = np->scripth; vdsp_off = dsp - np->p_scripth - 8; nxtdsp = dsp; } /* ** log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) { printf ("P%x%x ",cmd&7, sbcl&7); printf ("RL=%d D=%d SS0=%x ", (unsigned) rest, (unsigned) delta, ss0); } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, np->header.cp, dsp, nxtdsp, (volatile char*)vdsp_base+vdsp_off, cmd); } /* ** get old startaddress and old length. */ oadr = READSCRIPT_OFF(vdsp_base, vdsp_off + 1*4); if (cmd & 0x10) { /* Table indirect */ tblp = (u_int32_t *) ((char*) &cp->phys + oadr); olen = tblp[0]; oadr = tblp[1]; } else { tblp = (u_int32_t *) 0; olen = READSCRIPT_OFF(vdsp_base, vdsp_off) & 0xffffff; } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%lx OADR=%lx\n", (unsigned) (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24), (void *) tblp, (u_long) olen, (u_long) oadr); } /* ** if old phase not dataphase, leave here. */ if (cmd != (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24)) { PRINT_ADDR(cp->ccb); printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", (unsigned)cmd, (unsigned)READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24); return; } if (cmd & 0x06) { PRINT_ADDR(cp->ccb); printf ("phase change %x-%x %d@%08x resid=%d.\n", cmd&7, sbcl&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); OUTB (nc_dcntl, np->rv_dcntl | STD); return; } /* ** choose the correct patch area. ** if savep points to one, choose the other. */ newcmd = cp->patch; if (cp->phys.header.savep == vtophys (newcmd)) newcmd+=4; /* ** fillin the commands */ newcmd[0] = ((cmd & 0x0f) << 24) | rest; newcmd[1] = oadr + olen - rest; newcmd[2] = SCR_JUMP; newcmd[3] = nxtdsp; if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp->ccb); printf ("newcmd[%d] %x %x %x %x.\n", (int)(newcmd - cp->patch), (unsigned)newcmd[0], (unsigned)newcmd[1], (unsigned)newcmd[2], (unsigned)newcmd[3]); } /* ** fake the return address (to the patch). ** and restart script processor at dispatcher. 
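** A sketch with made-up sizes: if the interrupted MOVE described olen = 4096 bytes at oadr and rest = 1024 of them are still outstanding, newcmd[0..1] above encode a MOVE of 1024 bytes at oadr + 3072, and newcmd[2..3] a SCR_JUMP to nxtdsp, so the script resumes exactly where the phase mismatch cut the transfer short.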
*/ np->profile.num_break++; OUTL (nc_temp, vtophys (newcmd)); if ((cmd & 7) == 0) OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); else OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, checkatn)); } /*========================================================== ** ** ** ncr chip exception handler for programmed interrupts. ** ** **========================================================== */ static int ncr_show_msg (u_char * msg) { u_char i; printf ("%x",*msg); if (*msg==MSG_EXTENDED) { for (i=1;i<8;i++) { if (i-1>msg[1]) break; printf ("-%x",msg[i]); } return (i+1); } else if ((*msg & 0xf0) == 0x20) { printf ("-%x",msg[1]); return (2); } return (1); } static void ncr_int_sir (ncb_p np) { u_char scntl3; u_char chg, ofs, per, fak, wide; u_char num = INB (nc_dsps); nccb_p cp=0; u_long dsa; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp = &np->target[target]; int i; if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); switch (num) { case SIR_SENSE_RESTART: case SIR_STALL_RESTART: break; default: /* ** lookup the nccb */ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; assert (cp); if (!cp) goto out; assert (cp == np->header.cp); if (cp != np->header.cp) goto out; } switch (num) { /*-------------------------------------------------------------------- ** ** Processing of interrupted getcc selects ** **-------------------------------------------------------------------- */ case SIR_SENSE_RESTART: /*------------------------------------------ ** Script processor is idle. ** Look for interrupted "check cond" **------------------------------------------ */ if (DEBUG_FLAGS & DEBUG_RESTART) device_printf(np->dev, "int#%d", num); cp = (nccb_p) 0; for (i=0; i<MAX_TARGET; i++) { tp = &np->target[i]; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); cp = tp->hold_cp; if (!cp) continue; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); if ((cp->host_status==HS_BUSY) && (cp->s_status==SCSI_STATUS_CHECK_COND)) break; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("- (remove)"); tp->hold_cp = cp = (nccb_p) 0; } if (cp) { if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+ restart job ..\n"); OUTL (nc_dsa, CCB_PHYS (cp, phys)); OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, getcc)); return; } /* ** no job, resume normal processing */ if (DEBUG_FLAGS & DEBUG_RESTART) printf (" -- remove trap\n"); WRITESCRIPT(start0[0], SCR_INT ^ IFFALSE (0)); break; case SIR_SENSE_FAILED: /*------------------------------------------- ** While trying to select for ** getting the condition code, ** a target reselected us. **------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_RESTART) { PRINT_ADDR(cp->ccb); printf ("in getcc reselect by t%d.\n", INB(nc_ssid) & 0x0f); } /* ** Mark this job */ cp->host_status = HS_BUSY; cp->s_status = SCSI_STATUS_CHECK_COND; np->target[cp->ccb->ccb_h.target_id].hold_cp = cp; /* ** And patch code to restart it. */ WRITESCRIPT(start0[0], SCR_INT); break; /*----------------------------------------------------------------------------- ** ** Everything you always wanted to know about transfer mode negotiation ... ** ** We try to negotiate sync and wide transfer only after ** a successful inquire command. We look at byte 7 of the ** inquire data to determine the capabilities of the target. ** ** When we try to negotiate, we append the negotiation message ** to the identify and (maybe) simple tag message. ** The host status field is set to HS_NEGOTIATE to mark this ** situation.
** ** If the target doesn't answer this message immediately ** (as required by the standard), the SIR_NEGO_FAIL interrupt ** will be raised eventually. ** The handler removes the HS_NEGOTIATE status, and sets the ** negotiated value to the default (async / nowide). ** ** If we receive a matching answer immediately, we check it ** for validity, and set the values. ** ** If we receive a Reject message immediately, we assume the ** negotiation has failed, and fall back to standard values. ** ** If we receive a negotiation message while not in HS_NEGOTIATE ** state, it's a target initiated negotiation. We prepare a ** (hopefully) valid answer, set our parameters, and send back ** this answer to the target. ** ** If the target doesn't fetch the answer (no message out phase), ** we assume the negotiation has failed, and fall back to default ** settings. ** ** When we set the values, we adjust them in all nccbs belonging ** to this target, in the controller's register, and in the "phys" ** field of the controller's struct ncb. ** ** Possible cases: hs sir msg_in value send goto ** We try to negotiate: ** -> target doesn't msgin NEG FAIL noop defa. - dispatch ** -> target rejected our msg NEG FAIL reject defa. - dispatch ** -> target answered (ok) NEG SYNC sdtr set - clrack ** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad ** -> target answered (ok) NEG WIDE wdtr set - clrack ** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad ** -> any other msgin NEG FAIL noop defa. - dispatch ** ** Target tries to negotiate: ** -> incoming message --- SYNC sdtr set SDTR - ** -> incoming message --- WIDE wdtr set WDTR - ** We sent our answer: ** -> target doesn't msgout --- PROTO ? defa. - dispatch ** **----------------------------------------------------------------------------- */ case SIR_NEGO_FAILED: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't send an answer message, ** or target rejected our message. ** ** Remove negotiation request. ** **------------------------------------------------------- */ OUTB (HS_PRT, HS_BUSY); /* FALLTHROUGH */ case SIR_NEGO_PROTO: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't fetch the answer message. ** **------------------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("negotiation failed sir=%x status=%x.\n", num, cp->nego_status); } /* ** any error in negotiation: ** fall back to default mode. */ switch (cp->nego_status) { case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; } np->msgin [0] = MSG_NOOP; np->msgout[0] = MSG_NOOP; cp->nego_status = 0; OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); break; case SIR_NEGO_SYNC: /* ** Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); } /* ** get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; if (ofs==0) per=255; /* ** check values against driver limits. */ if (per < np->minsync) {chg = 1; per = np->minsync;} if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} /* ** Check against controller limits.
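** (On the wire an SDTR exchange is the five-byte extended message 01 03 01 pp oo: MSG_EXTENDED, length 3, MSG_EXT_SDTR, period factor, offset. For instance 01 03 01 19 08 requests a 100 ns period (factor 0x19 == 25) at offset 8; the byte values are illustrative, not taken from a trace.)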
*/ fak = 7; scntl3 = 0; if (ofs != 0) { ncr_getsync(np, per, &fak, &scntl3); if (fak > 7) { chg = 1; ofs = 0; } } if (ofs == 0) { fak = 7; per = 0; scntl3 = 0; } if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync: per=%d scntl3=0x%x ofs=%d fak=%d chg=%d.\n", per, scntl3, ofs, fak, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_SYNC: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. */ ncr_setsync (np, cp, 0, 0xe0, 0); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. */ ncr_setsync (np,cp,scntl3,(fak<<5)|ofs, per); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; } } /* ** It was a request. Set value and ** prepare an answer message */ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs, per); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 3; np->msgout[2] = MSG_EXT_SDTR; np->msgout[3] = per; np->msgout[4] = ofs; cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } if (!ofs) { OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); return; } np->msgin [0] = MSG_NOOP; break; case SIR_NEGO_WIDE: /* ** Wide request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); } /* ** get requested values. */ chg = 0; wide = np->msgin[3]; /* ** check values against driver limits. */ if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide: wide=%d chg=%d.\n", wide, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_WIDE: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. */ ncr_setwide (np, cp, 0, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. */ ncr_setwide (np, cp, wide, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; } } /* ** It was a request, set value and ** prepare an answer message */ ncr_setwide (np, cp, wide, 1); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 2; np->msgout[2] = MSG_EXT_WDTR; np->msgout[3] = wide; np->msgin [0] = MSG_NOOP; cp->nego_status = NS_WIDE; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_REJECT_RECEIVED: /*----------------------------------------------- ** ** We received a MSG_MESSAGE_REJECT message. 
** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT received (%x:%x).\n", (unsigned)np->lastmsg, np->msgout[0]); break; case SIR_REJECT_SENT: /*----------------------------------------------- ** ** We received an unknown message ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT sent for "); (void) ncr_show_msg (np->msgin); printf (".\n"); break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_IGN_RESIDUE: /*----------------------------------------------- ** ** We received an IGNORE RESIDUE message, ** which couldn't be handled by the script. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_IGN_WIDE_RESIDUE received, but not yet implemented.\n"); break; case SIR_MISSING_SAVE: /*----------------------------------------------- ** ** We received a DISCONNECT message, ** but the datapointer wasn't saved before. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_DISCONNECT received, but datapointer not saved:\n" "\tdata=%x save=%x goal=%x.\n", (unsigned) INL (nc_temp), (unsigned) np->header.savep, (unsigned) np->header.goalp); break; /*-------------------------------------------------------------------- ** ** Processing of a "SCSI_STATUS_QUEUE_FULL" status. ** ** XXX JGibbs - We should do the same thing for BUSY status. ** ** The current command has been rejected, ** because there are too many in the command queue. ** We have started too many commands for that target. ** **-------------------------------------------------------------------- */ case SIR_STALL_QUEUE: cp->xerr_status = XE_OK; cp->host_status = HS_COMPLETE; cp->s_status = SCSI_STATUS_QUEUE_FULL; ncr_freeze_devq(np, cp->ccb->ccb_h.path); ncr_complete(np, cp); /* FALLTHROUGH */ case SIR_STALL_RESTART: /*----------------------------------------------- ** ** Enable selecting again, ** if NO disconnected jobs. ** **----------------------------------------------- */ /* ** Look for a disconnected job. */ cp = np->link_nccb; while (cp && cp->host_status != HS_DISCONNECT) cp = cp->link_nccb; /* ** if there is one, ... */ if (cp) { /* ** wait for reselection */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, reselect)); return; } /* ** else remove the interrupt. */ device_printf(np->dev, "queue empty.\n"); WRITESCRIPT(start1[0], SCR_INT ^ IFFALSE (0)); break; } out: OUTB (nc_dcntl, np->rv_dcntl | STD); } /*========================================================== ** ** ** Acquire a control block ** ** **========================================================== */ static nccb_p ncr_get_nccb (ncb_p np, u_long target, u_long lun) { lcb_p lp; nccb_p cp = NULL; /* ** Lun structure available ? */ lp = np->target[target].lp[lun]; if (lp) { cp = lp->next_nccb; /* ** Look for free CCB */ while (cp && cp->magic) { cp = cp->next_nccb; } } /* ** if nothing available, create one.
*/ if (cp == NULL) cp = ncr_alloc_nccb(np, target, lun); if (cp != NULL) { if (cp->magic) { device_printf(np->dev, "Bogus free cp found\n"); return (NULL); } cp->magic = 1; } return (cp); } /*========================================================== ** ** ** Release one control block ** ** **========================================================== */ static void ncr_free_nccb (ncb_p np, nccb_p cp) { /* ** sanity */ assert (cp != NULL); cp -> host_status = HS_IDLE; cp -> magic = 0; } /*========================================================== ** ** ** Allocation of resources for Targets/Luns/Tags. ** ** **========================================================== */ static nccb_p ncr_alloc_nccb (ncb_p np, u_long target, u_long lun) { tcb_p tp; lcb_p lp; nccb_p cp; assert (np != NULL); if (target>=MAX_TARGET) return(NULL); if (lun >=MAX_LUN ) return(NULL); tp=&np->target[target]; if (!tp->jump_tcb.l_cmd) { /* ** initialize it. */ tp->jump_tcb.l_cmd = (SCR_JUMP^IFFALSE (DATA (0x80 + target))); tp->jump_tcb.l_paddr = np->jump_tcb.l_paddr; tp->getscr[0] = (np->features & FE_PFEN)? SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[1] = vtophys (&tp->tinfo.sval); tp->getscr[2] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_sxfer); tp->getscr[3] = (np->features & FE_PFEN)? SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[4] = vtophys (&tp->tinfo.wval); tp->getscr[5] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_scntl3); assert (((offsetof(struct ncr_reg, nc_sxfer) ^ (offsetof(struct tcb ,tinfo) + offsetof(struct ncr_target_tinfo, sval))) & 3) == 0); assert (((offsetof(struct ncr_reg, nc_scntl3) ^ (offsetof(struct tcb, tinfo) + offsetof(struct ncr_target_tinfo, wval))) &3) == 0); tp->call_lun.l_cmd = (SCR_CALL); tp->call_lun.l_paddr = NCB_SCRIPT_PHYS (np, resel_lun); tp->jump_lcb.l_cmd = (SCR_JUMP); tp->jump_lcb.l_paddr = NCB_SCRIPTH_PHYS (np, abort); np->jump_tcb.l_paddr = vtophys (&tp->jump_tcb); } /* ** Logic unit control block */ lp = tp->lp[lun]; if (!lp) { /* ** Allocate a lcb */ lp = (lcb_p) malloc (sizeof (struct lcb), M_DEVBUF, M_NOWAIT | M_ZERO); if (!lp) return(NULL); /* ** Initialize it */ lp->jump_lcb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (lun))); lp->jump_lcb.l_paddr = tp->jump_lcb.l_paddr; lp->call_tag.l_cmd = (SCR_CALL); lp->call_tag.l_paddr = NCB_SCRIPT_PHYS (np, resel_tag); lp->jump_nccb.l_cmd = (SCR_JUMP); lp->jump_nccb.l_paddr = NCB_SCRIPTH_PHYS (np, aborttag); lp->actlink = 1; /* ** Chain into LUN list */ tp->jump_lcb.l_paddr = vtophys (&lp->jump_lcb); tp->lp[lun] = lp; } /* ** Allocate a nccb */ cp = (nccb_p) malloc (sizeof (struct nccb), M_DEVBUF, M_NOWAIT|M_ZERO); if (!cp) return (NULL); if (DEBUG_FLAGS & DEBUG_ALLOC) { printf ("new nccb @%p.\n", cp); } /* ** Fill in physical addresses */ cp->p_nccb = vtophys (cp); /* ** Chain into reselect list */ cp->jump_nccb.l_cmd = SCR_JUMP; cp->jump_nccb.l_paddr = lp->jump_nccb.l_paddr; lp->jump_nccb.l_paddr = CCB_PHYS (cp, jump_nccb); cp->call_tmp.l_cmd = SCR_CALL; cp->call_tmp.l_paddr = NCB_SCRIPT_PHYS (np, resel_tmp); /* ** Chain into wakeup list */ cp->link_nccb = np->link_nccb; np->link_nccb = cp; /* ** Chain into CCB list */ cp->next_nccb = lp->next_nccb; lp->next_nccb = cp; return (cp); } /*========================================================== ** ** ** Build Scatter Gather Block ** ** **========================================================== ** ** The transfer area may be scattered among ** several non adjacent physical pages. ** ** We may use MAX_SCATTER blocks. 
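** A worked example with assumed constants (MAX_SCATTER == 33, MAX_SIZE == 64K): for a page-aligned 16K transfer free == 32, and the loop below halves chunk while chunk * free >= 2 * datalen and chunk >= 1024, leaving chunk == 512, i.e. a potential break point roughly every 512 bytes of the transfer.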
** **---------------------------------------------------------- */ static int ncr_scatter (struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen) { u_long paddr, pnext; u_short segment = 0; u_long segsize, segaddr; u_long size, csize = 0; u_long chunk = MAX_SIZE; int free; bzero (&phys->data, sizeof (phys->data)); if (!datalen) return (0); paddr = vtophys (vaddr); /* ** insert extra break points at a distance of chunk. ** We try to reduce the number of interrupts caused ** by unexpected phase changes due to disconnects. ** A typical harddisk may disconnect before ANY block. ** If we wanted to avoid unexpected phase changes at all ** we had to use a break point every 512 bytes. ** Of course the number of scatter/gather blocks is ** limited. */ free = MAX_SCATTER - 1; if (vaddr & PAGE_MASK) free -= datalen / PAGE_SIZE; if (free>1) while ((chunk * free >= 2 * datalen) && (chunk>=1024)) chunk /= 2; if(DEBUG_FLAGS & DEBUG_SCATTER) printf("ncr?:\tscattering virtual=%p size=%d chunk=%d.\n", (void *) vaddr, (unsigned) datalen, (unsigned) chunk); /* ** Build data descriptors. */ while (datalen && (segment < MAX_SCATTER)) { /* ** this segment is empty */ segsize = 0; segaddr = paddr; pnext = paddr; if (!csize) csize = chunk; while ((datalen) && (paddr == pnext) && (csize)) { /* ** continue this segment */ pnext = (paddr & (~PAGE_MASK)) + PAGE_SIZE; /* ** Compute max size */ size = pnext - paddr; /* page size */ if (size > datalen) size = datalen; /* data size */ if (size > csize ) size = csize ; /* chunksize */ segsize += size; vaddr += size; csize -= size; datalen -= size; paddr = vtophys (vaddr); } if(DEBUG_FLAGS & DEBUG_SCATTER) printf ("\tseg #%d addr=%x size=%d (rest=%d).\n", segment, (unsigned) segaddr, (unsigned) segsize, (unsigned) datalen); phys->data[segment].addr = segaddr; phys->data[segment].size = segsize; segment++; } if (datalen) { printf("ncr?: scatter/gather failed (residue=%d).\n", (unsigned) datalen); return (-1); } return (segment); } /*========================================================== ** ** ** Test the pci bus snoop logic :-( ** ** Has to be called with interrupts disabled. ** ** **========================================================== */ #ifndef NCR_IOMAPPED static int ncr_regtest (struct ncb* np) { register volatile u_int32_t data; /* ** ncr registers may NOT be cached. ** write 0xffffffff to a read only register area, ** and try to read it back. */ data = 0xffffffff; OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data); data = INL_OFF(offsetof(struct ncr_reg, nc_dstat)); #if 1 if (data == 0xffffffff) { #else if ((data & 0xe2f0fffd) != 0x02000080) { #endif printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", (unsigned) data); return (0x10); } return (0); } #endif static int ncr_snooptest (struct ncb* np) { u_int32_t ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc; int i, err=0; #ifndef NCR_IOMAPPED err |= ncr_regtest (np); if (err) return (err); #endif /* ** init */ pc = NCB_SCRIPTH_PHYS (np, snooptest); host_wr = 1; ncr_wr = 2; /* ** Set memory and register. */ ncr_cache = host_wr; OUTL (nc_temp, ncr_wr); /* ** Start script (exchange values) */ OUTL (nc_dsp, pc); /* ** Wait 'til done (with timeout) */ for (i=0; i<NCR_SNOOP_TIMEOUT; i++) if (INB(nc_istat) & (INTF|SIP|DIP)) break; /* ** Save termination position. */ pc = INL (nc_dsp); /* ** Read memory and register. */ host_rd = ncr_cache; ncr_rd = INL (nc_scratcha); ncr_bk = INL (nc_temp); /* ** check for timeout */ if (i>=NCR_SNOOP_TIMEOUT) { printf ("CACHE TEST FAILED: timeout.\n"); return (0x20); } /* ** Check termination position.
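** (If the bus snoops correctly the script has swapped the values by now: ncr_cache should hold ncr_wr (2), scratcha should hold host_wr (1), and nc_temp should still read back ncr_wr; that is exactly what the comparisons below check.)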
*/ if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) NCB_SCRIPTH_PHYS (np, snooptest), (u_long) pc, (u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8); return (0x40); } /* ** Show results. */ if (host_wr != ncr_rd) { printf ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n", (int) host_wr, (int) ncr_rd); err |= 1; } if (host_rd != ncr_wr) { printf ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n", (int) ncr_wr, (int) host_rd); err |= 2; } if (ncr_bk != ncr_wr) { printf ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n", (int) ncr_wr, (int) ncr_bk); err |= 4; } return (err); } /*========================================================== ** ** ** Profiling the drivers and targets performance. ** ** **========================================================== */ /* ** Compute the difference in milliseconds. **/ static int ncr_delta (int *from, int *to) { if (!from) return (-1); if (!to) return (-2); return ((to - from) * 1000 / hz); } #define PROFILE cp->phys.header.stamp static void ncb_profile (ncb_p np, nccb_p cp) { int co, da, st, en, di, se, post,work,disc; u_long diff; PROFILE.end = ticks; st = ncr_delta (&PROFILE.start,&PROFILE.status); if (st<0) return; /* status not reached */ da = ncr_delta (&PROFILE.start,&PROFILE.data); if (da<0) return; /* No data transfer phase */ co = ncr_delta (&PROFILE.start,&PROFILE.command); if (co<0) return; /* command not executed */ en = ncr_delta (&PROFILE.start,&PROFILE.end), di = ncr_delta (&PROFILE.start,&PROFILE.disconnect), se = ncr_delta (&PROFILE.start,&PROFILE.select); post = en - st; /* ** @PROFILE@ Disconnect time invalid if multiple disconnects */ if (di>=0) disc = se-di; else disc = 0; work = (st - co) - disc; diff = (np->disc_phys - np->disc_ref) & 0xff; np->disc_ref += diff; np->profile.num_trans += 1; if (cp->ccb) np->profile.num_bytes += cp->ccb->csio.dxfer_len; np->profile.num_disc += diff; np->profile.ms_setup += co; np->profile.ms_data += work; np->profile.ms_disc += disc; np->profile.ms_post += post; } #undef PROFILE /*========================================================== ** ** Determine the ncr's clock frequency. ** This is essential for the negotiation ** of the synchronous transfer rate. ** **========================================================== ** ** Note: we have to return the correct value. ** THERE IS NO SAVE DEFAULT VALUE. ** ** Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. ** 53C860 and 53C875 rev. 1 support fast20 transfers but ** do not have a clock doubler and so are provided with a ** 80 MHz clock. All other fast20 boards incorporate a doubler ** and so should be delivered with a 40 MHz clock. ** The future fast40 chips (895/895) use a 40 Mhz base clock ** and provide a clock quadrupler (160 Mhz). The code below ** tries to deal as cleverly as possible with all this stuff. 
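** A worked example with illustrative numbers: ncrgetfreq() below arms the GEN timer for a nominal (1 << 11) * 125 us and counts the milliseconds until the GEN interrupt fires. A true 40 MHz clock takes roughly 227 ms, and (1 << 11) * 4440 / 227 is about 40057 kHz; an 80 MHz part finishes in about half that time, which is why f1 > 45000 selects the 80 MHz divisor.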
** **---------------------------------------------------------- */ /* * Select NCR SCSI clock frequency */ static void ncr_selectclock(ncb_p np, u_char scntl3) { if (np->multiplier < 2) { OUTB(nc_scntl3, scntl3); return; } if (bootverbose >= 2) device_printf(np->dev, "enabling clock multiplier\n"); OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */ if (np->multiplier > 2) { /* Poll bit 5 of stest4 for quadrupler */ int i = 20; while (!(INB(nc_stest4) & LCKFRQ) && --i > 0) DELAY(20); if (!i) device_printf(np->dev, "the chip cannot lock the frequency\n"); } else /* Wait 20 micro-seconds for doubler */ DELAY(20); OUTB(nc_stest3, HSC); /* Halt the scsi clock */ OUTB(nc_scntl3, scntl3); OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(nc_stest3, 0x00); /* Restart scsi clock */ } /* * calculate NCR SCSI clock frequency (in KHz) */ static unsigned ncrgetfreq (ncb_p np, int gen) { int ms = 0; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned). */ OUTB (nc_stest1, 0); /* make sure clock doubler is OFF */ OUTW (nc_sien , 0); /* mask all scsi interrupts */ (void) INW (nc_sist); /* clear pending scsi interrupt */ OUTB (nc_dien , 0); /* mask all dma interrupts */ (void) INW (nc_sist); /* another one, just to be sure :) */ OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */ OUTB (nc_stime1, 0); /* disable general purpose timer */ OUTB (nc_stime1, gen); /* set to nominal delay of (1<<gen) * 125us */ while (!(INW(nc_sist) & GEN) && ms++ < 1000) DELAY(1000); /* count ms */ OUTB (nc_stime1, 0); /* disable general purpose timer */ OUTB (nc_scntl3, 0); if (bootverbose >= 2) printf ("\tDelay (GEN=%d): %u msec\n", gen, ms); /* * adjust for prescaler, and convert into KHz */ return ms ? ((1 << gen) * 4440) / ms : 0; } static void ncr_getclock (ncb_p np, u_char multiplier) { unsigned char scntl3; unsigned char stest1; scntl3 = INB(nc_scntl3); stest1 = INB(nc_stest1); np->multiplier = 1; if (multiplier > 1) { np->multiplier = multiplier; np->clock_khz = 40000 * multiplier; } else { if ((scntl3 & 7) == 0) { unsigned f1, f2; /* throw away first result */ (void) ncrgetfreq (np, 11); f1 = ncrgetfreq (np, 11); f2 = ncrgetfreq (np, 11); if (bootverbose >= 2) printf ("\tNCR clock is %uKHz, %uKHz\n", f1, f2); if (f1 > f2) f1 = f2; /* trust lower result */ if (f1 > 45000) { scntl3 = 5; /* >45Mhz: assume 80MHz */ } else { scntl3 = 3; /* <45Mhz: assume 40MHz */ } } else if ((scntl3 & 7) == 5) np->clock_khz = 80000; /* Probably a 875 rev. 1 ? */ } } /*=========================================================================*/ #ifdef NCR_TEKRAM_EEPROM struct tekram_eeprom_dev { u_char devmode; #define TKR_PARCHK 0x01 #define TKR_TRYSYNC 0x02 #define TKR_ENDISC 0x04 #define TKR_STARTUNIT 0x08 #define TKR_USETAGS 0x10 #define TKR_TRYWIDE 0x20 u_char syncparam; /* max. sync transfer rate (table ?) */ u_char filler1; u_char filler2; }; struct tekram_eeprom { struct tekram_eeprom_dev dev[16]; u_char adaptid; u_char adaptmode; #define TKR_ADPT_GT2DRV 0x01 #define TKR_ADPT_GT1GB 0x02 #define TKR_ADPT_RSTBUS 0x04 #define TKR_ADPT_ACTNEG 0x08 #define TKR_ADPT_NOSEEK 0x10 #define TKR_ADPT_MORLUN 0x20 u_char delay; /* unit ? ( table ??? ) */ u_char tags; /* use 4 times as many ...
*/ u_char filler[60]; }; static void tekram_write_bit (ncb_p np, int bit) { u_char val = 0x10 + ((bit & 1) << 1); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); OUTB (nc_gpreg, val | 0x04); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); } static int tekram_read_bit (ncb_p np) { OUTB (nc_gpreg, 0x10); DELAY(10); OUTB (nc_gpreg, 0x14); DELAY(10); return INB (nc_gpreg) & 1; } static u_short read_tekram_eeprom_reg (ncb_p np, int reg) { int bit; u_short result = 0; int cmd = 0x80 | reg; OUTB (nc_gpreg, 0x10); tekram_write_bit (np, 1); for (bit = 7; bit >= 0; bit--) { tekram_write_bit (np, cmd >> bit); } for (bit = 0; bit < 16; bit++) { result <<= 1; result |= tekram_read_bit (np); } OUTB (nc_gpreg, 0x00); return result; } static int read_tekram_eeprom(ncb_p np, struct tekram_eeprom *buffer) { u_short *p = (u_short *) buffer; u_short sum = 0; int i; if (INB (nc_gpcntl) != 0x09) { return 0; } for (i = 0; i < 64; i++) { u_short val; if((i&0x0f) == 0) printf ("%02x:", i*2); val = read_tekram_eeprom_reg (np, i); if (p) *p++ = val; sum += val; if((i&0x01) == 0x00) printf (" "); printf ("%02x%02x", val & 0xff, (val >> 8) & 0xff); if((i&0x0f) == 0x0f) printf ("\n"); } printf ("Sum = %04x\n", sum); return sum == 0x1234; } #endif /* NCR_TEKRAM_EEPROM */ static device_method_t ncr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ncr_probe), DEVMETHOD(device_attach, ncr_attach), { 0, 0 } }; static driver_t ncr_driver = { "ncr", ncr_methods, sizeof(struct ncb), }; static devclass_t ncr_devclass; DRIVER_MODULE(ncr, pci, ncr_driver, ncr_devclass, 0, 0); MODULE_DEPEND(ncr, cam, 1, 1, 1); MODULE_DEPEND(ncr, pci, 1, 1, 1); /*=========================================================================*/ #endif /* _KERNEL */ diff --git a/sys/dev/patm/if_patm_tx.c b/sys/dev/patm/if_patm_tx.c index d38d07895d0d..1b9a8a50a1db 100644 --- a/sys/dev/patm/if_patm_tx.c +++ b/sys/dev/patm/if_patm_tx.c @@ -1,1277 +1,1277 @@ /*- * Copyright (c) 2003 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The TST allocation algorithm is from the IDT driver which is: * * Copyright (c) 2000, 2001 Richard Hodges and Matriplex, inc. * All rights reserved. * * Copyright (c) 1996, 1997, 1998, 1999 Mark Tinguely * All rights reserved. 
* * Author: Hartmut Brandt * * Driver for IDT77252 based cards like ProSum's. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_natm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ENABLE_BPF #include #endif #include #include #include #include #include #include #include #include #include #include static struct mbuf *patm_tx_pad(struct patm_softc *sc, struct mbuf *m0); static void patm_launch(struct patm_softc *sc, struct patm_scd *scd); static struct patm_txmap *patm_txmap_get(struct patm_softc *); static void patm_load_txbuf(void *, bus_dma_segment_t *, int, bus_size_t, int); static void patm_tst_alloc(struct patm_softc *sc, struct patm_vcc *vcc); static void patm_tst_free(struct patm_softc *sc, struct patm_vcc *vcc); static void patm_tst_timer(void *p); static void patm_tst_update(struct patm_softc *); static void patm_tct_start(struct patm_softc *sc, struct patm_vcc *); static const char *dump_scd(struct patm_softc *sc, struct patm_scd *scd) __unused; static void patm_tct_print(struct patm_softc *sc, u_int cid) __unused; /* * Structure for communication with the loader function for transmission */ struct txarg { struct patm_softc *sc; struct patm_scd *scd; /* scheduling channel */ struct patm_vcc *vcc; /* the VCC of this PDU */ struct mbuf *mbuf; u_int hdr; /* cell header */ }; static __inline u_int cbr2slots(struct patm_softc *sc, struct patm_vcc *vcc) { /* compute the number of slots we need, make sure to get at least * the specified PCR */ - return ((u_int)(((uint64_t)(sc->mmap->tst_size - 1) * - vcc->vcc.tparam.pcr + IFP2IFATM(sc->ifp)->mib.pcr - 1) / IFP2IFATM(sc->ifp)->mib.pcr)); + return ((u_int)howmany((uint64_t)(sc->mmap->tst_size - 1) * + vcc->vcc.tparam.pcr, IFP2IFATM(sc->ifp)->mib.pcr)); } static __inline u_int slots2cr(struct patm_softc *sc, u_int slots) { return ((slots * IFP2IFATM(sc->ifp)->mib.pcr + sc->mmap->tst_size - 2) / (sc->mmap->tst_size - 1)); } /* check if we can open this one */ int patm_tx_vcc_can_open(struct patm_softc *sc, struct patm_vcc *vcc) { /* check resources */ switch (vcc->vcc.traffic) { case ATMIO_TRAFFIC_CBR: { u_int slots = cbr2slots(sc, vcc); if (slots > sc->tst_free + sc->tst_reserve) return (EINVAL); break; } case ATMIO_TRAFFIC_VBR: if (vcc->vcc.tparam.scr > sc->bwrem) return (EINVAL); if (vcc->vcc.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr) return (EINVAL); if (vcc->vcc.tparam.scr > vcc->vcc.tparam.pcr || vcc->vcc.tparam.mbs == 0) return (EINVAL); break; case ATMIO_TRAFFIC_ABR: if (vcc->vcc.tparam.tbe == 0 || vcc->vcc.tparam.nrm == 0) /* needed to compute CRM */ return (EINVAL); if (vcc->vcc.tparam.pcr > IFP2IFATM(sc->ifp)->mib.pcr || vcc->vcc.tparam.icr > vcc->vcc.tparam.pcr || vcc->vcc.tparam.mcr > vcc->vcc.tparam.icr) return (EINVAL); if (vcc->vcc.tparam.mcr > sc->bwrem || vcc->vcc.tparam.icr > sc->bwrem) return (EINVAL); break; } return (0); } #define NEXT_TAG(T) do { \ (T) = ((T) + 1) % IDT_TSQE_TAG_SPACE; \ } while (0) /* * open it */ void patm_tx_vcc_open(struct patm_softc *sc, struct patm_vcc *vcc) { struct patm_scd *scd; if (vcc->vcc.traffic == ATMIO_TRAFFIC_UBR) { /* we use UBR0 */ vcc->scd = sc->scd0; vcc->vflags |= PATM_VCC_TX_OPEN; return; } /* get an SCD */ scd = patm_scd_alloc(sc); if (scd == NULL) { /* should not happen */ patm_printf(sc, "out of SCDs\n"); return; } vcc->scd = scd; patm_scd_setup(sc, scd); patm_tct_setup(sc, scd, 
vcc); if (vcc->vcc.traffic != ATMIO_TRAFFIC_CBR) patm_tct_start(sc, vcc); vcc->vflags |= PATM_VCC_TX_OPEN; } /* * close the given vcc for transmission */ void patm_tx_vcc_close(struct patm_softc *sc, struct patm_vcc *vcc) { struct patm_scd *scd; struct mbuf *m; vcc->vflags |= PATM_VCC_TX_CLOSING; if (vcc->vcc.traffic == ATMIO_TRAFFIC_UBR) { /* let the queue PDUs go out */ vcc->scd = NULL; vcc->vflags &= ~(PATM_VCC_TX_OPEN | PATM_VCC_TX_CLOSING); return; } scd = vcc->scd; /* empty the waitq */ for (;;) { _IF_DEQUEUE(&scd->q, m); if (m == NULL) break; m_freem(m); } if (scd->num_on_card == 0) { /* we are idle */ vcc->vflags &= ~PATM_VCC_TX_OPEN; if (vcc->vcc.traffic == ATMIO_TRAFFIC_CBR) patm_tst_free(sc, vcc); patm_sram_write4(sc, scd->sram + 0, 0, 0, 0, 0); patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0); patm_scd_free(sc, scd); vcc->scd = NULL; vcc->vflags &= ~PATM_VCC_TX_CLOSING; return; } /* speed up transmission */ patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid, 0xff)); patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_ULACR(vcc->cid, 0xff)); /* wait for the interrupt to drop the number to 0 */ patm_debug(sc, VCC, "%u buffers still on card", scd->num_on_card); } /* transmission side finally closed */ void patm_tx_vcc_closed(struct patm_softc *sc, struct patm_vcc *vcc) { patm_debug(sc, VCC, "%u.%u TX closed", vcc->vcc.vpi, vcc->vcc.vci); if (vcc->vcc.traffic == ATMIO_TRAFFIC_VBR) sc->bwrem += vcc->vcc.tparam.scr; } /* * Pull off packets from the interface queue and try to transmit them. * If the transmission fails because of a full transmit channel, we drop * packets for CBR and queue them for other channels up to limit. * This limit should depend on the CDVT for VBR and ABR, but it doesn't. */ void patm_start(struct ifnet *ifp) { struct patm_softc *sc = ifp->if_softc; struct mbuf *m; struct atm_pseudohdr *aph; u_int vpi, vci, cid; struct patm_vcc *vcc; mtx_lock(&sc->mtx); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { mtx_unlock(&sc->mtx); return; } while (1) { /* get a new mbuf */ IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; /* split of pseudo header */ if (m->m_len < sizeof(*aph) && (m = m_pullup(m, sizeof(*aph))) == NULL) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } aph = mtod(m, struct atm_pseudohdr *); vci = ATM_PH_VCI(aph); vpi = ATM_PH_VPI(aph); m_adj(m, sizeof(*aph)); /* reject empty packets */ if (m->m_pkthdr.len == 0) { m_freem(m); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } /* check whether this is a legal vcc */ if (!LEGAL_VPI(sc, vpi) || !LEGAL_VCI(sc, vci) || vci == 0) { m_freem(m); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } cid = PATM_CID(sc, vpi, vci); vcc = sc->vccs[cid]; if (vcc == NULL) { m_freem(m); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } /* must be multiple of 48 if not AAL5 */ if (vcc->vcc.aal == ATMIO_AAL_0 || vcc->vcc.aal == ATMIO_AAL_34) { /* XXX AAL3/4 format? 
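 * For AAL0 the PDU is carried as raw 48-byte cell payloads, so
 * anything that is not a multiple of 48 bytes is padded out below.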
*/ if (m->m_pkthdr.len % 48 != 0 && (m = patm_tx_pad(sc, m)) == NULL) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } } else if (vcc->vcc.aal == ATMIO_AAL_RAW) { switch (vcc->vflags & PATM_RAW_FORMAT) { default: case PATM_RAW_CELL: if (m->m_pkthdr.len != 53) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); m_freem(m); continue; } break; case PATM_RAW_NOHEC: if (m->m_pkthdr.len != 52) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); m_freem(m); continue; } break; case PATM_RAW_CS: if (m->m_pkthdr.len != 64) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); m_freem(m); continue; } break; } } /* save data */ m->m_pkthdr.PH_loc.ptr = vcc; /* try to put it on the channels queue */ if (_IF_QFULL(&vcc->scd->q)) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); sc->stats.tx_qfull++; m_freem(m); continue; } _IF_ENQUEUE(&vcc->scd->q, m); #ifdef ENABLE_BPF if (!(vcc->vcc.flags & ATMIO_FLAG_NG) && (vcc->vcc.aal == ATMIO_AAL_5) && (vcc->vcc.flags & ATM_PH_LLCSNAP)) BPF_MTAP(ifp, m); #endif /* kick the channel to life */ patm_launch(sc, vcc->scd); } mtx_unlock(&sc->mtx); } /* * Pad non-AAL5 packet to a multiple of 48-byte. * We assume AAL0 only. We have still to decide on the format of AAL3/4. */ static struct mbuf * patm_tx_pad(struct patm_softc *sc, struct mbuf *m0) { struct mbuf *last, *m; u_int plen, pad, space; plen = m_length(m0, &last); if (plen != m0->m_pkthdr.len) { patm_printf(sc, "%s: mbuf length mismatch %d %u\n", __func__, m0->m_pkthdr.len, plen); m0->m_pkthdr.len = plen; if (plen == 0) { m_freem(m0); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); return (NULL); } if (plen % 48 == 0) return (m0); } pad = 48 - plen % 48; m0->m_pkthdr.len += pad; if (M_WRITABLE(last)) { if (M_TRAILINGSPACE(last) >= pad) { bzero(last->m_data + last->m_len, pad); last->m_len += pad; return (m0); } space = M_LEADINGSPACE(last); if (space + M_TRAILINGSPACE(last) >= pad) { bcopy(last->m_data, last->m_data + space, last->m_len); last->m_data -= space; bzero(last->m_data + last->m_len, pad); last->m_len += pad; return (m0); } } MGET(m, M_NOWAIT, MT_DATA); if (m == 0) { m_freem(m0); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); return (NULL); } bzero(mtod(m, u_char *), pad); m->m_len = pad; last->m_next = m; return (m0); } /* * Try to put as many packets from the channels queue onto the channel */ static void patm_launch(struct patm_softc *sc, struct patm_scd *scd) { struct txarg a; struct mbuf *m, *tmp; u_int segs; struct patm_txmap *map; int error; a.sc = sc; a.scd = scd; /* limit the number of outstanding packets to the tag space */ while (scd->num_on_card < IDT_TSQE_TAG_SPACE) { /* get the next packet */ _IF_DEQUEUE(&scd->q, m); if (m == NULL) break; a.vcc = m->m_pkthdr.PH_loc.ptr; /* we must know the number of segments beforehand - count * this may actually give a wrong number of segments for * AAL_RAW where we still need to remove the cell header */ segs = 0; for (tmp = m; tmp != NULL; tmp = tmp->m_next) if (tmp->m_len != 0) segs++; /* check whether there is space in the queue */ if (segs >= scd->space) { /* put back */ _IF_PREPEND(&scd->q, m); sc->stats.tx_out_of_tbds++; break; } /* get a DMA map */ if ((map = patm_txmap_get(sc)) == NULL) { _IF_PREPEND(&scd->q, m); sc->stats.tx_out_of_maps++; break; } /* load the map */ m->m_pkthdr.PH_loc.ptr = map; a.mbuf = m; /* handle AAL_RAW */ if (a.vcc->vcc.aal == ATMIO_AAL_RAW) { u_char hdr[4]; m_copydata(m, 0, 4, hdr); a.hdr = (hdr[0] << 24) | (hdr[1] << 16) | (hdr[2] << 8) | hdr[3]; switch (a.vcc->vflags & PATM_RAW_FORMAT) { default: case PATM_RAW_CELL: 
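/* a full 53-byte cell: strip the 4-byte header plus the HEC byte */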
m_adj(m, 5); break; case PATM_RAW_NOHEC: m_adj(m, 4); break; case PATM_RAW_CS: m_adj(m, 16); break; } } else a.hdr = IDT_TBD_HDR(a.vcc->vcc.vpi, a.vcc->vcc.vci, 0, 0); error = bus_dmamap_load_mbuf(sc->tx_tag, map->map, m, patm_load_txbuf, &a, BUS_DMA_NOWAIT); if (error == EFBIG) { if ((m = m_defrag(m, M_NOWAIT)) == NULL) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); continue; } error = bus_dmamap_load_mbuf(sc->tx_tag, map->map, m, patm_load_txbuf, &a, BUS_DMA_NOWAIT); } if (error != 0) { sc->stats.tx_load_err++; if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link); m_freem(m); continue; } if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); } } /* * Load the DMA segments into the scheduling channel */ static void patm_load_txbuf(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize, int error) { struct txarg *a= uarg; struct patm_scd *scd = a->scd; u_int w1, w3, cnt; struct idt_tbd *tbd = NULL; u_int rest = mapsize; if (error != 0) return; cnt = 0; while (nseg > 0) { if (segs->ds_len == 0) { /* transmit buffer length must be > 0 */ nseg--; segs++; continue; } /* rest after this buffer */ rest -= segs->ds_len; /* put together status word */ w1 = 0; if (rest < 48 /* && a->vcc->vcc.aal != ATMIO_AAL_5 */) /* last cell is in this buffer */ w1 |= IDT_TBD_EPDU; if (a->vcc->vcc.aal == ATMIO_AAL_5) w1 |= IDT_TBD_AAL5; else if (a->vcc->vcc.aal == ATMIO_AAL_34) w1 |= IDT_TBD_AAL34; else w1 |= IDT_TBD_AAL0; w1 |= segs->ds_len; /* AAL5 PDU length (unpadded) */ if (a->vcc->vcc.aal == ATMIO_AAL_5) w3 = mapsize; else w3 = 0; if (rest == 0) w1 |= IDT_TBD_TSIF | IDT_TBD_GTSI | (scd->tag << IDT_TBD_TAG_SHIFT); tbd = &scd->scq[scd->tail]; tbd->flags = htole32(w1); tbd->addr = htole32(segs->ds_addr); tbd->aal5 = htole32(w3); tbd->hdr = htole32(a->hdr); patm_debug(a->sc, TX, "TBD(%u): %08x %08x %08x %08x", scd->tail, w1, segs->ds_addr, w3, a->hdr); /* got to next entry */ if (++scd->tail == IDT_SCQ_SIZE) scd->tail = 0; cnt++; nseg--; segs++; } scd->space -= cnt; scd->num_on_card++; KASSERT(rest == 0, ("bad mbuf")); KASSERT(cnt > 0, ("no segs")); KASSERT(scd->space > 0, ("scq full")); KASSERT(scd->on_card[scd->tag] == NULL, ("scd on_card wedged %u%s", scd->tag, dump_scd(a->sc, scd))); scd->on_card[scd->tag] = a->mbuf; a->mbuf->m_pkthdr.csum_data = cnt; NEXT_TAG(scd->tag); patm_debug(a->sc, TX, "SCD tail %u (%lx:%lx)", scd->tail, (u_long)scd->phy, (u_long)scd->phy + (scd->tail << IDT_TBD_SHIFT)); patm_sram_write(a->sc, scd->sram, scd->phy + (scd->tail << IDT_TBD_SHIFT)); if (patm_sram_read(a->sc, a->vcc->cid * 8 + 3) & IDT_TCT_IDLE) { /* * if the connection is idle start it. We cannot rely * on a flag set by patm_tx_idle() here, because sometimes * the card seems to place an idle TSI into the TSQ but * forgets to raise an interrupt. */ patm_nor_write(a->sc, IDT_NOR_TCMDQ, IDT_TCMDQ_START(a->vcc->cid)); } } /* * packet transmitted */ void patm_tx(struct patm_softc *sc, u_int stamp, u_int status) { u_int cid, tag, last; struct mbuf *m; struct patm_vcc *vcc; struct patm_scd *scd; struct patm_txmap *map; /* get the connection */ cid = PATM_CID(sc, IDT_TBD_VPI(status), IDT_TBD_VCI(status)); if ((vcc = sc->vccs[cid]) == NULL) { /* closed UBR connection */ return; } scd = vcc->scd; tag = IDT_TSQE_TAG(stamp); last = scd->last_tag; if (tag == last) { patm_printf(sc, "same tag %u\n", tag); return; } /* Errata 12 requests us to free all entries up to the one * with the given tag. 
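 * A single status entry may therefore complete several PDUs; walk
 * the on_card ring from last_tag up to the reported tag and finish
 * each packet on the way.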
*/ do { /* next tag to try */ NEXT_TAG(last); m = scd->on_card[last]; KASSERT(m != NULL, ("%stag=%u", dump_scd(sc, scd), tag)); scd->on_card[last] = NULL; patm_debug(sc, TX, "ok tag=%x", last); map = m->m_pkthdr.PH_loc.ptr; scd->space += m->m_pkthdr.csum_data; bus_dmamap_sync(sc->tx_tag, map->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx_tag, map->map); m_freem(m); SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link); scd->num_on_card--; if (vcc->vflags & PATM_VCC_TX_CLOSING) { if (scd->num_on_card == 0) { /* done with this VCC */ if (vcc->vcc.traffic == ATMIO_TRAFFIC_CBR) patm_tst_free(sc, vcc); patm_sram_write4(sc, scd->sram + 0, 0, 0, 0, 0); patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0); patm_scd_free(sc, scd); vcc->scd = NULL; vcc->vflags &= ~PATM_VCC_TX_CLOSING; if (vcc->vcc.flags & ATMIO_FLAG_ASYNC) { patm_tx_vcc_closed(sc, vcc); if (!(vcc->vflags & PATM_VCC_OPEN)) patm_vcc_closed(sc, vcc); } else cv_signal(&sc->vcc_cv); return; } patm_debug(sc, VCC, "%u buffers still on card", scd->num_on_card); if (vcc->vcc.traffic == ATMIO_TRAFFIC_ABR) { /* insist on speeding up transmission for ABR */ patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid, 0xff)); patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_ULACR(vcc->cid, 0xff)); } } } while (last != tag); scd->last_tag = tag; if (vcc->vcc.traffic == ATMIO_TRAFFIC_ABR) { u_int acri, cps; acri = (patm_sram_read(sc, 8 * cid + 2) >> IDT_TCT_ACRI_SHIFT) & 0x3fff; cps = IFP2IFATM(sc->ifp)->mib.pcr * 32 / ((1 << (acri >> 10)) * (acri & 0x3ff)); if (cps != vcc->cps) { patm_debug(sc, VCC, "ACRI=%04x CPS=%u", acri, cps); ATMEV_SEND_ACR_CHANGED(IFP2IFATM(sc->ifp), vcc->vcc.vpi, vcc->vcc.vci, cps); vcc->cps = cps; } } patm_launch(sc, scd); } /* * VBR/ABR connection went idle * Either restart it or set the idle flag. */ void patm_tx_idle(struct patm_softc *sc, u_int cid) { struct patm_vcc *vcc; patm_debug(sc, VCC, "idle %u", cid); if ((vcc = sc->vccs[cid]) != NULL && (vcc->vflags & (PATM_VCC_TX_OPEN | PATM_VCC_TX_CLOSING)) != 0 && vcc->scd != NULL && (vcc->scd->num_on_card != 0 || _IF_QLEN(&vcc->scd->q) != 0)) { /* * If there is any packet outstanding in the SCD re-activate * the channel and kick it. */ patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_START(vcc->cid)); patm_launch(sc, vcc->scd); } } /* * Convert a (24bit) rate to the atm-forum form * Our rate is never larger than 19 bit. */ static u_int cps2atmf(u_int cps) { u_int e; if (cps == 0) return (0); cps <<= 9; e = 0; while (cps > (1024 - 1)) { e++; cps >>= 1; } return ((1 << 14) | (e << 9) | (cps & 0x1ff)); } /* * Do a binary search on the log2rate table to convert the rate * to its log form. This assumes that the ATM-Forum form is monotonically * increasing with the plain cell rate. */ static u_int rate2log(struct patm_softc *sc, u_int rate) { const uint32_t *tbl; u_int lower, upper, mid, done, val, afr; afr = cps2atmf(rate); if (sc->flags & PATM_25M) tbl = patm_rtables25; else tbl = patm_rtables155; lower = 0; upper = 255; done = 0; while (!done) { mid = (lower + upper) / 2; val = tbl[mid] >> 17; if (val == afr || upper == lower) break; if (afr > val) lower = mid + 1; else upper = mid - 1; } if (val > afr && mid > 0) mid--; return (mid); } /* * Return the table index for an increase table. The increase table * must be selected not by the RIF itself, but by PCR/2^RIF. Each table * represents an additive increase of a cell rate that can be computed * from the first table entry (the value in this entry will not be clamped * by the link rate). 
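 *
 * For reference, a minimal sketch of the inverse of cps2atmf() above,
 * assuming the same reading of the ATM-forum format (bit 14 = non-zero
 * flag, bits 9-13 = exponent, bits 0-8 = mantissa).  The helper is
 * illustrative only and not part of the driver:
 */

static __inline u_int
atmf2cps(u_int afr)
{
	if (!(afr & (1 << 14)))
		return (0);
	/* rate = 2^e * (512 + mantissa) / 512, cf. AFR_TO_INT below */
	return ((1 << ((afr >> 9) & 0x1f)) * (512 + (afr & 0x1ff)) / 512);
}

/*
 * (get_air_table() below implements the table selection described
 * above.)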
*/ static u_int get_air_table(struct patm_softc *sc, u_int rif, u_int pcr) { const uint32_t *tbl; u_int increase, base, lair0, ret, t, cps; #define GET_ENTRY(TAB, IDX) (0xffff & ((IDX & 1) ? \ (tbl[512 + (IDX / 2) + 128 * (TAB)] >> 16) : \ (tbl[512 + (IDX / 2) + 128 * (TAB)]))) #define MANT_BITS 10 #define FRAC_BITS 16 #define DIFF_TO_FP(D) (((D) & ((1 << MANT_BITS) - 1)) << ((D) >> MANT_BITS)) #define AFR_TO_INT(A) ((1 << (((A) >> 9) & 0x1f)) * \ (512 + ((A) & 0x1ff)) / 512 * ((A) >> 14)) if (sc->flags & PATM_25M) tbl = patm_rtables25; else tbl = patm_rtables155; if (rif >= patm_rtables_ntab) rif = patm_rtables_ntab - 1; increase = pcr >> rif; ret = 0; for (t = 0; t < patm_rtables_ntab; t++) { /* get base rate of this table */ base = GET_ENTRY(t, 0); /* convert this to fixed point */ lair0 = DIFF_TO_FP(base) >> FRAC_BITS; /* get the CPS from the log2rate table */ cps = AFR_TO_INT(tbl[lair0] >> 17) - 10; if (increase >= cps) break; ret = t; } return (ret + 4); } /* * Setup the TCT */ void patm_tct_setup(struct patm_softc *sc, struct patm_scd *scd, struct patm_vcc *vcc) { uint32_t tct[8]; u_int sram; u_int mbs, token; u_int tmp, crm, rdf, cdf, air, mcr; bzero(tct, sizeof(tct)); if (vcc == NULL) { /* special case for UBR0 */ sram = 0; tct[0] = IDT_TCT_UBR | scd->sram; tct[7] = IDT_TCT_UBR_FLG; } else { sram = vcc->cid * 8; switch (vcc->vcc.traffic) { case ATMIO_TRAFFIC_CBR: patm_tst_alloc(sc, vcc); tct[0] = IDT_TCT_CBR | scd->sram; /* must account for what was really allocated */ break; case ATMIO_TRAFFIC_VBR: /* compute parameters for the TCT */ scd->init_er = rate2log(sc, vcc->vcc.tparam.pcr); scd->lacr = rate2log(sc, vcc->vcc.tparam.scr); /* get the 16-bit fraction of SCR/PCR * both a 24 bit. Do it the simple way. */ token = (uint64_t)(vcc->vcc.tparam.scr << 16) / vcc->vcc.tparam.pcr; patm_debug(sc, VCC, "VBR: init_er=%u lacr=%u " "token=0x%04x\n", scd->init_er, scd->lacr, token); tct[0] = IDT_TCT_VBR | scd->sram; tct[2] = IDT_TCT_TSIF; tct[3] = IDT_TCT_IDLE | IDT_TCT_HALT; tct[4] = IDT_TCT_MAXIDLE; tct[5] = 0x01000000; if ((mbs = vcc->vcc.tparam.mbs) > 0xff) mbs = 0xff; tct[6] = (mbs << 16) | token; sc->bwrem -= vcc->vcc.tparam.scr; break; case ATMIO_TRAFFIC_ABR: scd->init_er = rate2log(sc, vcc->vcc.tparam.pcr); scd->lacr = rate2log(sc, vcc->vcc.tparam.icr); mcr = rate2log(sc, vcc->vcc.tparam.mcr); /* compute CRM */ tmp = vcc->vcc.tparam.tbe / vcc->vcc.tparam.nrm; if (tmp * vcc->vcc.tparam.nrm < vcc->vcc.tparam.tbe) tmp++; for (crm = 1; tmp > (1 << crm); crm++) ; if (crm > 0x7) crm = 7; air = get_air_table(sc, vcc->vcc.tparam.rif, vcc->vcc.tparam.pcr); if ((rdf = vcc->vcc.tparam.rdf) >= patm_rtables_ntab) rdf = patm_rtables_ntab - 1; rdf += patm_rtables_ntab + 4; if ((cdf = vcc->vcc.tparam.cdf) >= patm_rtables_ntab) cdf = patm_rtables_ntab - 1; cdf += patm_rtables_ntab + 4; patm_debug(sc, VCC, "ABR: init_er=%u lacr=%u mcr=%u " "crm=%u air=%u rdf=%u cdf=%u\n", scd->init_er, scd->lacr, mcr, crm, air, rdf, cdf); tct[0] = IDT_TCT_ABR | scd->sram; tct[1] = crm << IDT_TCT_CRM_SHIFT; tct[3] = IDT_TCT_HALT | IDT_TCT_IDLE | (4 << IDT_TCT_NAGE_SHIFT); tct[4] = mcr << IDT_TCT_LMCR_SHIFT; tct[5] = (cdf << IDT_TCT_CDF_SHIFT) | (rdf << IDT_TCT_RDF_SHIFT) | (air << IDT_TCT_AIR_SHIFT); sc->bwrem -= vcc->vcc.tparam.mcr; break; } } patm_sram_write4(sc, sram + 0, tct[0], tct[1], tct[2], tct[3]); patm_sram_write4(sc, sram + 4, tct[4], tct[5], tct[6], tct[7]); patm_debug(sc, VCC, "TCT[%u]: %08x %08x %08x %08x %08x %08x %08x %08x", sram / 8, patm_sram_read(sc, sram + 0), patm_sram_read(sc, sram + 1), 
patm_sram_read(sc, sram + 2), patm_sram_read(sc, sram + 3), patm_sram_read(sc, sram + 4), patm_sram_read(sc, sram + 5), patm_sram_read(sc, sram + 6), patm_sram_read(sc, sram + 7)); } /* * Start a channel */ static void patm_tct_start(struct patm_softc *sc, struct patm_vcc *vcc) { patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_UIER(vcc->cid, vcc->scd->init_er)); patm_nor_write(sc, IDT_NOR_TCMDQ, IDT_TCMDQ_SLACR(vcc->cid, vcc->scd->lacr)); } static void patm_tct_print(struct patm_softc *sc, u_int cid) { #ifdef PATM_DEBUG u_int sram = cid * 8; #endif patm_debug(sc, VCC, "TCT[%u]: %08x %08x %08x %08x %08x %08x %08x %08x", sram / 8, patm_sram_read(sc, sram + 0), patm_sram_read(sc, sram + 1), patm_sram_read(sc, sram + 2), patm_sram_read(sc, sram + 3), patm_sram_read(sc, sram + 4), patm_sram_read(sc, sram + 5), patm_sram_read(sc, sram + 6), patm_sram_read(sc, sram + 7)); } /* * Setup the SCD */ void patm_scd_setup(struct patm_softc *sc, struct patm_scd *scd) { patm_sram_write4(sc, scd->sram + 0, scd->phy, 0, 0xffffffff, 0); patm_sram_write4(sc, scd->sram + 4, 0, 0, 0, 0); patm_debug(sc, VCC, "SCD(%x): %08x %08x %08x %08x %08x %08x %08x %08x", scd->sram, patm_sram_read(sc, scd->sram + 0), patm_sram_read(sc, scd->sram + 1), patm_sram_read(sc, scd->sram + 2), patm_sram_read(sc, scd->sram + 3), patm_sram_read(sc, scd->sram + 4), patm_sram_read(sc, scd->sram + 5), patm_sram_read(sc, scd->sram + 6), patm_sram_read(sc, scd->sram + 7)); } /* * Grow the TX map table if possible */ static void patm_txmaps_grow(struct patm_softc *sc) { u_int i; struct patm_txmap *map; int err; if (sc->tx_nmaps >= sc->tx_maxmaps) return; for (i = sc->tx_nmaps; i < sc->tx_nmaps + PATM_CFG_TXMAPS_STEP; i++) { map = uma_zalloc(sc->tx_mapzone, M_NOWAIT); if (map == NULL) /* M_NOWAIT allocation may fail */ break; err = bus_dmamap_create(sc->tx_tag, 0, &map->map); if (err) { uma_zfree(sc->tx_mapzone, map); break; } SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link); } sc->tx_nmaps = i; } /* * Allocate a transmission map */ static struct patm_txmap * patm_txmap_get(struct patm_softc *sc) { struct patm_txmap *map; if ((map = SLIST_FIRST(&sc->tx_maps_free)) == NULL) { patm_txmaps_grow(sc); if ((map = SLIST_FIRST(&sc->tx_maps_free)) == NULL) return (NULL); } SLIST_REMOVE_HEAD(&sc->tx_maps_free, link); return (map); } /* * Look whether we are in the process of updating the TST on the chip. * If we are, set the flag that we need another update. * If we are not, start the update. 
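 * TST_PENDING marks soft-TST changes that have not yet been written
 * to the chip; TST_WAIT means the jump timer is running and the next
 * write must wait until the chip has switched tables.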
*/ static __inline void patm_tst_start(struct patm_softc *sc) { if (!(sc->tst_state & TST_PENDING)) { sc->tst_state |= TST_PENDING; if (!(sc->tst_state & TST_WAIT)) { /* timer not running */ patm_tst_update(sc); } } } /* * Allocate TST entries to a CBR connection */ static void patm_tst_alloc(struct patm_softc *sc, struct patm_vcc *vcc) { u_int slots; u_int qptr, pptr; u_int qmax, pmax; u_int pspc, last; mtx_lock(&sc->tst_lock); /* compute the number of slots we need, make sure to get at least * the specified PCR */ slots = cbr2slots(sc, vcc); vcc->scd->slots = slots; sc->bwrem -= slots2cr(sc, slots); patm_debug(sc, TST, "tst_alloc: cbr=%u link=%u tst=%u slots=%u", vcc->vcc.tparam.pcr, IFP2IFATM(sc->ifp)->mib.pcr, sc->mmap->tst_size, slots); qmax = sc->mmap->tst_size - 1; pmax = qmax << 8; pspc = pmax / slots; pptr = pspc >> 1; /* starting point */ qptr = pptr >> 8; last = qptr; while (slots > 0) { if (qptr >= qmax) qptr -= qmax; if (sc->tst_soft[qptr] != IDT_TST_VBR) { /* used - try next */ qptr++; continue; } patm_debug(sc, TST, "slot[%u] = %u.%u diff=%d", qptr, vcc->vcc.vpi, vcc->vcc.vci, (int)qptr - (int)last); last = qptr; sc->tst_soft[qptr] = IDT_TST_CBR | vcc->cid | TST_BOTH; sc->tst_free--; if ((pptr += pspc) >= pmax) pptr -= pmax; qptr = pptr >> 8; slots--; } patm_tst_start(sc); mtx_unlock(&sc->tst_lock); } /* * Free a CBR connection's TST entries */ static void patm_tst_free(struct patm_softc *sc, struct patm_vcc *vcc) { u_int i; mtx_lock(&sc->tst_lock); for (i = 0; i < sc->mmap->tst_size - 1; i++) { if ((sc->tst_soft[i] & IDT_TST_MASK) == vcc->cid) { sc->tst_soft[i] = IDT_TST_VBR | TST_BOTH; sc->tst_free++; } } sc->bwrem += slots2cr(sc, vcc->scd->slots); patm_tst_start(sc); mtx_unlock(&sc->tst_lock); } /* * Write the soft TST into the idle incore TST and start the wait timer. * We assume that we hold the tst lock. */ static void patm_tst_update(struct patm_softc *sc) { u_int flag; /* flag to clear from soft TST */ u_int idle; /* the idle TST */ u_int act; /* the active TST */ u_int i; if (sc->tst_state & TST_ACT1) { act = 1; idle = 0; flag = TST_CH0; } else { act = 0; idle = 1; flag = TST_CH1; } /* update the idle one */ for (i = 0; i < sc->mmap->tst_size - 1; i++) if (sc->tst_soft[i] & flag) { patm_sram_write(sc, sc->tst_base[idle] + i, sc->tst_soft[i] & ~TST_BOTH); sc->tst_soft[i] &= ~flag; } /* make the active TST jump to the idle one */ patm_sram_write(sc, sc->tst_jump[act], IDT_TST_BR | (sc->tst_base[idle] << 2)); /* wait for the chip to jump */ sc->tst_state &= ~TST_PENDING; sc->tst_state |= TST_WAIT; callout_reset(&sc->tst_callout, 1, patm_tst_timer, sc); } /* * Timer for TST updates */ static void patm_tst_timer(void *p) { struct patm_softc *sc = p; u_int act; /* active TST */ u_int now; /* current place in TST */ mtx_lock(&sc->tst_lock); if (sc->tst_state & TST_WAIT) { /* ignore the PENDING state while we are waiting for * the chip to switch tables. Once the switch is done, * we will again look at PENDING */ act = (sc->tst_state & TST_ACT1) ? 1 : 0; now = patm_nor_read(sc, IDT_NOR_NOW) >> 2; if (now >= sc->tst_base[act] && now <= sc->tst_jump[act]) { /* not yet */ callout_reset(&sc->tst_callout, 1, patm_tst_timer, sc); goto done; } sc->tst_state &= ~TST_WAIT; /* change back jump */ patm_sram_write(sc, sc->tst_jump[act], IDT_TST_BR | (sc->tst_base[act] << 2)); /* switch */ sc->tst_state ^= TST_ACT1; } if (sc->tst_state & TST_PENDING) /* we got another update request while the timer was running. 
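 * patm_tst_update() writes it out now and re-arms the wait timer
 * itself.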
*/ patm_tst_update(sc); done: mtx_unlock(&sc->tst_lock); } static const char * dump_scd(struct patm_softc *sc, struct patm_scd *scd) { u_int i; for (i = 0; i < IDT_TSQE_TAG_SPACE; i++) printf("on_card[%u] = %p\n", i, scd->on_card[i]); printf("space=%u tag=%u num_on_card=%u last_tag=%u\n", scd->space, scd->tag, scd->num_on_card, scd->last_tag); return (""); } diff --git a/sys/dev/ral/rt2560.c b/sys/dev/ral/rt2560.c index 1ba2791a9c21..5ac753ca299c 100644 --- a/sys/dev/ral/rt2560.c +++ b/sys/dev/ral/rt2560.c @@ -1,2745 +1,2745 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2560 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RT2560_RSSI(sc, rssi) \ ((rssi) > (RT2560_NOISE_FLOOR + (sc)->rssi_corr) ? \ ((rssi) - RT2560_NOISE_FLOOR - (sc)->rssi_corr) : 0) #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) #define DPRINTFN(sc, n, fmt, ...) 
#endif static struct ieee80211vap *rt2560_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rt2560_vap_delete(struct ieee80211vap *); static void rt2560_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2560_alloc_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *, int); static void rt2560_reset_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static void rt2560_free_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static int rt2560_alloc_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *, int); static void rt2560_reset_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); static void rt2560_free_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); static int rt2560_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2560_eeprom_read(struct rt2560_softc *, uint8_t); static void rt2560_encryption_intr(struct rt2560_softc *); static void rt2560_tx_intr(struct rt2560_softc *); static void rt2560_prio_intr(struct rt2560_softc *); static void rt2560_decryption_intr(struct rt2560_softc *); static void rt2560_rx_intr(struct rt2560_softc *); static void rt2560_beacon_update(struct ieee80211vap *, int item); static void rt2560_beacon_expire(struct rt2560_softc *); static void rt2560_wakeup_expire(struct rt2560_softc *); static void rt2560_scan_start(struct ieee80211com *); static void rt2560_scan_end(struct ieee80211com *); static void rt2560_set_channel(struct ieee80211com *); static void rt2560_setup_tx_desc(struct rt2560_softc *, struct rt2560_tx_desc *, uint32_t, int, int, int, bus_addr_t); static int rt2560_tx_bcn(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_mgt(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_data(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_transmit(struct ieee80211com *, struct mbuf *); static void rt2560_start(struct rt2560_softc *); static void rt2560_watchdog(void *); static void rt2560_parent(struct ieee80211com *); static void rt2560_bbp_write(struct rt2560_softc *, uint8_t, uint8_t); static uint8_t rt2560_bbp_read(struct rt2560_softc *, uint8_t); static void rt2560_rf_write(struct rt2560_softc *, uint8_t, uint32_t); static void rt2560_set_chan(struct rt2560_softc *, struct ieee80211_channel *); #if 0 static void rt2560_disable_rf_tune(struct rt2560_softc *); #endif static void rt2560_enable_tsf_sync(struct rt2560_softc *); static void rt2560_enable_tsf(struct rt2560_softc *); static void rt2560_update_plcp(struct rt2560_softc *); static void rt2560_update_slot(struct ieee80211com *); static void rt2560_set_basicrates(struct rt2560_softc *, const struct ieee80211_rateset *); static void rt2560_update_led(struct rt2560_softc *, int, int); static void rt2560_set_bssid(struct rt2560_softc *, const uint8_t *); static void rt2560_set_macaddr(struct rt2560_softc *, const uint8_t *); static void rt2560_get_macaddr(struct rt2560_softc *, uint8_t *); static void rt2560_update_promisc(struct ieee80211com *); static const char *rt2560_get_rf(int); static void rt2560_read_config(struct rt2560_softc *); static int rt2560_bbp_init(struct rt2560_softc *); static void rt2560_set_txantenna(struct rt2560_softc *, int); static void rt2560_set_rxantenna(struct rt2560_softc *, int); static void rt2560_init_locked(struct rt2560_softc *); static void rt2560_init(void *); static void 
rt2560_stop_locked(struct rt2560_softc *); static int rt2560_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static const struct { uint32_t reg; uint32_t val; } rt2560_def_mac[] = { RT2560_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2560_def_bbp[] = { RT2560_DEF_BBP }; static const uint32_t rt2560_rf2522_r2[] = RT2560_RF2522_R2; static const uint32_t rt2560_rf2523_r2[] = RT2560_RF2523_R2; static const uint32_t rt2560_rf2524_r2[] = RT2560_RF2524_R2; static const uint32_t rt2560_rf2525_r2[] = RT2560_RF2525_R2; static const uint32_t rt2560_rf2525_hi_r2[] = RT2560_RF2525_HI_R2; static const uint32_t rt2560_rf2525e_r2[] = RT2560_RF2525E_R2; static const uint32_t rt2560_rf2526_r2[] = RT2560_RF2526_R2; static const uint32_t rt2560_rf2526_hi_r2[] = RT2560_RF2526_HI_R2; static const struct { uint8_t chan; uint32_t r1, r2, r4; } rt2560_rf5222[] = { RT2560_RF5222 }; int rt2560_attach(device_t dev, int id) { struct rt2560_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)]; int error; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); /* retrieve RT2560 rev. no */ sc->asic_rev = RAL_READ(sc, RT2560_CSR0); /* retrieve RF rev. no and various other things from EEPROM */ rt2560_read_config(sc); device_printf(dev, "MAC/BBP RT2560 (rev 0x%02x), RF %s\n", sc->asic_rev, rt2560_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. */ error = rt2560_alloc_tx_ring(sc, &sc->txq, RT2560_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring\n"); goto fail1; } error = rt2560_alloc_tx_ring(sc, &sc->atimq, RT2560_ATIM_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate ATIM ring\n"); goto fail2; } error = rt2560_alloc_tx_ring(sc, &sc->prioq, RT2560_PRIO_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Prio ring\n"); goto fail3; } error = rt2560_alloc_tx_ring(sc, &sc->bcnq, RT2560_BEACON_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Beacon ring\n"); goto fail4; } error = rt2560_alloc_rx_ring(sc, &sc->rxq, RT2560_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail5; } /* retrieve MAC address */ rt2560_get_macaddr(sc, ic->ic_macaddr); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ #endif ; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2560_RF_5222) setbit(bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, bands); ieee80211_ifattach(ic); ic->ic_raw_xmit = rt2560_raw_xmit; ic->ic_updateslot = 
rt2560_update_slot; ic->ic_update_promisc = rt2560_update_promisc; ic->ic_scan_start = rt2560_scan_start; ic->ic_scan_end = rt2560_scan_end; ic->ic_set_channel = rt2560_set_channel; ic->ic_vap_create = rt2560_vap_create; ic->ic_vap_delete = rt2560_vap_delete; ic->ic_parent = rt2560_parent; ic->ic_transmit = rt2560_transmit; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2560_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2560_RX_RADIOTAP_PRESENT); /* * Add a few sysctl knobs. */ #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "txantenna", CTLFLAG_RW, &sc->tx_ant, 0, "tx antenna (0=auto)"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rxantenna", CTLFLAG_RW, &sc->rx_ant, 0, "rx antenna (0=auto)"); if (bootverbose) ieee80211_announce(ic); return 0; fail5: rt2560_free_tx_ring(sc, &sc->bcnq); fail4: rt2560_free_tx_ring(sc, &sc->prioq); fail3: rt2560_free_tx_ring(sc, &sc->atimq); fail2: rt2560_free_tx_ring(sc, &sc->txq); fail1: mtx_destroy(&sc->sc_mtx); return ENXIO; } int rt2560_detach(void *xsc) { struct rt2560_softc *sc = xsc; struct ieee80211com *ic = &sc->sc_ic; rt2560_stop(sc); ieee80211_ifdetach(ic); mbufq_drain(&sc->sc_snd); rt2560_free_tx_ring(sc, &sc->txq); rt2560_free_tx_ring(sc, &sc->atimq); rt2560_free_tx_ring(sc, &sc->prioq); rt2560_free_tx_ring(sc, &sc->bcnq); rt2560_free_rx_ring(sc, &sc->rxq); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2560_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rt2560_softc *sc = ic->ic_softc; struct rt2560_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* XXXRP: TBD */ if (!TAILQ_EMPTY(&ic->ic_vaps)) { device_printf(sc->sc_dev, "only 1 vap supported\n"); return NULL; } if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { device_printf(sc->sc_dev, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. 
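 * Clearing IEEE80211_CLONE_BSSID keeps the new vap on the device's
 * own address.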
*/ flags &= ~IEEE80211_CLONE_BSSID; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); return NULL; } rvp = malloc(sizeof(struct rt2560_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2560_newstate; vap->iv_update_beacon = rt2560_beacon_update; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2560_vap_delete(struct ieee80211vap *vap) { struct rt2560_vap *rvp = RT2560_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2560_resume(void *xsc) { struct rt2560_softc *sc = xsc; if (sc->sc_ic.ic_nrunning > 0) rt2560_init(sc); } static void rt2560_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2560_alloc_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_TX_DESC_SIZE, 1, count * RT2560_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_TX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2560_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2560_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2560_free_tx_ring(sc, ring); return error; } static void rt2560_reset_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; int i; for (i = 0; i < ring->count; i++) { desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; } static 
void rt2560_free_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2560_alloc_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring, int count) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; ring->cur_decrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_RX_DESC_SIZE, 1, count * RT2560_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_RX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2560_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
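 * Every slot gets its own DMA map and a cluster mbuf, and each
 * descriptor is marked RT2560_RX_BUSY so the chip owns it from the
 * start.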
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2560_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2560_free_rx_ring(sc, ring); return error; } static void rt2560_reset_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) { ring->desc[i].flags = htole32(RT2560_RX_BUSY); ring->data[i].drop = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; ring->cur_decrypt = 0; } static void rt2560_free_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { struct rt2560_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2560_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2560_vap *rvp = RT2560_VAP(vap); struct rt2560_softc *sc = vap->iv_ic->ic_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); /* turn association led off */ rt2560_update_led(sc, 0, 0); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2560_update_plcp(sc); rt2560_set_basicrates(sc, &ni->ni_rates); rt2560_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "could not allocate beacon\n"); return ENOBUFS; } ieee80211_ref_node(ni); error = rt2560_tx_bcn(sc, m, ni); if (error != 0) return error; } /* turn assocation led on */ rt2560_update_led(sc, 1, 0); if (vap->iv_opmode != IEEE80211_M_MONITOR) rt2560_enable_tsf_sync(sc); else rt2560_enable_tsf(sc); } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). 
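 *
 * The access is bit-banged through RT2560_CSR21; every command bit the
 * function writes follows the same clocking idiom.  As an illustration
 * only (the helper name is invented here, the function below
 * open-codes it):
 */

static __inline void
rt2560_eeprom_clock_bit(struct rt2560_softc *sc, int bit)
{
	uint32_t d = (bit & 1) << RT2560_SHIFT_D;

	/* present the data bit with chip select asserted, then pulse C */
	RT2560_EEPROM_CTL(sc, RT2560_S | d);
	RT2560_EEPROM_CTL(sc, RT2560_S | d | RT2560_C);
}

/*
 * (rt2560_eeprom_read() follows.)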
*/ static uint16_t rt2560_eeprom_read(struct rt2560_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); /* write start bit (1) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); /* write READ opcode (10) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); /* write address (A5-A0 or A7-A0) */ n = (RAL_READ(sc, RT2560_CSR21) & RT2560_93C46) ? 5 : 7; for (; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D)); RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D) | RT2560_C); } RT2560_EEPROM_CTL(sc, RT2560_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); tmp = RAL_READ(sc, RT2560_CSR21); val |= ((tmp & RT2560_Q) >> RT2560_SHIFT_Q) << n; RT2560_EEPROM_CTL(sc, RT2560_S); } RT2560_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_C); return val; } /* * Some frames were processed by the hardware cipher engine and are ready for * transmission. */ static void rt2560_encryption_intr(struct rt2560_softc *sc) { struct rt2560_tx_desc *desc; int hw; /* retrieve last descriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR1) - sc->txq.physaddr; hw /= RT2560_TX_DESC_SIZE; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); while (sc->txq.next_encrypt != hw) { if (sc->txq.next_encrypt == sc->txq.cur_encrypt) { printf("hw encrypt %d, cur_encrypt %d\n", hw, sc->txq.cur_encrypt); break; } desc = &sc->txq.desc[sc->txq.next_encrypt]; if ((le32toh(desc->flags) & RT2560_TX_BUSY) || (le32toh(desc->flags) & RT2560_TX_CIPHER_BUSY)) break; /* for TKIP, swap eiv field to fix a bug in ASIC */ if ((le32toh(desc->flags) & RT2560_TX_CIPHER_MASK) == RT2560_TX_CIPHER_TKIP) desc->eiv = bswap32(desc->eiv); /* mark the frame ready for transmission */ desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= htole32(RT2560_TX_BUSY); DPRINTFN(sc, 15, "encryption done idx=%u\n", sc->txq.next_encrypt); sc->txq.next_encrypt = (sc->txq.next_encrypt + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); /* kick Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_TX); } static void rt2560_tx_intr(struct rt2560_softc *sc) { struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct mbuf *m; struct ieee80211vap *vap; struct ieee80211_node *ni; uint32_t flags; int retrycnt, status; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->txq.desc[sc->txq.next]; data = &sc->txq.data[sc->txq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_CIPHER_BUSY) || !(flags & RT2560_TX_VALID)) break; m = data->m; ni = data->ni; vap = ni->ni_vap; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: retrycnt = 0; DPRINTFN(sc, 10, "%s\n", "data frame sent successfully"); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &retrycnt, NULL); status = 0; break; case RT2560_TX_SUCCESS_RETRY: retrycnt = RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data 
frame sent after %u retries\n", retrycnt); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &retrycnt, NULL); status = 0; break; case RT2560_TX_FAIL_RETRY: retrycnt = RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data frame failed after %d retries\n", retrycnt); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_FAILURE, &retrycnt, NULL); status = 1; break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending data frame failed " "0x%08x\n", flags); status = 1; } bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txq.data_dmat, data->map); ieee80211_tx_complete(ni, m, status); data->ni = NULL; data->m = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "tx done idx=%u\n", sc->txq.next); sc->txq.queued--; sc->txq.next = (sc->txq.next + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->txq.queued < RT2560_TX_RING_COUNT - 1) rt2560_start(sc); } static void rt2560_prio_intr(struct rt2560_softc *sc) { struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_node *ni; struct mbuf *m; int flags; bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->prioq.desc[sc->prioq.next]; data = &sc->prioq.data[sc->prioq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_VALID) == 0) break; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: DPRINTFN(sc, 10, "%s\n", "mgt frame sent successfully"); break; case RT2560_TX_SUCCESS_RETRY: DPRINTFN(sc, 9, "mgt frame sent after %u retries\n", (flags >> 5) & 0x7); break; case RT2560_TX_FAIL_RETRY: DPRINTFN(sc, 9, "%s\n", "sending mgt frame failed (too much retries)"); break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending mgt frame failed " "0x%08x\n", flags); break; } bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->prioq.data_dmat, data->map); m = data->m; data->m = NULL; ni = data->ni; data->ni = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "prio done idx=%u\n", sc->prioq.next); sc->prioq.queued--; sc->prioq.next = (sc->prioq.next + 1) % RT2560_PRIO_RING_COUNT; if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, (flags & RT2560_TX_RESULT_MASK) &~ (RT2560_TX_SUCCESS | RT2560_TX_SUCCESS_RETRY)); m_freem(m); ieee80211_free_node(ni); } bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->prioq.queued < RT2560_PRIO_RING_COUNT) rt2560_start(sc); } /* * Some frames were processed by the hardware cipher engine and are ready for * handoff to the IEEE802.11 layer. 
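 *
 * Both cipher interrupts recover the hardware's position in the ring
 * from a security CSR that holds the bus address of the descriptor
 * currently being processed.  A sketch of that idiom (hypothetical
 * helper; the handlers open-code it):
 */

static __inline int
rt2560_hw_ring_index(uint32_t csr, bus_addr_t ring_base, int desc_size)
{
	/* turn a descriptor bus address back into a ring index */
	return ((int)(csr - ring_base) / desc_size);
}

/*
 * (rt2560_decryption_intr() follows.)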
*/ static void rt2560_decryption_intr(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int hw, error; int8_t rssi, nf; /* retrieve last decriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR0) - sc->rxq.physaddr; hw /= RT2560_RX_DESC_SIZE; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (; sc->rxq.cur_decrypt != hw;) { desc = &sc->rxq.desc[sc->rxq.cur_decrypt]; data = &sc->rxq.data[sc->rxq.cur_decrypt]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; if (data->drop) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } if ((le32toh(desc->flags) & RT2560_RX_CIPHER_MASK) != 0 && (le32toh(desc->flags) & RT2560_RX_ICV_ERROR)) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. */ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = RT2560_RSSI(sc, desc->rssi); nf = RT2560_NOISE_FLOOR; if (ieee80211_radiotap_active(ic)) { struct rt2560_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2560_CSR17); tsf_lo = RAL_READ(sc, RT2560_CSR16); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2560_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } sc->sc_flags |= RT2560_F_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); RAL_LOCK(sc); sc->sc_flags &= ~RT2560_F_INPUT_RUNNING; skip: desc->flags = htole32(RT2560_RX_BUSY); DPRINTFN(sc, 15, "decryption done idx=%u\n", sc->rxq.cur_decrypt); sc->rxq.cur_decrypt = (sc->rxq.cur_decrypt + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* * Some frames were received. Pass them to the hardware cipher engine before * sending them to the 802.11 layer. 
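 * rt2560_rx_intr() below only sanity-checks each frame and marks it
 * RT2560_RX_CIPHER_BUSY; the handoff to net80211 happens in
 * rt2560_decryption_intr() above once the cipher engine is done.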
*/ static void rt2560_rx_intr(struct rt2560_softc *sc) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; data->drop = 0; if ((le32toh(desc->flags) & RT2560_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2560_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled RXCSR0. */ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); data->drop = 1; } if (((le32toh(desc->flags) >> 16) & 0xfff) > MCLBYTES) { DPRINTFN(sc, 5, "%s\n", "bad length"); data->drop = 1; } /* mark the frame for decryption */ desc->flags |= htole32(RT2560_RX_CIPHER_BUSY); DPRINTFN(sc, 15, "rx done idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); /* kick decrypt */ RAL_WRITE(sc, RT2560_SECCSR0, RT2560_KICK_DECRYPT); } static void rt2560_beacon_update(struct ieee80211vap *vap, int item) { struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; setbit(bo->bo_flags, item); } /* * This function is called periodically in IBSS mode when a new beacon must be * sent out. */ static void rt2560_beacon_expire(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct rt2560_tx_data *data; if (ic->ic_opmode != IEEE80211_M_IBSS && ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) return; data = &sc->bcnq.data[sc->bcnq.next]; /* * Don't send beacon if bsschan isn't set */ if (data->ni == NULL) return; bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bcnq.data_dmat, data->map); /* XXX 1 =>'s mcast frames which means all PS sta's will wakeup! */ ieee80211_beacon_update(data->ni, data->m, 1); rt2560_tx_bcn(sc, data->m, data->ni); DPRINTFN(sc, 15, "%s", "beacon expired\n"); sc->bcnq.next = (sc->bcnq.next + 1) % RT2560_BEACON_RING_COUNT; } /* ARGSUSED */ static void rt2560_wakeup_expire(struct rt2560_softc *sc) { DPRINTFN(sc, 2, "%s", "wakeup expired\n"); } void rt2560_intr(void *arg) { struct rt2560_softc *sc = arg; uint32_t r; RAL_LOCK(sc); /* disable interrupts */ RAL_WRITE(sc, RT2560_CSR8, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(sc->sc_flags & RT2560_F_RUNNING)) { RAL_UNLOCK(sc); return; } r = RAL_READ(sc, RT2560_CSR7); RAL_WRITE(sc, RT2560_CSR7, r); if (r & RT2560_BEACON_EXPIRE) rt2560_beacon_expire(sc); if (r & RT2560_WAKEUP_EXPIRE) rt2560_wakeup_expire(sc); if (r & RT2560_ENCRYPTION_DONE) rt2560_encryption_intr(sc); if (r & RT2560_TX_DONE) rt2560_tx_intr(sc); if (r & RT2560_PRIO_DONE) rt2560_prio_intr(sc); if (r & RT2560_DECRYPTION_DONE) rt2560_decryption_intr(sc); if (r & RT2560_RX_DONE) { rt2560_rx_intr(sc); rt2560_encryption_intr(sc); } /* re-enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); RAL_UNLOCK(sc); } #define RAL_SIFS 10 /* us */ #define RT2560_TXRX_TURNAROUND 10 /* us */ static uint8_t rt2560_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2560_setup_tx_desc(struct rt2560_softc *sc, struct rt2560_tx_desc *desc, uint32_t flags, int len, int rate, int encrypt, bus_addr_t physaddr) { struct ieee80211com *ic = &sc->sc_ic; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->physaddr = htole32(physaddr); desc->wme = htole16( RT2560_AIFSN(2) | RT2560_LOGCWMIN(3) | RT2560_LOGCWMAX(8)); /* setup PLCP fields */ desc->plcp_signal = rt2560_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2560_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { - plcp_length = (16 * len + rate - 1) / rate; + plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2560_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } if (!encrypt) desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= encrypt ? htole32(RT2560_TX_CIPHER_BUSY) : htole32(RT2560_TX_BUSY); } static int rt2560_tx_bcn(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int nsegs, rate, error; desc = &sc->bcnq.desc[sc->bcnq.cur]; data = &sc->bcnq.data[sc->bcnq.cur]; /* XXX maybe a separate beacon rate? 
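 *
 * (Aside on rt2560_setup_tx_desc above: for CCK rates the PLCP LENGTH
 * field is the frame duration in microseconds.  "rate" is in 500 kb/s
 * units, so 16 * len / rate converts octets to microseconds, and
 * howmany() rounds up exactly like the (16 * len + rate - 1) / rate
 * expression it replaces.  A standalone worked example at 11 Mb/s,
 * for illustration only:)
 */
#if 0	/* worked example, not driver code */
#include <stdio.h>

int
main(void)
{
	int len = 100 + 4;		/* 100-byte frame plus 4-byte FCS */
	int rate = 22;			/* 11 Mb/s in 500 kb/s units */
	int plcp = (16 * len + rate - 1) / rate;	/* == howmany() */
	int rem = (16 * len) % rate;

	/* prints "plcp=76 us, lengext=0": 1664 / 22 rounds up to 76 */
	printf("plcp=%d us, lengext=%d\n", plcp, rem != 0 && rem < 7);
	return (0);
}
#endif
/*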
*/ rate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].mgmtrate; error = bus_dmamap_load_mbuf_sg(sc->bcnq.data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; rt2560_setup_tx_desc(sc, desc, RT2560_TX_IFS_NEWBACKOFF | RT2560_TX_TIMESTAMP, m0->m_pkthdr.len, rate, 0, segs->ds_addr); DPRINTFN(sc, 10, "sending beacon frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->bcnq.cur, rate); bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->bcnq.desc_dmat, sc->bcnq.desc_map, BUS_DMASYNC_PREWRITE); sc->bcnq.cur = (sc->bcnq.cur + 1) % RT2560_BEACON_RING_COUNT; return 0; } static int rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RT2560_TX_TIMESTAMP; } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_sendprot(struct rt2560_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort, error; uint16_t dur; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int 
nsegs; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RT2560_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RT2560_TX_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } desc = &sc->txq.desc[sc->txq.cur_encrypt]; data = &sc->txq.data[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; rt2560_setup_tx_desc(sc, desc, flags, mprot->m_pkthdr.len, protrate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; return 0; } static int rt2560_tx_raw(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint32_t flags; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { /* XXX fall back to mcast/mgmt rate? */ m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2560_TX_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rt2560_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(ni->ni_vap, m0); } data->m = m0; data->ni = ni; /* XXX need to setup descriptor ourself */ rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, (params->ibp_flags & IEEE80211_BPF_CRYPTO) != 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending raw frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; struct mbuf *mnew; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags; int nsegs, rate, error; wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2560_sendprot(sc, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } } data = &sc->txq.data[sc->txq.cur_encrypt]; desc = &sc->txq.desc[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct 
rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->txq.cur_encrypt, rate); /* kick encrypt */ sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; RAL_WRITE(sc, RT2560_SECCSR1, RT2560_KICK_ENCRYPT); return 0; } static int rt2560_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rt2560_softc *sc = ic->ic_softc; int error; RAL_LOCK(sc); if ((sc->sc_flags & RT2560_F_RUNNING) == 0) { RAL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RAL_UNLOCK(sc); return (error); } rt2560_start(sc); RAL_UNLOCK(sc); return (0); } static void rt2560_start(struct rt2560_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; RAL_LOCK_ASSERT(sc); while (sc->txq.queued < RT2560_TX_RING_COUNT - 1 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rt2560_tx_data(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } sc->sc_tx_timer = 5; } } static void rt2560_watchdog(void *arg) { struct rt2560_softc *sc = arg; RAL_LOCK_ASSERT(sc); KASSERT(sc->sc_flags & RT2560_F_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; rt2560_encryption_intr(sc); rt2560_tx_intr(sc); if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); rt2560_init_locked(sc); counter_u64_add(sc->sc_ic.ic_oerrors, 1); /* NB: callout is reset in rt2560_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); } static void rt2560_parent(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; int startall = 0; RAL_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & RT2560_F_RUNNING) == 0) { rt2560_init_locked(sc); startall = 1; } else rt2560_update_promisc(ic); } else if (sc->sc_flags & RT2560_F_RUNNING) rt2560_stop_locked(sc); RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void rt2560_bbp_write(struct rt2560_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2560_BBP_WRITE | RT2560_BBP_BUSY | reg << 8 | val; RAL_WRITE(sc, RT2560_BBPCSR, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2560_bbp_read(struct rt2560_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { 
device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } val = RT2560_BBP_BUSY | reg << 8; RAL_WRITE(sc, RT2560_BBPCSR, val); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2560_BBPCSR); if (!(val & RT2560_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2560_rf_write(struct rt2560_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_RFCSR) & RT2560_RF_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2560_RF_BUSY | RT2560_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); RAL_WRITE(sc, RT2560_RFCSR, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff); } static void rt2560_set_chan(struct rt2560_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; uint8_t power, tmp; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); if (IEEE80211_IS_CHAN_2GHZ(c)) power = min(sc->txpow[chan - 1], 31); else power = 31; /* adjust txpower using ifconfig settings */ power -= (100 - ic->ic_txpowlimit) / 8; DPRINTFN(sc, 2, "setting channel to %u, txpower to %u\n", chan, power); switch (sc->rf_rev) { case RT2560_RF_2522: rt2560_rf_write(sc, RAL_RF1, 0x00814); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2522_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); break; case RT2560_RF_2523: rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2523_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x38044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2524: rt2560_rf_write(sc, RAL_RF1, 0x0c808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2524_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525E: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525e_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); break; case RT2560_RF_2526: rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); break; /* dual-band RF */ case RT2560_RF_5222: for (i = 0; rt2560_rf5222[i].chan != chan; i++); rt2560_rf_write(sc, RAL_RF1, rt2560_rf5222[i].r1); rt2560_rf_write(sc, RAL_RF2, rt2560_rf5222[i].r2); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, rt2560_rf5222[i].r4); break; default: printf("unknown ral rev=%d\n", sc->rf_rev); } /* XXX */ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* set Japan filter bit for channel 14 */ tmp = rt2560_bbp_read(sc, 70); tmp &= ~RT2560_JAPAN_FILTER; if (chan == 14) tmp |= RT2560_JAPAN_FILTER; rt2560_bbp_write(sc, 70, tmp); /* clear CRC errors */ RAL_READ(sc, RT2560_CNT0); } } static void rt2560_set_channel(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; RAL_LOCK(sc); rt2560_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } #if 0 /* * Disable RF auto-tuning. */ static void rt2560_disable_rf_tune(struct rt2560_softc *sc) { uint32_t tmp; if (sc->rf_rev != RT2560_RF_2523) { tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; rt2560_rf_write(sc, RAL_RF1, tmp); } tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; rt2560_rf_write(sc, RAL_RF3, tmp); DPRINTFN(sc, 2, "%s", "disabling RF autotune\n"); } #endif /* * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF * synchronization. */ static void rt2560_enable_tsf_sync(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t logcwmin, preload; uint32_t tmp; /* first, disable TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); tmp = 16 * vap->iv_bss->ni_intval; RAL_WRITE(sc, RT2560_CSR12, tmp); RAL_WRITE(sc, RT2560_CSR13, 0); logcwmin = 5; preload = (vap->iv_opmode == IEEE80211_M_STA) ? 384 : 1024; tmp = logcwmin << 16 | preload; RAL_WRITE(sc, RT2560_BCNOCSR, tmp); /* finally, enable TSF synchronization */ tmp = RT2560_ENABLE_TSF | RT2560_ENABLE_TBCN; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RT2560_ENABLE_TSF_SYNC(1); else tmp |= RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_BEACON_GENERATOR; RAL_WRITE(sc, RT2560_CSR14, tmp); DPRINTF(sc, "%s", "enabling TSF synchronization\n"); } static void rt2560_enable_tsf(struct rt2560_softc *sc) { RAL_WRITE(sc, RT2560_CSR14, 0); RAL_WRITE(sc, RT2560_CSR14, RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_TSF); } static void rt2560_update_plcp(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* no short preamble for 1Mbps */ RAL_WRITE(sc, RT2560_PLCP1MCSR, 0x00700400); if (!(ic->ic_flags & IEEE80211_F_SHPREAMBLE)) { /* values taken from the reference driver */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380401); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x00150402); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b8403); } else { /* same values as above or'ed 0x8 */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380409); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x0015040a); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b840b); } DPRINTF(sc, "updating PLCP for %s preamble\n", (ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? "short" : "long"); } /* * This function can be called by ieee80211_set_shortslottime(). Refer to * IEEE Std 802.11-1999 pp. 85 to know how these values are computed. 
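 *
 * (Worked example of the arithmetic below, assuming the stock
 * IEEE80211_DUR_DIFS(sifs, slot) definition of sifs + 2 * slot: with the
 * 20us long slot, tx_sifs = 10 - 10 = 0, tx_pifs = 0 + 20 = 20 and
 * tx_difs = 0 + 2 * 20 = 40; the 9us short slot gives 0/9/18 instead.
 * A standalone check, for illustration only:)
 */
#if 0	/* worked example, not driver code */
#include <stdio.h>

int
main(void)
{
	int slottime = 20;		/* long slot; short slot is 9 us */
	int tx_sifs = 10 - 10;		/* RAL_SIFS - RT2560_TXRX_TURNAROUND */
	int tx_pifs = tx_sifs + slottime;	/* PIFS = SIFS + 1 slot */
	int tx_difs = tx_sifs + 2 * slottime;	/* DIFS = SIFS + 2 slots */

	printf("sifs=%d pifs=%d difs=%d\n", tx_sifs, tx_pifs, tx_difs);
	return (0);
}
#endif
/*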
*/
static void
rt2560_update_slot(struct ieee80211com *ic)
{
	struct rt2560_softc *sc = ic->ic_softc;
	uint8_t slottime;
	uint16_t tx_sifs, tx_pifs, tx_difs, eifs;
	uint32_t tmp;

#ifndef FORCE_SLOTTIME
	slottime = IEEE80211_GET_SLOTTIME(ic);
#else
	/*
	 * Setting slot time according to the "short slot time" capability
	 * in beacon/probe_resp seems to cause problems acknowledging
	 * certain APs' data frames transmitted at CCK/DS rates: the
	 * problematic AP keeps retransmitting data frames, probably
	 * because MAC level acks are not received by hardware.
	 * So we cheat a little bit here by claiming we are capable of
	 * "short slot time" but setting hardware slot time to the normal
	 * slot time.  ral(4) does not seem to have trouble receiving
	 * frames transmitted using short slot time even if hardware slot
	 * time is set to normal slot time.  If we didn't use this trick,
	 * we would have to claim that short slot time is not supported;
	 * this would give relatively poor RX performance (-1Mb~-2Mb
	 * lower) and the _whole_ BSS would stop using short slot time.
	 */
	slottime = IEEE80211_DUR_SLOT;
#endif

	/* update the MAC slot boundaries */
	tx_sifs = RAL_SIFS - RT2560_TXRX_TURNAROUND;
	tx_pifs = tx_sifs + slottime;
	tx_difs = IEEE80211_DUR_DIFS(tx_sifs, slottime);
	eifs = (ic->ic_curmode == IEEE80211_MODE_11B) ? 364 : 60;

	tmp = RAL_READ(sc, RT2560_CSR11);
	tmp = (tmp & ~0x1f00) | slottime << 8;
	RAL_WRITE(sc, RT2560_CSR11, tmp);

	tmp = tx_pifs << 16 | tx_sifs;
	RAL_WRITE(sc, RT2560_CSR18, tmp);

	tmp = eifs << 16 | tx_difs;
	RAL_WRITE(sc, RT2560_CSR19, tmp);

	DPRINTF(sc, "setting slottime to %uus\n", slottime);
}

static void
rt2560_set_basicrates(struct rt2560_softc *sc,
    const struct ieee80211_rateset *rs)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t mask = 0;
	uint8_t rate;
	int i;

	for (i = 0; i < rs->rs_nrates; i++) {
		rate = rs->rs_rates[i];

		if (!(rate & IEEE80211_RATE_BASIC))
			continue;

		mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt,
		    IEEE80211_RV(rate));
	}

	RAL_WRITE(sc, RT2560_ARSP_PLCP_1, mask);

	DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask);
}

static void
rt2560_update_led(struct rt2560_softc *sc, int led1, int led2)
{
	uint32_t tmp;

	/* set ON period to 70ms and OFF period to 30ms */
	tmp = led1 << 16 | led2 << 17 | 70 << 8 | 30;
	RAL_WRITE(sc, RT2560_LEDCSR, tmp);
}

static void
rt2560_set_bssid(struct rt2560_softc *sc, const uint8_t *bssid)
{
	uint32_t tmp;

	tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24;
	RAL_WRITE(sc, RT2560_CSR5, tmp);

	tmp = bssid[4] | bssid[5] << 8;
	RAL_WRITE(sc, RT2560_CSR6, tmp);

	DPRINTF(sc, "setting BSSID to %6D\n", bssid, ":");
}

static void
rt2560_set_macaddr(struct rt2560_softc *sc, const uint8_t *addr)
{
	uint32_t tmp;

	tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24;
	RAL_WRITE(sc, RT2560_CSR3, tmp);

	tmp = addr[4] | addr[5] << 8;
	RAL_WRITE(sc, RT2560_CSR4, tmp);

	DPRINTF(sc, "setting MAC address to %6D\n", addr, ":");
}

static void
rt2560_get_macaddr(struct rt2560_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = RAL_READ(sc, RT2560_CSR3);
	addr[0] = tmp & 0xff;
	addr[1] = (tmp >> 8) & 0xff;
	addr[2] = (tmp >> 16) & 0xff;
	addr[3] = (tmp >> 24);

	tmp = RAL_READ(sc, RT2560_CSR4);
	addr[4] = tmp & 0xff;
	addr[5] = (tmp >> 8) & 0xff;
}

static void
rt2560_update_promisc(struct ieee80211com *ic)
{
	struct rt2560_softc *sc = ic->ic_softc;
	uint32_t tmp;

	tmp = RAL_READ(sc, RT2560_RXCSR0);

	tmp &= ~RT2560_DROP_NOT_TO_ME;
	if (ic->ic_promisc == 0)
		tmp |= RT2560_DROP_NOT_TO_ME;

	RAL_WRITE(sc, RT2560_RXCSR0, tmp);

	DPRINTF(sc, "%s promiscuous mode\n", (ic->ic_promisc > 0) ?
"entering" : "leaving"); } static const char * rt2560_get_rf(int rev) { switch (rev) { case RT2560_RF_2522: return "RT2522"; case RT2560_RF_2523: return "RT2523"; case RT2560_RF_2524: return "RT2524"; case RT2560_RF_2525: return "RT2525"; case RT2560_RF_2525E: return "RT2525e"; case RT2560_RF_2526: return "RT2526"; case RT2560_RF_5222: return "RT5222"; default: return "unknown"; } } static void rt2560_read_config(struct rt2560_softc *sc) { uint16_t val; int i; val = rt2560_eeprom_read(sc, RT2560_EEPROM_CONFIG0); sc->rf_rev = (val >> 11) & 0x7; sc->hw_radio = (val >> 10) & 0x1; sc->led_mode = (val >> 6) & 0x7; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; /* read default values for BBP registers */ for (i = 0; i < 16; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; } /* read Tx power for all b/g channels */ for (i = 0; i < 14 / 2; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_TXPOWER + i); sc->txpow[i * 2] = val & 0xff; sc->txpow[i * 2 + 1] = val >> 8; } for (i = 0; i < 14; ++i) { if (sc->txpow[i] > 31) sc->txpow[i] = 24; } val = rt2560_eeprom_read(sc, RT2560_EEPROM_CALIBRATE); if ((val & 0xff) == 0xff) sc->rssi_corr = RT2560_DEFAULT_RSSI_CORR; else sc->rssi_corr = val & 0xff; DPRINTF(sc, "rssi correction %d, calibrate 0x%02x\n", sc->rssi_corr, val); } static void rt2560_scan_start(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); rt2560_set_bssid(sc, ieee80211broadcastaddr); } static void rt2560_scan_end(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; struct ieee80211vap *vap = ic->ic_scan->ss_vap; rt2560_enable_tsf_sync(sc); /* XXX keep local copy */ rt2560_set_bssid(sc, vap->iv_bss->ni_bssid); } static int rt2560_bbp_init(struct rt2560_softc *sc) { int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { if (rt2560_bbp_read(sc, RT2560_BBP_VERSION) != 0) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(rt2560_def_bbp); i++) { rt2560_bbp_write(sc, rt2560_def_bbp[i].reg, rt2560_def_bbp[i].val); } /* initialize BBP registers to values stored in EEPROM */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 && sc->bbp_prom[i].val == 0) break; rt2560_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } rt2560_bbp_write(sc, 17, 0x48); /* XXX restore bbp17 */ return 0; } static void rt2560_set_txantenna(struct rt2560_softc *sc, int antenna) { uint32_t tmp; uint8_t tx; tx = rt2560_bbp_read(sc, RT2560_BBP_TX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) tx |= RT2560_BBP_ANTA; else if (antenna == 2) tx |= RT2560_BBP_ANTB; else tx |= RT2560_BBP_DIVERSITY; /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526 || sc->rf_rev == RT2560_RF_5222) tx |= RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_TX, tx); /* update values for CCK and OFDM in BBPCSR1 */ tmp = RAL_READ(sc, RT2560_BBPCSR1) & ~0x00070007; tmp |= (tx & 0x7) << 16 | (tx & 0x7); RAL_WRITE(sc, RT2560_BBPCSR1, tmp); } static void rt2560_set_rxantenna(struct rt2560_softc *sc, int antenna) { uint8_t rx; rx = rt2560_bbp_read(sc, RT2560_BBP_RX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) rx |= RT2560_BBP_ANTA; else if (antenna == 2) rx |= 
RT2560_BBP_ANTB; else rx |= RT2560_BBP_DIVERSITY; /* need to force no I/Q flip for RF 2525e and 2526 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526) rx &= ~RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_RX, rx); } static void rt2560_init_locked(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; int i; RAL_LOCK_ASSERT(sc); rt2560_stop_locked(sc); /* setup tx rings */ tmp = RT2560_PRIO_RING_COUNT << 24 | RT2560_ATIM_RING_COUNT << 16 | RT2560_TX_RING_COUNT << 8 | RT2560_TX_DESC_SIZE; /* rings must be initialized in this exact order */ RAL_WRITE(sc, RT2560_TXCSR2, tmp); RAL_WRITE(sc, RT2560_TXCSR3, sc->txq.physaddr); RAL_WRITE(sc, RT2560_TXCSR5, sc->prioq.physaddr); RAL_WRITE(sc, RT2560_TXCSR4, sc->atimq.physaddr); RAL_WRITE(sc, RT2560_TXCSR6, sc->bcnq.physaddr); /* setup rx ring */ tmp = RT2560_RX_RING_COUNT << 8 | RT2560_RX_DESC_SIZE; RAL_WRITE(sc, RT2560_RXCSR1, tmp); RAL_WRITE(sc, RT2560_RXCSR2, sc->rxq.physaddr); /* initialize MAC registers to default values */ for (i = 0; i < nitems(rt2560_def_mac); i++) RAL_WRITE(sc, rt2560_def_mac[i].reg, rt2560_def_mac[i].val); rt2560_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* set basic rate set (will be updated later) */ RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x153); rt2560_update_slot(ic); rt2560_update_plcp(sc); rt2560_update_led(sc, 0, 0); RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, RT2560_HOST_READY); if (rt2560_bbp_init(sc) != 0) { rt2560_stop_locked(sc); return; } rt2560_set_txantenna(sc, sc->tx_ant); rt2560_set_rxantenna(sc, sc->rx_ant); /* set default BSS channel */ rt2560_set_chan(sc, ic->ic_curchan); /* kick Rx */ tmp = RT2560_DROP_PHY_ERROR | RT2560_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2560_DROP_CTL | RT2560_DROP_VERSION_ERROR; if (ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) tmp |= RT2560_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RT2560_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2560_RXCSR0, tmp); /* clear old FCS and Rx FIFO errors */ RAL_READ(sc, RT2560_CNT0); RAL_READ(sc, RT2560_CNT4); /* clear any pending interrupts */ RAL_WRITE(sc, RT2560_CSR7, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); sc->sc_flags |= RT2560_F_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); } static void rt2560_init(void *priv) { struct rt2560_softc *sc = priv; struct ieee80211com *ic = &sc->sc_ic; RAL_LOCK(sc); rt2560_init_locked(sc); RAL_UNLOCK(sc); if (sc->sc_flags & RT2560_F_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void rt2560_stop_locked(struct rt2560_softc *sc) { volatile int *flags = &sc->sc_flags; RAL_LOCK_ASSERT(sc); while (*flags & RT2560_F_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (sc->sc_flags & RT2560_F_RUNNING) { sc->sc_flags &= ~RT2560_F_RUNNING; /* abort Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_ABORT_TX); /* disable Rx */ RAL_WRITE(sc, RT2560_RXCSR0, RT2560_DISABLE_RX); /* reset ASIC (imply reset BBP) */ RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, RT2560_CSR8, 0xffffffff); /* reset Tx and Rx rings */ rt2560_reset_tx_ring(sc, &sc->txq); rt2560_reset_tx_ring(sc, &sc->atimq); rt2560_reset_tx_ring(sc, &sc->prioq); rt2560_reset_tx_ring(sc, &sc->bcnq); rt2560_reset_rx_ring(sc, &sc->rxq); } } void rt2560_stop(void *arg) { struct 
rt2560_softc *sc = arg; RAL_LOCK(sc); rt2560_stop_locked(sc); RAL_UNLOCK(sc); } static int rt2560_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct rt2560_softc *sc = ic->ic_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(sc->sc_flags & RT2560_F_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); return ENETDOWN; } if (sc->prioq.queued >= RT2560_PRIO_RING_COUNT) { RAL_UNLOCK(sc); m_freem(m); return ENOBUFS; /* XXX */ } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (rt2560_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (rt2560_tx_raw(sc, m, ni, params)) goto bad; } sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: RAL_UNLOCK(sc); return EIO; /* XXX */ } diff --git a/sys/dev/ral/rt2661.c b/sys/dev/ral/rt2661.c index a53d509bcf3d..2e9ceeca894c 100644 --- a/sys/dev/ral/rt2661.c +++ b/sys/dev/ral/rt2661.c @@ -1,2773 +1,2773 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2561, RT2561S and RT2661 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) #define DPRINTFN(sc, n, fmt, ...) 
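/*
 * (Aside: sc->sc_debug is the run-time verbosity knob behind these macros,
 * exposed through the per-device "debug" sysctl added in rt2661_attach
 * below.  A hypothetical usage sketch, for illustration only:)
 */
#if 0	/* usage sketch, not driver code */
	/* printed whenever the debug sysctl is non-zero */
	DPRINTF(sc, "rssi correction %d\n", sc->rssi_corr);
	/* printed only once the debug sysctl reaches 5 */
	DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n",
	    le32toh(desc->flags));
#endif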
#endif static struct ieee80211vap *rt2661_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rt2661_vap_delete(struct ieee80211vap *); static void rt2661_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2661_alloc_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *, int); static void rt2661_reset_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_free_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static int rt2661_alloc_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *, int); static void rt2661_reset_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); static void rt2661_free_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); static int rt2661_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2661_eeprom_read(struct rt2661_softc *, uint8_t); static void rt2661_rx_intr(struct rt2661_softc *); static void rt2661_tx_intr(struct rt2661_softc *); static void rt2661_tx_dma_intr(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_mcu_beacon_expire(struct rt2661_softc *); static void rt2661_mcu_wakeup(struct rt2661_softc *); static void rt2661_mcu_cmd_intr(struct rt2661_softc *); static void rt2661_scan_start(struct ieee80211com *); static void rt2661_scan_end(struct ieee80211com *); static void rt2661_set_channel(struct ieee80211com *); static void rt2661_setup_tx_desc(struct rt2661_softc *, struct rt2661_tx_desc *, uint32_t, uint16_t, int, int, const bus_dma_segment_t *, int, int); static int rt2661_tx_data(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *, int); static int rt2661_tx_mgt(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *); static int rt2661_transmit(struct ieee80211com *, struct mbuf *); static void rt2661_start(struct rt2661_softc *); static int rt2661_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rt2661_watchdog(void *); static void rt2661_parent(struct ieee80211com *); static void rt2661_bbp_write(struct rt2661_softc *, uint8_t, uint8_t); static uint8_t rt2661_bbp_read(struct rt2661_softc *, uint8_t); static void rt2661_rf_write(struct rt2661_softc *, uint8_t, uint32_t); static int rt2661_tx_cmd(struct rt2661_softc *, uint8_t, uint16_t); static void rt2661_select_antenna(struct rt2661_softc *); static void rt2661_enable_mrr(struct rt2661_softc *); static void rt2661_set_txpreamble(struct rt2661_softc *); static void rt2661_set_basicrates(struct rt2661_softc *, const struct ieee80211_rateset *); static void rt2661_select_band(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_chan(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_bssid(struct rt2661_softc *, const uint8_t *); static void rt2661_set_macaddr(struct rt2661_softc *, const uint8_t *); static void rt2661_update_promisc(struct ieee80211com *); static int rt2661_wme_update(struct ieee80211com *) __unused; static void rt2661_update_slot(struct ieee80211com *); static const char *rt2661_get_rf(int); static void rt2661_read_eeprom(struct rt2661_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static int rt2661_bbp_init(struct rt2661_softc *); static void rt2661_init_locked(struct rt2661_softc *); static void rt2661_init(void *); static void rt2661_stop_locked(struct rt2661_softc *); static void rt2661_stop(void *); static int rt2661_load_microcode(struct 
rt2661_softc *); #ifdef notyet static void rt2661_rx_tune(struct rt2661_softc *); static void rt2661_radar_start(struct rt2661_softc *); static int rt2661_radar_stop(struct rt2661_softc *); #endif static int rt2661_prepare_beacon(struct rt2661_softc *, struct ieee80211vap *); static void rt2661_enable_tsf_sync(struct rt2661_softc *); static void rt2661_enable_tsf(struct rt2661_softc *); static int rt2661_get_rssi(struct rt2661_softc *, uint8_t); static const struct { uint32_t reg; uint32_t val; } rt2661_def_mac[] = { RT2661_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2661_def_bbp[] = { RT2661_DEF_BBP }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rt2661_rf5225_1[] = { RT2661_RF5225_1 }, rt2661_rf5225_2[] = { RT2661_RF5225_2 }; int rt2661_attach(device_t dev, int id) { struct rt2661_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; uint32_t val; uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)]; int error, ac, ntries; sc->sc_id = id; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); /* wait for NIC to initialize */ for (ntries = 0; ntries < 1000; ntries++) { if ((val = RAL_READ(sc, RT2661_MAC_CSR0)) != 0) break; DELAY(1000); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for NIC to initialize\n"); error = EIO; goto fail1; } /* retrieve RF rev. no and various other things from EEPROM */ rt2661_read_eeprom(sc, ic->ic_macaddr); device_printf(dev, "MAC/BBP RT%X, RF %s\n", val, rt2661_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. */ for (ac = 0; ac < 4; ac++) { error = rt2661_alloc_tx_ring(sc, &sc->txq[ac], RT2661_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring %d\n", ac); goto fail2; } } error = rt2661_alloc_tx_ring(sc, &sc->mgtq, RT2661_MGT_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Mgt ring\n"); goto fail2; } error = rt2661_alloc_rx_ring(sc, &sc->rxq, RT2661_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail3; } ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ | IEEE80211_C_WME /* 802.11e */ #endif ; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2661_RF_5225 || sc->rf_rev == RT2661_RF_5325) setbit(bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, bands); ieee80211_ifattach(ic); #if 0 ic->ic_wme.wme_update = rt2661_wme_update; #endif ic->ic_scan_start = rt2661_scan_start; ic->ic_scan_end = rt2661_scan_end; ic->ic_set_channel = rt2661_set_channel; ic->ic_updateslot = rt2661_update_slot; ic->ic_update_promisc = rt2661_update_promisc; ic->ic_raw_xmit = 
rt2661_raw_xmit; ic->ic_transmit = rt2661_transmit; ic->ic_parent = rt2661_parent; ic->ic_vap_create = rt2661_vap_create; ic->ic_vap_delete = rt2661_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2661_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2661_RX_RADIOTAP_PRESENT); #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif if (bootverbose) ieee80211_announce(ic); return 0; fail3: rt2661_free_tx_ring(sc, &sc->mgtq); fail2: while (--ac >= 0) rt2661_free_tx_ring(sc, &sc->txq[ac]); fail1: mtx_destroy(&sc->sc_mtx); return error; } int rt2661_detach(void *xsc) { struct rt2661_softc *sc = xsc; struct ieee80211com *ic = &sc->sc_ic; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); ieee80211_ifdetach(ic); mbufq_drain(&sc->sc_snd); rt2661_free_tx_ring(sc, &sc->txq[0]); rt2661_free_tx_ring(sc, &sc->txq[1]); rt2661_free_tx_ring(sc, &sc->txq[2]); rt2661_free_tx_ring(sc, &sc->txq[3]); rt2661_free_tx_ring(sc, &sc->mgtq); rt2661_free_rx_ring(sc, &sc->rxq); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2661_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rt2661_softc *sc = ic->ic_softc; struct rt2661_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* XXXRP: TBD */ if (!TAILQ_EMPTY(&ic->ic_vaps)) { device_printf(sc->sc_dev, "only 1 vap supported\n"); return NULL; } if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { device_printf(sc->sc_dev, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. 
*/ flags &= ~IEEE80211_CLONE_BSSID; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); return NULL; } rvp = malloc(sizeof(struct rt2661_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2661_newstate; #if 0 vap->iv_update_beacon = rt2661_beacon_update; #endif ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2661_vap_delete(struct ieee80211vap *vap) { struct rt2661_vap *rvp = RT2661_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2661_shutdown(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_suspend(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_resume(void *xsc) { struct rt2661_softc *sc = xsc; if (sc->sc_ic.ic_nrunning > 0) rt2661_init(sc); } static void rt2661_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2661_alloc_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = ring->stat = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_TX_DESC_SIZE, 1, count * RT2661_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_TX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2661_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2661_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2661_free_tx_ring(sc, ring); return error; } static void rt2661_reset_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; int i; for (i = 0; i < ring->count; i++) { desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } 
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = ring->next = ring->stat = 0; } static void rt2661_free_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2661_alloc_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring, int count) { struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_RX_DESC_SIZE, 1, count * RT2661_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_RX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2661_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
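 *
 * (Aside: descriptor ownership follows a simple handshake.  A slot belongs
 * to the NIC while RT2661_RX_BUSY is set and reverts to the driver once
 * the NIC clears it; the driver re-arms a slot by restoring the flag and
 * flushing descriptor memory, as sketched below for illustration only:)
 */
#if 0	/* ownership handshake sketch, not driver code */
	desc->flags = htole32(RT2661_RX_BUSY);	/* hand the slot to the NIC */
	bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
	    BUS_DMASYNC_PREWRITE);		/* publish the update */
#endif
/*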
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2661_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2661_free_rx_ring(sc, ring); return error; } static void rt2661_reset_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) ring->desc[i].flags = htole32(RT2661_RX_BUSY); bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; } static void rt2661_free_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { struct rt2661_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2661_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2661_vap *rvp = RT2661_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rt2661_softc *sc = ic->ic_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0x00ffffff); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2661_enable_mrr(sc); rt2661_set_txpreamble(sc); rt2661_set_basicrates(sc, &ni->ni_rates); rt2661_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { error = rt2661_prepare_beacon(sc, vap); if (error != 0) return error; } if (vap->iv_opmode != IEEE80211_M_MONITOR) rt2661_enable_tsf_sync(sc); else rt2661_enable_tsf(sc); } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). 
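 *
 * (Aside: this is the standard three-wire "Microwire" framing, sketched
 * below with a hypothetical clock_bit() helper that shifts one bit out on
 * D and samples Q.  The 93C46 takes 6 address bits and the 93C66 takes 8,
 * hence the 5-or-7 loop bound in the code.  For illustration only:)
 */
#if 0	/* protocol sketch, not driver code */
static uint16_t
eeprom_read_sketch(int (*clock_bit)(int), int naddrbits, uint8_t addr)
{
	uint16_t val = 0;
	int n;

	clock_bit(1);				/* start bit */
	clock_bit(1);				/* READ opcode, first bit */
	clock_bit(0);				/* READ opcode, second bit */
	for (n = naddrbits - 1; n >= 0; n--)
		clock_bit((addr >> n) & 1);	/* address, MSB first */
	for (n = 15; n >= 0; n--)
		val |= (uint16_t)clock_bit(0) << n;	/* data Q15..Q0 */
	return (val);
}
#endif
/*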
*/
static uint16_t
rt2661_eeprom_read(struct rt2661_softc *sc, uint8_t addr)
{
	uint32_t tmp;
	uint16_t val;
	int n;

	/* clock C once before the first command */
	RT2661_EEPROM_CTL(sc, 0);

	RT2661_EEPROM_CTL(sc, RT2661_S);
	RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C);
	RT2661_EEPROM_CTL(sc, RT2661_S);

	/* write start bit (1) */
	RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D);
	RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C);

	/* write READ opcode (10) */
	RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D);
	RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C);
	RT2661_EEPROM_CTL(sc, RT2661_S);
	RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C);

	/* write address (A5-A0 or A7-A0) */
	n = (RAL_READ(sc, RT2661_E2PROM_CSR) & RT2661_93C46) ? 5 : 7;
	for (; n >= 0; n--) {
		RT2661_EEPROM_CTL(sc, RT2661_S |
		    (((addr >> n) & 1) << RT2661_SHIFT_D));
		RT2661_EEPROM_CTL(sc, RT2661_S |
		    (((addr >> n) & 1) << RT2661_SHIFT_D) | RT2661_C);
	}

	RT2661_EEPROM_CTL(sc, RT2661_S);

	/* read data Q15-Q0 */
	val = 0;
	for (n = 15; n >= 0; n--) {
		RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C);
		tmp = RAL_READ(sc, RT2661_E2PROM_CSR);
		val |= ((tmp & RT2661_Q) >> RT2661_SHIFT_Q) << n;
		RT2661_EEPROM_CTL(sc, RT2661_S);
	}

	RT2661_EEPROM_CTL(sc, 0);

	/* clear Chip Select and clock C */
	RT2661_EEPROM_CTL(sc, RT2661_S);
	RT2661_EEPROM_CTL(sc, 0);
	RT2661_EEPROM_CTL(sc, RT2661_C);

	return val;
}

static void
rt2661_tx_intr(struct rt2661_softc *sc)
{
	struct rt2661_tx_ring *txq;
	struct rt2661_tx_data *data;
	uint32_t val;
	int error, qid, retrycnt;
	struct ieee80211vap *vap;

	for (;;) {
		struct ieee80211_node *ni;
		struct mbuf *m;

		val = RAL_READ(sc, RT2661_STA_CSR4);
		if (!(val & RT2661_TX_STAT_VALID))
			break;

		/* retrieve the queue in which this frame was sent */
		qid = RT2661_TX_QID(val);
		txq = (qid <= 3) ? &sc->txq[qid] : &sc->mgtq;

		/* retrieve rate control algorithm context */
		data = &txq->data[txq->stat];
		m = data->m;
		data->m = NULL;
		ni = data->ni;
		data->ni = NULL;

		/* if no frame has been sent, ignore */
		if (ni == NULL)
			continue;
		else
			vap = ni->ni_vap;

		switch (RT2661_TX_RESULT(val)) {
		case RT2661_TX_SUCCESS:
			retrycnt = RT2661_TX_RETRYCNT(val);

			DPRINTFN(sc, 10, "data frame sent successfully after "
			    "%d retries\n", retrycnt);
			if (data->rix != IEEE80211_FIXED_RATE_NONE)
				ieee80211_ratectl_tx_complete(vap, ni,
				    IEEE80211_RATECTL_TX_SUCCESS,
				    &retrycnt, NULL);
			error = 0;
			break;

		case RT2661_TX_RETRY_FAIL:
			retrycnt = RT2661_TX_RETRYCNT(val);

			DPRINTFN(sc, 9, "%s\n",
			    "sending data frame failed (too many retries)");
			if (data->rix != IEEE80211_FIXED_RATE_NONE)
				ieee80211_ratectl_tx_complete(vap, ni,
				    IEEE80211_RATECTL_TX_FAILURE,
				    &retrycnt, NULL);
			error = 1;
			break;

		default:
			/* other failure */
			device_printf(sc->sc_dev,
			    "sending data frame failed 0x%08x\n", val);
			error = 1;
		}

		DPRINTFN(sc, 15, "tx done q=%d idx=%u\n", qid, txq->stat);

		txq->queued--;
		if (++txq->stat >= txq->count)	/* faster than % count */
			txq->stat = 0;

		ieee80211_tx_complete(ni, m, error);
	}

	sc->sc_tx_timer = 0;

	rt2661_start(sc);
}

static void
rt2661_tx_dma_intr(struct rt2661_softc *sc, struct rt2661_tx_ring *txq)
{
	struct rt2661_tx_desc *desc;
	struct rt2661_tx_data *data;

	bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		desc = &txq->desc[txq->next];
		data = &txq->data[txq->next];

		if ((le32toh(desc->flags) & RT2661_TX_BUSY) ||
		    !(le32toh(desc->flags) & RT2661_TX_VALID))
			break;

		bus_dmamap_sync(txq->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->data_dmat, data->map);

		/* descriptor is no longer valid */
		desc->flags &= ~htole32(RT2661_TX_VALID);

		DPRINTFN(sc, 15, "tx dma done q=%p
idx=%u\n", txq, txq->next); if (++txq->next >= txq->count) /* faster than % count */ txq->next = 0; } bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); } static void rt2661_rx_intr(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int error; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { int8_t rssi, nf; desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if (le32toh(desc->flags) & RT2661_RX_BUSY) break; if ((le32toh(desc->flags) & RT2661_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2661_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled TXRX_CSR0. */ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); counter_u64_add(ic->ic_ierrors, 1); goto skip; } if ((le32toh(desc->flags) & RT2661_RX_CIPHER_MASK) != 0) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. */ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = rt2661_get_rssi(sc, desc->rssi); /* Error happened during RSSI conversion. */ if (rssi < 0) rssi = -30; /* XXX ignored by net80211 */ nf = RT2661_NOISE_FLOOR; if (ieee80211_radiotap_active(ic)) { struct rt2661_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2661_TXRX_CSR13); tsf_lo = RAL_READ(sc, RT2661_TXRX_CSR12); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2661_RX_OFDM)) ? 
IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } sc->sc_flags |= RAL_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); /* send the frame to the 802.11 layer */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); RAL_LOCK(sc); sc->sc_flags &= ~RAL_INPUT_RUNNING; skip: desc->flags |= htole32(RT2661_RX_BUSY); DPRINTFN(sc, 15, "rx intr idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2661_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* ARGSUSED */ static void rt2661_mcu_beacon_expire(struct rt2661_softc *sc) { /* do nothing */ } static void rt2661_mcu_wakeup(struct rt2661_softc *sc) { RAL_WRITE(sc, RT2661_MAC_CSR11, 5 << 16); RAL_WRITE(sc, RT2661_SOFT_RESET_CSR, 0x7); RAL_WRITE(sc, RT2661_IO_CNTL_CSR, 0x18); RAL_WRITE(sc, RT2661_PCI_USEC_CSR, 0x20); /* send wakeup command to MCU */ rt2661_tx_cmd(sc, RT2661_MCU_CMD_WAKEUP, 0); } static void rt2661_mcu_cmd_intr(struct rt2661_softc *sc) { RAL_READ(sc, RT2661_M2H_CMD_DONE_CSR); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); } void rt2661_intr(void *arg) { struct rt2661_softc *sc = arg; uint32_t r1, r2; RAL_LOCK(sc); /* disable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffff7f); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(sc->sc_flags & RAL_RUNNING)) { RAL_UNLOCK(sc); return; } r1 = RAL_READ(sc, RT2661_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, r1); r2 = RAL_READ(sc, RT2661_MCU_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, r2); if (r1 & RT2661_MGT_DONE) rt2661_tx_dma_intr(sc, &sc->mgtq); if (r1 & RT2661_RX_DONE) rt2661_rx_intr(sc); if (r1 & RT2661_TX0_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[0]); if (r1 & RT2661_TX1_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[1]); if (r1 & RT2661_TX2_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[2]); if (r1 & RT2661_TX3_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[3]); if (r1 & RT2661_TX_DONE) rt2661_tx_intr(sc); if (r2 & RT2661_MCU_CMD_DONE) rt2661_mcu_cmd_intr(sc); if (r2 & RT2661_MCU_BEACON_EXPIRE) rt2661_mcu_beacon_expire(sc); if (r2 & RT2661_MCU_WAKEUP) rt2661_mcu_wakeup(sc); /* re-enable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); RAL_UNLOCK(sc); } static uint8_t rt2661_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2661_setup_tx_desc(struct rt2661_softc *sc, struct rt2661_tx_desc *desc, uint32_t flags, uint16_t xflags, int len, int rate, const bus_dma_segment_t *segs, int nsegs, int ac) { struct ieee80211com *ic = &sc->sc_ic; uint16_t plcp_length; int i, remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->flags |= htole32(RT2661_TX_BUSY | RT2661_TX_VALID); desc->xflags = htole16(xflags); desc->xflags |= htole16(nsegs << 13); desc->wme = htole16( RT2661_QID(ac) | RT2661_AIFSN(2) | RT2661_LOGCWMIN(4) | RT2661_LOGCWMAX(10)); /* * Remember in which queue this frame was sent. This field is driver * private data only. It will be made available by the NIC in STA_CSR4 * on Tx interrupts. */ desc->qid = ac; /* setup PLCP fields */ desc->plcp_signal = rt2661_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2661_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { - plcp_length = (16 * len + rate - 1) / rate; + plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2661_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } /* RT2x61 supports scatter with up to 5 segments */ for (i = 0; i < nsegs; i++) { desc->addr[i] = htole32(segs[i].ds_addr); desc->len [i] = htole16(segs[i].ds_len); } } static int rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; /* XXX HWSEQ */ int nsegs, rate, error; desc = &sc->mgtq.desc[sc->mgtq.cur]; data = &sc->mgtq.data[sc->mgtq.cur]; rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->mgtq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp in probe responses */ if ((wh->i_fc[0] & 
(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) flags |= RT2661_TX_TIMESTAMP; } rt2661_setup_tx_desc(sc, desc, flags, 0 /* XXX HWSEQ */, m0->m_pkthdr.len, rate, segs, nsegs, RT2661_QID_MGT); bus_dmamap_sync(sc->mgtq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->mgtq.desc_dmat, sc->mgtq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->mgtq.cur, rate); /* kick mgt */ sc->mgtq.queued++; sc->mgtq.cur = (sc->mgtq.cur + 1) % RT2661_MGT_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, RT2661_KICK_MGT); return 0; } static int rt2661_sendprot(struct rt2661_softc *sc, int ac, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; struct rt2661_tx_ring *txq = &sc->txq[ac]; const struct ieee80211_frame *wh; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort, error; uint16_t dur; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; int nsegs; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RT2661_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RT2661_TX_NEED_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; rt2661_setup_tx_desc(sc, desc, flags, 0, mprot->m_pkthdr.len, protrate, segs, 1, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); txq->queued++; txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT; return 0; } static int rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct rt2661_tx_ring *txq = &sc->txq[ac]; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; const struct chanAccParams *cap; struct mbuf *mnew; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags; int error, nsegs, rate, noack = 0; wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); 
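/*
 * NB: net80211 rates are expressed in units of 500 kb/s and may
 * carry the basic-rate flag in the high bit, hence the
 * IEEE80211_RATE_VAL masking below; e.g. a raw value of
 * (0x80 | 108) denotes a basic rate of 108 * 500 kb/s = 54 Mb/s.
 */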
rate = ni->ni_txrate; } rate &= IEEE80211_RATE_VAL; if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { cap = &ic->ic_wme.wme_chanParams; noack = cap->cap_wmeParams[ac].wmep_noackPolicy; } if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2661_sendprot(sc, ac, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2661_TX_LONG_RETRY | RT2661_TX_IFS; } } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!noack && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2661_setup_tx_desc(sc, desc, flags, 0, m0->m_pkthdr.len, rate, segs, nsegs, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, txq->cur, rate); /* kick Tx */ txq->queued++; txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 1 << ac); return 0; }
static int rt2661_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rt2661_softc *sc = ic->ic_softc; int error; RAL_LOCK(sc); if ((sc->sc_flags & RAL_RUNNING) == 0) { RAL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RAL_UNLOCK(sc); return (error); } rt2661_start(sc); RAL_UNLOCK(sc); return (0); }
static void rt2661_start(struct rt2661_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; int ac; RAL_LOCK_ASSERT(sc); /* prevent frames from being sent if we're not ready */ if (!(sc->sc_flags & RAL_RUNNING) || sc->sc_invalid) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ac = M_WME_GETAC(m); if (sc->txq[ac].queued >= RT2661_TX_RING_COUNT - 1) { /* there is no room left in this ring */
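/*
 * (The ring is declared full one entry early, at
 * RT2661_TX_RING_COUNT - 1, so at least one descriptor always
 * stays free; the frame is put back below and retried from the
 * next Tx completion, since rt2661_tx_intr() ends by calling
 * rt2661_start() again.)
 */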
mbufq_prepend(&sc->sc_snd, m); break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rt2661_tx_data(sc, m, ni, ac) != 0) { ieee80211_free_node(ni); if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); break; } sc->sc_tx_timer = 5; } } static int rt2661_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct rt2661_softc *sc = ic->ic_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(sc->sc_flags & RAL_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); return ENETDOWN; } if (sc->mgtq.queued >= RT2661_MGT_RING_COUNT) { RAL_UNLOCK(sc); m_freem(m); return ENOBUFS; /* XXX */ } /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. * XXX raw path */ if (rt2661_tx_mgt(sc, m, ni) != 0) goto bad; sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: RAL_UNLOCK(sc); return EIO; /* XXX */ } static void rt2661_watchdog(void *arg) { struct rt2661_softc *sc = (struct rt2661_softc *)arg; RAL_LOCK_ASSERT(sc); KASSERT(sc->sc_flags & RAL_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); rt2661_init_locked(sc); counter_u64_add(sc->sc_ic.ic_oerrors, 1); /* NB: callout is reset in rt2661_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); } static void rt2661_parent(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; int startall = 0; RAL_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & RAL_RUNNING) == 0) { rt2661_init_locked(sc); startall = 1; } else rt2661_update_promisc(ic); } else if (sc->sc_flags & RAL_RUNNING) rt2661_stop_locked(sc); RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void rt2661_bbp_write(struct rt2661_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2661_BBP_BUSY | (reg & 0x7f) << 8 | val; RAL_WRITE(sc, RT2661_PHY_CSR3, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2661_bbp_read(struct rt2661_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } val = RT2661_BBP_BUSY | RT2661_BBP_READ | reg << 8; RAL_WRITE(sc, RT2661_PHY_CSR3, val); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2661_PHY_CSR3); if (!(val & RT2661_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2661_rf_write(struct rt2661_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR4) & RT2661_RF_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2661_RF_BUSY | RT2661_RF_21BIT | (val & 0x1fffff) << 2 | (reg & 3); RAL_WRITE(sc, RT2661_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0x1fffff); } static int rt2661_tx_cmd(struct rt2661_softc *sc, uint8_t cmd, uint16_t arg) { if (RAL_READ(sc, 
RT2661_H2M_MAILBOX_CSR) & RT2661_H2M_BUSY) return EIO; /* there is already a command pending */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, RT2661_H2M_BUSY | RT2661_TOKEN_NO_INTR << 16 | arg); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, RT2661_KICK_CMD | cmd); return 0; } static void rt2661_select_antenna(struct rt2661_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rt2661_bbp_read(sc, 4); bbp77 = rt2661_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 4, bbp4); rt2661_bbp_write(sc, 77, bbp77); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. */ static void rt2661_enable_mrr(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_MRR_CCK_FALLBACK; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) tmp |= RT2661_MRR_CCK_FALLBACK; tmp |= RT2661_MRR_ENABLED; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_txpreamble(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2661_SHORT_PREAMBLE; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_basicrates(struct rt2661_softc *sc, const struct ieee80211_rateset *rs) { struct ieee80211com *ic = &sc->sc_ic; uint32_t mask = 0; uint8_t rate; int i; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; if (!(rate & IEEE80211_RATE_BASIC)) continue; mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, IEEE80211_RV(rate)); } RAL_WRITE(sc, RT2661_TXRX_CSR5, mask); DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask); } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. 
*/ static void rt2661_select_band(struct rt2661_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; uint32_t tmp; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } rt2661_bbp_write(sc, 17, bbp17); rt2661_bbp_write(sc, 96, bbp96); rt2661_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rt2661_bbp_write(sc, 75, 0x80); rt2661_bbp_write(sc, 86, 0x80); rt2661_bbp_write(sc, 88, 0x80); } rt2661_bbp_write(sc, 35, bbp35); rt2661_bbp_write(sc, 97, bbp97); rt2661_bbp_write(sc, 98, bbp98); tmp = RAL_READ(sc, RT2661_PHY_CSR0); tmp &= ~(RT2661_PA_PE_2GHZ | RT2661_PA_PE_5GHZ); if (IEEE80211_IS_CHAN_2GHZ(c)) tmp |= RT2661_PA_PE_2GHZ; else tmp |= RT2661_PA_PE_5GHZ; RAL_WRITE(sc, RT2661_PHY_CSR0, tmp); } static void rt2661_set_chan(struct rt2661_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2661_BBPR94_DEFAULT; int8_t power; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rfprog == 0) ? rt2661_rf5225_1 : rt2661_rf5225_2; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. 
*/ if (c->ic_flags != sc->sc_curchan->ic_flags) { rt2661_select_band(sc, c); rt2661_select_antenna(sc); } sc->sc_curchan = c; rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7 | 1); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); /* enable smart mode for MIMO-capable RFs */ bbp3 = rt2661_bbp_read(sc, 3); bbp3 &= ~RT2661_SMART_MODE; if (sc->rf_rev == RT2661_RF_5325 || sc->rf_rev == RT2661_RF_2529) bbp3 |= RT2661_SMART_MODE; rt2661_bbp_write(sc, 3, bbp3); if (bbp94 != RT2661_BBPR94_DEFAULT) rt2661_bbp_write(sc, 94, bbp94); /* 5GHz radio needs a 1ms delay here */ if (IEEE80211_IS_CHAN_5GHZ(c)) DELAY(1000); } static void rt2661_set_bssid(struct rt2661_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR4, tmp); tmp = bssid[4] | bssid[5] << 8 | RT2661_ONE_BSSID << 16; RAL_WRITE(sc, RT2661_MAC_CSR5, tmp); } static void rt2661_set_macaddr(struct rt2661_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR2, tmp); tmp = addr[4] | addr[5] << 8; RAL_WRITE(sc, RT2661_MAC_CSR3, tmp); } static void rt2661_update_promisc(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR0); tmp &= ~RT2661_DROP_NOT_TO_ME; if (ic->ic_promisc == 0) tmp |= RT2661_DROP_NOT_TO_ME; RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); DPRINTF(sc, "%s promiscuous mode\n", (ic->ic_promisc > 0) ? "entering" : "leaving"); } /* * Update QoS (802.11e) settings for each h/w Tx ring. */ static int rt2661_wme_update(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; const struct wmeParams *wmep; wmep = ic->ic_wme.wme_chanParams.cap_wmeParams; /* XXX: not sure about shifts. */ /* XXX: the reference driver plays with AC_VI settings too. 
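* Worked example of the 4-bit packing used for the CWmin/CWmax/AIFSN
* registers below (illustrative values, not the standard defaults):
* with logcwmin BE=4, BK=5, VI=3, VO=2, RT2661_CWMIN_CSR is written as
* 4 << 12 | 5 << 8 | 3 << 4 | 2, i.e. 0x4532.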
*/ /* update TxOp */ RAL_WRITE(sc, RT2661_AC_TXOP_CSR0, wmep[WME_AC_BE].wmep_txopLimit << 16 | wmep[WME_AC_BK].wmep_txopLimit); RAL_WRITE(sc, RT2661_AC_TXOP_CSR1, wmep[WME_AC_VI].wmep_txopLimit << 16 | wmep[WME_AC_VO].wmep_txopLimit); /* update CWmin */ RAL_WRITE(sc, RT2661_CWMIN_CSR, wmep[WME_AC_BE].wmep_logcwmin << 12 | wmep[WME_AC_BK].wmep_logcwmin << 8 | wmep[WME_AC_VI].wmep_logcwmin << 4 | wmep[WME_AC_VO].wmep_logcwmin); /* update CWmax */ RAL_WRITE(sc, RT2661_CWMAX_CSR, wmep[WME_AC_BE].wmep_logcwmax << 12 | wmep[WME_AC_BK].wmep_logcwmax << 8 | wmep[WME_AC_VI].wmep_logcwmax << 4 | wmep[WME_AC_VO].wmep_logcwmax); /* update Aifsn */ RAL_WRITE(sc, RT2661_AIFSN_CSR, wmep[WME_AC_BE].wmep_aifsn << 12 | wmep[WME_AC_BK].wmep_aifsn << 8 | wmep[WME_AC_VI].wmep_aifsn << 4 | wmep[WME_AC_VO].wmep_aifsn); return 0; } static void rt2661_update_slot(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; uint8_t slottime; uint32_t tmp; slottime = IEEE80211_GET_SLOTTIME(ic); tmp = RAL_READ(sc, RT2661_MAC_CSR9); tmp = (tmp & ~0xff) | slottime; RAL_WRITE(sc, RT2661_MAC_CSR9, tmp); } static const char * rt2661_get_rf(int rev) { switch (rev) { case RT2661_RF_5225: return "RT5225"; case RT2661_RF_5325: return "RT5325 (MIMO XR)"; case RT2661_RF_2527: return "RT2527"; case RT2661_RF_2529: return "RT2529 (MIMO XR)"; default: return "unknown"; } } static void rt2661_read_eeprom(struct rt2661_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { uint16_t val; int i; /* read MAC address */ val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC01); macaddr[0] = val & 0xff; macaddr[1] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC23); macaddr[2] = val & 0xff; macaddr[3] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC45); macaddr[4] = val & 0xff; macaddr[5] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_ANTENNA); /* XXX: test if different from 0xffff? 
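* Layout of the EEPROM antenna word as decoded below (bit fields
* inferred from the shifts and masks):
*
*	bits 15:11	rf_rev		bit 10		hw_radio
*	bits 5:4	rx_ant		bits 3:2	tx_ant
*	bits 1:0	nb_ant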
*/ sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF(sc, "RF revision=%d\n", sc->rf_rev); val = rt2661_eeprom_read(sc, RT2661_EEPROM_CONFIG2); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF(sc, "External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna); val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_2GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_5GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; /* adjust RSSI correction for external low-noise amplifier */ if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF(sc, "RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr); val = rt2661_eeprom_read(sc, RT2661_EEPROM_FREQ_OFFSET); if ((val >> 8) != 0xff) sc->rfprog = (val >> 8) & 0x3; if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF(sc, "RF prog=%d\nRF freq=%d\n", sc->rfprog, sc->rffreq); /* read Tx power for all a/b/g channels */ for (i = 0; i < 19; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_TXPOWER + i); sc->txpow[i * 2] = (int8_t)(val >> 8); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2].chan, sc->txpow[i * 2]); sc->txpow[i * 2 + 1] = (int8_t)(val & 0xff); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2 + 1].chan, sc->txpow[i * 2 + 1]); } /* read vendor-specific BBP values */ for (i = 0; i < 16; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; /* skip invalid entries */ sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; DPRINTF(sc, "BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } } static int rt2661_bbp_init(struct rt2661_softc *sc) { int i, ntries; uint8_t val; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { val = rt2661_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; DELAY(100); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(rt2661_def_bbp); i++) { rt2661_bbp_write(sc, rt2661_def_bbp[i].reg, rt2661_def_bbp[i].val); } /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0) continue; rt2661_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } return 0; } static void rt2661_init_locked(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp, sta[3]; int i, error, ntries; RAL_LOCK_ASSERT(sc); if ((sc->sc_flags & RAL_FW_LOADED) == 0) { error = rt2661_load_microcode(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load 8051 microcode, error %d\n", __func__, error); return; } sc->sc_flags |= RAL_FW_LOADED; } rt2661_stop_locked(sc); /* initialize Tx rings */ RAL_WRITE(sc, RT2661_AC1_BASE_CSR, sc->txq[1].physaddr); RAL_WRITE(sc, RT2661_AC0_BASE_CSR, sc->txq[0].physaddr); RAL_WRITE(sc, RT2661_AC2_BASE_CSR, sc->txq[2].physaddr); RAL_WRITE(sc, 
RT2661_AC3_BASE_CSR, sc->txq[3].physaddr); /* initialize Mgt ring */ RAL_WRITE(sc, RT2661_MGT_BASE_CSR, sc->mgtq.physaddr); /* initialize Rx ring */ RAL_WRITE(sc, RT2661_RX_BASE_CSR, sc->rxq.physaddr); /* initialize Tx ring sizes */ RAL_WRITE(sc, RT2661_TX_RING_CSR0, RT2661_TX_RING_COUNT << 24 | RT2661_TX_RING_COUNT << 16 | RT2661_TX_RING_COUNT << 8 | RT2661_TX_RING_COUNT); RAL_WRITE(sc, RT2661_TX_RING_CSR1, RT2661_TX_DESC_WSIZE << 16 | RT2661_TX_RING_COUNT << 8 | /* XXX: HCCA ring unused */ RT2661_MGT_RING_COUNT); /* initialize Rx ring size */ RAL_WRITE(sc, RT2661_RX_RING_CSR, RT2661_RX_DESC_BACK << 16 | RT2661_RX_DESC_WSIZE << 8 | RT2661_RX_RING_COUNT); /* XXX: some magic here */ RAL_WRITE(sc, RT2661_TX_DMA_DST_CSR, 0xaa); /* load base addresses of all 5 Tx rings (4 data + 1 mgt) */ RAL_WRITE(sc, RT2661_LOAD_TX_RING_CSR, 0x1f); /* load base address of Rx ring */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 2); /* initialize MAC registers to default values */ for (i = 0; i < nitems(rt2661_def_mac); i++) RAL_WRITE(sc, rt2661_def_mac[i].reg, rt2661_def_mac[i].val); rt2661_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* set host ready */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* wait for BBP/RF to wake up */ for (ntries = 0; ntries < 1000; ntries++) { if (RAL_READ(sc, RT2661_MAC_CSR12) & 8) break; DELAY(1000); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wake up\n"); rt2661_stop_locked(sc); return; } if (rt2661_bbp_init(sc) != 0) { rt2661_stop_locked(sc); return; } /* select default channel */ sc->sc_curchan = ic->ic_curchan; rt2661_select_band(sc, sc->sc_curchan); rt2661_select_antenna(sc); rt2661_set_chan(sc, sc->sc_curchan); /* update Rx filter */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0) & 0xffff; tmp |= RT2661_DROP_PHY_ERROR | RT2661_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2661_DROP_CTL | RT2661_DROP_VER_ERROR | RT2661_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) tmp |= RT2661_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RT2661_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); /* clear STA registers */ RAL_READ_REGION_4(sc, RT2661_STA_CSR0, sta, nitems(sta)); /* initialize ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 4); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); /* kick Rx */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 1); sc->sc_flags |= RAL_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); }
static void rt2661_init(void *priv) { struct rt2661_softc *sc = priv; struct ieee80211com *ic = &sc->sc_ic; RAL_LOCK(sc); rt2661_init_locked(sc); RAL_UNLOCK(sc); if (sc->sc_flags & RAL_RUNNING) ieee80211_start_all(ic); /* start all vaps */ }
void rt2661_stop_locked(struct rt2661_softc *sc) { volatile int *flags = &sc->sc_flags; uint32_t tmp; while (*flags & RAL_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (sc->sc_flags & RAL_RUNNING) { sc->sc_flags &= ~RAL_RUNNING; /* abort Tx (for all 5 Tx rings) */ RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 0x1f << 16); /* disable Rx (value remains after reset!)
*/ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); /* reset ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, 0xffffffff); /* reset Tx and Rx rings */ rt2661_reset_tx_ring(sc, &sc->txq[0]); rt2661_reset_tx_ring(sc, &sc->txq[1]); rt2661_reset_tx_ring(sc, &sc->txq[2]); rt2661_reset_tx_ring(sc, &sc->txq[3]); rt2661_reset_tx_ring(sc, &sc->mgtq); rt2661_reset_rx_ring(sc, &sc->rxq); } }
void rt2661_stop(void *priv) { struct rt2661_softc *sc = priv; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); }
static int rt2661_load_microcode(struct rt2661_softc *sc) { const struct firmware *fp; const char *imagename; int ntries, error; RAL_LOCK_ASSERT(sc); switch (sc->sc_id) { case 0x0301: imagename = "rt2561sfw"; break; case 0x0302: imagename = "rt2561fw"; break; case 0x0401: imagename = "rt2661fw"; break; default: device_printf(sc->sc_dev, "%s: unexpected pci device id 0x%x, " "don't know how to retrieve firmware\n", __func__, sc->sc_id); return EINVAL; } RAL_UNLOCK(sc); fp = firmware_get(imagename); RAL_LOCK(sc); if (fp == NULL) { device_printf(sc->sc_dev, "%s: unable to retrieve firmware image %s\n", __func__, imagename); return EINVAL; } /* * Load 8051 microcode into NIC. */ /* reset 8051 */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* cancel any pending Host to MCU command */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, 0); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, 0); /* write 8051's microcode */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET | RT2661_MCU_SEL); RAL_WRITE_REGION_1(sc, RT2661_MCU_CODE_BASE, fp->data, fp->datasize); RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* kick 8051's ass */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, 0); /* wait for 8051 to initialize */ for (ntries = 0; ntries < 500; ntries++) { if (RAL_READ(sc, RT2661_MCU_CNTL_CSR) & RT2661_MCU_READY) break; DELAY(100); } if (ntries == 500) { device_printf(sc->sc_dev, "%s: timeout waiting for MCU to initialize\n", __func__); error = EIO; } else error = 0; firmware_put(fp, FIRMWARE_UNLOAD); return error; }
#ifdef notyet /* * Dynamically tune Rx sensitivity (BBP register 17) based on average RSSI and * false CCA count. This function is called periodically (every second) when * in the RUN state. Values taken from the reference driver. */ static void rt2661_rx_tune(struct rt2661_softc *sc) { uint8_t bbp17; uint16_t cca; int lo, hi, dbm; /* * Tuning range depends on operating band and on the presence of an * external low-noise amplifier.
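* For example, with the constants used below: 2GHz without an external
* LNA gives lo = 0x20; 5GHz adds 0x08 and an external LNA adds 0x10,
* so 5GHz with an external LNA yields lo = 0x38 and hi = lo + 0x20 =
* 0x58.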
*/ lo = 0x20; if (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan)) lo += 0x08; if ((IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan) && sc->ext_5ghz_lna)) lo += 0x10; hi = lo + 0x20; /* retrieve false CCA count since last call (clear on read) */ cca = RAL_READ(sc, RT2661_STA_CSR1) & 0xffff; /* XXX: dbm is used below but never initialized in this (notyet) code; it should hold the average RSSI in dBm */ if (dbm >= -35) { bbp17 = 0x60; } else if (dbm >= -58) { bbp17 = hi; } else if (dbm >= -66) { bbp17 = lo + 0x10; } else if (dbm >= -74) { bbp17 = lo + 0x08; } else { /* RSSI < -74dBm, tune using false CCA count */ bbp17 = sc->bbp17; /* current value */ hi -= 2 * (-74 - dbm); if (hi < lo) hi = lo; if (bbp17 > hi) { bbp17 = hi; } else if (cca > 512) { if (++bbp17 > hi) bbp17 = hi; } else if (cca < 100) { if (--bbp17 < lo) bbp17 = lo; } } if (bbp17 != sc->bbp17) { rt2661_bbp_write(sc, 17, bbp17); sc->bbp17 = bbp17; } }
/* * Enter/Leave radar detection mode. * This is for 802.11h additional regulatory domains. */ static void rt2661_radar_start(struct rt2661_softc *sc) { uint32_t tmp; /* disable Rx */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 82, 0x20); rt2661_bbp_write(sc, 83, 0x00); rt2661_bbp_write(sc, 84, 0x40); /* save current BBP register values */ sc->bbp18 = rt2661_bbp_read(sc, 18); sc->bbp21 = rt2661_bbp_read(sc, 21); sc->bbp22 = rt2661_bbp_read(sc, 22); sc->bbp16 = rt2661_bbp_read(sc, 16); sc->bbp17 = rt2661_bbp_read(sc, 17); sc->bbp64 = rt2661_bbp_read(sc, 64); rt2661_bbp_write(sc, 18, 0xff); rt2661_bbp_write(sc, 21, 0x3f); rt2661_bbp_write(sc, 22, 0x3f); rt2661_bbp_write(sc, 16, 0xbd); rt2661_bbp_write(sc, 17, sc->ext_5ghz_lna ? 0x44 : 0x34); rt2661_bbp_write(sc, 64, 0x21); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); }
static int rt2661_radar_stop(struct rt2661_softc *sc) { uint8_t bbp66; /* read radar detection result */ bbp66 = rt2661_bbp_read(sc, 66); /* restore BBP register values */ rt2661_bbp_write(sc, 16, sc->bbp16); rt2661_bbp_write(sc, 17, sc->bbp17); rt2661_bbp_write(sc, 18, sc->bbp18); rt2661_bbp_write(sc, 21, sc->bbp21); rt2661_bbp_write(sc, 22, sc->bbp22); rt2661_bbp_write(sc, 64, sc->bbp64); return bbp66 == 1; } #endif
static int rt2661_prepare_beacon(struct rt2661_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rt2661_tx_desc desc; struct mbuf *m0; int rate; if ((m0 = ieee80211_beacon_alloc(vap->iv_bss)) == NULL) { device_printf(sc->sc_dev, "could not allocate beacon frame\n"); return ENOBUFS; } /* send beacons at the lowest available rate */ rate = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ? 12 : 2; rt2661_setup_tx_desc(sc, &desc, RT2661_TX_TIMESTAMP, RT2661_TX_HWSEQ, m0->m_pkthdr.len, rate, NULL, 0, RT2661_QID_MGT); /* copy the first 24 bytes of Tx descriptor into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0, (uint8_t *)&desc, 24); /* copy beacon header and payload into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0 + 24, mtod(m0, uint8_t *), m0->m_pkthdr.len); m_freem(m0); return 0; }
/* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. */ static void rt2661_enable_tsf_sync(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation.
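* (The constant written below, 1 << 12 | 8, i.e. 0x1008, comes from
* the reference driver; presumably the low bits hold the 8 ms
* adjustment value.)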
*/ RAL_WRITE(sc, RT2661_TXRX_CSR10, 1 << 12 | 8); } tmp = RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2661_TSF_TICKING | RT2661_ENABLE_TBTT; if (vap->iv_opmode == IEEE80211_M_STA) tmp |= RT2661_TSF_MODE(1); else tmp |= RT2661_TSF_MODE(2) | RT2661_GENERATE_BEACON; RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp); }
static void rt2661_enable_tsf(struct rt2661_softc *sc) { RAL_WRITE(sc, RT2661_TXRX_CSR9, (RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000) | RT2661_TSF_TICKING | RT2661_TSF_MODE(2)); }
/* * Retrieve the "Received Signal Strength Indicator" from the raw values * contained in Rx descriptors. The computation depends on which band the * frame was received in. Correction values taken from the reference driver. */ static int rt2661_get_rssi(struct rt2661_softc *sc, uint8_t raw) { int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No mapping available. * * NB: Since RSSI is relative to noise floor, -1 is * adequate for the caller to know an error happened. */ return -1; } rssi = (2 * agc) - RT2661_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; }
static void rt2661_scan_start(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0xffffff); rt2661_set_bssid(sc, ieee80211broadcastaddr); }
static void rt2661_scan_end(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); rt2661_enable_tsf_sync(sc); /* XXX keep local copy */ rt2661_set_bssid(sc, vap->iv_bss->ni_bssid); }
static void rt2661_set_channel(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; RAL_LOCK(sc); rt2661_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); }
diff --git a/sys/dev/sbni/if_sbni.c b/sys/dev/sbni/if_sbni.c index 03200ef8d798..6281e7dce536 100644 --- a/sys/dev/sbni/if_sbni.c +++ b/sys/dev/sbni/if_sbni.c @@ -1,1277 +1,1277 @@ /*- * Copyright (c) 1997-2001 Granch, Ltd. All rights reserved. * Author: Denis I.Timofeev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$");
/* * Device driver for Granch SBNI12 leased line adapters * * Revision 2.0.0 1997/08/06 * Initial revision by Alexey Zverev * * Revision 2.0.1 1997/08/11 * Additional internal statistics support (tx statistics) * * Revision 2.0.2 1997/11/05 * if_bpf bug has been fixed * * Revision 2.0.3 1998/12/20 * Memory leakage has been eliminated in * the sbni_st and sbni_timeout routines. * * Revision 3.0 2000/08/10 by Yaroslav Polyakov * Support for PCI cards. 4.1 modification. * * Revision 3.1 2000/09/12 * Removed extra #defines around bpf functions * * Revision 4.0 2000/11/23 by Denis Timofeev * Completely redesigned the buffer management * * Revision 4.1 2001/01/21 * Support for PCI Dual cards and new SBNI12D-10, -11 Dual/ISA cards * * Written with reference to NE2000 driver developed by David Greenman. */
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
static void sbni_init(void *); static void sbni_init_locked(struct sbni_softc *); static void sbni_start(struct ifnet *); static void sbni_start_locked(struct ifnet *); static int sbni_ioctl(struct ifnet *, u_long, caddr_t); static void sbni_stop(struct sbni_softc *); static void handle_channel(struct sbni_softc *); static void card_start(struct sbni_softc *); static int recv_frame(struct sbni_softc *); static void send_frame(struct sbni_softc *); static int upload_data(struct sbni_softc *, u_int, u_int, u_int, u_int32_t); static int skip_tail(struct sbni_softc *, u_int, u_int32_t); static void interpret_ack(struct sbni_softc *, u_int); static void download_data(struct sbni_softc *, u_int32_t *); static void prepare_to_send(struct sbni_softc *); static void drop_xmit_queue(struct sbni_softc *); static int get_rx_buf(struct sbni_softc *); static void indicate_pkt(struct sbni_softc *); static void change_level(struct sbni_softc *); static int check_fhdr(struct sbni_softc *, u_int *, u_int *, u_int *, u_int *, u_int32_t *); static int append_frame_to_pkt(struct sbni_softc *, u_int, u_int32_t); static void timeout_change_level(struct sbni_softc *); static void send_frame_header(struct sbni_softc *, u_int32_t *); static void set_initial_values(struct sbni_softc *, struct sbni_flags); static u_int32_t calc_crc32(u_int32_t, caddr_t, u_int); static timeout_t sbni_timeout; static __inline u_char sbni_inb(struct sbni_softc *, enum sbni_reg); static __inline void sbni_outb(struct sbni_softc *, enum sbni_reg, u_char); static __inline void sbni_insb(struct sbni_softc *, u_char *, u_int); static __inline void sbni_outsb(struct sbni_softc *, u_char *, u_int); static u_int32_t crc32tab[];
#ifdef SBNI_DUAL_COMPOUND static struct mtx headlist_lock; MTX_SYSINIT(headlist_lock, &headlist_lock, "sbni headlist", MTX_DEF); static struct sbni_softc *sbni_headlist; #endif
/* -------------------------------------------------------------------------- */ static __inline u_char
sbni_inb(struct sbni_softc *sc, enum sbni_reg reg) { return bus_space_read_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + reg); }
static __inline void sbni_outb(struct sbni_softc *sc, enum sbni_reg reg, u_char value) { bus_space_write_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + reg, value); }
static __inline void sbni_insb(struct sbni_softc *sc, u_char *to, u_int len) { bus_space_read_multi_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + DAT, to, len); }
static __inline void sbni_outsb(struct sbni_softc *sc, u_char *from, u_int len) { bus_space_write_multi_1( rman_get_bustag(sc->io_res), rman_get_bushandle(sc->io_res), sc->io_off + DAT, from, len); }
/*
 * Valid combinations in CSR0 (for probing):
 *
 *	VALID_DECODER	0000,0011,1011,1010
 *
 *					; 0  ; -
 *	TR_REQ				; 1  ; +
 *	TR_RDY				; 2  ; -
 *	TR_RDY TR_REQ			; 3  ; +
 *	BU_EMP				; 4  ; +
 *	BU_EMP TR_REQ			; 5  ; +
 *	BU_EMP TR_RDY			; 6  ; -
 *	BU_EMP TR_RDY TR_REQ		; 7  ; +
 *	RC_RDY				; 8  ; +
 *	RC_RDY TR_REQ			; 9  ; +
 *	RC_RDY TR_RDY			; 10 ; -
 *	RC_RDY TR_RDY TR_REQ		; 11 ; -
 *	RC_RDY BU_EMP			; 12 ; -
 *	RC_RDY BU_EMP TR_REQ		; 13 ; -
 *	RC_RDY BU_EMP TR_RDY		; 14 ; -
 *	RC_RDY BU_EMP TR_RDY TR_REQ	; 15 ; -
 */
#define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
int sbni_probe(struct sbni_softc *sc) { u_char csr0; csr0 = sbni_inb(sc, CSR0); if (csr0 != 0xff && csr0 != 0x00) { csr0 &= ~EN_INT; if (csr0 & BU_EMP) csr0 |= EN_INT; if (VALID_DECODER & (1 << (csr0 >> 4))) return (0); } return (ENXIO); }
/* * Install interface into kernel networking data structures */ int sbni_attach(struct sbni_softc *sc, int unit, struct sbni_flags flags) { struct ifnet *ifp; u_char csr0; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) return (ENOMEM); sbni_outb(sc, CSR0, 0); set_initial_values(sc, flags); /* Initialize ifnet structure */ ifp->if_softc = sc; if_initname(ifp, "sbni", unit); ifp->if_init = sbni_init; ifp->if_start = sbni_start; ifp->if_ioctl = sbni_ioctl; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); /* report real baud rate */ csr0 = sbni_inb(sc, CSR0); ifp->if_baudrate = (csr0 & 0x01 ?
500000 : 2000000) / (1 << flags.rate); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; mtx_init(&sc->lock, ifp->if_xname, MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->wch, &sc->lock, 0); ether_ifattach(ifp, sc->enaddr); /* device attach does transition from UNCONFIGURED to IDLE state */ if_printf(ifp, "speed %ju, rxl ", (uintmax_t)ifp->if_baudrate); if (sc->delta_rxl) printf("auto\n"); else printf("%d (fixed)\n", sc->cur_rxl_index); return (0); }
void sbni_detach(struct sbni_softc *sc) { SBNI_LOCK(sc); sbni_stop(sc); SBNI_UNLOCK(sc); callout_drain(&sc->wch); ether_ifdetach(sc->ifp); if (sc->irq_handle) bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handle); mtx_destroy(&sc->lock); if_free(sc->ifp); }
void sbni_release_resources(struct sbni_softc *sc) { if (sc->irq_res) bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); if (sc->io_res && sc->io_off == 0) bus_release_resource(sc->dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); }
/* -------------------------------------------------------------------------- */ static void sbni_init(void *xsc) { struct sbni_softc *sc; sc = (struct sbni_softc *)xsc; SBNI_LOCK(sc); sbni_init_locked(sc); SBNI_UNLOCK(sc); }
static void sbni_init_locked(struct sbni_softc *sc) { struct ifnet *ifp; ifp = sc->ifp; /* * kludge to avoid multiple initializations when more than one * protocol is configured */ if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; card_start(sc); callout_reset(&sc->wch, hz/SBNI_HZ, sbni_timeout, sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* attempt to start output */ sbni_start_locked(ifp); }
static void sbni_start(struct ifnet *ifp) { struct sbni_softc *sc = ifp->if_softc; SBNI_LOCK(sc); sbni_start_locked(ifp); SBNI_UNLOCK(sc); }
static void sbni_start_locked(struct ifnet *ifp) { struct sbni_softc *sc = ifp->if_softc; if (sc->tx_frameno == 0) prepare_to_send(sc); }
static void sbni_stop(struct sbni_softc *sc) { sbni_outb(sc, CSR0, 0); drop_xmit_queue(sc); if (sc->rx_buf_p) { m_freem(sc->rx_buf_p); sc->rx_buf_p = NULL; } callout_stop(&sc->wch); sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); }
/* -------------------------------------------------------------------------- */ /* interrupt handler */
/* * SBNI12D-10, -11/ISA boards in "common interrupt" mode cannot be treated * as two independent single-channel devices. Each channel appears as an * Ethernet interface, but the interrupt handler must be shared. In fact, * only the first ("master") channel's driver registers the handler; its * struct softc holds a pointer to the "slave" channel's struct softc and * services that channel's interrupts as well. * The softc of each successfully attached ISA SBNI board is linked into a * list. When the next board's driver initializes, it scans this list; if it * finds a softc with the same IRQ and an I/O address differing by 4, it * assumes that board is the "master".
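* For example (hypothetical addresses): a dual board whose channels
* decode at I/O 0x210 and 0x214 and share one IRQ ends up with the
* channel attached first acting as "master"; its sbni_intr() below
* then polls and services both channels.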
*/ void sbni_intr(void *arg) { struct sbni_softc *sc; int repeat; sc = (struct sbni_softc *)arg; do { repeat = 0; SBNI_LOCK(sc); if (sbni_inb(sc, CSR0) & (RC_RDY | TR_RDY)) { handle_channel(sc); repeat = 1; } SBNI_UNLOCK(sc); if (sc->slave_sc) { /* second channel present */ SBNI_LOCK(sc->slave_sc); if (sbni_inb(sc->slave_sc, CSR0) & (RC_RDY | TR_RDY)) { handle_channel(sc->slave_sc); repeat = 1; } SBNI_UNLOCK(sc->slave_sc); } } while (repeat); }
static void handle_channel(struct sbni_softc *sc) { int req_ans; u_char csr0; sbni_outb(sc, CSR0, (sbni_inb(sc, CSR0) & ~EN_INT) | TR_REQ); sc->timer_ticks = CHANGE_LEVEL_START_TICKS; for (;;) { csr0 = sbni_inb(sc, CSR0); if ((csr0 & (RC_RDY | TR_RDY)) == 0) break; req_ans = !(sc->state & FL_PREV_OK); if (csr0 & RC_RDY) req_ans = recv_frame(sc); /* * TR_RDY always equals 1 here because we own the marker, * and we set TR_REQ while interrupts were disabled */ csr0 = sbni_inb(sc, CSR0); if ((csr0 & TR_RDY) == 0 || (csr0 & RC_RDY) != 0) if_printf(sc->ifp, "internal error!\n"); /* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */ if (req_ans || sc->tx_frameno != 0) send_frame(sc); else { /* send the marker without any data */ sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) & ~TR_REQ); } } sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) | EN_INT); }
/* * Returns 1 if the received frame needs to be acknowledged. * An empty frame received without errors is not acknowledged. */ static int recv_frame(struct sbni_softc *sc) { u_int32_t crc; u_int framelen, frameno, ack; u_int is_first, frame_ok; crc = CRC32_INITIAL; if (check_fhdr(sc, &framelen, &frameno, &ack, &is_first, &crc)) { frame_ok = framelen > 4 ? upload_data(sc, framelen, frameno, is_first, crc) : skip_tail(sc, framelen, crc); if (frame_ok) interpret_ack(sc, ack); } else { framelen = 0; frame_ok = 0; } sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) ^ CT_ZER); if (frame_ok) { sc->state |= FL_PREV_OK; if (framelen > 4) sc->in_stats.all_rx_number++; } else { sc->state &= ~FL_PREV_OK; change_level(sc); sc->in_stats.all_rx_number++; sc->in_stats.bad_rx_number++; } return (!frame_ok || framelen > 4); }
static void send_frame(struct sbni_softc *sc) { u_int32_t crc; u_char csr0; crc = CRC32_INITIAL; if (sc->state & FL_NEED_RESEND) { /* if the frame was sent but not ACK'ed - resend it */ if (sc->trans_errors) { sc->trans_errors--; if (sc->framelen != 0) sc->in_stats.resend_tx_number++; } else { /* could not xmit despite many attempts */ drop_xmit_queue(sc); goto do_send; } } else sc->trans_errors = TR_ERROR_COUNT; send_frame_header(sc, &crc); sc->state |= FL_NEED_RESEND; /* * FL_NEED_RESEND will be cleared after the ACK, or in * prepare_to_send for the next frame if an empty frame was sent */ if (sc->framelen) { download_data(sc, &crc); sc->in_stats.all_tx_number++; sc->state |= FL_WAIT_ACK; } sbni_outsb(sc, (u_char *)&crc, sizeof crc); do_send: csr0 = sbni_inb(sc, CSR0); sbni_outb(sc, CSR0, csr0 & ~TR_REQ); if (sc->tx_frameno) { /* next frame exists - request to send */ sbni_outb(sc, CSR0, csr0 | TR_REQ); } }
static void download_data(struct sbni_softc *sc, u_int32_t *crc_p) { struct mbuf *m; caddr_t data_p; u_int data_len, pos, slice; data_p = NULL; /* initialized to avoid a warning */ pos = 0; for (m = sc->tx_buf_p; m != NULL && pos < sc->pktlen; m = m->m_next) { if (pos + m->m_len > sc->outpos) { data_len = m->m_len - (sc->outpos - pos); data_p = mtod(m, caddr_t) + (sc->outpos - pos); goto do_copy; } else pos += m->m_len; } data_len = 0; do_copy: pos = 0; do { if (data_len) { slice = min(data_len, sc->framelen - pos); sbni_outsb(sc, data_p,
slice); *crc_p = calc_crc32(*crc_p, data_p, slice); pos += slice; if (data_len -= slice) data_p += slice; else { do { m = m->m_next; } while (m != NULL && m->m_len == 0); if (m) { data_len = m->m_len; data_p = mtod(m, caddr_t); } } } else { /* frame too short - zero padding */ pos = sc->framelen - pos; while (pos--) { sbni_outb(sc, DAT, 0); *crc_p = CRC32(0, *crc_p); } return; } } while (pos < sc->framelen); }
static int upload_data(struct sbni_softc *sc, u_int framelen, u_int frameno, u_int is_first, u_int32_t crc) { int frame_ok; if (is_first) { sc->wait_frameno = frameno; sc->inppos = 0; } if (sc->wait_frameno == frameno) { if (sc->inppos + framelen <= ETHER_MAX_LEN) { frame_ok = append_frame_to_pkt(sc, framelen, crc); /* * if the CRC is right but framelen is incorrect, a * transmitter error occurred... drop the entire packet */ } else if ((frame_ok = skip_tail(sc, framelen, crc)) != 0) { sc->wait_frameno = 0; sc->inppos = 0; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); /* now skip all frames until is_first != 0 */ } } else frame_ok = skip_tail(sc, framelen, crc); if (is_first && !frame_ok) { /* * The frame was corrupted, but we have already stored * is_first... Drop the entire packet. */ sc->wait_frameno = 0; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); } return (frame_ok); }
static __inline void send_complete(struct sbni_softc *); static __inline void send_complete(struct sbni_softc *sc) { m_freem(sc->tx_buf_p); sc->tx_buf_p = NULL; if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); }
static void interpret_ack(struct sbni_softc *sc, u_int ack) { if (ack == FRAME_SENT_OK) { sc->state &= ~FL_NEED_RESEND; if (sc->state & FL_WAIT_ACK) { sc->outpos += sc->framelen; if (--sc->tx_frameno) { sc->framelen = min( sc->maxframe, sc->pktlen - sc->outpos); } else { send_complete(sc); prepare_to_send(sc); } } } sc->state &= ~FL_WAIT_ACK; }
/* * Glue the received frame to the previous fragments of the packet. * Indicate the packet once the last frame has been accepted. */ static int append_frame_to_pkt(struct sbni_softc *sc, u_int framelen, u_int32_t crc) { caddr_t p; if (sc->inppos + framelen > ETHER_MAX_LEN) return (0); if (!sc->rx_buf_p && !get_rx_buf(sc)) return (0); p = sc->rx_buf_p->m_data + sc->inppos; sbni_insb(sc, p, framelen); if (calc_crc32(crc, p, framelen) != CRC32_REMAINDER) return (0); sc->inppos += framelen - 4; if (--sc->wait_frameno == 0) { /* last frame received */ indicate_pkt(sc); if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); } return (1); }
/* * Prepare to start output on the adapter. The current priority must be set * to splimp before this routine is called. The transmitter will actually be * activated once the marker has been accepted. */ static void prepare_to_send(struct sbni_softc *sc) { struct mbuf *m; u_int len; /* sc->tx_buf_p == NULL here! */ if (sc->tx_buf_p) printf("sbni: memory leak!\n"); sc->outpos = 0; sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); for (;;) { IF_DEQUEUE(&sc->ifp->if_snd, sc->tx_buf_p); if (!sc->tx_buf_p) { /* nothing to transmit...
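* reset the transmit state below and clear OACTIVE.  Otherwise the
* packet is cut into maxframe-sized frames further down, e.g. with
* len = 1500 and maxframe = 1000, tx_frameno = howmany(1500, 1000) = 2
* and the initial framelen is min(1500, 1000) = 1000.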
*/ sc->pktlen = 0; sc->tx_frameno = 0; sc->framelen = 0; sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return; } for (len = 0, m = sc->tx_buf_p; m; m = m->m_next) len += m->m_len; if (len != 0) break; m_freem(sc->tx_buf_p); } if (len < SBNI_MIN_LEN) len = SBNI_MIN_LEN; sc->pktlen = len; - sc->tx_frameno = (len + sc->maxframe - 1) / sc->maxframe; + sc->tx_frameno = howmany(len, sc->maxframe); sc->framelen = min(len, sc->maxframe); sbni_outb(sc, CSR0, sbni_inb(sc, CSR0) | TR_REQ); sc->ifp->if_drv_flags |= IFF_DRV_OACTIVE; BPF_MTAP(sc->ifp, sc->tx_buf_p); }
static void drop_xmit_queue(struct sbni_softc *sc) { struct mbuf *m; if (sc->tx_buf_p) { m_freem(sc->tx_buf_p); sc->tx_buf_p = NULL; if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); } for (;;) { IF_DEQUEUE(&sc->ifp->if_snd, m); if (m == NULL) break; m_freem(m); if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); } sc->tx_frameno = 0; sc->framelen = 0; sc->outpos = 0; sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; }
static void send_frame_header(struct sbni_softc *sc, u_int32_t *crc_p) { u_int32_t crc; u_int len_field; u_char value; crc = *crc_p; len_field = sc->framelen + 6; /* CRC + frameno + reserved */ if (sc->state & FL_NEED_RESEND) len_field |= FRAME_RETRY; /* non-first attempt... */ if (sc->outpos == 0) len_field |= FRAME_FIRST; len_field |= (sc->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD; sbni_outb(sc, DAT, SBNI_SIG); value = (u_char)len_field; sbni_outb(sc, DAT, value); crc = CRC32(value, crc); value = (u_char)(len_field >> 8); sbni_outb(sc, DAT, value); crc = CRC32(value, crc); sbni_outb(sc, DAT, sc->tx_frameno); crc = CRC32(sc->tx_frameno, crc); sbni_outb(sc, DAT, 0); crc = CRC32(0, crc); *crc_p = crc; }
/* * If the frame tail is not needed (incorrect frame number or received * twice), it is not stored, but the CRC is still calculated. */ static int skip_tail(struct sbni_softc *sc, u_int tail_len, u_int32_t crc) { while (tail_len--) crc = CRC32(sbni_inb(sc, DAT), crc); return (crc == CRC32_REMAINDER); }
static int check_fhdr(struct sbni_softc *sc, u_int *framelen, u_int *frameno, u_int *ack, u_int *is_first, u_int32_t *crc_p) { u_int32_t crc; u_char value; crc = *crc_p; if (sbni_inb(sc, DAT) != SBNI_SIG) return (0); value = sbni_inb(sc, DAT); *framelen = (u_int)value; crc = CRC32(value, crc); value = sbni_inb(sc, DAT); *framelen |= ((u_int)value) << 8; crc = CRC32(value, crc); *ack = *framelen & FRAME_ACK_MASK; *is_first = (*framelen & FRAME_FIRST) != 0; if ((*framelen &= FRAME_LEN_MASK) < 6 || *framelen > SBNI_MAX_FRAME - 3) return (0); value = sbni_inb(sc, DAT); *frameno = (u_int)value; crc = CRC32(value, crc); crc = CRC32(sbni_inb(sc, DAT), crc); /* reserved byte */ *framelen -= 2; *crc_p = crc; return (1); }
static int get_rx_buf(struct sbni_softc *sc) { struct mbuf *m; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { if_printf(sc->ifp, "cannot allocate header mbuf\n"); return (0); } /* * We always put the received packet in a single buffer - * either with just an mbuf header or in a cluster attached * to the header. The +2 is to compensate for the alignment * fixup below. */ if (ETHER_MAX_LEN + 2 > MHLEN) { /* Attach an mbuf cluster */ if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); return (0); } } m->m_pkthdr.len = m->m_len = ETHER_MAX_LEN + 2; /* * The +2 is to longword align the start of the real packet. * (sizeof ether_header == 14) * This is important for NFS.
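* Numerically: sizeof(struct ether_header) == 14, so starting the
* frame at offset 2 (the m_adj() below) places the IP header at
* offset 16, a longword boundary.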
*/ m_adj(m, 2); sc->rx_buf_p = m; return (1); } static void indicate_pkt(struct sbni_softc *sc) { struct ifnet *ifp = sc->ifp; struct mbuf *m; m = sc->rx_buf_p; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = sc->inppos; sc->rx_buf_p = NULL; SBNI_UNLOCK(sc); (*ifp->if_input)(ifp, m); SBNI_LOCK(sc); } /* -------------------------------------------------------------------------- */ /* * This routine periodically checks wire activity and regenerates the marker * if the link has been inactive for a long time. */ static void sbni_timeout(void *xsc) { struct sbni_softc *sc; u_char csr0; sc = (struct sbni_softc *)xsc; SBNI_ASSERT_LOCKED(sc); csr0 = sbni_inb(sc, CSR0); if (csr0 & RC_CHK) { if (sc->timer_ticks) { if (csr0 & (RC_RDY | BU_EMP)) /* receiving not active */ sc->timer_ticks--; } else { sc->in_stats.timeout_number++; if (sc->delta_rxl) timeout_change_level(sc); sbni_outb(sc, CSR1, *(u_char *)&sc->csr1 | PR_RES); csr0 = sbni_inb(sc, CSR0); } } sbni_outb(sc, CSR0, csr0 | RC_CHK); callout_reset(&sc->wch, hz/SBNI_HZ, sbni_timeout, sc); } /* -------------------------------------------------------------------------- */ static void card_start(struct sbni_softc *sc) { sc->timer_ticks = CHANGE_LEVEL_START_TICKS; sc->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); sc->state |= FL_PREV_OK; sc->inppos = 0; sc->wait_frameno = 0; sbni_outb(sc, CSR1, *(u_char *)&sc->csr1 | PR_RES); sbni_outb(sc, CSR0, EN_INT); } /* -------------------------------------------------------------------------- */ static u_char rxl_tab[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08, 0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f }; #define SIZE_OF_TIMEOUT_RXL_TAB 4 static u_char timeout_rxl_tab[] = { 0x03, 0x05, 0x08, 0x0b }; static void set_initial_values(struct sbni_softc *sc, struct sbni_flags flags) { if (flags.fixed_rxl) { sc->delta_rxl = 0; /* disable receive level autodetection */ sc->cur_rxl_index = flags.rxl; } else { sc->delta_rxl = DEF_RXL_DELTA; sc->cur_rxl_index = DEF_RXL; } sc->csr1.rate = flags.fixed_rate ? 
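/*
 * The prepare_to_send() hunk above replaces the open-coded rounding-up
 * division with howmany().  A minimal illustration, assuming the usual
 * <sys/param.h> definition:
 *
 *	#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
 *
 *	howmany(1024, 256) == 4		(exact multiple, same as 1024 / 256)
 *	howmany(1025, 256) == 5		(one extra frame for the remainder)
 *
 * so a packet of len bytes is split into howmany(len, sc->maxframe)
 * frames, exactly what the removed expression computed.
 */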
flags.rate : DEFAULT_RATE; sc->csr1.rxl = rxl_tab[sc->cur_rxl_index]; sc->maxframe = DEFAULT_FRAME_LEN; /* * generate Ethernet address (0x00ff01xxxxxx) */ *(u_int16_t *) sc->enaddr = htons(0x00ff); if (flags.mac_addr) { *(u_int32_t *) (sc->enaddr + 2) = htonl(flags.mac_addr | 0x01000000); } else { *(u_char *) (sc->enaddr + 2) = 0x01; read_random(sc->enaddr + 3, 3); } } #ifdef SBNI_DUAL_COMPOUND void sbni_add(struct sbni_softc *sc) { mtx_lock(&headlist_lock); sc->link = sbni_headlist; sbni_headlist = sc; mtx_unlock(&headlist_lock); } struct sbni_softc * connect_to_master(struct sbni_softc *sc) { struct sbni_softc *p, *p_prev; mtx_lock(&headlist_lock); for (p = sbni_headlist, p_prev = NULL; p; p_prev = p, p = p->link) { if (rman_get_start(p->io_res) == rman_get_start(sc->io_res) + 4 || rman_get_start(p->io_res) == rman_get_start(sc->io_res) - 4) { p->slave_sc = sc; if (p_prev) p_prev->link = p->link; else sbni_headlist = p->link; mtx_unlock(&headlist_lock); return p; } } mtx_unlock(&headlist_lock); return (NULL); } #endif /* SBNI_DUAL_COMPOUND */ /* Receive level auto-selection */ static void change_level(struct sbni_softc *sc) { if (sc->delta_rxl == 0) /* do not auto-negotiate RxL */ return; if (sc->cur_rxl_index == 0) sc->delta_rxl = 1; else if (sc->cur_rxl_index == 15) sc->delta_rxl = -1; else if (sc->cur_rxl_rcvd < sc->prev_rxl_rcvd) sc->delta_rxl = -sc->delta_rxl; sc->csr1.rxl = rxl_tab[sc->cur_rxl_index += sc->delta_rxl]; sbni_inb(sc, CSR0); /* this is needed for PCI cards */ sbni_outb(sc, CSR1, *(u_char *)&sc->csr1); sc->prev_rxl_rcvd = sc->cur_rxl_rcvd; sc->cur_rxl_rcvd = 0; } static void timeout_change_level(struct sbni_softc *sc) { sc->cur_rxl_index = timeout_rxl_tab[sc->timeout_rxl]; if (++sc->timeout_rxl >= 4) sc->timeout_rxl = 0; sc->csr1.rxl = rxl_tab[sc->cur_rxl_index]; sbni_inb(sc, CSR0); sbni_outb(sc, CSR1, *(u_char *)&sc->csr1); sc->prev_rxl_rcvd = sc->cur_rxl_rcvd; sc->cur_rxl_rcvd = 0; } /* -------------------------------------------------------------------------- */ /* * Process an ioctl request. This code needs some work - it looks * pretty ugly. */ static int sbni_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct sbni_softc *sc; struct ifreq *ifr; struct thread *td; struct sbni_in_stats *in_stats; struct sbni_flags flags; int error; sc = ifp->if_softc; ifr = (struct ifreq *)data; td = curthread; error = 0; switch (command) { case SIOCSIFFLAGS: /* * If the interface is marked up and stopped, then start it. * If it is marked down and running, then stop it. */ SBNI_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) sbni_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { sbni_stop(sc); } } SBNI_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. 
*/ error = 0; /* if (ifr == NULL) error = EAFNOSUPPORT; */ break; /* * SBNI specific ioctl */ case SIOCGHWFLAGS: /* get flags */ SBNI_LOCK(sc); bcopy((caddr_t)IF_LLADDR(sc->ifp)+3, (caddr_t) &flags, 3); flags.rxl = sc->cur_rxl_index; flags.rate = sc->csr1.rate; flags.fixed_rxl = (sc->delta_rxl == 0); flags.fixed_rate = 1; SBNI_UNLOCK(sc); ifr->ifr_data = *(caddr_t*) &flags; break; case SIOCGINSTATS: in_stats = malloc(sizeof(struct sbni_in_stats), M_DEVBUF, M_WAITOK); SBNI_LOCK(sc); bcopy(&sc->in_stats, in_stats, sizeof(struct sbni_in_stats)); SBNI_UNLOCK(sc); error = copyout(ifr->ifr_data, in_stats, sizeof(struct sbni_in_stats)); free(in_stats, M_DEVBUF); break; case SIOCSHWFLAGS: /* set flags */ /* root only */ error = priv_check(td, PRIV_DRIVER); if (error) break; flags = *(struct sbni_flags*)&ifr->ifr_data; SBNI_LOCK(sc); if (flags.fixed_rxl) { sc->delta_rxl = 0; sc->cur_rxl_index = flags.rxl; } else { sc->delta_rxl = DEF_RXL_DELTA; sc->cur_rxl_index = DEF_RXL; } sc->csr1.rxl = rxl_tab[sc->cur_rxl_index]; sc->csr1.rate = flags.fixed_rate ? flags.rate : DEFAULT_RATE; if (flags.mac_addr) bcopy((caddr_t) &flags, (caddr_t) IF_LLADDR(sc->ifp)+3, 3); /* Don't be afraid... */ sbni_outb(sc, CSR1, *(char*)(&sc->csr1) | PR_RES); SBNI_UNLOCK(sc); break; case SIOCRINSTATS: SBNI_LOCK(sc); if (!(error = priv_check(td, PRIV_DRIVER))) /* root only */ bzero(&sc->in_stats, sizeof(struct sbni_in_stats)); SBNI_UNLOCK(sc); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* -------------------------------------------------------------------------- */ static u_int32_t calc_crc32(u_int32_t crc, caddr_t p, u_int len) { while (len--) crc = CRC32(*p++, crc); return (crc); } static u_int32_t crc32tab[] __aligned(8) = { 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37, 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E, 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605, 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C, 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53, 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A, 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661, 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278, 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF, 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6, 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD, 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4, 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B, 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82, 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9, 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0, 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7, 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE, 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795, 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C, 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3, 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA, 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1, 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8, 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F, 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76, 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D, 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344, 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B, 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12, 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739, 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320, 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17, 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E, 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525, 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C, 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73, 0x2560B8D0, 
0x52678846, 0xCB6ED9FC, 0xBC69E96A, 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541, 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158, 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF, 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6, 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED, 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4, 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB, 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2, 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589, 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190, 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87, 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E, 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5, 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC, 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3, 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA, 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1, 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8, 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F, 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856, 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D, 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064, 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B, 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832, 0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419, 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000 }; diff --git a/sys/dev/scd/scd.c b/sys/dev/scd/scd.c index 3959d646c9fa..e84a7072ec4f 100644 --- a/sys/dev/scd/scd.c +++ b/sys/dev/scd/scd.c @@ -1,1431 +1,1431 @@ /*- * Copyright (c) 1995 Mikael Hybsch * All rights reserved. * * Portions of this file are copied from mcd.c * which has the following copyrights: * * Copyright 1993 by Holger Veit (data part) * Copyright 1993 by Brian Moore (audio part) * Changes Copyright 1993 by Gary Clark II * Changes Copyright (C) 1994 by Andrew A. Chernov * * Rewrote probe routine to work on newer Mitsumi drives. * Additional changes (C) 1994 by Jordan K. Hubbard * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #undef SCD_DEBUG #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* flags */ #define SCDOPEN 0x0001 /* device opened */ #define SCDVALID 0x0002 /* parameters loaded */ #define SCDINIT 0x0004 /* device is init'd */ #define SCDPROBING 0x0020 /* probing */ #define SCDTOC 0x0100 /* already read toc */ #define SCDMBXBSY 0x0200 /* local mbx is busy */ #define SCDSPINNING 0x0400 /* drive is spun up */ #define SCD_S_BEGIN 0 #define SCD_S_BEGIN1 1 #define SCD_S_WAITSTAT 2 #define SCD_S_WAITFIFO 3 #define SCD_S_WAITSPIN 4 #define SCD_S_WAITREAD 5 #define SCD_S_WAITPARAM 6 #define RDELAY_WAIT 300 #define RDELAY_WAITREAD 300 #define SCDBLKSIZE 2048 #ifdef SCD_DEBUG static int scd_debuglevel = SCD_DEBUG; # define XDEBUG(sc, level, fmt, args...) \ do { \ if (scd_debuglevel >= level) \ device_printf(sc->dev, fmt, ## args); \ } while (0) #else # define XDEBUG(sc, level, fmt, args...) #endif #define IS_ATTENTION(sc) ((SCD_READ(sc, IREG_STATUS) & SBIT_ATTENTION) != 0) #define IS_BUSY(sc) ((SCD_READ(sc, IREG_STATUS) & SBIT_BUSY) != 0) #define IS_DATA_RDY(sc) ((SCD_READ(sc, IREG_STATUS) & SBIT_DATA_READY) != 0) #define STATUS_BIT(sc, bit) ((SCD_READ(sc, IREG_STATUS) & (bit)) != 0) #define FSTATUS_BIT(sc, bit) ((SCD_READ(sc, IREG_FSTATUS) & (bit)) != 0) /* prototypes */ static void hsg2msf(int hsg, bcd_t *msf); static int msf2hsg(bcd_t *msf); static void process_attention(struct scd_softc *); static int waitfor_status_bits(struct scd_softc *, int bits_set, int bits_clear); static int send_cmd(struct scd_softc *, u_char cmd, u_int nargs, ...); static void init_drive(struct scd_softc *); static int spin_up(struct scd_softc *); static int read_toc(struct scd_softc *); static int get_result(struct scd_softc *, int result_len, u_char *result); static void print_error(struct scd_softc *, int errcode); static void scd_start(struct scd_softc *); static void scd_timeout(void *); static void scd_doread(struct scd_softc *, int state, struct scd_mbx *mbxin); static int scd_eject(struct scd_softc *); static int scd_stop(struct scd_softc *); static int scd_pause(struct scd_softc *); static int scd_resume(struct scd_softc *); static int scd_playtracks(struct scd_softc *, struct ioc_play_track *pt); static int scd_playmsf(struct scd_softc *, struct ioc_play_msf *msf); static int scd_play(struct scd_softc *, struct ioc_play_msf *msf); static int scd_subchan(struct scd_softc *, struct ioc_read_subchannel *sch, int nocopyout); static int read_subcode(struct scd_softc *, struct sony_subchannel_position_data *sch); /* for xcdplayer */ static int scd_toc_header(struct scd_softc *, struct ioc_toc_header *th); static int scd_toc_entrys(struct scd_softc *, struct ioc_read_toc_entry *te); static int scd_toc_entry(struct scd_softc *, struct ioc_read_toc_single_entry *te); #define SCD_LASTPLUS1 170 /* don't ask, xcdplayer passes this in */ static d_open_t scdopen; static d_close_t scdclose; static d_ioctl_t scdioctl; static d_strategy_t scdstrategy; static struct cdevsw scd_cdevsw = { .d_version = D_VERSION, .d_open = scdopen, .d_close = scdclose, .d_read = physread, .d_ioctl = scdioctl, .d_strategy = scdstrategy, .d_name = "scd", .d_flags = D_DISK, }; int scd_attach(struct scd_softc *sc) { int unit; unit = device_get_unit(sc->dev); SCD_LOCK(sc); init_drive(sc); sc->data.flags = SCDINIT; sc->data.audio_status = CD_AS_AUDIO_INVALID; bioq_init(&sc->data.head); SCD_UNLOCK(sc); sc->scd_dev_t = 
make_dev(&scd_cdevsw, 8 * unit, UID_ROOT, GID_OPERATOR, 0640, "scd%d", unit); sc->scd_dev_t->si_drv1 = (void *)sc; return (0); } static int scdopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct scd_softc *sc; int rc; sc = (struct scd_softc *)dev->si_drv1; /* mark all open partitions invalid */ SCD_LOCK(sc); if (sc->data.openflag) { SCD_UNLOCK(sc); return (ENXIO); } XDEBUG(sc, 1, "DEBUG: status = 0x%x\n", SCD_READ(sc, IREG_STATUS)); if ((rc = spin_up(sc)) != 0) { print_error(sc, rc); SCD_UNLOCK(sc); return (EIO); } if (!(sc->data.flags & SCDTOC)) { int loop_count = 3; while (loop_count-- > 0 && (rc = read_toc(sc)) != 0) { if (rc == ERR_NOT_SPINNING) { rc = spin_up(sc); if (rc) { print_error(sc, rc); SCD_UNLOCK(sc); return (EIO); } continue; } device_printf(sc->dev, "TOC read error 0x%x\n", rc); SCD_UNLOCK(sc); return (EIO); } } sc->data.openflag = 1; sc->data.flags |= SCDVALID; SCD_UNLOCK(sc); return (0); } static int scdclose(struct cdev *dev, int flags, int fmt, struct thread *td) { struct scd_softc *sc; sc = (struct scd_softc *)dev->si_drv1; SCD_LOCK(sc); KASSERT(sc->data.openflag, ("device not open")); if (sc->data.audio_status != CD_AS_PLAY_IN_PROGRESS) { (void)send_cmd(sc, CMD_SPIN_DOWN, 0); sc->data.flags &= ~SCDSPINNING; } /* close channel */ sc->data.openflag = 0; SCD_UNLOCK(sc); return (0); } static void scdstrategy(struct bio *bp) { struct scd_softc *sc; sc = (struct scd_softc *)bp->bio_dev->si_drv1; /* if device invalidated (e.g. media change, door open), error */ SCD_LOCK(sc); if (!(sc->data.flags & SCDVALID)) { device_printf(sc->dev, "media changed\n"); bp->bio_error = EIO; goto bad; } /* read only */ if (!(bp->bio_cmd == BIO_READ)) { bp->bio_error = EROFS; goto bad; } /* no data to read */ if (bp->bio_bcount == 0) goto done; if (!(sc->data.flags & SCDTOC)) { bp->bio_error = EIO; goto bad; } bp->bio_resid = 0; /* queue it */ bioq_disksort(&sc->data.head, bp); /* now check whether we can perform processing */ scd_start(sc); SCD_UNLOCK(sc); return; bad: bp->bio_flags |= BIO_ERROR; done: SCD_UNLOCK(sc); bp->bio_resid = bp->bio_bcount; biodone(bp); return; } static void scd_start(struct scd_softc *sc) { struct bio *bp; SCD_ASSERT_LOCKED(sc); if (sc->data.flags & SCDMBXBSY) return; bp = bioq_takefirst(&sc->data.head); if (bp != 0) { /* block found to process, dequeue */ sc->data.flags |= SCDMBXBSY; } else { /* nothing to do */ return; } sc->data.mbx.retry = 3; sc->data.mbx.bp = bp; scd_doread(sc, SCD_S_BEGIN, &(sc->data.mbx)); return; } static int scdioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) { struct scd_softc *sc; int error; sc = (struct scd_softc *)dev->si_drv1; XDEBUG(sc, 1, "ioctl: cmd=0x%lx\n", cmd); SCD_LOCK(sc); if (!(sc->data.flags & SCDVALID)) { SCD_UNLOCK(sc); return (EIO); } error = 0; switch (cmd) { case DIOCGMEDIASIZE: *(off_t *)addr = (off_t)sc->data.disksize * sc->data.blksize; break; case DIOCGSECTORSIZE: *(u_int *)addr = sc->data.blksize; break; case CDIOCPLAYTRACKS: error = scd_playtracks(sc, (struct ioc_play_track *) addr); break; case CDIOCPLAYBLOCKS: error = EINVAL; break; case CDIOCPLAYMSF: error = scd_playmsf(sc, (struct ioc_play_msf *) addr); break; case CDIOCREADSUBCHANNEL_SYSSPACE: return scd_subchan(sc, (struct ioc_read_subchannel *) addr, 1); case CDIOCREADSUBCHANNEL: return scd_subchan(sc, (struct ioc_read_subchannel *) addr, 0); case CDIOREADTOCHEADER: error = scd_toc_header (sc, (struct ioc_toc_header *) addr); break; case CDIOREADTOCENTRYS: return scd_toc_entrys (sc, (struct ioc_read_toc_entry*) 
addr); case CDIOREADTOCENTRY: error = scd_toc_entry (sc, (struct ioc_read_toc_single_entry*) addr); break; case CDIOCSETPATCH: case CDIOCGETVOL: case CDIOCSETVOL: case CDIOCSETMONO: case CDIOCSETSTERIO: case CDIOCSETMUTE: case CDIOCSETLEFT: case CDIOCSETRIGHT: error = EINVAL; break; case CDIOCRESUME: error = scd_resume(sc); break; case CDIOCPAUSE: error = scd_pause(sc); break; case CDIOCSTART: error = EINVAL; break; case CDIOCSTOP: error = scd_stop(sc); break; case CDIOCEJECT: error = scd_eject(sc); break; case CDIOCALLOW: break; case CDIOCSETDEBUG: #ifdef SCD_DEBUG scd_debuglevel++; #endif break; case CDIOCCLRDEBUG: #ifdef SCD_DEBUG scd_debuglevel = 0; #endif break; default: device_printf(sc->dev, "unsupported ioctl (cmd=0x%lx)\n", cmd); error = ENOTTY; break; } SCD_UNLOCK(sc); return (error); } /*************************************************************** * lower level of driver starts here **************************************************************/ static int scd_playtracks(struct scd_softc *sc, struct ioc_play_track *pt) { struct ioc_play_msf msf; int a = pt->start_track; int z = pt->end_track; int rc; if (!(sc->data.flags & SCDTOC) && (rc = read_toc(sc)) != 0) { if (rc == -ERR_NOT_SPINNING) { if (spin_up(sc) != 0) return (EIO); rc = read_toc(sc); } if (rc != 0) { print_error(sc, rc); return (EIO); } } XDEBUG(sc, 1, "playtracks from %d:%d to %d:%d\n", a, pt->start_index, z, pt->end_index); if ( a < sc->data.first_track || a > sc->data.last_track || a > z || z > sc->data.last_track) return (EINVAL); bcopy(sc->data.toc[a].start_msf, &msf.start_m, 3); hsg2msf(msf2hsg(sc->data.toc[z+1].start_msf)-1, &msf.end_m); return scd_play(sc, &msf); } /* The start/end msf is expected to be in bin format */ static int scd_playmsf(struct scd_softc *sc, struct ioc_play_msf *msfin) { struct ioc_play_msf msf; msf.start_m = bin2bcd(msfin->start_m); msf.start_s = bin2bcd(msfin->start_s); msf.start_f = bin2bcd(msfin->start_f); msf.end_m = bin2bcd(msfin->end_m); msf.end_s = bin2bcd(msfin->end_s); msf.end_f = bin2bcd(msfin->end_f); return scd_play(sc, &msf); } /* The start/end msf is expected to be in bcd format */ static int scd_play(struct scd_softc *sc, struct ioc_play_msf *msf) { int i, rc; XDEBUG(sc, 1, "playing: %02x:%02x:%02x -> %02x:%02x:%02x\n", msf->start_m, msf->start_s, msf->start_f, msf->end_m, msf->end_s, msf->end_f); for (i = 0; i < 2; i++) { rc = send_cmd(sc, CMD_PLAY_AUDIO, 7, 0x03, msf->start_m, msf->start_s, msf->start_f, msf->end_m, msf->end_s, msf->end_f); if (rc == -ERR_NOT_SPINNING) { sc->data.flags &= ~SCDSPINNING; if (spin_up(sc) != 0) return (EIO); } else if (rc < 0) { print_error(sc, rc); return (EIO); } else { break; } } sc->data.audio_status = CD_AS_PLAY_IN_PROGRESS; bcopy((char *)msf, (char *)&sc->data.last_play, sizeof(struct ioc_play_msf)); return (0); } static int scd_stop(struct scd_softc *sc) { (void)send_cmd(sc, CMD_STOP_AUDIO, 0); sc->data.audio_status = CD_AS_PLAY_COMPLETED; return (0); } static int scd_pause(struct scd_softc *sc) { struct sony_subchannel_position_data subpos; if (sc->data.audio_status != CD_AS_PLAY_IN_PROGRESS) return (EINVAL); if (read_subcode(sc, &subpos) != 0) return (EIO); if (send_cmd(sc, CMD_STOP_AUDIO, 0) != 0) return (EIO); sc->data.last_play.start_m = subpos.abs_msf[0]; sc->data.last_play.start_s = subpos.abs_msf[1]; sc->data.last_play.start_f = subpos.abs_msf[2]; sc->data.audio_status = CD_AS_PLAY_PAUSED; XDEBUG(sc, 1, "pause @ %02x:%02x:%02x\n", sc->data.last_play.start_m, sc->data.last_play.start_s, sc->data.last_play.start_f); return 
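/*
 * scd_pause() above saves the absolute position exactly as the drive
 * reports it, i.e. already in BCD, so scd_resume() can hand it back to
 * scd_play() unchanged.  A worked example of the conversion done in
 * scd_playmsf(), assuming the usual bin2bcd() behaviour:
 *
 *	bin2bcd(12) == 0x12, bin2bcd(34) == 0x34, bin2bcd(56) == 0x56
 *
 * so a binary userland request for 12:34.56 becomes the BCD triple
 * {0x12, 0x34, 0x56} before it reaches scd_play().
 */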
(0); } static int scd_resume(struct scd_softc *sc) { if (sc->data.audio_status != CD_AS_PLAY_PAUSED) return (EINVAL); return scd_play(sc, &sc->data.last_play); } static int scd_eject(struct scd_softc *sc) { sc->data.audio_status = CD_AS_AUDIO_INVALID; sc->data.flags &= ~(SCDSPINNING|SCDTOC); if (send_cmd(sc, CMD_STOP_AUDIO, 0) != 0 || send_cmd(sc, CMD_SPIN_DOWN, 0) != 0 || send_cmd(sc, CMD_EJECT, 0) != 0) { return (EIO); } return (0); } static int scd_subchan(struct scd_softc *sc, struct ioc_read_subchannel *sch, int nocopyout) { struct sony_subchannel_position_data q; struct cd_sub_channel_info data; XDEBUG(sc, 1, "subchan af=%d, df=%d\n", sch->address_format, sch->data_format); if (sch->address_format != CD_MSF_FORMAT) return (EINVAL); if (sch->data_format != CD_CURRENT_POSITION) return (EINVAL); if (read_subcode(sc, &q) != 0) return (EIO); data.header.audio_status = sc->data.audio_status; data.what.position.data_format = CD_MSF_FORMAT; data.what.position.track_number = bcd2bin(q.track_number); data.what.position.reladdr.msf.unused = 0; data.what.position.reladdr.msf.minute = bcd2bin(q.rel_msf[0]); data.what.position.reladdr.msf.second = bcd2bin(q.rel_msf[1]); data.what.position.reladdr.msf.frame = bcd2bin(q.rel_msf[2]); data.what.position.absaddr.msf.unused = 0; data.what.position.absaddr.msf.minute = bcd2bin(q.abs_msf[0]); data.what.position.absaddr.msf.second = bcd2bin(q.abs_msf[1]); data.what.position.absaddr.msf.frame = bcd2bin(q.abs_msf[2]); SCD_UNLOCK(sc); if (nocopyout == 0) { if (copyout(&data, sch->data, min(sizeof(struct cd_sub_channel_info), sch->data_len))!=0) return (EFAULT); } else { bcopy(&data, sch->data, min(sizeof(struct cd_sub_channel_info), sch->data_len)); } return (0); } int scd_probe(struct scd_softc *sc) { struct sony_drive_configuration drive_config; int rc; static char namebuf[8+16+8+3]; char *s = namebuf; int loop_count = 0; sc->data.flags = SCDPROBING; bzero(&drive_config, sizeof(drive_config)); again: /* Reset drive */ SCD_WRITE(sc, OREG_CONTROL, CBIT_RESET_DRIVE); /* Calm down */ DELAY(300000); /* Only the ATTENTION bit may be set */ if ((SCD_READ(sc, IREG_STATUS) & ~1) != 0) { XDEBUG(sc, 1, "too many bits set. probe failed.\n"); return (ENXIO); } rc = send_cmd(sc, CMD_GET_DRIVE_CONFIG, 0); if (rc != sizeof(drive_config)) { /* Sometimes if the drive is playing audio I get */ /* the bad result 82. Fix by repeating the reset */ if (rc > 0 && loop_count++ == 0) goto again; return (ENXIO); } if (get_result(sc, rc, (u_char *)&drive_config) != 0) return (ENXIO); bcopy(drive_config.vendor, namebuf, 8); s = namebuf+8; while (*(s-1) == ' ') /* Strip trailing spaces */ s--; *s++ = ' '; bcopy(drive_config.product, s, 16); s += 16; while (*(s-1) == ' ') s--; *s++ = ' '; bcopy(drive_config.revision, s, 8); s += 8; while (*(s-1) == ' ') s--; *s = 0; sc->data.name = namebuf; if (drive_config.config & 0x10) sc->data.double_speed = 1; else sc->data.double_speed = 0; return (0); } static int read_subcode(struct scd_softc *sc, struct sony_subchannel_position_data *scp) { int rc; rc = send_cmd(sc, CMD_GET_SUBCHANNEL_DATA, 0); if (rc < 0 || rc < sizeof(*scp)) return (EIO); if (get_result(sc, rc, (u_char *)scp) != 0) return (EIO); return (0); } /* State machine copied from mcd.c */ /* This (and the code in mcd.c) will not work with more than one drive */ /* because there is only one sc->ch_mbxsave below. Should fix that some day. */ /* (sc->ch_mbxsave & state should probably be included in the scd_data struct and */ /* the unit number used as first argument to scd_doread().) 
/Micke */ /* state machine to process read requests * initialize with SCD_S_BEGIN: reset state machine * SCD_S_WAITSTAT: wait for ready (!busy) * SCD_S_WAITSPIN: wait for drive to spin up (if not spinning) * SCD_S_WAITFIFO: wait for param fifo to get ready, then exec. command. * SCD_S_WAITREAD: wait for data ready, read data * SCD_S_WAITPARAM: wait for command result params, read them, error if bad data was read. */ static void scd_timeout(void *arg) { struct scd_softc *sc; sc = (struct scd_softc *)arg; SCD_ASSERT_LOCKED(sc); scd_doread(sc, sc->ch_state, sc->ch_mbxsave); } static void scd_doread(struct scd_softc *sc, int state, struct scd_mbx *mbxin) { struct scd_mbx *mbx = (state!=SCD_S_BEGIN) ? sc->ch_mbxsave : mbxin; struct bio *bp = mbx->bp; int i; int blknum; caddr_t addr; static char sdata[3]; /* Must be preserved between calls to this function */ SCD_ASSERT_LOCKED(sc); loop: switch (state) { case SCD_S_BEGIN: mbx = sc->ch_mbxsave = mbxin; case SCD_S_BEGIN1: /* get status */ mbx->count = RDELAY_WAIT; process_attention(sc); goto trystat; case SCD_S_WAITSTAT: sc->ch_state = SCD_S_WAITSTAT; callout_stop(&sc->timer); if (mbx->count-- <= 0) { device_printf(sc->dev, "timeout. drive busy.\n"); goto harderr; } trystat: if (IS_BUSY(sc)) { sc->ch_state = SCD_S_WAITSTAT; callout_reset(&sc->timer, hz / 100, scd_timeout, sc); /* XXX */ return; } process_attention(sc); /* reject, if audio active */ if (sc->data.audio_status & CD_AS_PLAY_IN_PROGRESS) { device_printf(sc->dev, "audio is active\n"); goto harderr; } mbx->sz = sc->data.blksize; /* for first block */ - mbx->nblk = (bp->bio_bcount + (mbx->sz-1)) / mbx->sz; + mbx->nblk = howmany(bp->bio_bcount, mbx->sz); mbx->skip = 0; nextblock: if (!(sc->data.flags & SCDVALID)) goto changed; blknum = bp->bio_offset / mbx->sz + mbx->skip/mbx->sz; XDEBUG(sc, 2, "scd_doread: read blknum=%d\n", blknum); /* build parameter block */ hsg2msf(blknum, sdata); SCD_WRITE(sc, OREG_CONTROL, CBIT_RESULT_READY_CLEAR); SCD_WRITE(sc, OREG_CONTROL, CBIT_RPARAM_CLEAR); SCD_WRITE(sc, OREG_CONTROL, CBIT_DATA_READY_CLEAR); if (FSTATUS_BIT(sc, FBIT_WPARAM_READY)) goto writeparam; mbx->count = 100; sc->ch_state = SCD_S_WAITFIFO; callout_reset(&sc->timer, hz / 100, scd_timeout, sc); /* XXX */ return; case SCD_S_WAITSPIN: sc->ch_state = SCD_S_WAITSPIN; callout_stop(&sc->timer); if (mbx->count-- <= 0) { device_printf(sc->dev, "timeout waiting for drive to spin up.\n"); goto harderr; } if (!STATUS_BIT(sc, SBIT_RESULT_READY)) { sc->ch_state = SCD_S_WAITSPIN; callout_reset(&sc->timer, hz / 100, scd_timeout, sc); /* XXX */ return; } SCD_WRITE(sc, OREG_CONTROL, CBIT_RESULT_READY_CLEAR); switch ((i = SCD_READ(sc, IREG_RESULT)) & 0xf0) { case 0x20: i = SCD_READ(sc, IREG_RESULT); print_error(sc, i); goto harderr; case 0x00: (void)SCD_READ(sc, IREG_RESULT); sc->data.flags |= SCDSPINNING; break; } XDEBUG(sc, 1, "DEBUG: spin up complete\n"); state = SCD_S_BEGIN1; goto loop; case SCD_S_WAITFIFO: sc->ch_state = SCD_S_WAITFIFO; callout_stop(&sc->timer); if (mbx->count-- <= 0) { device_printf(sc->dev, "timeout. write param not ready.\n"); goto harderr; } if (!FSTATUS_BIT(sc, FBIT_WPARAM_READY)) { sc->ch_state = SCD_S_WAITFIFO; callout_reset(&sc->timer, hz / 100, scd_timeout, sc); /* XXX */ return; } XDEBUG(sc, 1, "mbx->count (writeparamwait) = %d(%d)\n", mbx->count, 100); writeparam: /* The reason this test isn't done 'till now is to make sure */ /* that it is ok to send the SPIN_UP cmd below. 
*/ if (!(sc->data.flags & SCDSPINNING)) { XDEBUG(sc, 1, "spinning up drive ...\n"); SCD_WRITE(sc, OREG_COMMAND, CMD_SPIN_UP); mbx->count = 300; sc->ch_state = SCD_S_WAITSPIN; callout_reset(&sc->timer, hz / 100, scd_timeout, sc); /* XXX */ return; } /* send the read command */ SCD_WRITE(sc, OREG_WPARAMS, sdata[0]); SCD_WRITE(sc, OREG_WPARAMS, sdata[1]); SCD_WRITE(sc, OREG_WPARAMS, sdata[2]); SCD_WRITE(sc, OREG_WPARAMS, 0); SCD_WRITE(sc, OREG_WPARAMS, 0); SCD_WRITE(sc, OREG_WPARAMS, 1); SCD_WRITE(sc, OREG_COMMAND, CMD_READ); mbx->count = RDELAY_WAITREAD; for (i = 0; i < 50; i++) { if (STATUS_BIT(sc, SBIT_DATA_READY)) goto got_data; DELAY(100); } sc->ch_state = SCD_S_WAITREAD; callout_reset(&sc->timer, hz / 100, scd_timeout, sc); /* XXX */ return; case SCD_S_WAITREAD: sc->ch_state = SCD_S_WAITREAD; callout_stop(&sc->timer); if (mbx->count-- <= 0) { if (STATUS_BIT(sc, SBIT_RESULT_READY)) goto got_param; device_printf(sc->dev, "timeout while reading data\n"); goto readerr; } if (!STATUS_BIT(sc, SBIT_DATA_READY)) { process_attention(sc); if (!(sc->data.flags & SCDVALID)) goto changed; sc->ch_state = SCD_S_WAITREAD; callout_reset(&sc->timer, hz / 100, scd_timeout, sc); /* XXX */ return; } XDEBUG(sc, 2, "mbx->count (after RDY_BIT) = %d(%d)\n", mbx->count, RDELAY_WAITREAD); got_data: /* data is ready */ addr = bp->bio_data + mbx->skip; SCD_WRITE(sc, OREG_CONTROL, CBIT_DATA_READY_CLEAR); SCD_READ_MULTI(sc, IREG_DATA, addr, mbx->sz); mbx->count = 100; for (i = 0; i < 20; i++) { if (STATUS_BIT(sc, SBIT_RESULT_READY)) goto waitfor_param; DELAY(100); } goto waitfor_param; case SCD_S_WAITPARAM: sc->ch_state = SCD_S_WAITPARAM; callout_stop(&sc->timer); if (mbx->count-- <= 0) { device_printf(sc->dev, "timeout waiting for params\n"); goto readerr; } waitfor_param: if (!STATUS_BIT(sc, SBIT_RESULT_READY)) { sc->ch_state = SCD_S_WAITPARAM; callout_reset(&sc->timer, hz / 100, scd_timeout, sc); /* XXX */ return; } #ifdef SCD_DEBUG if (mbx->count < 100 && scd_debuglevel > 0) device_printf(sc->dev, "mbx->count (paramwait) = %d(%d)\n", mbx->count, 100); #endif got_param: SCD_WRITE(sc, OREG_CONTROL, CBIT_RESULT_READY_CLEAR); switch ((i = SCD_READ(sc, IREG_RESULT)) & 0xf0) { case 0x50: switch (i) { case ERR_FATAL_READ_ERROR1: case ERR_FATAL_READ_ERROR2: device_printf(sc->dev, "unrecoverable read error 0x%x\n", i); goto harderr; } break; case 0x20: i = SCD_READ(sc, IREG_RESULT); switch (i) { case ERR_NOT_SPINNING: XDEBUG(sc, 1, "read error: drive not spinning\n"); if (mbx->retry-- > 0) { state = SCD_S_BEGIN1; sc->data.flags &= ~SCDSPINNING; goto loop; } goto harderr; default: print_error(sc, i); goto readerr; } case 0x00: i = SCD_READ(sc, IREG_RESULT); break; } if (--mbx->nblk > 0) { mbx->skip += mbx->sz; goto nextblock; } /* return buffer */ bp->bio_resid = 0; biodone(bp); sc->data.flags &= ~SCDMBXBSY; scd_start(sc); return; } readerr: if (mbx->retry-- > 0) { device_printf(sc->dev, "retrying ...\n"); state = SCD_S_BEGIN1; goto loop; } harderr: /* invalidate the buffer */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; biodone(bp); sc->data.flags &= ~SCDMBXBSY; scd_start(sc); return; changed: device_printf(sc->dev, "media changed\n"); goto harderr; } static void hsg2msf(int hsg, bcd_t *msf) { hsg += 150; M_msf(msf) = bin2bcd(hsg / 4500); hsg %= 4500; S_msf(msf) = bin2bcd(hsg / 75); F_msf(msf) = bin2bcd(hsg % 75); } static int msf2hsg(bcd_t *msf) { return (bcd2bin(M_msf(msf)) * 60 + bcd2bin(S_msf(msf))) * 75 + bcd2bin(F_msf(msf)) - 150; } static void process_attention(struct scd_softc 
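/*
 * A worked example for the two conversion helpers above, assuming the
 * standard CD layout of 75 frames per second and the 150-frame
 * (2 second) lead-in offset:
 *
 *	hsg2msf(16):	16 + 150 = 166
 *			M = 166 / 4500 = 0, S = (166 % 4500) / 75 = 2,
 *			F = 166 % 75 = 16, i.e. 00:02.16 in BCD
 *	msf2hsg():	(0 * 60 + 2) * 75 + 16 - 150 = 16
 *
 * so msf2hsg() is the exact inverse of hsg2msf() for valid addresses.
 */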
*sc) { unsigned char code; int count = 0; while (IS_ATTENTION(sc) && count++ < 30) { SCD_WRITE(sc, OREG_CONTROL, CBIT_ATTENTION_CLEAR); code = SCD_READ(sc, IREG_RESULT); #ifdef SCD_DEBUG if (scd_debuglevel > 0) { if (count == 1) device_printf(sc->dev, "DEBUG: ATTENTIONS = 0x%x", code); else printf(",0x%x", code); } #endif switch (code) { case ATTEN_SPIN_DOWN: sc->data.flags &= ~SCDSPINNING; break; case ATTEN_SPIN_UP_DONE: sc->data.flags |= SCDSPINNING; break; case ATTEN_AUDIO_DONE: sc->data.audio_status = CD_AS_PLAY_COMPLETED; break; case ATTEN_DRIVE_LOADED: sc->data.flags &= ~(SCDTOC|SCDSPINNING|SCDVALID); sc->data.audio_status = CD_AS_AUDIO_INVALID; break; case ATTEN_EJECT_PUSHED: sc->data.flags &= ~SCDVALID; break; } DELAY(100); } #ifdef SCD_DEBUG if (scd_debuglevel > 0 && count > 0) printf("\n"); #endif } /* Returns 0 OR sony error code */ static int spin_up(struct scd_softc *sc) { unsigned char res_reg[12]; unsigned int res_size; int rc; int loop_count = 0; again: rc = send_cmd(sc, CMD_SPIN_UP, 0, 0, res_reg, &res_size); if (rc != 0) { XDEBUG(sc, 2, "CMD_SPIN_UP error 0x%x\n", rc); return (rc); } if (!(sc->data.flags & SCDTOC)) { rc = send_cmd(sc, CMD_READ_TOC, 0); if (rc == ERR_NOT_SPINNING) { if (loop_count++ < 3) goto again; return (rc); } if (rc != 0) return (rc); } sc->data.flags |= SCDSPINNING; return (0); } static struct sony_tracklist * get_tl(struct sony_toc *toc, int size) { struct sony_tracklist *tl = &toc->tracks[0]; if (tl->track != 0xb0) return (tl); if (tl->track != 0xb1) return (tl); tl = (struct sony_tracklist *)((char *)tl + 9); if (tl->track != 0xb2) return (tl); tl = (struct sony_tracklist *)((char *)tl + 9); if (tl->track != 0xb3) return (tl); tl = (struct sony_tracklist *)((char *)tl + 9); if (tl->track != 0xb4) return (tl); tl = (struct sony_tracklist *)((char *)tl + 9); if (tl->track != 0xc0) return (tl); tl = (struct sony_tracklist *)((char *)tl + 9); return (tl); } static int read_toc(struct scd_softc *sc) { struct sony_toc toc; struct sony_tracklist *tl; int rc, i, j; u_long first, last; rc = send_cmd(sc, CMD_GET_TOC, 1, 1); if (rc < 0) return (rc); if (rc > sizeof(toc)) { device_printf(sc->dev, "program error: toc too large (%d)\n", rc); return (EIO); } if (get_result(sc, rc, (u_char *)&toc) != 0) return (EIO); XDEBUG(sc, 1, "toc read. 
len = %d, sizeof(toc) = %d\n", rc, sizeof(toc)); tl = get_tl(&toc, rc); first = msf2hsg(tl->start_msf); last = msf2hsg(toc.lead_out_start_msf); sc->data.blksize = SCDBLKSIZE; sc->data.disksize = last*sc->data.blksize/DEV_BSIZE; XDEBUG(sc, 1, "firstsector = %ld, lastsector = %ld", first, last); sc->data.first_track = bcd2bin(toc.first_track); sc->data.last_track = bcd2bin(toc.last_track); if (sc->data.last_track > (MAX_TRACKS-2)) sc->data.last_track = MAX_TRACKS-2; for (j = 0, i = sc->data.first_track; i <= sc->data.last_track; i++, j++) { sc->data.toc[i].adr = tl[j].adr; sc->data.toc[i].ctl = tl[j].ctl; /* for xcdplayer */ bcopy(tl[j].start_msf, sc->data.toc[i].start_msf, 3); #ifdef SCD_DEBUG if (scd_debuglevel > 0) { if ((j % 3) == 0) { printf("\n"); device_printf(sc->dev, "tracks "); } printf("[%03d: %2d %2d %2d] ", i, bcd2bin(sc->data.toc[i].start_msf[0]), bcd2bin(sc->data.toc[i].start_msf[1]), bcd2bin(sc->data.toc[i].start_msf[2])); } #endif } bcopy(toc.lead_out_start_msf, sc->data.toc[sc->data.last_track+1].start_msf, 3); #ifdef SCD_DEBUG if (scd_debuglevel > 0) { i = sc->data.last_track+1; printf("[END: %2d %2d %2d]\n", bcd2bin(sc->data.toc[i].start_msf[0]), bcd2bin(sc->data.toc[i].start_msf[1]), bcd2bin(sc->data.toc[i].start_msf[2])); } #endif sc->data.flags |= SCDTOC; return (0); } static void init_drive(struct scd_softc *sc) { int rc; rc = send_cmd(sc, CMD_SET_DRIVE_PARAM, 2, 0x05, 0x03 | ((sc->data.double_speed) ? 0x04: 0)); if (rc != 0) device_printf(sc->dev, "Unable to set parameters. Errcode = 0x%x\n", rc); } /* Returns 0 or errno */ static int get_result(struct scd_softc *sc, int result_len, u_char *result) { int loop_index = 2; /* send_cmd() reads two bytes ... */ XDEBUG(sc, 1, "DEBUG: get_result: bytes=%d\n", result_len); while (result_len-- > 0) { if (loop_index++ >= 10) { loop_index = 1; if (waitfor_status_bits(sc, SBIT_RESULT_READY, 0)) return (EIO); SCD_WRITE(sc, OREG_CONTROL, CBIT_RESULT_READY_CLEAR); } if (result) *result++ = SCD_READ(sc, IREG_RESULT); else (void)SCD_READ(sc, IREG_RESULT); } return (0); } /* Returns -0x100 for timeout, -(drive error code) OR number of result bytes */ static int send_cmd(struct scd_softc *sc, u_char cmd, u_int nargs, ...) 
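/*
 * Given the return convention above, callers must switch on the sign of
 * the return value.  A minimal caller sketch (hypothetical, but following
 * the same pattern as scd_probe() and read_toc() above; buf is an
 * illustrative result buffer):
 *
 *	rc = send_cmd(sc, CMD_GET_DRIVE_CONFIG, 0);
 *	if (rc < 0)
 *		print_error(sc, rc);		(timeout or drive error)
 *	else
 *		error = get_result(sc, rc, buf);	(rc result bytes)
 */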
{ va_list ap; u_char c; int rc; int i; if (waitfor_status_bits(sc, 0, SBIT_BUSY)) { device_printf(sc->dev, "drive busy\n"); return (-0x100); } XDEBUG(sc, 1, "DEBUG: send_cmd: cmd=0x%x nargs=%d", cmd, nargs); SCD_WRITE(sc, OREG_CONTROL, CBIT_RESULT_READY_CLEAR); SCD_WRITE(sc, OREG_CONTROL, CBIT_RPARAM_CLEAR); for (i = 0; i < 100; i++) if (FSTATUS_BIT(sc, FBIT_WPARAM_READY)) break; if (!FSTATUS_BIT(sc, FBIT_WPARAM_READY)) { XDEBUG(sc, 1, "\nwparam timeout\n"); return (-EIO); } va_start(ap, nargs); for (i = 0; i < nargs; i++) { c = (u_char)va_arg(ap, int); SCD_WRITE(sc, OREG_WPARAMS, c); XDEBUG(sc, 1, ",{0x%x}", c); } va_end(ap); XDEBUG(sc, 1, "\n"); SCD_WRITE(sc, OREG_COMMAND, cmd); rc = waitfor_status_bits(sc, SBIT_RESULT_READY, SBIT_BUSY); if (rc) return (-0x100); SCD_WRITE(sc, OREG_CONTROL, CBIT_RESULT_READY_CLEAR); switch ((rc = SCD_READ(sc, IREG_RESULT)) & 0xf0) { case 0x20: rc = SCD_READ(sc, IREG_RESULT); /* FALLTHROUGH */ case 0x50: XDEBUG(sc, 1, "DEBUG: send_cmd: drive_error=0x%x\n", rc); return (-rc); case 0x00: default: rc = SCD_READ(sc, IREG_RESULT); XDEBUG(sc, 1, "DEBUG: send_cmd: result_len=%d\n", rc); return (rc); } } static void print_error(struct scd_softc *sc, int errcode) { switch (errcode) { case -ERR_CD_NOT_LOADED: device_printf(sc->dev, "door is open\n"); break; case -ERR_NO_CD_INSIDE: device_printf(sc->dev, "no cd inside\n"); break; default: if (errcode == -0x100 || errcode > 0) device_printf(sc->dev, "device timeout\n"); else device_printf(sc->dev, "unexpected error 0x%x\n", -errcode); break; } } /* Returns 0 or errno value */ static int waitfor_status_bits(struct scd_softc *sc, int bits_set, int bits_clear) { u_int flags = sc->data.flags; u_int max_loop; u_char c = 0; if (flags & SCDPROBING) { max_loop = 0; while (max_loop++ < 1000) { c = SCD_READ(sc, IREG_STATUS); if (c == 0xff) return (EIO); if (c & SBIT_ATTENTION) { process_attention(sc); continue; } if ((c & bits_set) == bits_set && (c & bits_clear) == 0) { break; } DELAY(10000); } } else { max_loop = 100; while (max_loop-- > 0) { c = SCD_READ(sc, IREG_STATUS); if (c & SBIT_ATTENTION) { process_attention(sc); continue; } if ((c & bits_set) == bits_set && (c & bits_clear) == 0) { break; } SCD_UNLOCK(sc); pause("waitfor", hz/10); SCD_LOCK(sc); } } if ((c & bits_set) == bits_set && (c & bits_clear) == 0) { return (0); } #ifdef SCD_DEBUG if (scd_debuglevel > 0) device_printf(sc->dev, "DEBUG: waitfor: TIMEOUT (0x%x,(0x%x,0x%x))\n", c, bits_set, bits_clear); else #endif device_printf(sc->dev, "timeout.\n"); return (EIO); } /* these two routines for xcdplayer - "borrowed" from mcd.c */ static int scd_toc_header (struct scd_softc *sc, struct ioc_toc_header* th) { int rc; if (!(sc->data.flags & SCDTOC) && (rc = read_toc(sc)) != 0) { print_error(sc, rc); return (EIO); } th->starting_track = sc->data.first_track; th->ending_track = sc->data.last_track; th->len = 0; /* not used */ return (0); } static int scd_toc_entrys (struct scd_softc *sc, struct ioc_read_toc_entry *te) { struct cd_toc_entry toc_entry; int rc, i, len = te->data_len; if (!(sc->data.flags & SCDTOC) && (rc = read_toc(sc)) != 0) { print_error(sc, rc); return (EIO); } /* find the toc to copy*/ i = te->starting_track; if (i == SCD_LASTPLUS1) i = sc->data.last_track + 1; /* verify starting track */ if (i < sc->data.first_track || i > sc->data.last_track+1) return (EINVAL); /* valid length ? 
*/ if (len < sizeof(struct cd_toc_entry) || (len % sizeof(struct cd_toc_entry)) != 0) return (EINVAL); /* copy the toc data */ toc_entry.control = sc->data.toc[i].ctl; toc_entry.addr_type = te->address_format; toc_entry.track = i; if (te->address_format == CD_MSF_FORMAT) { toc_entry.addr.msf.unused = 0; toc_entry.addr.msf.minute = bcd2bin(sc->data.toc[i].start_msf[0]); toc_entry.addr.msf.second = bcd2bin(sc->data.toc[i].start_msf[1]); toc_entry.addr.msf.frame = bcd2bin(sc->data.toc[i].start_msf[2]); } SCD_UNLOCK(sc); /* copy the data back */ if (copyout(&toc_entry, te->data, sizeof(struct cd_toc_entry)) != 0) return (EFAULT); return (0); } static int scd_toc_entry (struct scd_softc *sc, struct ioc_read_toc_single_entry *te) { struct cd_toc_entry toc_entry; int rc, i; if (!(sc->data.flags & SCDTOC) && (rc = read_toc(sc)) != 0) { print_error(sc, rc); return (EIO); } /* find the toc to copy*/ i = te->track; if (i == SCD_LASTPLUS1) i = sc->data.last_track + 1; /* verify starting track */ if (i < sc->data.first_track || i > sc->data.last_track+1) return (EINVAL); /* copy the toc data */ toc_entry.control = sc->data.toc[i].ctl; toc_entry.addr_type = te->address_format; toc_entry.track = i; if (te->address_format == CD_MSF_FORMAT) { toc_entry.addr.msf.unused = 0; toc_entry.addr.msf.minute = bcd2bin(sc->data.toc[i].start_msf[0]); toc_entry.addr.msf.second = bcd2bin(sc->data.toc[i].start_msf[1]); toc_entry.addr.msf.frame = bcd2bin(sc->data.toc[i].start_msf[2]); } /* copy the data back */ bcopy(&toc_entry, &te->entry, sizeof(struct cd_toc_entry)); return (0); } diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c index e729360c6b63..6e93b12fbec9 100644 --- a/sys/dev/sfxge/sfxge_tx.c +++ b/sys/dev/sfxge/sfxge_tx.c @@ -1,1988 +1,1988 @@ /*- * Copyright (c) 2010-2015 Solarflare Communications Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ /* Theory of operation: * * Tx queues allocation and mapping * * One Tx queue with enabled checksum offload is allocated per Rx channel * (event queue). Also 2 Tx queues (one without checksum offload and one * with IP checksum offload only) are allocated and bound to event queue 0. * sfxge_txq_type is used as Tx queue label. * * So, event queue plus label mapping to Tx queue index is: * if event queue index is 0, TxQ-index = TxQ-label * [0..SFXGE_TXQ_NTYPES) * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1 * See sfxge_get_txq_by_label() sfxge_ev.c */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common/efx.h" #include "sfxge.h" #include "sfxge_tx.h" #define SFXGE_PARAM_TX_DPL_GET_MAX SFXGE_PARAM(tx_dpl_get_max) static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT; TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max); SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN, &sfxge_tx_dpl_get_max, 0, "Maximum number of any packets in deferred packet get-list"); #define SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \ SFXGE_PARAM(tx_dpl_get_non_tcp_max) static int sfxge_tx_dpl_get_non_tcp_max = SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT; TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max); SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN, &sfxge_tx_dpl_get_non_tcp_max, 0, "Maximum number of non-TCP packets in deferred packet get-list"); #define SFXGE_PARAM_TX_DPL_PUT_MAX SFXGE_PARAM(tx_dpl_put_max) static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT; TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max); SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN, &sfxge_tx_dpl_put_max, 0, "Maximum number of any packets in deferred packet put-list"); #define SFXGE_PARAM_TSO_FW_ASSISTED SFXGE_PARAM(tso_fw_assisted) static int sfxge_tso_fw_assisted = (SFXGE_FATSOV1 | SFXGE_FATSOV2); TUNABLE_INT(SFXGE_PARAM_TSO_FW_ASSISTED, &sfxge_tso_fw_assisted); SYSCTL_INT(_hw_sfxge, OID_AUTO, tso_fw_assisted, CTLFLAG_RDTUN, &sfxge_tso_fw_assisted, 0, "Bitmask of FW-assisted TSO allowed to use if supported by NIC firmware"); static const struct { const char *name; size_t offset; } sfxge_tx_stats[] = { #define SFXGE_TX_STAT(name, member) \ { #name, offsetof(struct sfxge_txq, member) } SFXGE_TX_STAT(tso_bursts, tso_bursts), SFXGE_TX_STAT(tso_packets, tso_packets), SFXGE_TX_STAT(tso_long_headers, tso_long_headers), SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many), SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc), SFXGE_TX_STAT(tx_collapses, collapses), SFXGE_TX_STAT(tx_drops, drops), SFXGE_TX_STAT(tx_get_overflow, get_overflow), SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow), SFXGE_TX_STAT(tx_put_overflow, put_overflow), SFXGE_TX_STAT(tx_netdown_drops, netdown_drops), }; /* Forward declarations. */ static void sfxge_tx_qdpl_service(struct sfxge_txq *txq); static void sfxge_tx_qlist_post(struct sfxge_txq *txq); static void sfxge_tx_qunblock(struct sfxge_txq *txq); static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, const bus_dma_segment_t *dma_seg, int n_dma_seg, int vlan_tagged); static int sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf) { uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ? 
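/*
 * A worked example of the event queue to Tx queue mapping described in
 * the theory of operation above, assuming SFXGE_TXQ_NTYPES == 3 (the
 * non-checksum, IP-checksum and IP+TCP/UDP-checksum queue types):
 *
 *	EvQ 0, labels 0..2  ->  TxQ 0..2	(index == label)
 *	EvQ 1               ->  TxQ 3		(3 + 1 - 1)
 *	EvQ 2               ->  TxQ 4		(3 + 2 - 1)
 *
 * i.e. every Rx channel after the first drives exactly one
 * checksum-offload Tx queue.
 */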
mbuf->m_pkthdr.ether_vtag : 0); if (this_tag == txq->hw_vlan_tci) return (0); efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), &txq->pend_desc[0]); txq->n_pend_desc = 1; txq->hw_vlan_tci = this_tag; return (1); } static inline void sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp) { KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0")); if (__predict_false(*pstmp == &txq->stmp[txq->ptr_mask])) *pstmp = &txq->stmp[0]; else (*pstmp)++; } void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq) { unsigned int completed; SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); completed = txq->completed; while (completed != txq->pending) { struct sfxge_tx_mapping *stmp; unsigned int id; id = completed++ & txq->ptr_mask; stmp = &txq->stmp[id]; if (stmp->flags & TX_BUF_UNMAP) { bus_dmamap_unload(txq->packet_dma_tag, stmp->map); if (stmp->flags & TX_BUF_MBUF) { struct mbuf *m = stmp->u.mbuf; do m = m_free(m); while (m != NULL); } else { free(stmp->u.heap_buf, M_SFXGE); } stmp->flags = 0; } } txq->completed = completed; /* Check whether we need to unblock the queue. */ mb(); if (txq->blocked) { unsigned int level; level = txq->added - txq->completed; if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) sfxge_tx_qunblock(txq); } } static unsigned int sfxge_is_mbuf_non_tcp(struct mbuf *mbuf) { /* Absence of TCP checksum flags does not mean that the packet is non-TCP, * but the flags should be set if the user wants to achieve high throughput. */ return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))); } /* * Reorder the put list and append it to the get list. */ static void sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq) { struct sfxge_tx_dpl *stdp; struct mbuf *mbuf, *get_next, **get_tailp; volatile uintptr_t *putp; uintptr_t put; unsigned int count; unsigned int non_tcp_count; SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); stdp = &txq->dpl; /* Acquire the put list. */ putp = &stdp->std_put; put = atomic_readandclear_ptr(putp); mbuf = (void *)put; if (mbuf == NULL) return; /* Reverse the put list. */ get_tailp = &mbuf->m_nextpkt; get_next = NULL; count = 0; non_tcp_count = 0; do { struct mbuf *put_next; non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf); put_next = mbuf->m_nextpkt; mbuf->m_nextpkt = get_next; get_next = mbuf; mbuf = put_next; count++; } while (mbuf != NULL); if (count > stdp->std_put_hiwat) stdp->std_put_hiwat = count; /* Append the reversed put list to the get list. */ KASSERT(*get_tailp == NULL, ("*get_tailp != NULL")); *stdp->std_getp = get_next; stdp->std_getp = get_tailp; stdp->std_get_count += count; stdp->std_get_non_tcp_count += non_tcp_count; } static void sfxge_tx_qreap(struct sfxge_txq *txq) { SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); txq->reaped = txq->completed; } static void sfxge_tx_qlist_post(struct sfxge_txq *txq) { unsigned int old_added; unsigned int block_level; unsigned int level; int rc; SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0")); KASSERT(txq->n_pend_desc <= txq->max_pkt_desc, ("txq->n_pend_desc too large")); KASSERT(!txq->blocked, ("txq->blocked")); old_added = txq->added; /* Post the fragment list. */ rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc, txq->reaped, &txq->added); KASSERT(rc == 0, ("efx_tx_qdesc_post() failed")); /* If efx_tx_qdesc_post() had to refragment, our information about * buffers to free may be associated with the wrong * descriptors. 
*/ KASSERT(txq->added - old_added == txq->n_pend_desc, ("efx_tx_qdesc_post() refragmented descriptors")); level = txq->added - txq->reaped; KASSERT(level <= txq->entries, ("overfilled TX queue")); /* Clear the fragment list. */ txq->n_pend_desc = 0; /* * Set the block level to ensure there is space to generate a * large number of descriptors for TSO. */ block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc; /* Have we reached the block level? */ if (level < block_level) return; /* Reap, and check again */ sfxge_tx_qreap(txq); level = txq->added - txq->reaped; if (level < block_level) return; txq->blocked = 1; /* * Avoid a race with completion interrupt handling that could leave * the queue blocked. */ mb(); sfxge_tx_qreap(txq); level = txq->added - txq->reaped; if (level < block_level) { mb(); txq->blocked = 0; } } static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf) { bus_dmamap_t *used_map; bus_dmamap_t map; bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG]; unsigned int id; struct sfxge_tx_mapping *stmp; efx_desc_t *desc; int n_dma_seg; int rc; int i; int eop; int vlan_tagged; KASSERT(!txq->blocked, ("txq->blocked")); if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) prefetch_read_many(mbuf->m_data); if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) { rc = EINTR; goto reject; } /* Load the packet for DMA. */ id = txq->added & txq->ptr_mask; stmp = &txq->stmp[id]; rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, mbuf, dma_seg, &n_dma_seg, 0); if (rc == EFBIG) { /* Try again. */ struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT, SFXGE_TX_MAPPING_MAX_SEG); if (new_mbuf == NULL) goto reject; ++txq->collapses; mbuf = new_mbuf; rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, mbuf, dma_seg, &n_dma_seg, 0); } if (rc != 0) goto reject; /* Make the packet visible to the hardware. */ bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE); used_map = &stmp->map; vlan_tagged = sfxge_tx_maybe_insert_tag(txq, mbuf); if (vlan_tagged) { sfxge_next_stmp(txq, &stmp); } if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) { rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg, vlan_tagged); if (rc < 0) goto reject_mapped; stmp = &txq->stmp[(rc - 1) & txq->ptr_mask]; } else { /* Add the mapping to the fragment list, and set flags * for the buffer. */ i = 0; for (;;) { desc = &txq->pend_desc[i + vlan_tagged]; eop = (i == n_dma_seg - 1); efx_tx_qdesc_dma_create(txq->common, dma_seg[i].ds_addr, dma_seg[i].ds_len, eop, desc); if (eop) break; i++; sfxge_next_stmp(txq, &stmp); } txq->n_pend_desc = n_dma_seg + vlan_tagged; } /* * If the mapping required more than one descriptor * then we need to associate the DMA map with the last * descriptor, not the first. */ if (used_map != &stmp->map) { map = stmp->map; stmp->map = *used_map; *used_map = map; } stmp->u.mbuf = mbuf; stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF; /* Post the fragment list. */ sfxge_tx_qlist_post(txq); return (0); reject_mapped: bus_dmamap_unload(txq->packet_dma_tag, *used_map); reject: /* Drop the packet on the floor. */ m_freem(mbuf); ++txq->drops; return (rc); } /* * Drain the deferred packet list into the transmit queue. 
*/ static void sfxge_tx_qdpl_drain(struct sfxge_txq *txq) { struct sfxge_softc *sc; struct sfxge_tx_dpl *stdp; struct mbuf *mbuf, *next; unsigned int count; unsigned int non_tcp_count; unsigned int pushed; int rc; SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); sc = txq->sc; stdp = &txq->dpl; pushed = txq->added; if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) { prefetch_read_many(sc->enp); prefetch_read_many(txq->common); } mbuf = stdp->std_get; count = stdp->std_get_count; non_tcp_count = stdp->std_get_non_tcp_count; if (count > stdp->std_get_hiwat) stdp->std_get_hiwat = count; while (count != 0) { KASSERT(mbuf != NULL, ("mbuf == NULL")); next = mbuf->m_nextpkt; mbuf->m_nextpkt = NULL; ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */ if (next != NULL) prefetch_read_many(next); rc = sfxge_tx_queue_mbuf(txq, mbuf); --count; non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf); mbuf = next; if (rc != 0) continue; if (txq->blocked) break; /* Push the fragments to the hardware in batches. */ if (txq->added - pushed >= SFXGE_TX_BATCH) { efx_tx_qpush(txq->common, txq->added, pushed); pushed = txq->added; } } if (count == 0) { KASSERT(mbuf == NULL, ("mbuf != NULL")); KASSERT(non_tcp_count == 0, ("inconsistent TCP/non-TCP detection")); stdp->std_get = NULL; stdp->std_get_count = 0; stdp->std_get_non_tcp_count = 0; stdp->std_getp = &stdp->std_get; } else { stdp->std_get = mbuf; stdp->std_get_count = count; stdp->std_get_non_tcp_count = non_tcp_count; } if (txq->added != pushed) efx_tx_qpush(txq->common, txq->added, pushed); KASSERT(txq->blocked || stdp->std_get_count == 0, ("queue unblocked but count is non-zero")); } #define SFXGE_TX_QDPL_PENDING(_txq) ((_txq)->dpl.std_put != 0) /* * Service the deferred packet list. * * NOTE: drops the txq mutex! */ static void sfxge_tx_qdpl_service(struct sfxge_txq *txq) { SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); do { if (SFXGE_TX_QDPL_PENDING(txq)) sfxge_tx_qdpl_swizzle(txq); if (!txq->blocked) sfxge_tx_qdpl_drain(txq); SFXGE_TXQ_UNLOCK(txq); } while (SFXGE_TX_QDPL_PENDING(txq) && SFXGE_TXQ_TRYLOCK(txq)); } /* * Put a packet on the deferred packet get-list. */ static int sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf) { struct sfxge_tx_dpl *stdp; stdp = &txq->dpl; KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); SFXGE_TXQ_LOCK_ASSERT_OWNED(txq); if (stdp->std_get_count >= stdp->std_get_max) { txq->get_overflow++; return (ENOBUFS); } if (sfxge_is_mbuf_non_tcp(mbuf)) { if (stdp->std_get_non_tcp_count >= stdp->std_get_non_tcp_max) { txq->get_non_tcp_overflow++; return (ENOBUFS); } stdp->std_get_non_tcp_count++; } *(stdp->std_getp) = mbuf; stdp->std_getp = &mbuf->m_nextpkt; stdp->std_get_count++; return (0); } /* * Put a packet on the deferred packet put-list. * * We overload the csum_data field in the mbuf to keep track of the put-list * length because there is no cheap alternative to avoid races. 
*/ static int sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf) { struct sfxge_tx_dpl *stdp; volatile uintptr_t *putp; uintptr_t old; uintptr_t new; unsigned old_len; KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); stdp = &txq->dpl; putp = &stdp->std_put; new = (uintptr_t)mbuf; do { old = *putp; if (old != 0) { struct mbuf *mp = (struct mbuf *)old; old_len = mp->m_pkthdr.csum_data; } else old_len = 0; if (old_len >= stdp->std_put_max) { atomic_add_long(&txq->put_overflow, 1); return (ENOBUFS); } mbuf->m_pkthdr.csum_data = old_len + 1; mbuf->m_nextpkt = (void *)old; } while (atomic_cmpset_ptr(putp, old, new) == 0); return (0); } /* * Called from if_transmit - tries to grab the txq lock and, if that succeeds, * appends the packet to the get-list; otherwise the packet is pushed onto the * lock-free put-list if there is space. */ static int sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m) { int rc; if (!SFXGE_LINK_UP(txq->sc)) { atomic_add_long(&txq->netdown_drops, 1); return (ENETDOWN); } /* * Try to grab the txq lock. If we are able to get the lock, * the packet will be appended to the "get list" of the deferred * packet list. Otherwise, it will be pushed on the "put list". */ if (SFXGE_TXQ_TRYLOCK(txq)) { /* First swizzle put-list to get-list to keep order */ sfxge_tx_qdpl_swizzle(txq); rc = sfxge_tx_qdpl_put_locked(txq, m); /* Try to service the list. */ sfxge_tx_qdpl_service(txq); /* Lock has been dropped. */ } else { rc = sfxge_tx_qdpl_put_unlocked(txq, m); /* * Try to grab the lock again. * * If we are able to get the lock, we need to process * the deferred packet list. If we are not able to get * the lock, another thread is processing the list. */ if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) { sfxge_tx_qdpl_service(txq); /* Lock has been dropped. */ } } SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq); return (rc); } static void sfxge_tx_qdpl_flush(struct sfxge_txq *txq) { struct sfxge_tx_dpl *stdp = &txq->dpl; struct mbuf *mbuf, *next; SFXGE_TXQ_LOCK(txq); sfxge_tx_qdpl_swizzle(txq); for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) { next = mbuf->m_nextpkt; m_freem(mbuf); } stdp->std_get = NULL; stdp->std_get_count = 0; stdp->std_get_non_tcp_count = 0; stdp->std_getp = &stdp->std_get; SFXGE_TXQ_UNLOCK(txq); } void sfxge_if_qflush(struct ifnet *ifp) { struct sfxge_softc *sc; unsigned int i; sc = ifp->if_softc; for (i = 0; i < sc->txq_count; i++) sfxge_tx_qdpl_flush(sc->txq[i]); } #if SFXGE_TX_PARSE_EARLY /* There is little space for user data in mbuf pkthdr, so we * use the l*hlen fields, which are not otherwise used by the driver, * to store header offsets. * The fields are 8-bit, but that is fine: no header may be longer than 255 bytes. 
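* * For example (hypothetical frame): a VLAN-tagged IPv4/TCP packet with no IP or TCP options parses to l2hlen = 18, l3hlen = 18 + 20 = 38 and l4hlen = 38 + 20 = 58, all of which fit in 8 bits.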
*/ #define TSO_MBUF_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0]) /* We abuse l5hlen here because PH_loc can hold only 64 bits of data */ #define TSO_MBUF_FLAGS(_mbuf) ((_mbuf)->m_pkthdr.l5hlen) #define TSO_MBUF_PACKETID(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1]) #define TSO_MBUF_SEQNUM(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.thirtytwo[1]) static void sfxge_parse_tx_packet(struct mbuf *mbuf) { struct ether_header *eh = mtod(mbuf, struct ether_header *); const struct tcphdr *th; struct tcphdr th_copy; /* Find network protocol and header */ TSO_MBUF_PROTO(mbuf) = eh->ether_type; if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_VLAN)) { struct ether_vlan_header *veh = mtod(mbuf, struct ether_vlan_header *); TSO_MBUF_PROTO(mbuf) = veh->evl_proto; mbuf->m_pkthdr.l2hlen = sizeof(*veh); } else { mbuf->m_pkthdr.l2hlen = sizeof(*eh); } /* Find TCP header */ if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IP)) { const struct ip *iph = (const struct ip *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen); KASSERT(iph->ip_p == IPPROTO_TCP, ("TSO required on non-TCP packet")); mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + 4 * iph->ip_hl; TSO_MBUF_PACKETID(mbuf) = iph->ip_id; } else { KASSERT(TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IPV6), ("TSO required on non-IP packet")); KASSERT(((const struct ip6_hdr *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen))->ip6_nxt == IPPROTO_TCP, ("TSO required on non-TCP packet")); mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + sizeof(struct ip6_hdr); TSO_MBUF_PACKETID(mbuf) = 0; } KASSERT(mbuf->m_len >= mbuf->m_pkthdr.l3hlen, ("network header is fragmented in mbuf")); /* We need TCP header including flags (window is the next) */ if (mbuf->m_len < mbuf->m_pkthdr.l3hlen + offsetof(struct tcphdr, th_win)) { m_copydata(mbuf, mbuf->m_pkthdr.l3hlen, sizeof(th_copy), (caddr_t)&th_copy); th = &th_copy; } else { th = (const struct tcphdr *)mtodo(mbuf, mbuf->m_pkthdr.l3hlen); } mbuf->m_pkthdr.l4hlen = mbuf->m_pkthdr.l3hlen + 4 * th->th_off; TSO_MBUF_SEQNUM(mbuf) = ntohl(th->th_seq); /* These flags must not be duplicated */ /* * RST should not be duplicated as well, but FreeBSD kernel * generates TSO packets with RST flag. So, do not assert * its absence. */ KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), ("incompatible TCP flag 0x%x on TSO packet", th->th_flags & (TH_URG | TH_SYN))); TSO_MBUF_FLAGS(mbuf) = th->th_flags; } #endif /* * TX start -- called by the stack. */ int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m) { struct sfxge_softc *sc; struct sfxge_txq *txq; int rc; sc = (struct sfxge_softc *)ifp->if_softc; /* * Transmit may be called when interface is up from the kernel * point of view, but not yet up (in progress) from the driver * point of view. I.e. link aggregation bring up. * Transmit may be called when interface is up from the driver * point of view, but already down from the kernel point of * view. I.e. Rx when interface shutdown is in progress. */ KASSERT((ifp->if_flags & IFF_UP) || (sc->if_flags & IFF_UP), ("interface not up")); /* Pick the desired transmit queue. 
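Packets that need TCP/UDP checksum offload or TSO are spread over the per-event-queue Tx queues by flowid, e.g. (illustrative) a flowid of 0x1234abcd selects rx_indir_table[0x1234abcd % SFXGE_RX_SCALE_MAX]; IP-only checksum and no-offload traffic each use one dedicated queue.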
*/ if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO)) { int index = 0; /* check if flowid is set */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { uint32_t hash = m->m_pkthdr.flowid; index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX]; } #if SFXGE_TX_PARSE_EARLY if (m->m_pkthdr.csum_flags & CSUM_TSO) sfxge_parse_tx_packet(m); #endif txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index]; } else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) { txq = sc->txq[SFXGE_TXQ_IP_CKSUM]; } else { txq = sc->txq[SFXGE_TXQ_NON_CKSUM]; } rc = sfxge_tx_packet_add(txq, m); if (rc != 0) m_freem(m); return (rc); } /* * Software "TSO". Not quite as good as doing it in hardware, but * still faster than segmenting in the stack. */ struct sfxge_tso_state { /* Output position */ unsigned out_len; /* Remaining length in current segment */ unsigned seqnum; /* Current sequence number */ unsigned packet_space; /* Remaining space in current packet */ unsigned segs_space; /* Remaining number of DMA segments for the packet (FATSOv2 only) */ /* Input position */ uint64_t dma_addr; /* DMA address of current position */ unsigned in_len; /* Remaining length in current mbuf */ const struct mbuf *mbuf; /* Input mbuf (head of chain) */ u_short protocol; /* Network protocol (after VLAN decap) */ ssize_t nh_off; /* Offset of network header */ ssize_t tcph_off; /* Offset of TCP header */ unsigned header_len; /* Number of bytes of header */ unsigned seg_size; /* TCP segment size */ int fw_assisted; /* Use FW-assisted TSO */ u_short packet_id; /* IPv4 packet ID from the original packet */ uint8_t tcp_flags; /* TCP flags */ efx_desc_t header_desc; /* Precomputed header descriptor for * FW-assisted TSO */ }; #if !SFXGE_TX_PARSE_EARLY static const struct ip *tso_iph(const struct sfxge_tso_state *tso) { KASSERT(tso->protocol == htons(ETHERTYPE_IP), ("tso_iph() in non-IPv4 state")); return (const struct ip *)(tso->mbuf->m_data + tso->nh_off); } static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso) { KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), ("tso_ip6h() in non-IPv6 state")); return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off); } static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso) { return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off); } #endif /* Size of preallocated TSO header buffers. Larger blocks must be * allocated from the heap. */ #define TSOH_STD_SIZE 128 /* At most half the descriptors in the queue at any time will refer to * a TSO header buffer, since they must always be followed by a * payload descriptor referring to an mbuf. 
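* * Worked example, assuming 4 KiB pages: TSOH_PER_PAGE = 4096 / 128 = 32, so a 1024-entry queue needs TSOH_COUNT = 512 header buffers and TSOH_PAGE_COUNT = howmany(512, 32) = 16 preallocated pages.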
*/ #define TSOH_COUNT(_txq_entries) ((_txq_entries) / 2u) #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) #define TSOH_PAGE_COUNT(_txq_entries) \ - ((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE) + howmany(TSOH_COUNT(_txq_entries), TSOH_PER_PAGE) static int tso_init(struct sfxge_txq *txq) { struct sfxge_softc *sc = txq->sc; unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries); int i, rc; /* Allocate TSO header buffers */ txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]), M_SFXGE, M_WAITOK); for (i = 0; i < tsoh_page_count; i++) { rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]); if (rc != 0) goto fail; } return (0); fail: while (i-- > 0) sfxge_dma_free(&txq->tsoh_buffer[i]); free(txq->tsoh_buffer, M_SFXGE); txq->tsoh_buffer = NULL; return (rc); } static void tso_fini(struct sfxge_txq *txq) { int i; if (txq->tsoh_buffer != NULL) { for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++) sfxge_dma_free(&txq->tsoh_buffer[i]); free(txq->tsoh_buffer, M_SFXGE); } } static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso, const bus_dma_segment_t *hdr_dma_seg, struct mbuf *mbuf) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp); #if !SFXGE_TX_PARSE_EARLY struct ether_header *eh = mtod(mbuf, struct ether_header *); const struct tcphdr *th; struct tcphdr th_copy; #endif tso->fw_assisted = txq->tso_fw_assisted; tso->mbuf = mbuf; /* Find network protocol and header */ #if !SFXGE_TX_PARSE_EARLY tso->protocol = eh->ether_type; if (tso->protocol == htons(ETHERTYPE_VLAN)) { struct ether_vlan_header *veh = mtod(mbuf, struct ether_vlan_header *); tso->protocol = veh->evl_proto; tso->nh_off = sizeof(*veh); } else { tso->nh_off = sizeof(*eh); } #else tso->protocol = TSO_MBUF_PROTO(mbuf); tso->nh_off = mbuf->m_pkthdr.l2hlen; tso->tcph_off = mbuf->m_pkthdr.l3hlen; tso->packet_id = TSO_MBUF_PACKETID(mbuf); #endif #if !SFXGE_TX_PARSE_EARLY /* Find TCP header */ if (tso->protocol == htons(ETHERTYPE_IP)) { KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP, ("TSO required on non-TCP packet")); tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl; tso->packet_id = tso_iph(tso)->ip_id; } else { KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), ("TSO required on non-IP packet")); KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP, ("TSO required on non-TCP packet")); tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr); tso->packet_id = 0; } #endif if (tso->fw_assisted && __predict_false(tso->tcph_off > encp->enc_tx_tso_tcp_header_offset_limit)) { tso->fw_assisted = 0; } #if !SFXGE_TX_PARSE_EARLY KASSERT(mbuf->m_len >= tso->tcph_off, ("network header is fragmented in mbuf")); /* We need TCP header including flags (window is the next) */ if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) { m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy), (caddr_t)&th_copy); th = &th_copy; } else { th = tso_tcph(tso); } tso->header_len = tso->tcph_off + 4 * th->th_off; #else tso->header_len = mbuf->m_pkthdr.l4hlen; #endif tso->seg_size = mbuf->m_pkthdr.tso_segsz; #if !SFXGE_TX_PARSE_EARLY tso->seqnum = ntohl(th->th_seq); /* These flags must not be duplicated */ /* * RST should not be duplicated as well, but FreeBSD kernel * generates TSO packets with RST flag. So, do not assert * its absence. 
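* (A SYN or URG flag would be replicated into the header of every generated segment, which is never valid for a segmented stream.)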
*/ KASSERT(!(th->th_flags & (TH_URG | TH_SYN)), ("incompatible TCP flag 0x%x on TSO packet", th->th_flags & (TH_URG | TH_SYN))); tso->tcp_flags = th->th_flags; #else tso->seqnum = TSO_MBUF_SEQNUM(mbuf); tso->tcp_flags = TSO_MBUF_FLAGS(mbuf); #endif tso->out_len = mbuf->m_pkthdr.len - tso->header_len; if (tso->fw_assisted) { if (hdr_dma_seg->ds_len >= tso->header_len) efx_tx_qdesc_dma_create(txq->common, hdr_dma_seg->ds_addr, tso->header_len, B_FALSE, &tso->header_desc); else tso->fw_assisted = 0; } } /* * tso_fill_packet_with_fragment - form descriptors for the current fragment * * Form descriptors for the current fragment, until we reach the end * of the fragment or end-of-packet. */ static void tso_fill_packet_with_fragment(struct sfxge_txq *txq, struct sfxge_tso_state *tso) { efx_desc_t *desc; int n; uint64_t dma_addr = tso->dma_addr; boolean_t eop; if (tso->in_len == 0 || tso->packet_space == 0) return; KASSERT(tso->in_len > 0, ("TSO input length went negative")); KASSERT(tso->packet_space > 0, ("TSO packet space went negative")); if (tso->fw_assisted & SFXGE_FATSOV2) { n = tso->in_len; tso->out_len -= n; tso->seqnum += n; tso->in_len = 0; if (n < tso->packet_space) { tso->packet_space -= n; tso->segs_space--; } else { tso->packet_space = tso->seg_size - (n - tso->packet_space) % tso->seg_size; tso->segs_space = EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1 - (tso->packet_space != tso->seg_size); } } else { n = min(tso->in_len, tso->packet_space); tso->packet_space -= n; tso->out_len -= n; tso->dma_addr += n; tso->in_len -= n; } /* * It is OK to use binary OR below to avoid extra branching * since all conditions can always be evaluated safely. */ eop = (tso->out_len == 0) | (tso->packet_space == 0) | (tso->segs_space == 0); desc = &txq->pend_desc[txq->n_pend_desc++]; efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc); } /* Callback from bus_dmamap_load() for long TSO headers. */ static void tso_map_long_header(void *dma_addr_ret, bus_dma_segment_t *segs, int nseg, int error) { *(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) && __predict_true(nseg == 1)) ? segs->ds_addr : 0); } /* * tso_start_new_packet - generate a new header and prepare for the new packet * * Generate a new header and prepare for the new packet. Return 0 on * success, or an error code if the header could not be allocated. 
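* * Per-packet descriptor layout produced below (illustrative): FATSOv2 emits two TSO option descriptors followed by the header DMA descriptor; FATSOv1 emits one TSO option descriptor followed by the header; software TSO emits a single DMA descriptor for a driver-built header copy. Payload DMA descriptors follow in all cases.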
*/ static int tso_start_new_packet(struct sfxge_txq *txq, struct sfxge_tso_state *tso, unsigned int *idp) { unsigned int id = *idp; struct tcphdr *tsoh_th; unsigned ip_length; caddr_t header; uint64_t dma_addr; bus_dmamap_t map; efx_desc_t *desc; int rc; if (tso->fw_assisted) { if (tso->fw_assisted & SFXGE_FATSOV2) { /* Add 2 FATSOv2 option descriptors */ desc = &txq->pend_desc[txq->n_pend_desc]; efx_tx_qdesc_tso2_create(txq->common, tso->packet_id, tso->seqnum, tso->seg_size, desc, EFX_TX_FATSOV2_OPT_NDESCS); desc += EFX_TX_FATSOV2_OPT_NDESCS; txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS; KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask; tso->segs_space = EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1; } else { uint8_t tcp_flags = tso->tcp_flags; if (tso->out_len > tso->seg_size) tcp_flags &= ~(TH_FIN | TH_PUSH); /* Add FATSOv1 option descriptor */ desc = &txq->pend_desc[txq->n_pend_desc++]; efx_tx_qdesc_tso_create(txq->common, tso->packet_id, tso->seqnum, tcp_flags, desc++); KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); id = (id + 1) & txq->ptr_mask; tso->seqnum += tso->seg_size; tso->segs_space = UINT_MAX; } /* Header DMA descriptor */ *desc = tso->header_desc; txq->n_pend_desc++; KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); id = (id + 1) & txq->ptr_mask; } else { /* Allocate a DMA-mapped header buffer. */ if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) { unsigned int page_index = (id / 2) / TSOH_PER_PAGE; unsigned int buf_index = (id / 2) % TSOH_PER_PAGE; header = (txq->tsoh_buffer[page_index].esm_base + buf_index * TSOH_STD_SIZE); dma_addr = (txq->tsoh_buffer[page_index].esm_addr + buf_index * TSOH_STD_SIZE); map = txq->tsoh_buffer[page_index].esm_map; KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); } else { struct sfxge_tx_mapping *stmp = &txq->stmp[id]; /* We cannot use bus_dmamem_alloc() as that may sleep */ header = malloc(tso->header_len, M_SFXGE, M_NOWAIT); if (__predict_false(!header)) return (ENOMEM); rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map, header, tso->header_len, tso_map_long_header, &dma_addr, BUS_DMA_NOWAIT); if (__predict_false(dma_addr == 0)) { if (rc == 0) { /* Succeeded but got >1 segment */ bus_dmamap_unload(txq->packet_dma_tag, stmp->map); rc = EINVAL; } free(header, M_SFXGE); return (rc); } map = stmp->map; txq->tso_long_headers++; stmp->u.heap_buf = header; stmp->flags = TX_BUF_UNMAP; } tsoh_th = (struct tcphdr *)(header + tso->tcph_off); /* Copy and update the headers. */ m_copydata(tso->mbuf, 0, tso->header_len, header); tsoh_th->th_seq = htonl(tso->seqnum); tso->seqnum += tso->seg_size; if (tso->out_len > tso->seg_size) { /* This packet will not finish the TSO burst. */ ip_length = tso->header_len - tso->nh_off + tso->seg_size; tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH); } else { /* This packet will be the last in the TSO burst. */ ip_length = tso->header_len - tso->nh_off + tso->out_len; } if (tso->protocol == htons(ETHERTYPE_IP)) { struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off); tsoh_iph->ip_len = htons(ip_length); /* XXX We should increment ip_id, but FreeBSD doesn't * currently allocate extra IDs for multiple segments. */ } else { struct ip6_hdr *tsoh_iph = (struct ip6_hdr *)(header + tso->nh_off); tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph)); } /* Make the header visible to the hardware. */ bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE); /* Form a descriptor for this header. 
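The header fragment never completes a packet, so the eop argument below is 0.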
*/ desc = &txq->pend_desc[txq->n_pend_desc++]; efx_tx_qdesc_dma_create(txq->common, dma_addr, tso->header_len, 0, desc); id = (id + 1) & txq->ptr_mask; tso->segs_space = UINT_MAX; } tso->packet_space = tso->seg_size; txq->tso_packets++; *idp = id; return (0); } static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, const bus_dma_segment_t *dma_seg, int n_dma_seg, int vlan_tagged) { struct sfxge_tso_state tso; unsigned int id; unsigned skipped = 0; tso_start(txq, &tso, dma_seg, mbuf); while (dma_seg->ds_len + skipped <= tso.header_len) { skipped += dma_seg->ds_len; --n_dma_seg; KASSERT(n_dma_seg, ("no payload found in TSO packet")); ++dma_seg; } tso.in_len = dma_seg->ds_len - (tso.header_len - skipped); tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped); id = (txq->added + vlan_tagged) & txq->ptr_mask; if (__predict_false(tso_start_new_packet(txq, &tso, &id))) return (-1); while (1) { tso_fill_packet_with_fragment(txq, &tso); /* Exactly one DMA descriptor is added */ KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0")); id = (id + 1) & txq->ptr_mask; /* Move onto the next fragment? */ if (tso.in_len == 0) { --n_dma_seg; if (n_dma_seg == 0) break; ++dma_seg; tso.in_len = dma_seg->ds_len; tso.dma_addr = dma_seg->ds_addr; } /* End of packet? */ if ((tso.packet_space == 0) | (tso.segs_space == 0)) { unsigned int n_fatso_opt_desc = (tso.fw_assisted & SFXGE_FATSOV2) ? EFX_TX_FATSOV2_OPT_NDESCS : (tso.fw_assisted & SFXGE_FATSOV1) ? 1 : 0; /* If the queue is now full due to tiny MSS, * or we can't create another header, discard * the remainder of the input mbuf but do not * roll back the work we have done. */ if (txq->n_pend_desc + n_fatso_opt_desc + 1 /* header */ + n_dma_seg > txq->max_pkt_desc) { txq->tso_pdrop_too_many++; break; } if (__predict_false(tso_start_new_packet(txq, &tso, &id))) { txq->tso_pdrop_no_rsrc++; break; } } } txq->tso_bursts++; return (id); } static void sfxge_tx_qunblock(struct sfxge_txq *txq) { struct sfxge_softc *sc; struct sfxge_evq *evq; sc = txq->sc; evq = sc->evq[txq->evq_index]; SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) return; SFXGE_TXQ_LOCK(txq); if (txq->blocked) { unsigned int level; level = txq->added - txq->completed; if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) { /* reaped must be in sync with blocked */ sfxge_tx_qreap(txq); txq->blocked = 0; } } sfxge_tx_qdpl_service(txq); /* note: lock has been dropped */ } void sfxge_tx_qflush_done(struct sfxge_txq *txq) { txq->flush_state = SFXGE_FLUSH_DONE; } static void sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index) { struct sfxge_txq *txq; struct sfxge_evq *evq; unsigned int count; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); txq = sc->txq[index]; evq = sc->evq[txq->evq_index]; SFXGE_EVQ_LOCK(evq); SFXGE_TXQ_LOCK(txq); KASSERT(txq->init_state == SFXGE_TXQ_STARTED, ("txq->init_state != SFXGE_TXQ_STARTED")); txq->init_state = SFXGE_TXQ_INITIALIZED; if (txq->flush_state != SFXGE_FLUSH_DONE) { txq->flush_state = SFXGE_FLUSH_PENDING; SFXGE_EVQ_UNLOCK(evq); SFXGE_TXQ_UNLOCK(txq); /* Flush the transmit queue. */ if (efx_tx_qflush(txq->common) != 0) { log(LOG_ERR, "%s: Flushing Tx queue %u failed\n", device_get_nameunit(sc->dev), index); txq->flush_state = SFXGE_FLUSH_DONE; } else { count = 0; do { /* Spin for 100ms. 
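Up to 20 iterations bounds the total wait at roughly two seconds before the flush is forcibly marked done.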
*/ DELAY(100000); if (txq->flush_state != SFXGE_FLUSH_PENDING) break; } while (++count < 20); } SFXGE_EVQ_LOCK(evq); SFXGE_TXQ_LOCK(txq); KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED, ("txq->flush_state == SFXGE_FLUSH_FAILED")); if (txq->flush_state != SFXGE_FLUSH_DONE) { /* Flush timeout */ log(LOG_ERR, "%s: Cannot flush Tx queue %u\n", device_get_nameunit(sc->dev), index); txq->flush_state = SFXGE_FLUSH_DONE; } } txq->blocked = 0; txq->pending = txq->added; sfxge_tx_qcomplete(txq, evq); KASSERT(txq->completed == txq->added, ("txq->completed != txq->added")); sfxge_tx_qreap(txq); KASSERT(txq->reaped == txq->completed, ("txq->reaped != txq->completed")); txq->added = 0; txq->pending = 0; txq->completed = 0; txq->reaped = 0; /* Destroy the common code transmit queue. */ efx_tx_qdestroy(txq->common); txq->common = NULL; efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, EFX_TXQ_NBUFS(sc->txq_entries)); SFXGE_EVQ_UNLOCK(evq); SFXGE_TXQ_UNLOCK(txq); } /* * Estimate maximum number of Tx descriptors required for TSO packet. * With minimum MSS and maximum mbuf length we might need more (even * than a ring-ful of descriptors), but this should not happen in * practice except due to deliberate attack. In that case we will * truncate the output at a packet boundary. */ static unsigned int sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type, unsigned int tso_fw_assisted) { /* One descriptor for every input fragment */ unsigned int max_descs = SFXGE_TX_MAPPING_MAX_SEG; unsigned int sw_tso_max_descs; unsigned int fa_tso_v1_max_descs = 0; unsigned int fa_tso_v2_max_descs = 0; /* VLAN tagging Tx option descriptor may be required */ if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled) max_descs++; if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) { /* * Plus header and payload descriptor for each output segment. * Minus one since header fragment is already counted. * Even if FATSO is used, we should be ready to fallback * to do it in the driver. */ sw_tso_max_descs = SFXGE_TSO_MAX_SEGS * 2 - 1; /* FW assisted TSOv1 requires one more descriptor per segment * in comparison to SW TSO */ if (tso_fw_assisted & SFXGE_FATSOV1) fa_tso_v1_max_descs = sw_tso_max_descs + SFXGE_TSO_MAX_SEGS; /* FW assisted TSOv2 requires 3 (2 FATSO plus header) extra * descriptors per superframe limited by number of DMA fetches * per packet. The first packet header is already counted. */ if (tso_fw_assisted & SFXGE_FATSOV2) { fa_tso_v2_max_descs = howmany(SFXGE_TX_MAPPING_MAX_SEG, EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1) * (EFX_TX_FATSOV2_OPT_NDESCS + 1) - 1; } max_descs += MAX(sw_tso_max_descs, MAX(fa_tso_v1_max_descs, fa_tso_v2_max_descs)); } return (max_descs); } static int sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index) { struct sfxge_txq *txq; efsys_mem_t *esmp; uint16_t flags; unsigned int tso_fw_assisted; struct sfxge_evq *evq; unsigned int desc_index; int rc; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); txq = sc->txq[index]; esmp = &txq->mem; evq = sc->evq[txq->evq_index]; KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, ("txq->init_state != SFXGE_TXQ_INITIALIZED")); KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq->init_state != SFXGE_EVQ_STARTED")); /* Program the buffer table. */ if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp, EFX_TXQ_NBUFS(sc->txq_entries))) != 0) return (rc); /* Determine the kind of queue we are creating. 
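The switch below maps NON_CKSUM to no offload flags, IP_CKSUM to EFX_TXQ_CKSUM_IPV4, and IP_TCP_UDP_CKSUM to EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP, plus EFX_TXQ_FATSOV2 when FW-assisted TSOv2 is enabled.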
*/ tso_fw_assisted = 0; switch (txq->type) { case SFXGE_TXQ_NON_CKSUM: flags = 0; break; case SFXGE_TXQ_IP_CKSUM: flags = EFX_TXQ_CKSUM_IPV4; break; case SFXGE_TXQ_IP_TCP_UDP_CKSUM: flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP; tso_fw_assisted = sc->tso_fw_assisted; if (tso_fw_assisted & SFXGE_FATSOV2) flags |= EFX_TXQ_FATSOV2; break; default: KASSERT(0, ("Impossible TX queue")); flags = 0; break; } /* Create the common code transmit queue. */ if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp, sc->txq_entries, txq->buf_base_id, flags, evq->common, &txq->common, &desc_index)) != 0) { /* Retry if no FATSOv2 resources, otherwise fail */ if ((rc != ENOSPC) || (~flags & EFX_TXQ_FATSOV2)) goto fail; /* Looks like all FATSOv2 contexts are used */ flags &= ~EFX_TXQ_FATSOV2; tso_fw_assisted &= ~SFXGE_FATSOV2; if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp, sc->txq_entries, txq->buf_base_id, flags, evq->common, &txq->common, &desc_index)) != 0) goto fail; } /* Initialise queue descriptor indexes */ txq->added = txq->pending = txq->completed = txq->reaped = desc_index; SFXGE_TXQ_LOCK(txq); /* Enable the transmit queue. */ efx_tx_qenable(txq->common); txq->init_state = SFXGE_TXQ_STARTED; txq->flush_state = SFXGE_FLUSH_REQUIRED; txq->tso_fw_assisted = tso_fw_assisted; txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type, tso_fw_assisted); SFXGE_TXQ_UNLOCK(txq); return (0); fail: efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, EFX_TXQ_NBUFS(sc->txq_entries)); return (rc); } void sfxge_tx_stop(struct sfxge_softc *sc) { int index; index = sc->txq_count; while (--index >= 0) sfxge_tx_qstop(sc, index); /* Tear down the transmit module */ efx_tx_fini(sc->enp); } int sfxge_tx_start(struct sfxge_softc *sc) { int index; int rc; /* Initialize the common code transmit module. */ if ((rc = efx_tx_init(sc->enp)) != 0) return (rc); for (index = 0; index < sc->txq_count; index++) { if ((rc = sfxge_tx_qstart(sc, index)) != 0) goto fail; } return (0); fail: while (--index >= 0) sfxge_tx_qstop(sc, index); efx_tx_fini(sc->enp); return (rc); } static int sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev); struct sysctl_oid *stat_node; unsigned int id; stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, "stats", CTLFLAG_RD, NULL, "Tx queue statistics"); if (stat_node == NULL) return (ENOMEM); for (id = 0; id < nitems(sfxge_tx_stats); id++) { SYSCTL_ADD_ULONG( ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO, sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS, (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset), ""); } return (0); } /** * Destroy a transmit queue. */ static void sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index) { struct sfxge_txq *txq; unsigned int nmaps; txq = sc->txq[index]; KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, ("txq->init_state != SFXGE_TXQ_INITIALIZED")); if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) tso_fini(txq); /* Free the context arrays. */ free(txq->pend_desc, M_SFXGE); nmaps = sc->txq_entries; while (nmaps-- != 0) bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); free(txq->stmp, M_SFXGE); /* Release DMA memory mapping. 
*/ sfxge_dma_free(&txq->mem); sc->txq[index] = NULL; SFXGE_TXQ_LOCK_DESTROY(txq); free(txq, M_SFXGE); } static int sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index, enum sfxge_txq_type type, unsigned int evq_index) { char name[16]; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid *txq_node; struct sfxge_txq *txq; struct sfxge_evq *evq; struct sfxge_tx_dpl *stdp; struct sysctl_oid *dpl_node; efsys_mem_t *esmp; unsigned int nmaps; int rc; txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK); txq->sc = sc; txq->entries = sc->txq_entries; txq->ptr_mask = txq->entries - 1; sc->txq[txq_index] = txq; esmp = &txq->mem; evq = sc->evq[evq_index]; /* Allocate and zero DMA space for the descriptor ring. */ if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0) return (rc); /* Allocate buffer table entries. */ sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries), &txq->buf_base_id); /* Create a DMA tag for packet mappings. */ if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000, MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL, NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL, &txq->packet_dma_tag) != 0) { device_printf(sc->dev, "Couldn't allocate txq DMA tag\n"); rc = ENOMEM; goto fail; } /* Allocate pending descriptor array for batching writes. */ txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries, M_SFXGE, M_ZERO | M_WAITOK); /* Allocate and initialise mbuf DMA mapping array. */ txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries, M_SFXGE, M_ZERO | M_WAITOK); for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) { rc = bus_dmamap_create(txq->packet_dma_tag, 0, &txq->stmp[nmaps].map); if (rc != 0) goto fail2; } snprintf(name, sizeof(name), "%u", txq_index); txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node), OID_AUTO, name, CTLFLAG_RD, NULL, ""); if (txq_node == NULL) { rc = ENOMEM; goto fail_txq_node; } if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM && (rc = tso_init(txq)) != 0) goto fail3; if (sfxge_tx_dpl_get_max <= 0) { log(LOG_ERR, "%s=%d must be greater than 0", SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max); rc = EINVAL; goto fail_tx_dpl_get_max; } if (sfxge_tx_dpl_get_non_tcp_max <= 0) { log(LOG_ERR, "%s=%d must be greater than 0", SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, sfxge_tx_dpl_get_non_tcp_max); rc = EINVAL; goto fail_tx_dpl_get_max; } if (sfxge_tx_dpl_put_max < 0) { log(LOG_ERR, "%s=%d must be greater or equal to 0", SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max); rc = EINVAL; goto fail_tx_dpl_put_max; } /* Initialize the deferred packet list. 
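The limits come from the sfxge_tx_dpl_* tunables validated above; the counters are exported read-only via the per-queue "dpl" sysctl node created below (get_count, get_non_tcp_count, get_hiwat, put_hiwat).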
*/ stdp = &txq->dpl; stdp->std_put_max = sfxge_tx_dpl_put_max; stdp->std_get_max = sfxge_tx_dpl_get_max; stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max; stdp->std_getp = &stdp->std_get; SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index); dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO, "dpl", CTLFLAG_RD, NULL, "Deferred packet list statistics"); if (dpl_node == NULL) { rc = ENOMEM; goto fail_dpl_node; } SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, "get_count", CTLFLAG_RD | CTLFLAG_STATS, &stdp->std_get_count, 0, ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, "get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS, &stdp->std_get_non_tcp_count, 0, ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, "get_hiwat", CTLFLAG_RD | CTLFLAG_STATS, &stdp->std_get_hiwat, 0, ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO, "put_hiwat", CTLFLAG_RD | CTLFLAG_STATS, &stdp->std_put_hiwat, 0, ""); rc = sfxge_txq_stat_init(txq, txq_node); if (rc != 0) goto fail_txq_stat_init; txq->type = type; txq->evq_index = evq_index; txq->txq_index = txq_index; txq->init_state = SFXGE_TXQ_INITIALIZED; txq->hw_vlan_tci = 0; return (0); fail_txq_stat_init: fail_dpl_node: fail_tx_dpl_put_max: fail_tx_dpl_get_max: fail3: fail_txq_node: free(txq->pend_desc, M_SFXGE); fail2: while (nmaps-- != 0) bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); free(txq->stmp, M_SFXGE); bus_dma_tag_destroy(txq->packet_dma_tag); fail: sfxge_dma_free(esmp); return (rc); } static int sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; unsigned int id = arg2; unsigned long sum; unsigned int index; /* Sum across all TX queues */ sum = 0; for (index = 0; index < sc->txq_count; index++) sum += *(unsigned long *)((caddr_t)sc->txq[index] + sfxge_tx_stats[id].offset); return (SYSCTL_OUT(req, &sum, sizeof(sum))); } static void sfxge_tx_stat_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid_list *stat_list; unsigned int id; stat_list = SYSCTL_CHILDREN(sc->stats_node); for (id = 0; id < nitems(sfxge_tx_stats); id++) { SYSCTL_ADD_PROC( ctx, stat_list, OID_AUTO, sfxge_tx_stats[id].name, CTLTYPE_ULONG|CTLFLAG_RD, sc, id, sfxge_tx_stat_handler, "LU", ""); } } uint64_t sfxge_tx_get_drops(struct sfxge_softc *sc) { unsigned int index; uint64_t drops = 0; struct sfxge_txq *txq; /* Sum across all TX queues */ for (index = 0; index < sc->txq_count; index++) { txq = sc->txq[index]; /* * In theory, txq->put_overflow and txq->netdown_drops * should use atomic operation and other should be * obtained under txq lock, but it is just statistics. 
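* A torn or stale read here only skews the reported total momentarily.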
*/ drops += txq->drops + txq->get_overflow + txq->get_non_tcp_overflow + txq->put_overflow + txq->netdown_drops + txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc; } return (drops); } void sfxge_tx_fini(struct sfxge_softc *sc) { int index; index = sc->txq_count; while (--index >= 0) sfxge_tx_qfini(sc, index); sc->txq_count = 0; } int sfxge_tx_init(struct sfxge_softc *sc) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); struct sfxge_intr *intr; int index; int rc; intr = &sc->intr; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("intr->state != SFXGE_INTR_INITIALIZED")); sc->txq_count = SFXGE_TXQ_NTYPES - 1 + sc->intr.n_alloc; sc->tso_fw_assisted = sfxge_tso_fw_assisted; if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) || (!encp->enc_fw_assisted_tso_enabled)) sc->tso_fw_assisted &= ~SFXGE_FATSOV1; if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO_V2) || (!encp->enc_fw_assisted_tso_v2_enabled)) sc->tso_fw_assisted &= ~SFXGE_FATSOV2; sc->txqs_node = SYSCTL_ADD_NODE( device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues"); if (sc->txqs_node == NULL) { rc = ENOMEM; goto fail_txq_node; } /* Initialize the transmit queues */ if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM, SFXGE_TXQ_NON_CKSUM, 0)) != 0) goto fail; if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM, SFXGE_TXQ_IP_CKSUM, 0)) != 0) goto fail2; for (index = 0; index < sc->txq_count - SFXGE_TXQ_NTYPES + 1; index++) { if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NTYPES - 1 + index, SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0) goto fail3; } sfxge_tx_stat_init(sc); return (0); fail3: while (--index >= 0) sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM); fail2: sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM); fail: fail_txq_node: sc->txq_count = 0; return (rc); } diff --git a/sys/dev/siba/siba_core.c b/sys/dev/siba/siba_core.c index 6d32531c3880..92c54d258806 100644 --- a/sys/dev/siba/siba_core.c +++ b/sys/dev/siba/siba_core.c @@ -1,2577 +1,2577 @@ /*- * Copyright (c) 2009-2010 Weongyo Jeong * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
*/ #include __FBSDID("$FreeBSD$"); /* * the Sonics Silicon Backplane driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SIBA_DEBUG enum { SIBA_DEBUG_SCAN = 0x00000001, /* scan */ SIBA_DEBUG_PMU = 0x00000002, /* PMU */ SIBA_DEBUG_PLL = 0x00000004, /* PLL */ SIBA_DEBUG_SWITCHCORE = 0x00000008, /* switching core */ SIBA_DEBUG_SPROM = 0x00000010, /* SPROM */ SIBA_DEBUG_CORE = 0x00000020, /* handling cores */ SIBA_DEBUG_ANY = 0xffffffff }; #define DPRINTF(siba, m, fmt, ...) do { \ if (siba->siba_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(siba, m, fmt, ...) do { (void) siba; } while (0) #endif #define N(a) (sizeof(a) / sizeof(a[0])) static void siba_pci_gpio(struct siba_softc *, uint32_t, int); static void siba_scan(struct siba_softc *); static int siba_switchcore(struct siba_softc *, uint8_t); static int siba_pci_switchcore_sub(struct siba_softc *, uint8_t); static uint32_t siba_scan_read_4(struct siba_softc *, uint8_t, uint16_t); static uint16_t siba_dev2chipid(struct siba_softc *); static uint16_t siba_pci_read_2(struct siba_dev_softc *, uint16_t); static uint32_t siba_pci_read_4(struct siba_dev_softc *, uint16_t); static void siba_pci_write_2(struct siba_dev_softc *, uint16_t, uint16_t); static void siba_pci_write_4(struct siba_dev_softc *, uint16_t, uint32_t); static void siba_cc_clock(struct siba_cc *, enum siba_clock); static void siba_cc_pmu_init(struct siba_cc *); static void siba_cc_power_init(struct siba_cc *); static void siba_cc_powerup_delay(struct siba_cc *); static int siba_cc_clockfreq(struct siba_cc *, int); static void siba_cc_pmu1_pll0_init(struct siba_cc *, uint32_t); static void siba_cc_pmu0_pll0_init(struct siba_cc *, uint32_t); static enum siba_clksrc siba_cc_clksrc(struct siba_cc *); static const struct siba_cc_pmu1_plltab *siba_cc_pmu1_plltab_find(uint32_t); static uint32_t siba_cc_pll_read(struct siba_cc *, uint32_t); static void siba_cc_pll_write(struct siba_cc *, uint32_t, uint32_t); static const struct siba_cc_pmu0_plltab * siba_cc_pmu0_plltab_findentry(uint32_t); static int siba_pci_sprom(struct siba_softc *, struct siba_sprom *); static int siba_sprom_read(struct siba_softc *, uint16_t *, uint16_t); static int sprom_check_crc(const uint16_t *, size_t); static uint8_t siba_crc8(uint8_t, uint8_t); static void siba_sprom_r123(struct siba_sprom *, const uint16_t *); static void siba_sprom_r45(struct siba_sprom *, const uint16_t *); static void siba_sprom_r8(struct siba_sprom *, const uint16_t *); static int8_t siba_sprom_r123_antgain(uint8_t, const uint16_t *, uint16_t, uint16_t); static uint32_t siba_tmslow_reject_bitmask(struct siba_dev_softc *); static uint32_t siba_pcicore_read_4(struct siba_pci *, uint16_t); static void siba_pcicore_write_4(struct siba_pci *, uint16_t, uint32_t); static uint32_t siba_pcie_read(struct siba_pci *, uint32_t); static void siba_pcie_write(struct siba_pci *, uint32_t, uint32_t); static void siba_pcie_mdio_write(struct siba_pci *, uint8_t, uint8_t, uint16_t); static void siba_pci_read_multi_1(struct siba_dev_softc *, void *, size_t, uint16_t); static void siba_pci_read_multi_2(struct siba_dev_softc *, void *, size_t, uint16_t); static void siba_pci_read_multi_4(struct siba_dev_softc *, void *, size_t, uint16_t); static void siba_pci_write_multi_1(struct siba_dev_softc *, const void *, size_t, uint16_t); static void 
siba_pci_write_multi_2(struct siba_dev_softc *, const void *, size_t, uint16_t); static void siba_pci_write_multi_4(struct siba_dev_softc *, const void *, size_t, uint16_t); static const char *siba_core_name(uint16_t); static void siba_pcicore_init(struct siba_pci *); static uint32_t siba_read_4_sub(struct siba_dev_softc *, uint16_t); static void siba_write_4_sub(struct siba_dev_softc *, uint16_t, uint32_t); static void siba_powerup_sub(struct siba_softc *, int); static int siba_powerdown_sub(struct siba_softc *); static int siba_dev_isup_sub(struct siba_dev_softc *); static void siba_dev_up_sub(struct siba_dev_softc *, uint32_t); static void siba_dev_down_sub(struct siba_dev_softc *, uint32_t); int siba_core_attach(struct siba_softc *); int siba_core_detach(struct siba_softc *); int siba_core_suspend(struct siba_softc *); int siba_core_resume(struct siba_softc *); uint8_t siba_getncores(device_t, uint16_t); static const struct siba_bus_ops siba_pci_ops = { .read_2 = siba_pci_read_2, .read_4 = siba_pci_read_4, .write_2 = siba_pci_write_2, .write_4 = siba_pci_write_4, .read_multi_1 = siba_pci_read_multi_1, .read_multi_2 = siba_pci_read_multi_2, .read_multi_4 = siba_pci_read_multi_4, .write_multi_1 = siba_pci_write_multi_1, .write_multi_2 = siba_pci_write_multi_2, .write_multi_4 = siba_pci_write_multi_4, }; static const struct siba_cc_pmu_res_updown siba_cc_pmu_4325_updown[] = SIBA_CC_PMU_4325_RES_UPDOWN; static const struct siba_cc_pmu_res_depend siba_cc_pmu_4325_depend[] = SIBA_CC_PMU_4325_RES_DEPEND; static const struct siba_cc_pmu_res_updown siba_cc_pmu_4328_updown[] = SIBA_CC_PMU_4328_RES_UPDOWN; static const struct siba_cc_pmu_res_depend siba_cc_pmu_4328_depend[] = SIBA_CC_PMU_4328_RES_DEPEND; static const struct siba_cc_pmu0_plltab siba_cc_pmu0_plltab[] = SIBA_CC_PMU0_PLLTAB_ENTRY; static const struct siba_cc_pmu1_plltab siba_cc_pmu1_plltab[] = SIBA_CC_PMU1_PLLTAB_ENTRY; int siba_core_attach(struct siba_softc *siba) { struct siba_cc *scc; int error; KASSERT(siba->siba_type == SIBA_TYPE_PCI, ("unsupported BUS type (%#x)", siba->siba_type)); siba->siba_ops = &siba_pci_ops; siba_pci_gpio(siba, SIBA_GPIO_CRYSTAL | SIBA_GPIO_PLL, 1); siba_scan(siba); /* XXX init PCI or PCMCIA host devices */ siba_powerup_sub(siba, 0); /* init ChipCommon */ scc = &siba->siba_cc; if (scc->scc_dev != NULL) { siba_cc_pmu_init(scc); siba_cc_power_init(scc); siba_cc_clock(scc, SIBA_CLOCK_FAST); siba_cc_powerup_delay(scc); } error = siba_pci_sprom(siba, &siba->siba_sprom); if (error) { siba_powerdown_sub(siba); return (error); } siba_pcicore_init(&siba->siba_pci); siba_powerdown_sub(siba); return (bus_generic_attach(siba->siba_dev)); } int siba_core_detach(struct siba_softc *siba) { /* detach & delete all children */ device_delete_children(siba->siba_dev); return (0); } static void siba_pci_gpio(struct siba_softc *siba, uint32_t what, int on) { uint32_t in, out; uint16_t status; if (siba->siba_type != SIBA_TYPE_PCI) return; out = pci_read_config(siba->siba_dev, SIBA_GPIO_OUT, 4); if (on == 0) { if (what & SIBA_GPIO_PLL) out |= SIBA_GPIO_PLL; if (what & SIBA_GPIO_CRYSTAL) out &= ~SIBA_GPIO_CRYSTAL; pci_write_config(siba->siba_dev, SIBA_GPIO_OUT, out, 4); pci_write_config(siba->siba_dev, SIBA_GPIO_OUT_EN, pci_read_config(siba->siba_dev, SIBA_GPIO_OUT_EN, 4) | what, 4); return; } in = pci_read_config(siba->siba_dev, SIBA_GPIO_IN, 4); if ((in & SIBA_GPIO_CRYSTAL) != SIBA_GPIO_CRYSTAL) { if (what & SIBA_GPIO_CRYSTAL) { out |= SIBA_GPIO_CRYSTAL; if (what & SIBA_GPIO_PLL) out |= SIBA_GPIO_PLL; 
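/* Note: DELAY() counts microseconds, so the writes below give the crystal 1 ms to stabilize and, after releasing PLL power-down, allow the PLL 5 ms to settle. */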
pci_write_config(siba->siba_dev, SIBA_GPIO_OUT, out, 4); pci_write_config(siba->siba_dev, SIBA_GPIO_OUT_EN, pci_read_config(siba->siba_dev, SIBA_GPIO_OUT_EN, 4) | what, 4); DELAY(1000); } if (what & SIBA_GPIO_PLL) { out &= ~SIBA_GPIO_PLL; pci_write_config(siba->siba_dev, SIBA_GPIO_OUT, out, 4); DELAY(5000); } } status = pci_read_config(siba->siba_dev, PCIR_STATUS, 2); status &= ~PCIM_STATUS_STABORT; pci_write_config(siba->siba_dev, PCIR_STATUS, status, 2); } static void siba_scan(struct siba_softc *siba) { struct siba_dev_softc *sd; uint32_t idhi, tmp; device_t child; int base, dev_i = 0, error, i, is_pcie, n_80211 = 0, n_cc = 0, n_pci = 0; KASSERT(siba->siba_type == SIBA_TYPE_PCI, ("unsupported BUS type (%#x)", siba->siba_type)); siba->siba_ndevs = 0; error = siba_switchcore(siba, 0); /* need the first core */ if (error) return; idhi = siba_scan_read_4(siba, 0, SIBA_IDHIGH); if (SIBA_IDHIGH_CORECODE(idhi) == SIBA_DEVID_CHIPCOMMON) { tmp = siba_scan_read_4(siba, 0, SIBA_CC_CHIPID); siba->siba_chipid = SIBA_CC_ID(tmp); siba->siba_chiprev = SIBA_CC_REV(tmp); siba->siba_chippkg = SIBA_CC_PKG(tmp); if (SIBA_IDHIGH_REV(idhi) >= 4) siba->siba_ndevs = SIBA_CC_NCORES(tmp); siba->siba_cc.scc_caps = siba_scan_read_4(siba, 0, SIBA_CC_CAPS); } else { if (siba->siba_type == SIBA_TYPE_PCI) { siba->siba_chipid = siba_dev2chipid(siba); siba->siba_chiprev = pci_read_config(siba->siba_dev, PCIR_REVID, 2); siba->siba_chippkg = 0; } else { siba->siba_chipid = 0x4710; siba->siba_chiprev = 0; siba->siba_chippkg = 0; } } if (siba->siba_ndevs == 0) siba->siba_ndevs = siba_getncores(siba->siba_dev, siba->siba_chipid); if (siba->siba_ndevs > SIBA_MAX_CORES) { device_printf(siba->siba_dev, "too many siba cores (max %d, found %d)\n", SIBA_MAX_CORES, siba->siba_ndevs); return; } /* look up basic information about each core/device */ for (i = 0; i < siba->siba_ndevs; i++) { error = siba_switchcore(siba, i); if (error) return; sd = &(siba->siba_devs[dev_i]); idhi = siba_scan_read_4(siba, i, SIBA_IDHIGH); sd->sd_bus = siba; sd->sd_id.sd_device = SIBA_IDHIGH_CORECODE(idhi); sd->sd_id.sd_rev = SIBA_IDHIGH_REV(idhi); sd->sd_id.sd_vendor = SIBA_IDHIGH_VENDOR(idhi); sd->sd_ops = siba->siba_ops; sd->sd_coreidx = i; DPRINTF(siba, SIBA_DEBUG_SCAN, "core %d (%s) found (cc %#x rev %#x vendor %#x)\n", i, siba_core_name(sd->sd_id.sd_device), sd->sd_id.sd_device, sd->sd_id.sd_rev, sd->sd_id.sd_vendor); switch (sd->sd_id.sd_device) { case SIBA_DEVID_CHIPCOMMON: n_cc++; if (n_cc > 1) { device_printf(siba->siba_dev, "warn: multiple ChipCommon\n"); break; } siba->siba_cc.scc_dev = sd; break; case SIBA_DEVID_80211: n_80211++; if (n_80211 > 1) { device_printf(siba->siba_dev, "warn: multiple 802.11 cores\n"); continue; } break; case SIBA_DEVID_PCI: case SIBA_DEVID_PCIE: n_pci++; error = pci_find_cap(siba->siba_dev, PCIY_EXPRESS, &base); is_pcie = (error == 0) ? 
1 : 0; if (n_pci > 1) { device_printf(siba->siba_dev, "warn: multiple PCI(E) cores\n"); break; } if (sd->sd_id.sd_device == SIBA_DEVID_PCI && is_pcie == 1) continue; if (sd->sd_id.sd_device == SIBA_DEVID_PCIE && is_pcie == 0) continue; siba->siba_pci.spc_dev = sd; break; case SIBA_DEVID_MODEM: case SIBA_DEVID_PCMCIA: break; default: device_printf(siba->siba_dev, "unsupported coreid (%s)\n", siba_core_name(sd->sd_id.sd_device)); break; } dev_i++; child = device_add_child(siba->siba_dev, NULL, -1); if (child == NULL) { device_printf(siba->siba_dev, "child attach failed\n"); continue; } device_set_ivars(child, sd); } siba->siba_ndevs = dev_i; } static int siba_switchcore(struct siba_softc *siba, uint8_t idx) { switch (siba->siba_type) { case SIBA_TYPE_PCI: return (siba_pci_switchcore_sub(siba, idx)); default: KASSERT(0 == 1, ("%s: unsupported bustype %#x", __func__, siba->siba_type)); } return (0); } static int siba_pci_switchcore_sub(struct siba_softc *siba, uint8_t idx) { #define RETRY_MAX 50 int i; uint32_t dir; dir = SIBA_REGWIN(idx); for (i = 0; i < RETRY_MAX; i++) { pci_write_config(siba->siba_dev, SIBA_BAR0, dir, 4); if (pci_read_config(siba->siba_dev, SIBA_BAR0, 4) == dir) return (0); DELAY(10); } return (ENODEV); #undef RETRY_MAX } static int siba_pci_switchcore(struct siba_softc *siba, struct siba_dev_softc *sd) { int error; DPRINTF(siba, SIBA_DEBUG_SWITCHCORE, "Switching to %s core, index %d\n", siba_core_name(sd->sd_id.sd_device), sd->sd_coreidx); error = siba_pci_switchcore_sub(siba, sd->sd_coreidx); if (error == 0) siba->siba_curdev = sd; return (error); } static uint32_t siba_scan_read_4(struct siba_softc *siba, uint8_t coreidx, uint16_t offset) { (void)coreidx; KASSERT(siba->siba_type == SIBA_TYPE_PCI, ("unsupported BUS type (%#x)", siba->siba_type)); return (SIBA_READ_4(siba, offset)); } static uint16_t siba_dev2chipid(struct siba_softc *siba) { uint16_t chipid = 0; switch (siba->siba_pci_did) { case 0x4301: chipid = 0x4301; break; case 0x4305: case 0x4306: case 0x4307: chipid = 0x4307; break; case 0x4403: chipid = 0x4402; break; case 0x4610: case 0x4611: case 0x4612: case 0x4613: case 0x4614: case 0x4615: chipid = 0x4610; break; case 0x4710: case 0x4711: case 0x4712: case 0x4713: case 0x4714: case 0x4715: chipid = 0x4710; break; case 0x4320: case 0x4321: case 0x4322: case 0x4323: case 0x4324: case 0x4325: chipid = 0x4309; break; case PCI_DEVICE_ID_BCM4401: case PCI_DEVICE_ID_BCM4401B0: case PCI_DEVICE_ID_BCM4401B1: chipid = 0x4401; break; default: device_printf(siba->siba_dev, "unknown PCI did (%d)\n", siba->siba_pci_did); } return (chipid); } /* * Earlier ChipCommon revisions have hardcoded number of cores * present dependent on the ChipCommon ID. 
*/ uint8_t siba_getncores(device_t dev, uint16_t chipid) { switch (chipid) { case 0x4401: case 0x4402: return (3); case 0x4301: case 0x4307: return (5); case 0x4306: return (6); case SIBA_CCID_SENTRY5: return (7); case 0x4310: return (8); case SIBA_CCID_BCM4710: case 0x4610: case SIBA_CCID_BCM4704: return (9); default: device_printf(dev, "unknown chipset ID %#x\n", chipid); } return (1); } static const char * siba_core_name(uint16_t coreid) { switch (coreid) { case SIBA_DEVID_CHIPCOMMON: return ("ChipCommon"); case SIBA_DEVID_ILINE20: return ("ILine 20"); case SIBA_DEVID_SDRAM: return ("SDRAM"); case SIBA_DEVID_PCI: return ("PCI"); case SIBA_DEVID_MIPS: return ("MIPS"); case SIBA_DEVID_ETHERNET: return ("Fast Ethernet"); case SIBA_DEVID_MODEM: return ("Modem"); case SIBA_DEVID_USB11_HOSTDEV: return ("USB 1.1 Hostdev"); case SIBA_DEVID_ADSL: return ("ADSL"); case SIBA_DEVID_ILINE100: return ("ILine 100"); case SIBA_DEVID_IPSEC: return ("IPSEC"); case SIBA_DEVID_PCMCIA: return ("PCMCIA"); case SIBA_DEVID_INTERNAL_MEM: return ("Internal Memory"); case SIBA_DEVID_SDRAMDDR: return ("MEMC SDRAM"); case SIBA_DEVID_EXTIF: return ("EXTIF"); case SIBA_DEVID_80211: return ("IEEE 802.11"); case SIBA_DEVID_MIPS_3302: return ("MIPS 3302"); case SIBA_DEVID_USB11_HOST: return ("USB 1.1 Host"); case SIBA_DEVID_USB11_DEV: return ("USB 1.1 Device"); case SIBA_DEVID_USB20_HOST: return ("USB 2.0 Host"); case SIBA_DEVID_USB20_DEV: return ("USB 2.0 Device"); case SIBA_DEVID_SDIO_HOST: return ("SDIO Host"); case SIBA_DEVID_ROBOSWITCH: return ("Roboswitch"); case SIBA_DEVID_PARA_ATA: return ("PATA"); case SIBA_DEVID_SATA_XORDMA: return ("SATA XOR-DMA"); case SIBA_DEVID_ETHERNET_GBIT: return ("GBit Ethernet"); case SIBA_DEVID_PCIE: return ("PCI-Express"); case SIBA_DEVID_MIMO_PHY: return ("MIMO PHY"); case SIBA_DEVID_SRAM_CTRLR: return ("SRAM Controller"); case SIBA_DEVID_MINI_MACPHY: return ("Mini MACPHY"); case SIBA_DEVID_ARM_1176: return ("ARM 1176"); case SIBA_DEVID_ARM_7TDMI: return ("ARM 7TDMI"); } return ("unknown"); } static uint16_t siba_pci_read_2(struct siba_dev_softc *sd, uint16_t offset) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) return (0xffff); return (SIBA_READ_2(siba, offset)); } static uint32_t siba_pci_read_4(struct siba_dev_softc *sd, uint16_t offset) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) return (0xffffffff); return (SIBA_READ_4(siba, offset)); } static void siba_pci_write_2(struct siba_dev_softc *sd, uint16_t offset, uint16_t value) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) return; SIBA_WRITE_2(siba, offset, value); } static void siba_pci_write_4(struct siba_dev_softc *sd, uint16_t offset, uint32_t value) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) return; SIBA_WRITE_4(siba, offset, value); } static void siba_pci_read_multi_1(struct siba_dev_softc *sd, void *buffer, size_t count, uint16_t offset) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) { memset(buffer, 0xff, count); return; } SIBA_READ_MULTI_1(siba, offset, buffer, count); } static void siba_pci_read_multi_2(struct siba_dev_softc *sd, void *buffer, size_t count, uint16_t offset) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) { memset(buffer, 0xff, count); return; 
} KASSERT(!(count & 1), ("%s:%d: fail", __func__, __LINE__)); SIBA_READ_MULTI_2(siba, offset, buffer, count >> 1); } static void siba_pci_read_multi_4(struct siba_dev_softc *sd, void *buffer, size_t count, uint16_t offset) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) { memset(buffer, 0xff, count); return; } KASSERT(!(count & 3), ("%s:%d: fail", __func__, __LINE__)); SIBA_READ_MULTI_4(siba, offset, buffer, count >> 2); } static void siba_pci_write_multi_1(struct siba_dev_softc *sd, const void *buffer, size_t count, uint16_t offset) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) return; SIBA_WRITE_MULTI_1(siba, offset, buffer, count); } static void siba_pci_write_multi_2(struct siba_dev_softc *sd, const void *buffer, size_t count, uint16_t offset) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) return; KASSERT(!(count & 1), ("%s:%d: fail", __func__, __LINE__)); SIBA_WRITE_MULTI_2(siba, offset, buffer, count >> 1); } static void siba_pci_write_multi_4(struct siba_dev_softc *sd, const void *buffer, size_t count, uint16_t offset) { struct siba_softc *siba = sd->sd_bus; if (siba->siba_curdev != sd && siba_pci_switchcore(siba, sd) != 0) return; KASSERT(!(count & 3), ("%s:%d: fail", __func__, __LINE__)); SIBA_WRITE_MULTI_4(siba, offset, buffer, count >> 2); } void siba_powerup(device_t dev, int dynamic) { struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; siba_powerup_sub(siba, dynamic); } static void siba_powerup_sub(struct siba_softc *siba, int dynamic) { siba_pci_gpio(siba, SIBA_GPIO_CRYSTAL | SIBA_GPIO_PLL, 1); siba_cc_clock(&siba->siba_cc, (dynamic != 0) ? 
SIBA_CLOCK_DYNAMIC : SIBA_CLOCK_FAST); } static void siba_cc_clock(struct siba_cc *scc, enum siba_clock clock) { struct siba_dev_softc *sd = scc->scc_dev; struct siba_softc *siba; uint32_t tmp; if (sd == NULL) return; siba = sd->sd_bus; /* * chipcommon < r6 (no dynamic clock control) * chipcommon >= r10 (unknown) */ if (sd->sd_id.sd_rev < 6 || sd->sd_id.sd_rev >= 10 || (scc->scc_caps & SIBA_CC_CAPS_PWCTL) == 0) return; switch (clock) { case SIBA_CLOCK_DYNAMIC: tmp = SIBA_CC_READ32(scc, SIBA_CC_CLKSLOW) & ~(SIBA_CC_CLKSLOW_ENXTAL | SIBA_CC_CLKSLOW_FSLOW | SIBA_CC_CLKSLOW_IPLL); if ((tmp & SIBA_CC_CLKSLOW_SRC) != SIBA_CC_CLKSLOW_SRC_CRYSTAL) tmp |= SIBA_CC_CLKSLOW_ENXTAL; SIBA_CC_WRITE32(scc, SIBA_CC_CLKSLOW, tmp); if (tmp & SIBA_CC_CLKSLOW_ENXTAL) siba_pci_gpio(siba, SIBA_GPIO_CRYSTAL, 0); break; case SIBA_CLOCK_SLOW: SIBA_CC_WRITE32(scc, SIBA_CC_CLKSLOW, SIBA_CC_READ32(scc, SIBA_CC_CLKSLOW) | SIBA_CC_CLKSLOW_FSLOW); break; case SIBA_CLOCK_FAST: /* crystal on */ siba_pci_gpio(siba, SIBA_GPIO_CRYSTAL, 1); SIBA_CC_WRITE32(scc, SIBA_CC_CLKSLOW, (SIBA_CC_READ32(scc, SIBA_CC_CLKSLOW) | SIBA_CC_CLKSLOW_IPLL) & ~SIBA_CC_CLKSLOW_FSLOW); break; default: KASSERT(0 == 1, ("%s: unsupported clock %#x", __func__, clock)); } } uint16_t siba_read_2(device_t dev, uint16_t offset) { struct siba_dev_softc *sd = device_get_ivars(dev); return (sd->sd_ops->read_2(sd, offset)); } uint32_t siba_read_4(device_t dev, uint16_t offset) { struct siba_dev_softc *sd = device_get_ivars(dev); return (siba_read_4_sub(sd, offset)); } static uint32_t siba_read_4_sub(struct siba_dev_softc *sd, uint16_t offset) { return (sd->sd_ops->read_4(sd, offset)); } void siba_write_2(device_t dev, uint16_t offset, uint16_t value) { struct siba_dev_softc *sd = device_get_ivars(dev); sd->sd_ops->write_2(sd, offset, value); } void siba_write_4(device_t dev, uint16_t offset, uint32_t value) { struct siba_dev_softc *sd = device_get_ivars(dev); return (siba_write_4_sub(sd, offset, value)); } static void siba_write_4_sub(struct siba_dev_softc *sd, uint16_t offset, uint32_t value) { sd->sd_ops->write_4(sd, offset, value); } void siba_read_multi_1(device_t dev, void *buffer, size_t count, uint16_t offset) { struct siba_dev_softc *sd = device_get_ivars(dev); sd->sd_ops->read_multi_1(sd, buffer, count, offset); } void siba_read_multi_2(device_t dev, void *buffer, size_t count, uint16_t offset) { struct siba_dev_softc *sd = device_get_ivars(dev); sd->sd_ops->read_multi_2(sd, buffer, count, offset); } void siba_read_multi_4(device_t dev, void *buffer, size_t count, uint16_t offset) { struct siba_dev_softc *sd = device_get_ivars(dev); sd->sd_ops->read_multi_4(sd, buffer, count, offset); } void siba_write_multi_1(device_t dev, const void *buffer, size_t count, uint16_t offset) { struct siba_dev_softc *sd = device_get_ivars(dev); sd->sd_ops->write_multi_1(sd, buffer, count, offset); } void siba_write_multi_2(device_t dev, const void *buffer, size_t count, uint16_t offset) { struct siba_dev_softc *sd = device_get_ivars(dev); sd->sd_ops->write_multi_2(sd, buffer, count, offset); } void siba_write_multi_4(device_t dev, const void *buffer, size_t count, uint16_t offset) { struct siba_dev_softc *sd = device_get_ivars(dev); sd->sd_ops->write_multi_4(sd, buffer, count, offset); } static void siba_cc_pmu_init(struct siba_cc *scc) { const struct siba_cc_pmu_res_updown *updown = NULL; const struct siba_cc_pmu_res_depend *depend = NULL; struct siba_dev_softc *sd = scc->scc_dev; struct siba_softc *siba = sd->sd_bus; uint32_t min = 0, max = 0, pmucap; unsigned int i, 
updown_size, depend_size; if ((scc->scc_caps & SIBA_CC_CAPS_PMU) == 0) return; pmucap = SIBA_CC_READ32(scc, SIBA_CC_PMUCAPS); scc->scc_pmu.rev = (pmucap & SIBA_CC_PMUCAPS_REV); DPRINTF(siba, SIBA_DEBUG_PMU, "PMU(r%u) found (caps %#x)\n", scc->scc_pmu.rev, pmucap); if (scc->scc_pmu.rev >= 1) { if (siba->siba_chiprev < 2 && siba->siba_chipid == 0x4325) SIBA_CC_MASK32(scc, SIBA_CC_PMUCTL, ~SIBA_CC_PMUCTL_NOILP); else SIBA_CC_SET32(scc, SIBA_CC_PMUCTL, SIBA_CC_PMUCTL_NOILP); } /* initialize PLL & PMU resources */ switch (siba->siba_chipid) { case 0x4312: siba_cc_pmu1_pll0_init(scc, 0 /* use default */); /* use the default: min = 0xcbb max = 0x7ffff */ break; case 0x4325: siba_cc_pmu1_pll0_init(scc, 0 /* use default */); updown = siba_cc_pmu_4325_updown; updown_size = N(siba_cc_pmu_4325_updown); depend = siba_cc_pmu_4325_depend; depend_size = N(siba_cc_pmu_4325_depend); min = (1 << SIBA_CC_PMU_4325_BURST) | (1 << SIBA_CC_PMU_4325_LN); if (SIBA_CC_READ32(scc, SIBA_CC_CHIPSTAT) & SIBA_CC_CHST_4325_PMUTOP_2B) min |= (1 << SIBA_CC_PMU_4325_CLBURST); max = 0xfffff; break; case 0x4328: siba_cc_pmu0_pll0_init(scc, 0 /* use default */); updown = siba_cc_pmu_4328_updown; updown_size = N(siba_cc_pmu_4328_updown); depend = siba_cc_pmu_4328_depend; depend_size = N(siba_cc_pmu_4328_depend); min = (1 << SIBA_CC_PMU_4328_EXT_SWITCH_PWM) | (1 << SIBA_CC_PMU_4328_BB_SWITCH_PWM) | (1 << SIBA_CC_PMU_4328_CRYSTAL_EN); max = 0xfffff; break; case 0x5354: siba_cc_pmu0_pll0_init(scc, 0 /* use default */); max = 0xfffff; break; default: device_printf(siba->siba_dev, "unknown chipid %#x for PLL & PMU init\n", siba->siba_chipid); } if (updown) { for (i = 0; i < updown_size; i++) { SIBA_CC_WRITE32(scc, SIBA_CC_PMU_TABSEL, updown[i].res); SIBA_CC_WRITE32(scc, SIBA_CC_PMU_UPDNTM, updown[i].updown); } } if (depend) { for (i = 0; i < depend_size; i++) { SIBA_CC_WRITE32(scc, SIBA_CC_PMU_TABSEL, depend[i].res); switch (depend[i].task) { case SIBA_CC_PMU_DEP_SET: SIBA_CC_WRITE32(scc, SIBA_CC_PMU_DEPMSK, depend[i].depend); break; case SIBA_CC_PMU_DEP_ADD: SIBA_CC_SET32(scc, SIBA_CC_PMU_DEPMSK, depend[i].depend); break; case SIBA_CC_PMU_DEP_REMOVE: SIBA_CC_MASK32(scc, SIBA_CC_PMU_DEPMSK, ~(depend[i].depend)); break; default: KASSERT(0 == 1, ("%s:%d: assertion failed", __func__, __LINE__)); } } } if (min) SIBA_CC_WRITE32(scc, SIBA_CC_PMU_MINRES, min); if (max) SIBA_CC_WRITE32(scc, SIBA_CC_PMU_MAXRES, max); } static void siba_cc_power_init(struct siba_cc *scc) { struct siba_softc *siba = scc->scc_dev->sd_bus; int maxfreq; if (siba->siba_chipid == 0x4321) { if (siba->siba_chiprev == 0) SIBA_CC_WRITE32(scc, SIBA_CC_CHIPCTL, 0x3a4); else if (siba->siba_chiprev == 1) SIBA_CC_WRITE32(scc, SIBA_CC_CHIPCTL, 0xa4); } if ((scc->scc_caps & SIBA_CC_CAPS_PWCTL) == 0) return; if (scc->scc_dev->sd_id.sd_rev >= 10) SIBA_CC_WRITE32(scc, SIBA_CC_CLKSYSCTL, (SIBA_CC_READ32(scc, SIBA_CC_CLKSYSCTL) & 0xffff) | 0x40000); else { maxfreq = siba_cc_clockfreq(scc, 1); SIBA_CC_WRITE32(scc, SIBA_CC_PLLONDELAY, (maxfreq * 150 + 999999) / 1000000); SIBA_CC_WRITE32(scc, SIBA_CC_FREFSELDELAY, (maxfreq * 15 + 999999) / 1000000); } } static void siba_cc_powerup_delay(struct siba_cc *scc) { struct siba_softc *siba = scc->scc_dev->sd_bus; int min; if (siba->siba_type != SIBA_TYPE_PCI || !(scc->scc_caps & SIBA_CC_CAPS_PWCTL)) return; min = siba_cc_clockfreq(scc, 0); scc->scc_powerup_delay = - (((SIBA_CC_READ32(scc, SIBA_CC_PLLONDELAY) + 2) * 1000000) + - (min - 1)) / min; + howmany((SIBA_CC_READ32(scc, SIBA_CC_PLLONDELAY) + 2) * 1000000, + min); } static int 
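/* * Return a bound on the slow-clock frequency in Hz: the maximum when * "max" is non-zero, otherwise the minimum. The base frequency is * implied by the clock source and divided down according to the * chipcommon revision; for example, a PCI-sourced slow clock on a * rev < 6 core works out to 25000000 / 64 ~= 390 kHz (min) up to * 34000000 / 64 ~= 531 kHz (max). */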
siba_cc_clockfreq(struct siba_cc *scc, int max) { enum siba_clksrc src; int div = 1, limit = 0; src = siba_cc_clksrc(scc); if (scc->scc_dev->sd_id.sd_rev < 6) { div = (src == SIBA_CC_CLKSRC_PCI) ? 64 : (src == SIBA_CC_CLKSRC_CRYSTAL) ? 32 : 1; KASSERT(div != 1, ("%s: unknown clock %d", __func__, src)); } else if (scc->scc_dev->sd_id.sd_rev < 10) { switch (src) { case SIBA_CC_CLKSRC_CRYSTAL: case SIBA_CC_CLKSRC_PCI: div = ((SIBA_CC_READ32(scc, SIBA_CC_CLKSLOW) >> 16) + 1) * 4; break; case SIBA_CC_CLKSRC_LOWPW: break; } } else div = ((SIBA_CC_READ32(scc, SIBA_CC_CLKSYSCTL) >> 16) + 1) * 4; switch (src) { case SIBA_CC_CLKSRC_CRYSTAL: limit = (max) ? 20200000 : 19800000; break; case SIBA_CC_CLKSRC_LOWPW: limit = (max) ? 43000 : 25000; break; case SIBA_CC_CLKSRC_PCI: limit = (max) ? 34000000 : 25000000; break; } return (limit / div); } static void siba_cc_pmu1_pll0_init(struct siba_cc *scc, uint32_t freq) { struct siba_dev_softc *sd = scc->scc_dev; struct siba_softc *siba = sd->sd_bus; const struct siba_cc_pmu1_plltab *e = NULL; uint32_t bufsth = 0, pll, pmu; unsigned int i; KASSERT(freq == 0, ("%s:%d: assertion failed", __func__, __LINE__)); if (siba->siba_chipid == 0x4312) { scc->scc_pmu.freq = 20000; return; } e = siba_cc_pmu1_plltab_find(SIBA_CC_PMU1_DEFAULT_FREQ); KASSERT(e != NULL, ("%s:%d: assertion failed", __func__, __LINE__)); scc->scc_pmu.freq = e->freq; pmu = SIBA_CC_READ32(scc, SIBA_CC_PMUCTL); if (SIBA_CC_PMUCTL_XF_VAL(pmu) == e->xf) return; DPRINTF(siba, SIBA_DEBUG_PLL, "change PLL value to %u.%03u MHz\n", (e->freq / 1000), (e->freq % 1000)); /* turn PLL off */ switch (siba->siba_chipid) { case 0x4325: bufsth = 0x222222; SIBA_CC_MASK32(scc, SIBA_CC_PMU_MINRES, ~((1 << SIBA_CC_PMU_4325_BBPLL_PWR) | (1 << SIBA_CC_PMU_4325_HT))); SIBA_CC_MASK32(scc, SIBA_CC_PMU_MAXRES, ~((1 << SIBA_CC_PMU_4325_BBPLL_PWR) | (1 << SIBA_CC_PMU_4325_HT))); break; default: KASSERT(0 == 1, ("%s:%d: assertion failed", __func__, __LINE__)); } for (i = 0; i < 1500; i++) { if (!(SIBA_CC_READ32(scc, SIBA_CC_CLKCTLSTATUS) & SIBA_CC_CLKCTLSTATUS_HT)) break; DELAY(10); } if (SIBA_CC_READ32(scc, SIBA_CC_CLKCTLSTATUS) & SIBA_CC_CLKCTLSTATUS_HT) device_printf(siba->siba_dev, "failed to turn PLL off!\n"); pll = siba_cc_pll_read(scc, SIBA_CC_PMU1_PLL0); pll &= ~(SIBA_CC_PMU1_PLL0_P1DIV | SIBA_CC_PMU1_PLL0_P2DIV); pll |= ((uint32_t)e->p1div << 20) & SIBA_CC_PMU1_PLL0_P1DIV; pll |= ((uint32_t)e->p2div << 24) & SIBA_CC_PMU1_PLL0_P2DIV; siba_cc_pll_write(scc, SIBA_CC_PMU1_PLL0, pll); pll = siba_cc_pll_read(scc, SIBA_CC_PMU1_PLL2); pll &= ~(SIBA_CC_PMU1_PLL2_NDIVINT | SIBA_CC_PMU1_PLL2_NDIVMODE); pll |= ((uint32_t)e->ndiv_int << 20) & SIBA_CC_PMU1_PLL2_NDIVINT; pll |= (1 << 17) & SIBA_CC_PMU1_PLL2_NDIVMODE; siba_cc_pll_write(scc, SIBA_CC_PMU1_PLL2, pll); pll = siba_cc_pll_read(scc, SIBA_CC_PMU1_PLL3); pll &= ~SIBA_CC_PMU1_PLL3_NDIVFRAC; pll |= ((uint32_t)e->ndiv_frac << 0) & SIBA_CC_PMU1_PLL3_NDIVFRAC; siba_cc_pll_write(scc, SIBA_CC_PMU1_PLL3, pll); if (bufsth) { pll = siba_cc_pll_read(scc, SIBA_CC_PMU1_PLL5); pll &= ~SIBA_CC_PMU1_PLL5_CLKDRV; pll |= (bufsth << 8) & SIBA_CC_PMU1_PLL5_CLKDRV; siba_cc_pll_write(scc, SIBA_CC_PMU1_PLL5, pll); } pmu = SIBA_CC_READ32(scc, SIBA_CC_PMUCTL); pmu &= ~(SIBA_CC_PMUCTL_ILP | SIBA_CC_PMUCTL_XF); pmu |= ((((uint32_t)e->freq + 127) / 128 - 1) << 16) & SIBA_CC_PMUCTL_ILP; pmu |= ((uint32_t)e->xf << 2) & SIBA_CC_PMUCTL_XF; SIBA_CC_WRITE32(scc, SIBA_CC_PMUCTL, pmu); } static void siba_cc_pmu0_pll0_init(struct siba_cc *scc, uint32_t xtalfreq) { struct siba_dev_softc *sd = scc->scc_dev; struct
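/* * PMU0 PLL programming follows the same pattern as the PMU1 path * above: power down the baseband PLL resource, poll up to 15 ms * (1500 iterations of DELAY(10)) for the HT clock to stop, reprogram * the divider fields, and finally update the ILP divider and crystal * frequency selector in SIBA_CC_PMUCTL. */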
siba_softc *siba = sd->sd_bus; const struct siba_cc_pmu0_plltab *e = NULL; uint32_t pmu, tmp, pll; unsigned int i; if ((siba->siba_chipid == 0x5354) && !xtalfreq) xtalfreq = 25000; if (xtalfreq) e = siba_cc_pmu0_plltab_findentry(xtalfreq); if (!e) e = siba_cc_pmu0_plltab_findentry( SIBA_CC_PMU0_DEFAULT_XTALFREQ); KASSERT(e != NULL, ("%s:%d: fail", __func__, __LINE__)); xtalfreq = e->freq; scc->scc_pmu.freq = e->freq; pmu = SIBA_CC_READ32(scc, SIBA_CC_PMUCTL); if (((pmu & SIBA_CC_PMUCTL_XF) >> 2) == e->xf) return; DPRINTF(siba, SIBA_DEBUG_PLL, "change PLL value to %u.%03u MHz\n", (xtalfreq / 1000), (xtalfreq % 1000)); KASSERT(siba->siba_chipid == 0x4328 || siba->siba_chipid == 0x5354, ("%s:%d: fail", __func__, __LINE__)); switch (siba->siba_chipid) { case 0x4328: SIBA_CC_MASK32(scc, SIBA_CC_PMU_MINRES, ~(1 << SIBA_CC_PMU_4328_BB_PLL_PU)); SIBA_CC_MASK32(scc, SIBA_CC_PMU_MAXRES, ~(1 << SIBA_CC_PMU_4328_BB_PLL_PU)); break; case 0x5354: SIBA_CC_MASK32(scc, SIBA_CC_PMU_MINRES, ~(1 << SIBA_CC_PMU_5354_BB_PLL_PU)); SIBA_CC_MASK32(scc, SIBA_CC_PMU_MAXRES, ~(1 << SIBA_CC_PMU_5354_BB_PLL_PU)); break; } for (i = 1500; i; i--) { tmp = SIBA_CC_READ32(scc, SIBA_CC_CLKCTLSTATUS); if (!(tmp & SIBA_CC_CLKCTLSTATUS_HT)) break; DELAY(10); } tmp = SIBA_CC_READ32(scc, SIBA_CC_CLKCTLSTATUS); if (tmp & SIBA_CC_CLKCTLSTATUS_HT) device_printf(siba->siba_dev, "failed to turn PLL off!\n"); /* set PDIV */ pll = siba_cc_pll_read(scc, SIBA_CC_PMU0_PLL0); if (xtalfreq >= SIBA_CC_PMU0_PLL0_PDIV_FREQ) pll |= SIBA_CC_PMU0_PLL0_PDIV_MSK; else pll &= ~SIBA_CC_PMU0_PLL0_PDIV_MSK; siba_cc_pll_write(scc, SIBA_CC_PMU0_PLL0, pll); /* set WILD */ pll = siba_cc_pll_read(scc, SIBA_CC_PMU0_PLL1); pll &= ~(SIBA_CC_PMU0_PLL1_STOPMOD | SIBA_CC_PMU0_PLL1_IMSK | SIBA_CC_PMU0_PLL1_FMSK); pll |= ((uint32_t)e->wb_int << 28) & SIBA_CC_PMU0_PLL1_IMSK; pll |= ((uint32_t)e->wb_frac << 8) & SIBA_CC_PMU0_PLL1_FMSK; if (e->wb_frac == 0) pll |= SIBA_CC_PMU0_PLL1_STOPMOD; siba_cc_pll_write(scc, SIBA_CC_PMU0_PLL1, pll); /* set WILD */ pll = siba_cc_pll_read(scc, SIBA_CC_PMU0_PLL2); pll &= ~SIBA_CC_PMU0_PLL2_IMSKHI; pll |= (((uint32_t)e->wb_int >> 4) << 0) & SIBA_CC_PMU0_PLL2_IMSKHI; siba_cc_pll_write(scc, SIBA_CC_PMU0_PLL2, pll); /* set freq and divisor. 
*/ pmu = SIBA_CC_READ32(scc, SIBA_CC_PMUCTL); pmu &= ~SIBA_CC_PMUCTL_ILP; pmu |= (((xtalfreq + 127) / 128 - 1) << 16) & SIBA_CC_PMUCTL_ILP; pmu &= ~SIBA_CC_PMUCTL_XF; pmu |= ((uint32_t)e->xf << 2) & SIBA_CC_PMUCTL_XF; SIBA_CC_WRITE32(scc, SIBA_CC_PMUCTL, pmu); } static enum siba_clksrc siba_cc_clksrc(struct siba_cc *scc) { struct siba_dev_softc *sd = scc->scc_dev; struct siba_softc *siba = sd->sd_bus; if (sd->sd_id.sd_rev < 6) { if (siba->siba_type == SIBA_TYPE_PCI) { if (pci_read_config(siba->siba_dev, SIBA_GPIO_OUT, 4) & 0x10) return (SIBA_CC_CLKSRC_PCI); return (SIBA_CC_CLKSRC_CRYSTAL); } if (siba->siba_type == SIBA_TYPE_SSB || siba->siba_type == SIBA_TYPE_PCMCIA) return (SIBA_CC_CLKSRC_CRYSTAL); } if (sd->sd_id.sd_rev < 10) { switch (SIBA_CC_READ32(scc, SIBA_CC_CLKSLOW) & 0x7) { case 0: return (SIBA_CC_CLKSRC_LOWPW); case 1: return (SIBA_CC_CLKSRC_CRYSTAL); case 2: return (SIBA_CC_CLKSRC_PCI); default: break; } } return (SIBA_CC_CLKSRC_CRYSTAL); } static const struct siba_cc_pmu1_plltab * siba_cc_pmu1_plltab_find(uint32_t crystalfreq) { const struct siba_cc_pmu1_plltab *e; unsigned int i; for (i = 0; i < N(siba_cc_pmu1_plltab); i++) { e = &siba_cc_pmu1_plltab[i]; if (crystalfreq == e->freq) return (e); } return (NULL); } static uint32_t siba_cc_pll_read(struct siba_cc *scc, uint32_t offset) { SIBA_CC_WRITE32(scc, SIBA_CC_PLLCTL_ADDR, offset); return (SIBA_CC_READ32(scc, SIBA_CC_PLLCTL_DATA)); } static void siba_cc_pll_write(struct siba_cc *scc, uint32_t offset, uint32_t value) { SIBA_CC_WRITE32(scc, SIBA_CC_PLLCTL_ADDR, offset); SIBA_CC_WRITE32(scc, SIBA_CC_PLLCTL_DATA, value); } static const struct siba_cc_pmu0_plltab * siba_cc_pmu0_plltab_findentry(uint32_t crystalfreq) { const struct siba_cc_pmu0_plltab *e; unsigned int i; for (i = 0; i < N(siba_cc_pmu0_plltab); i++) { e = &siba_cc_pmu0_plltab[i]; if (e->freq == crystalfreq) return (e); } return (NULL); } static int siba_pci_sprom(struct siba_softc *siba, struct siba_sprom *sprom) { int error = ENOMEM; uint16_t *buf; buf = malloc(SIBA_SPROMSIZE_R123 * sizeof(uint16_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (buf == NULL) return (ENOMEM); siba_sprom_read(siba, buf, SIBA_SPROMSIZE_R123); error = sprom_check_crc(buf, siba->siba_spromsize); if (error) { free(buf, M_DEVBUF); buf = malloc(SIBA_SPROMSIZE_R4 * sizeof(uint16_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (buf == NULL) return (ENOMEM); siba_sprom_read(siba, buf, SIBA_SPROMSIZE_R4); error = sprom_check_crc(buf, siba->siba_spromsize); if (error) device_printf(siba->siba_dev, "warn: bad SPROM CRC\n"); } bzero(sprom, sizeof(*sprom)); sprom->rev = buf[siba->siba_spromsize - 1] & 0x00FF; DPRINTF(siba, SIBA_DEBUG_SPROM, "SPROM rev %d\n", sprom->rev); memset(sprom->mac_eth, 0xff, 6); memset(sprom->mac_80211a, 0xff, 6); if ((siba->siba_chipid & 0xff00) == 0x4400) { sprom->rev = 1; siba_sprom_r123(sprom, buf); } else if (siba->siba_chipid == 0x4321) { sprom->rev = 4; siba_sprom_r45(sprom, buf); } else { switch (sprom->rev) { case 1: case 2: case 3: siba_sprom_r123(sprom, buf); break; case 4: case 5: siba_sprom_r45(sprom, buf); break; case 8: siba_sprom_r8(sprom, buf); break; default: device_printf(siba->siba_dev, "unknown SPROM revision %d.\n", sprom->rev); siba_sprom_r123(sprom, buf); } } if (sprom->bf_lo == 0xffff) sprom->bf_lo = 0; if (sprom->bf_hi == 0xffff) sprom->bf_hi = 0; free(buf, M_DEVBUF); return (error); } static int siba_sprom_read(struct siba_softc *siba, uint16_t *sprom, uint16_t len) { int i; for (i = 0; i < len; i++) sprom[i] = SIBA_READ_2(siba, SIBA_SPROM_BASE + (i * 2)); 
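/* * The SPROM image is read one little-endian 16-bit word at a time * through the window at SIBA_SPROM_BASE. The word count is recorded * in siba_spromsize so that sprom_check_crc() can find the final * word, whose low byte carries the SPROM revision and whose high * byte carries an inverted CRC-8 over the rest of the image. */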
siba->siba_spromsize = len; return (0); } static int sprom_check_crc(const uint16_t *sprom, size_t size) { int word; uint8_t crc0, crc1 = 0xff; crc0 = (sprom[size - 1] & SIBA_SPROM_REV_CRC) >> 8; for (word = 0; word < size - 1; word++) { crc1 = siba_crc8(crc1, sprom[word] & 0x00ff); crc1 = siba_crc8(crc1, (sprom[word] & 0xff00) >> 8); } crc1 = siba_crc8(crc1, sprom[size - 1] & 0x00ff); crc1 ^= 0xff; return ((crc0 != crc1) ? EPROTO : 0); } static uint8_t siba_crc8(uint8_t crc, uint8_t data) { static const uint8_t ct[] = { 0x00, 0xf7, 0xb9, 0x4e, 0x25, 0xd2, 0x9c, 0x6b, 0x4a, 0xbd, 0xf3, 0x04, 0x6f, 0x98, 0xd6, 0x21, 0x94, 0x63, 0x2d, 0xda, 0xb1, 0x46, 0x08, 0xff, 0xde, 0x29, 0x67, 0x90, 0xfb, 0x0c, 0x42, 0xb5, 0x7f, 0x88, 0xc6, 0x31, 0x5a, 0xad, 0xe3, 0x14, 0x35, 0xc2, 0x8c, 0x7b, 0x10, 0xe7, 0xa9, 0x5e, 0xeb, 0x1c, 0x52, 0xa5, 0xce, 0x39, 0x77, 0x80, 0xa1, 0x56, 0x18, 0xef, 0x84, 0x73, 0x3d, 0xca, 0xfe, 0x09, 0x47, 0xb0, 0xdb, 0x2c, 0x62, 0x95, 0xb4, 0x43, 0x0d, 0xfa, 0x91, 0x66, 0x28, 0xdf, 0x6a, 0x9d, 0xd3, 0x24, 0x4f, 0xb8, 0xf6, 0x01, 0x20, 0xd7, 0x99, 0x6e, 0x05, 0xf2, 0xbc, 0x4b, 0x81, 0x76, 0x38, 0xcf, 0xa4, 0x53, 0x1d, 0xea, 0xcb, 0x3c, 0x72, 0x85, 0xee, 0x19, 0x57, 0xa0, 0x15, 0xe2, 0xac, 0x5b, 0x30, 0xc7, 0x89, 0x7e, 0x5f, 0xa8, 0xe6, 0x11, 0x7a, 0x8d, 0xc3, 0x34, 0xab, 0x5c, 0x12, 0xe5, 0x8e, 0x79, 0x37, 0xc0, 0xe1, 0x16, 0x58, 0xaf, 0xc4, 0x33, 0x7d, 0x8a, 0x3f, 0xc8, 0x86, 0x71, 0x1a, 0xed, 0xa3, 0x54, 0x75, 0x82, 0xcc, 0x3b, 0x50, 0xa7, 0xe9, 0x1e, 0xd4, 0x23, 0x6d, 0x9a, 0xf1, 0x06, 0x48, 0xbf, 0x9e, 0x69, 0x27, 0xd0, 0xbb, 0x4c, 0x02, 0xf5, 0x40, 0xb7, 0xf9, 0x0e, 0x65, 0x92, 0xdc, 0x2b, 0x0a, 0xfd, 0xb3, 0x44, 0x2f, 0xd8, 0x96, 0x61, 0x55, 0xa2, 0xec, 0x1b, 0x70, 0x87, 0xc9, 0x3e, 0x1f, 0xe8, 0xa6, 0x51, 0x3a, 0xcd, 0x83, 0x74, 0xc1, 0x36, 0x78, 0x8f, 0xe4, 0x13, 0x5d, 0xaa, 0x8b, 0x7c, 0x32, 0xc5, 0xae, 0x59, 0x17, 0xe0, 0x2a, 0xdd, 0x93, 0x64, 0x0f, 0xf8, 0xb6, 0x41, 0x60, 0x97, 0xd9, 0x2e, 0x45, 0xb2, 0xfc, 0x0b, 0xbe, 0x49, 0x07, 0xf0, 0x9b, 0x6c, 0x22, 0xd5, 0xf4, 0x03, 0x4d, 0xba, 0xd1, 0x26, 0x68, 0x9f, }; return (ct[crc ^ data]); } #define SIBA_LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask)) #define SIBA_OFFSET(offset) \ (((offset) - SIBA_SPROM_BASE) / sizeof(uint16_t)) #define SIBA_SHIFTOUT_SUB(__x, __mask) \ (((__x) & (__mask)) / SIBA_LOWEST_SET_BIT(__mask)) #define SIBA_SHIFTOUT(_var, _offset, _mask) \ out->_var = SIBA_SHIFTOUT_SUB(in[SIBA_OFFSET(_offset)], (_mask)) #define SIBA_SHIFTOUT_4(_var, _offset, _mask, _shift) \ out->_var = ((((uint32_t)in[SIBA_OFFSET((_offset)+2)] << 16 | \ in[SIBA_OFFSET(_offset)]) & (_mask)) >> (_shift)) static void siba_sprom_r123(struct siba_sprom *out, const uint16_t *in) { int i; uint16_t v; int8_t gain; uint16_t loc[3]; if (out->rev == 3) loc[0] = SIBA_SPROM3_MAC_80211BG; else { loc[0] = SIBA_SPROM1_MAC_80211BG; loc[1] = SIBA_SPROM1_MAC_ETH; loc[2] = SIBA_SPROM1_MAC_80211A; } for (i = 0; i < 3; i++) { v = in[SIBA_OFFSET(loc[0]) + i]; *(((uint16_t *)out->mac_80211bg) + i) = htobe16(v); } if (out->rev < 3) { for (i = 0; i < 3; i++) { v = in[SIBA_OFFSET(loc[1]) + i]; *(((uint16_t *)out->mac_eth) + i) = htobe16(v); } for (i = 0; i < 3; i++) { v = in[SIBA_OFFSET(loc[2]) + i]; *(((uint16_t *)out->mac_80211a) + i) = htobe16(v); } } SIBA_SHIFTOUT(mii_eth0, SIBA_SPROM1_ETHPHY, SIBA_SPROM1_ETHPHY_MII_ETH0); SIBA_SHIFTOUT(mii_eth1, SIBA_SPROM1_ETHPHY, SIBA_SPROM1_ETHPHY_MII_ETH1); SIBA_SHIFTOUT(mdio_eth0, SIBA_SPROM1_ETHPHY, SIBA_SPROM1_ETHPHY_MDIO_ETH0); SIBA_SHIFTOUT(mdio_eth1, SIBA_SPROM1_ETHPHY, 
SIBA_SPROM1_ETHPHY_MDIO_ETH1); SIBA_SHIFTOUT(brev, SIBA_SPROM1_BOARDINFO, SIBA_SPROM1_BOARDINFO_BREV); SIBA_SHIFTOUT(ccode, SIBA_SPROM1_BOARDINFO, SIBA_SPROM1_BOARDINFO_CCODE); SIBA_SHIFTOUT(ant_a, SIBA_SPROM1_BOARDINFO, SIBA_SPROM1_BOARDINFO_ANTA); SIBA_SHIFTOUT(ant_bg, SIBA_SPROM1_BOARDINFO, SIBA_SPROM1_BOARDINFO_ANTBG); SIBA_SHIFTOUT(pa0b0, SIBA_SPROM1_PA0B0, 0xffff); SIBA_SHIFTOUT(pa0b1, SIBA_SPROM1_PA0B1, 0xffff); SIBA_SHIFTOUT(pa0b2, SIBA_SPROM1_PA0B2, 0xffff); SIBA_SHIFTOUT(pa1b0, SIBA_SPROM1_PA1B0, 0xffff); SIBA_SHIFTOUT(pa1b1, SIBA_SPROM1_PA1B1, 0xffff); SIBA_SHIFTOUT(pa1b2, SIBA_SPROM1_PA1B2, 0xffff); SIBA_SHIFTOUT(gpio0, SIBA_SPROM1_GPIOA, SIBA_SPROM1_GPIOA_P0); SIBA_SHIFTOUT(gpio1, SIBA_SPROM1_GPIOA, SIBA_SPROM1_GPIOA_P1); SIBA_SHIFTOUT(gpio2, SIBA_SPROM1_GPIOB, SIBA_SPROM1_GPIOB_P2); SIBA_SHIFTOUT(gpio3, SIBA_SPROM1_GPIOB, SIBA_SPROM1_GPIOB_P3); SIBA_SHIFTOUT(maxpwr_a, SIBA_SPROM1_MAXPWR, SIBA_SPROM1_MAXPWR_A); SIBA_SHIFTOUT(maxpwr_bg, SIBA_SPROM1_MAXPWR, SIBA_SPROM1_MAXPWR_BG); SIBA_SHIFTOUT(tssi_a, SIBA_SPROM1_TSSI, SIBA_SPROM1_TSSI_A); SIBA_SHIFTOUT(tssi_bg, SIBA_SPROM1_TSSI, SIBA_SPROM1_TSSI_BG); SIBA_SHIFTOUT(bf_lo, SIBA_SPROM1_BFLOW, 0xffff); if (out->rev >= 2) SIBA_SHIFTOUT(bf_hi, SIBA_SPROM2_BFHIGH, 0xffff); /* antenna gain */ gain = siba_sprom_r123_antgain(out->rev, in, SIBA_SPROM1_AGAIN_BG, 0); out->again.ghz24.a0 = out->again.ghz24.a1 = gain; out->again.ghz24.a2 = out->again.ghz24.a3 = gain; gain = siba_sprom_r123_antgain(out->rev, in, SIBA_SPROM1_AGAIN_A, 8); out->again.ghz5.a0 = out->again.ghz5.a1 = gain; out->again.ghz5.a2 = out->again.ghz5.a3 = gain; } static void siba_sprom_r45(struct siba_sprom *out, const uint16_t *in) { int i; uint16_t v; uint16_t mac_80211bg_offset; if (out->rev == 4) mac_80211bg_offset = SIBA_SPROM4_MAC_80211BG; else mac_80211bg_offset = SIBA_SPROM5_MAC_80211BG; for (i = 0; i < 3; i++) { v = in[SIBA_OFFSET(mac_80211bg_offset) + i]; *(((uint16_t *)out->mac_80211bg) + i) = htobe16(v); } SIBA_SHIFTOUT(mii_eth0, SIBA_SPROM4_ETHPHY, SIBA_SPROM4_ETHPHY_ET0A); SIBA_SHIFTOUT(mii_eth1, SIBA_SPROM4_ETHPHY, SIBA_SPROM4_ETHPHY_ET1A); if (out->rev == 4) { SIBA_SHIFTOUT(ccode, SIBA_SPROM4_CCODE, 0xffff); SIBA_SHIFTOUT(bf_lo, SIBA_SPROM4_BFLOW, 0xffff); SIBA_SHIFTOUT(bf_hi, SIBA_SPROM4_BFHIGH, 0xffff); } else { SIBA_SHIFTOUT(ccode, SIBA_SPROM5_CCODE, 0xffff); SIBA_SHIFTOUT(bf_lo, SIBA_SPROM5_BFLOW, 0xffff); SIBA_SHIFTOUT(bf_hi, SIBA_SPROM5_BFHIGH, 0xffff); } SIBA_SHIFTOUT(ant_a, SIBA_SPROM4_ANTAVAIL, SIBA_SPROM4_ANTAVAIL_A); SIBA_SHIFTOUT(ant_bg, SIBA_SPROM4_ANTAVAIL, SIBA_SPROM4_ANTAVAIL_BG); SIBA_SHIFTOUT(maxpwr_bg, SIBA_SPROM4_MAXP_BG, SIBA_SPROM4_MAXP_BG_MASK); SIBA_SHIFTOUT(tssi_bg, SIBA_SPROM4_MAXP_BG, SIBA_SPROM4_TSSI_BG); SIBA_SHIFTOUT(maxpwr_a, SIBA_SPROM4_MAXP_A, SIBA_SPROM4_MAXP_A_MASK); SIBA_SHIFTOUT(tssi_a, SIBA_SPROM4_MAXP_A, SIBA_SPROM4_TSSI_A); if (out->rev == 4) { SIBA_SHIFTOUT(gpio0, SIBA_SPROM4_GPIOA, SIBA_SPROM4_GPIOA_P0); SIBA_SHIFTOUT(gpio1, SIBA_SPROM4_GPIOA, SIBA_SPROM4_GPIOA_P1); SIBA_SHIFTOUT(gpio2, SIBA_SPROM4_GPIOB, SIBA_SPROM4_GPIOB_P2); SIBA_SHIFTOUT(gpio3, SIBA_SPROM4_GPIOB, SIBA_SPROM4_GPIOB_P3); } else { SIBA_SHIFTOUT(gpio0, SIBA_SPROM5_GPIOA, SIBA_SPROM5_GPIOA_P0); SIBA_SHIFTOUT(gpio1, SIBA_SPROM5_GPIOA, SIBA_SPROM5_GPIOA_P1); SIBA_SHIFTOUT(gpio2, SIBA_SPROM5_GPIOB, SIBA_SPROM5_GPIOB_P2); SIBA_SHIFTOUT(gpio3, SIBA_SPROM5_GPIOB, SIBA_SPROM5_GPIOB_P3); } /* antenna gain */ SIBA_SHIFTOUT(again.ghz24.a0, SIBA_SPROM4_AGAIN01, SIBA_SPROM4_AGAIN0); SIBA_SHIFTOUT(again.ghz24.a1, SIBA_SPROM4_AGAIN01, SIBA_SPROM4_AGAIN1); 
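/* * Note: SIBA_SHIFTOUT(field, offset, mask), used throughout these * SPROM parsers, right-justifies the masked bits by dividing by the * mask's lowest set bit; e.g. a mask of 0xff00 extracts * (word & 0xff00) / 0x0100, the high byte. */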
SIBA_SHIFTOUT(again.ghz24.a2, SIBA_SPROM4_AGAIN23, SIBA_SPROM4_AGAIN2); SIBA_SHIFTOUT(again.ghz24.a3, SIBA_SPROM4_AGAIN23, SIBA_SPROM4_AGAIN3); bcopy(&out->again.ghz24, &out->again.ghz5, sizeof(out->again.ghz5)); } static void siba_sprom_r8(struct siba_sprom *out, const uint16_t *in) { int i; uint16_t v; for (i = 0; i < 3; i++) { v = in[SIBA_OFFSET(SIBA_SPROM8_MAC_80211BG) + i]; *(((uint16_t *)out->mac_80211bg) + i) = htobe16(v); } SIBA_SHIFTOUT(ccode, SIBA_SPROM8_CCODE, 0xffff); SIBA_SHIFTOUT(bf_lo, SIBA_SPROM8_BFLOW, 0xffff); SIBA_SHIFTOUT(bf_hi, SIBA_SPROM8_BFHIGH, 0xffff); SIBA_SHIFTOUT(bf2_lo, SIBA_SPROM8_BFL2LO, 0xffff); SIBA_SHIFTOUT(bf2_hi, SIBA_SPROM8_BFL2HI, 0xffff); SIBA_SHIFTOUT(ant_a, SIBA_SPROM8_ANTAVAIL, SIBA_SPROM8_ANTAVAIL_A); SIBA_SHIFTOUT(ant_bg, SIBA_SPROM8_ANTAVAIL, SIBA_SPROM8_ANTAVAIL_BG); SIBA_SHIFTOUT(maxpwr_bg, SIBA_SPROM8_MAXP_BG, SIBA_SPROM8_MAXP_BG_MASK); SIBA_SHIFTOUT(tssi_bg, SIBA_SPROM8_MAXP_BG, SIBA_SPROM8_TSSI_BG); SIBA_SHIFTOUT(maxpwr_a, SIBA_SPROM8_MAXP_A, SIBA_SPROM8_MAXP_A_MASK); SIBA_SHIFTOUT(tssi_a, SIBA_SPROM8_MAXP_A, SIBA_SPROM8_TSSI_A); SIBA_SHIFTOUT(maxpwr_ah, SIBA_SPROM8_MAXP_AHL, SIBA_SPROM8_MAXP_AH_MASK); SIBA_SHIFTOUT(maxpwr_al, SIBA_SPROM8_MAXP_AHL, SIBA_SPROM8_MAXP_AL_MASK); SIBA_SHIFTOUT(gpio0, SIBA_SPROM8_GPIOA, SIBA_SPROM8_GPIOA_P0); SIBA_SHIFTOUT(gpio1, SIBA_SPROM8_GPIOA, SIBA_SPROM8_GPIOA_P1); SIBA_SHIFTOUT(gpio2, SIBA_SPROM8_GPIOB, SIBA_SPROM8_GPIOB_P2); SIBA_SHIFTOUT(gpio3, SIBA_SPROM8_GPIOB, SIBA_SPROM8_GPIOB_P3); SIBA_SHIFTOUT(tri2g, SIBA_SPROM8_TRI25G, SIBA_SPROM8_TRI2G); SIBA_SHIFTOUT(tri5g, SIBA_SPROM8_TRI25G, SIBA_SPROM8_TRI5G); SIBA_SHIFTOUT(tri5gl, SIBA_SPROM8_TRI5GHL, SIBA_SPROM8_TRI5GL); SIBA_SHIFTOUT(tri5gh, SIBA_SPROM8_TRI5GHL, SIBA_SPROM8_TRI5GH); SIBA_SHIFTOUT(rxpo2g, SIBA_SPROM8_RXPO, SIBA_SPROM8_RXPO2G); SIBA_SHIFTOUT(rxpo5g, SIBA_SPROM8_RXPO, SIBA_SPROM8_RXPO5G); SIBA_SHIFTOUT(rssismf2g, SIBA_SPROM8_RSSIPARM2G, SIBA_SPROM8_RSSISMF2G); SIBA_SHIFTOUT(rssismc2g, SIBA_SPROM8_RSSIPARM2G, SIBA_SPROM8_RSSISMC2G); SIBA_SHIFTOUT(rssisav2g, SIBA_SPROM8_RSSIPARM2G, SIBA_SPROM8_RSSISAV2G); SIBA_SHIFTOUT(bxa2g, SIBA_SPROM8_RSSIPARM2G, SIBA_SPROM8_BXA2G); SIBA_SHIFTOUT(rssismf5g, SIBA_SPROM8_RSSIPARM5G, SIBA_SPROM8_RSSISMF5G); SIBA_SHIFTOUT(rssismc5g, SIBA_SPROM8_RSSIPARM5G, SIBA_SPROM8_RSSISMC5G); SIBA_SHIFTOUT(rssisav5g, SIBA_SPROM8_RSSIPARM5G, SIBA_SPROM8_RSSISAV5G); SIBA_SHIFTOUT(bxa5g, SIBA_SPROM8_RSSIPARM5G, SIBA_SPROM8_BXA5G); SIBA_SHIFTOUT(pa0b0, SIBA_SPROM8_PA0B0, 0xffff); SIBA_SHIFTOUT(pa0b1, SIBA_SPROM8_PA0B1, 0xffff); SIBA_SHIFTOUT(pa0b2, SIBA_SPROM8_PA0B2, 0xffff); SIBA_SHIFTOUT(pa1b0, SIBA_SPROM8_PA1B0, 0xffff); SIBA_SHIFTOUT(pa1b1, SIBA_SPROM8_PA1B1, 0xffff); SIBA_SHIFTOUT(pa1b2, SIBA_SPROM8_PA1B2, 0xffff); SIBA_SHIFTOUT(pa1lob0, SIBA_SPROM8_PA1LOB0, 0xffff); SIBA_SHIFTOUT(pa1lob1, SIBA_SPROM8_PA1LOB1, 0xffff); SIBA_SHIFTOUT(pa1lob2, SIBA_SPROM8_PA1LOB2, 0xffff); SIBA_SHIFTOUT(pa1hib0, SIBA_SPROM8_PA1HIB0, 0xffff); SIBA_SHIFTOUT(pa1hib1, SIBA_SPROM8_PA1HIB1, 0xffff); SIBA_SHIFTOUT(pa1hib2, SIBA_SPROM8_PA1HIB2, 0xffff); SIBA_SHIFTOUT(cck2gpo, SIBA_SPROM8_CCK2GPO, 0xffff); SIBA_SHIFTOUT_4(ofdm2gpo, SIBA_SPROM8_OFDM2GPO, 0xffffffff, 0); SIBA_SHIFTOUT_4(ofdm5glpo, SIBA_SPROM8_OFDM5GLPO, 0xffffffff, 0); SIBA_SHIFTOUT_4(ofdm5gpo, SIBA_SPROM8_OFDM5GPO, 0xffffffff, 0); SIBA_SHIFTOUT_4(ofdm5ghpo, SIBA_SPROM8_OFDM5GHPO, 0xffffffff, 0); /* antenna gain */ SIBA_SHIFTOUT(again.ghz24.a0, SIBA_SPROM8_AGAIN01, SIBA_SPROM8_AGAIN0); SIBA_SHIFTOUT(again.ghz24.a1, SIBA_SPROM8_AGAIN01, SIBA_SPROM8_AGAIN1); SIBA_SHIFTOUT(again.ghz24.a2, 
SIBA_SPROM8_AGAIN23, SIBA_SPROM8_AGAIN2); SIBA_SHIFTOUT(again.ghz24.a3, SIBA_SPROM8_AGAIN23, SIBA_SPROM8_AGAIN3); bcopy(&out->again.ghz24, &out->again.ghz5, sizeof(out->again.ghz5)); } static int8_t siba_sprom_r123_antgain(uint8_t sprom_revision, const uint16_t *in, uint16_t mask, uint16_t shift) { uint16_t v; uint8_t gain; v = in[SIBA_OFFSET(SIBA_SPROM1_AGAIN)]; gain = (v & mask) >> shift; gain = (gain == 0xff) ? 2 : (sprom_revision == 1) ? gain << 2 : ((gain & 0xc0) >> 6) | ((gain & 0x3f) << 2); return ((int8_t)gain); } #undef SIBA_LOWEST_SET_BIT #undef SIBA_OFFSET #undef SIBA_SHIFTOUT_SUB #undef SIBA_SHIFTOUT int siba_powerdown(device_t dev) { struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; return (siba_powerdown_sub(siba)); } static int siba_powerdown_sub(struct siba_softc *siba) { struct siba_cc *scc; if (siba->siba_type == SIBA_TYPE_SSB) return (0); scc = &siba->siba_cc; if (!scc->scc_dev || scc->scc_dev->sd_id.sd_rev < 5) return (0); siba_cc_clock(scc, SIBA_CLOCK_SLOW); siba_pci_gpio(siba, SIBA_GPIO_CRYSTAL | SIBA_GPIO_PLL, 0); return (0); } static void siba_pcicore_init(struct siba_pci *spc) { struct siba_dev_softc *sd = spc->spc_dev; if (sd == NULL) return; if (!siba_dev_isup_sub(sd)) siba_dev_up_sub(sd, 0); KASSERT(spc->spc_hostmode == 0, ("%s:%d: hostmode", __func__, __LINE__)); /* disable PCI interrupt */ siba_write_4_sub(spc->spc_dev, SIBA_INTR_MASK, 0); } int siba_dev_isup(device_t dev) { struct siba_dev_softc *sd = device_get_ivars(dev); return (siba_dev_isup_sub(sd)); } static int siba_dev_isup_sub(struct siba_dev_softc *sd) { uint32_t reject, val; reject = siba_tmslow_reject_bitmask(sd); val = siba_read_4_sub(sd, SIBA_TGSLOW); val &= SIBA_TGSLOW_CLOCK | SIBA_TGSLOW_RESET | reject; return (val == SIBA_TGSLOW_CLOCK); } void siba_dev_up(device_t dev, uint32_t flags) { struct siba_dev_softc *sd = device_get_ivars(dev); siba_dev_up_sub(sd, flags); } static void siba_dev_up_sub(struct siba_dev_softc *sd, uint32_t flags) { uint32_t val; siba_dev_down_sub(sd, flags); siba_write_4_sub(sd, SIBA_TGSLOW, SIBA_TGSLOW_RESET | SIBA_TGSLOW_CLOCK | SIBA_TGSLOW_FGC | flags); siba_read_4_sub(sd, SIBA_TGSLOW); DELAY(1); if (siba_read_4_sub(sd, SIBA_TGSHIGH) & SIBA_TGSHIGH_SERR) siba_write_4_sub(sd, SIBA_TGSHIGH, 0); val = siba_read_4_sub(sd, SIBA_IAS); if (val & (SIBA_IAS_INBAND_ERR | SIBA_IAS_TIMEOUT)) { val &= ~(SIBA_IAS_INBAND_ERR | SIBA_IAS_TIMEOUT); siba_write_4_sub(sd, SIBA_IAS, val); } siba_write_4_sub(sd, SIBA_TGSLOW, SIBA_TGSLOW_CLOCK | SIBA_TGSLOW_FGC | flags); siba_read_4_sub(sd, SIBA_TGSLOW); DELAY(1); siba_write_4_sub(sd, SIBA_TGSLOW, SIBA_TGSLOW_CLOCK | flags); siba_read_4_sub(sd, SIBA_TGSLOW); DELAY(1); } static uint32_t siba_tmslow_reject_bitmask(struct siba_dev_softc *sd) { uint32_t rev = siba_read_4_sub(sd, SIBA_IDLOW) & SIBA_IDLOW_SSBREV; switch (rev) { case SIBA_IDLOW_SSBREV_22: return (SIBA_TGSLOW_REJECT_22); case SIBA_IDLOW_SSBREV_23: return (SIBA_TGSLOW_REJECT_23); case SIBA_IDLOW_SSBREV_24: case SIBA_IDLOW_SSBREV_25: case SIBA_IDLOW_SSBREV_26: case SIBA_IDLOW_SSBREV_27: return (SIBA_TGSLOW_REJECT_23); default: KASSERT(0 == 1, ("%s:%d: unknown backplane rev %#x\n", __func__, __LINE__, rev)); } return (SIBA_TGSLOW_REJECT_22 | SIBA_TGSLOW_REJECT_23); } void siba_dev_down(device_t dev, uint32_t flags) { struct siba_dev_softc *sd = device_get_ivars(dev); siba_dev_down_sub(sd, flags); } static void siba_dev_down_sub(struct siba_dev_softc *sd, uint32_t flags) { struct siba_softc *siba = sd->sd_bus; uint32_t reject, val; int i; if 
(siba_read_4_sub(sd, SIBA_TGSLOW) & SIBA_TGSLOW_RESET) return; reject = siba_tmslow_reject_bitmask(sd); siba_write_4_sub(sd, SIBA_TGSLOW, reject | SIBA_TGSLOW_CLOCK); for (i = 0; i < 1000; i++) { val = siba_read_4_sub(sd, SIBA_TGSLOW); if (val & reject) break; DELAY(10); } if ((val & reject) == 0) { device_printf(siba->siba_dev, "timeout (bit %#x reg %#x)\n", reject, SIBA_TGSLOW); } for (i = 0; i < 1000; i++) { val = siba_read_4_sub(sd, SIBA_TGSHIGH); if (!(val & SIBA_TGSHIGH_BUSY)) break; DELAY(10); } if ((val & SIBA_TGSHIGH_BUSY) != 0) { device_printf(siba->siba_dev, "timeout (bit %#x reg %#x)\n", SIBA_TGSHIGH_BUSY, SIBA_TGSHIGH); } siba_write_4_sub(sd, SIBA_TGSLOW, SIBA_TGSLOW_FGC | SIBA_TGSLOW_CLOCK | reject | SIBA_TGSLOW_RESET | flags); siba_read_4_sub(sd, SIBA_TGSLOW); DELAY(1); siba_write_4_sub(sd, SIBA_TGSLOW, reject | SIBA_TGSLOW_RESET | flags); siba_read_4_sub(sd, SIBA_TGSLOW); DELAY(1); } static void siba_pcicore_setup(struct siba_pci *spc, struct siba_dev_softc *sd) { struct siba_dev_softc *psd = spc->spc_dev; struct siba_softc *siba = psd->sd_bus; uint32_t tmp; if (psd->sd_id.sd_device == SIBA_DEVID_PCI) { siba_pcicore_write_4(spc, SIBA_PCICORE_SBTOPCI2, siba_pcicore_read_4(spc, SIBA_PCICORE_SBTOPCI2) | SIBA_PCICORE_SBTOPCI_PREF | SIBA_PCICORE_SBTOPCI_BURST); if (psd->sd_id.sd_rev < 5) { tmp = siba_read_4_sub(psd, SIBA_IMCFGLO); tmp &= ~SIBA_IMCFGLO_SERTO; tmp = (tmp | 2) & ~SIBA_IMCFGLO_REQTO; tmp |= 3 << 4 /* SIBA_IMCFGLO_REQTO_SHIFT */; siba_write_4_sub(psd, SIBA_IMCFGLO, tmp); /* broadcast value */ sd = (siba->siba_cc.scc_dev != NULL) ? siba->siba_cc.scc_dev : siba->siba_pci.spc_dev; if (sd != NULL) { siba_write_4_sub(sd, SIBA_PCICORE_BCAST_ADDR, 0xfd8); siba_read_4_sub(sd, SIBA_PCICORE_BCAST_ADDR); siba_write_4_sub(sd, SIBA_PCICORE_BCAST_DATA, 0); siba_read_4_sub(sd, SIBA_PCICORE_BCAST_DATA); } } else if (psd->sd_id.sd_rev >= 11) { tmp = siba_pcicore_read_4(spc, SIBA_PCICORE_SBTOPCI2); tmp |= SIBA_PCICORE_SBTOPCI_MRM; siba_pcicore_write_4(spc, SIBA_PCICORE_SBTOPCI2, tmp); } } else { KASSERT(psd->sd_id.sd_device == SIBA_DEVID_PCIE, ("only PCIE")); if ((psd->sd_id.sd_rev == 0) || (psd->sd_id.sd_rev == 1)) siba_pcie_write(spc, 0x4, siba_pcie_read(spc, 0x4) | 0x8); if (psd->sd_id.sd_rev == 0) { siba_pcie_mdio_write(spc, 0x1f, 2, 0x8128); /* Timer */ siba_pcie_mdio_write(spc, 0x1f, 6, 0x0100); /* CDR */ siba_pcie_mdio_write(spc, 0x1f, 7, 0x1466); /* CDR BW */ } else if (psd->sd_id.sd_rev == 1) siba_pcie_write(spc, 0x100, siba_pcie_read(spc, 0x100) | 0x40); } spc->spc_inited = 1; } void siba_pcicore_intr(device_t dev) { struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; struct siba_pci *spc = &siba->siba_pci; struct siba_dev_softc *psd = spc->spc_dev; uint32_t tmp; if (siba->siba_type != SIBA_TYPE_PCI || !psd) return; KASSERT(siba == psd->sd_bus, ("different pointers")); /* enable interrupts */ if (siba->siba_dev != NULL && (psd->sd_id.sd_rev >= 6 || psd->sd_id.sd_device == SIBA_DEVID_PCIE)) { tmp = pci_read_config(siba->siba_dev, SIBA_IRQMASK, 4); tmp |= (1 << sd->sd_coreidx) << 8; pci_write_config(siba->siba_dev, SIBA_IRQMASK, tmp, 4); } else { tmp = siba_read_4_sub(sd, SIBA_TPS); tmp &= SIBA_TPS_BPFLAG; siba_write_4_sub(psd, SIBA_INTR_MASK, siba_read_4_sub(psd, SIBA_INTR_MASK) | (1 << tmp)); } /* setup PCIcore */ if (spc->spc_inited == 0) siba_pcicore_setup(spc, sd); } static uint32_t siba_pcicore_read_4(struct siba_pci *spc, uint16_t offset) { return (siba_read_4_sub(spc->spc_dev, offset)); } static void siba_pcicore_write_4(struct 
siba_pci *spc, uint16_t offset, uint32_t value) { siba_write_4_sub(spc->spc_dev, offset, value); } static uint32_t siba_pcie_read(struct siba_pci *spc, uint32_t address) { siba_pcicore_write_4(spc, 0x130, address); return (siba_pcicore_read_4(spc, 0x134)); } static void siba_pcie_write(struct siba_pci *spc, uint32_t address, uint32_t data) { siba_pcicore_write_4(spc, 0x130, address); siba_pcicore_write_4(spc, 0x134, data); } static void siba_pcie_mdio_write(struct siba_pci *spc, uint8_t device, uint8_t address, uint16_t data) { int i; siba_pcicore_write_4(spc, SIBA_PCICORE_MDIO_CTL, 0x80 | 0x2); siba_pcicore_write_4(spc, SIBA_PCICORE_MDIO_DATA, (1 << 30) | (1 << 28) | ((uint32_t)device << 22) | ((uint32_t)address << 18) | (1 << 17) | data); DELAY(10); for (i = 0; i < 10; i++) { if (siba_pcicore_read_4(spc, SIBA_PCICORE_MDIO_CTL) & 0x100) break; DELAY(1000); } siba_pcicore_write_4(spc, SIBA_PCICORE_MDIO_CTL, 0); } uint32_t siba_dma_translation(device_t dev) { #ifdef INVARIANTS struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; KASSERT(siba->siba_type == SIBA_TYPE_PCI, ("unsupported bustype %d\n", siba->siba_type)); #endif return (SIBA_PCI_DMA); } void siba_barrier(device_t dev, int flags) { struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; SIBA_BARRIER(siba, flags); } static void siba_cc_suspend(struct siba_cc *scc) { siba_cc_clock(scc, SIBA_CLOCK_SLOW); } static void siba_cc_resume(struct siba_cc *scc) { siba_cc_power_init(scc); siba_cc_clock(scc, SIBA_CLOCK_FAST); } int siba_core_suspend(struct siba_softc *siba) { siba_cc_suspend(&siba->siba_cc); siba_pci_gpio(siba, SIBA_GPIO_CRYSTAL | SIBA_GPIO_PLL, 0); return (0); } int siba_core_resume(struct siba_softc *siba) { siba->siba_pci.spc_inited = 0; siba->siba_curdev = NULL; siba_powerup_sub(siba, 0); /* XXX setup H/W for PCMCIA??? */ siba_cc_resume(&siba->siba_cc); siba_powerdown_sub(siba); return (0); } static void siba_cc_regctl_setmask(struct siba_cc *cc, uint32_t offset, uint32_t mask, uint32_t set) { SIBA_CC_READ32(cc, SIBA_CC_REGCTL_ADDR); SIBA_CC_WRITE32(cc, SIBA_CC_REGCTL_ADDR, offset); SIBA_CC_READ32(cc, SIBA_CC_REGCTL_ADDR); SIBA_CC_WRITE32(cc, SIBA_CC_REGCTL_DATA, (SIBA_CC_READ32(cc, SIBA_CC_REGCTL_DATA) & mask) | set); SIBA_CC_READ32(cc, SIBA_CC_REGCTL_DATA); } void siba_cc_pmu_set_ldovolt(device_t dev, int id, uint32_t volt) { struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; struct siba_cc *scc = &siba->siba_cc; uint32_t *p = NULL, info[5][3] = { { 2, 25, 0xf }, { 3, 1, 0xf }, { 3, 9, 0xf }, { 3, 17, 0x3f }, { 0, 21, 0x3f } }; if (siba->siba_chipid == 0x4312) { if (id != SIBA_LDO_PAREF) return; p = info[4]; siba_cc_regctl_setmask(scc, p[0], ~(p[2] << p[1]), (volt & p[2]) << p[1]); return; } if (siba->siba_chipid == 0x4328 || siba->siba_chipid == 0x5354) { switch (id) { case SIBA_LDO_PAREF: p = info[3]; break; case SIBA_LDO_VOLT1: p = info[0]; break; case SIBA_LDO_VOLT2: p = info[1]; break; case SIBA_LDO_VOLT3: p = info[2]; break; default: KASSERT(0 == 1, ("%s: unsupported voltage ID %#x", __func__, id)); return; } siba_cc_regctl_setmask(scc, p[0], ~(p[2] << p[1]), (volt & p[2]) << p[1]); } } void siba_cc_pmu_set_ldoparef(device_t dev, uint8_t on) { struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; struct siba_cc *scc = &siba->siba_cc; int ldo; ldo = ((siba->siba_chipid == 0x4312) ? SIBA_CC_PMU_4312_PA_REF : ((siba->siba_chipid == 0x4328) ? 
SIBA_CC_PMU_4328_PA_REF : ((siba->siba_chipid == 0x5354) ? SIBA_CC_PMU_5354_PA_REF : -1))); if (ldo == -1) return; if (on) SIBA_CC_SET32(scc, SIBA_CC_PMU_MINRES, 1 << ldo); else SIBA_CC_MASK32(scc, SIBA_CC_PMU_MINRES, ~(1 << ldo)); SIBA_CC_READ32(scc, SIBA_CC_PMU_MINRES); } int siba_read_sprom(device_t dev, device_t child, int which, uintptr_t *result) { struct siba_dev_softc *sd = device_get_ivars(child); struct siba_softc *siba = sd->sd_bus; switch (which) { case SIBA_SPROMVAR_REV: *result = siba->siba_sprom.rev; break; case SIBA_SPROMVAR_MAC_80211BG: *((uint8_t **) result) = siba->siba_sprom.mac_80211bg; break; case SIBA_SPROMVAR_MAC_ETH: *((uint8_t **) result) = siba->siba_sprom.mac_eth; break; case SIBA_SPROMVAR_MAC_80211A: *((uint8_t **) result) = siba->siba_sprom.mac_80211a; break; case SIBA_SPROMVAR_MII_ETH0: *result = siba->siba_sprom.mii_eth0; break; case SIBA_SPROMVAR_MII_ETH1: *result = siba->siba_sprom.mii_eth1; break; case SIBA_SPROMVAR_MDIO_ETH0: *result = siba->siba_sprom.mdio_eth0; break; case SIBA_SPROMVAR_MDIO_ETH1: *result = siba->siba_sprom.mdio_eth1; break; case SIBA_SPROMVAR_BREV: *result = siba->siba_sprom.brev; break; case SIBA_SPROMVAR_CCODE: *result = siba->siba_sprom.ccode; break; case SIBA_SPROMVAR_ANT_A: *result = siba->siba_sprom.ant_a; break; case SIBA_SPROMVAR_ANT_BG: *result = siba->siba_sprom.ant_bg; break; case SIBA_SPROMVAR_PA0B0: *result = siba->siba_sprom.pa0b0; break; case SIBA_SPROMVAR_PA0B1: *result = siba->siba_sprom.pa0b1; break; case SIBA_SPROMVAR_PA0B2: *result = siba->siba_sprom.pa0b2; break; case SIBA_SPROMVAR_PA1B0: *result = siba->siba_sprom.pa1b0; break; case SIBA_SPROMVAR_PA1B1: *result = siba->siba_sprom.pa1b1; break; case SIBA_SPROMVAR_PA1B2: *result = siba->siba_sprom.pa1b2; break; case SIBA_SPROMVAR_PA1LOB0: *result = siba->siba_sprom.pa1lob0; break; case SIBA_SPROMVAR_PA1LOB1: *result = siba->siba_sprom.pa1lob1; break; case SIBA_SPROMVAR_PA1LOB2: *result = siba->siba_sprom.pa1lob2; break; case SIBA_SPROMVAR_PA1HIB0: *result = siba->siba_sprom.pa1hib0; break; case SIBA_SPROMVAR_PA1HIB1: *result = siba->siba_sprom.pa1hib1; break; case SIBA_SPROMVAR_PA1HIB2: *result = siba->siba_sprom.pa1hib2; break; case SIBA_SPROMVAR_GPIO0: *result = siba->siba_sprom.gpio0; break; case SIBA_SPROMVAR_GPIO1: *result = siba->siba_sprom.gpio1; break; case SIBA_SPROMVAR_GPIO2: *result = siba->siba_sprom.gpio2; break; case SIBA_SPROMVAR_GPIO3: *result = siba->siba_sprom.gpio3; break; case SIBA_SPROMVAR_MAXPWR_AL: *result = siba->siba_sprom.maxpwr_al; break; case SIBA_SPROMVAR_MAXPWR_A: *result = siba->siba_sprom.maxpwr_a; break; case SIBA_SPROMVAR_MAXPWR_AH: *result = siba->siba_sprom.maxpwr_ah; break; case SIBA_SPROMVAR_MAXPWR_BG: *result = siba->siba_sprom.maxpwr_bg; break; case SIBA_SPROMVAR_RXPO2G: *result = siba->siba_sprom.rxpo2g; break; case SIBA_SPROMVAR_RXPO5G: *result = siba->siba_sprom.rxpo5g; break; case SIBA_SPROMVAR_TSSI_A: *result = siba->siba_sprom.tssi_a; break; case SIBA_SPROMVAR_TSSI_BG: *result = siba->siba_sprom.tssi_bg; break; case SIBA_SPROMVAR_TRI2G: *result = siba->siba_sprom.tri2g; break; case SIBA_SPROMVAR_TRI5GL: *result = siba->siba_sprom.tri5gl; break; case SIBA_SPROMVAR_TRI5G: *result = siba->siba_sprom.tri5g; break; case SIBA_SPROMVAR_TRI5GH: *result = siba->siba_sprom.tri5gh; break; case SIBA_SPROMVAR_RSSISAV2G: *result = siba->siba_sprom.rssisav2g; break; case SIBA_SPROMVAR_RSSISMC2G: *result = siba->siba_sprom.rssismc2g; break; case SIBA_SPROMVAR_RSSISMF2G: *result = siba->siba_sprom.rssismf2g; break; case 
SIBA_SPROMVAR_BXA2G: *result = siba->siba_sprom.bxa2g; break; case SIBA_SPROMVAR_RSSISAV5G: *result = siba->siba_sprom.rssisav5g; break; case SIBA_SPROMVAR_RSSISMC5G: *result = siba->siba_sprom.rssismc5g; break; case SIBA_SPROMVAR_RSSISMF5G: *result = siba->siba_sprom.rssismf5g; break; case SIBA_SPROMVAR_BXA5G: *result = siba->siba_sprom.bxa5g; break; case SIBA_SPROMVAR_CCK2GPO: *result = siba->siba_sprom.cck2gpo; break; case SIBA_SPROMVAR_OFDM2GPO: *result = siba->siba_sprom.ofdm2gpo; break; case SIBA_SPROMVAR_OFDM5GLPO: *result = siba->siba_sprom.ofdm5glpo; break; case SIBA_SPROMVAR_OFDM5GPO: *result = siba->siba_sprom.ofdm5gpo; break; case SIBA_SPROMVAR_OFDM5GHPO: *result = siba->siba_sprom.ofdm5ghpo; break; case SIBA_SPROMVAR_BF_LO: *result = siba->siba_sprom.bf_lo; break; case SIBA_SPROMVAR_BF_HI: *result = siba->siba_sprom.bf_hi; break; case SIBA_SPROMVAR_BF2_LO: *result = siba->siba_sprom.bf2_lo; break; case SIBA_SPROMVAR_BF2_HI: *result = siba->siba_sprom.bf2_hi; break; default: return (ENOENT); } return (0); } int siba_write_sprom(device_t dev, device_t child, int which, uintptr_t value) { struct siba_dev_softc *sd = device_get_ivars(child); struct siba_softc *siba = sd->sd_bus; switch (which) { case SIBA_SPROMVAR_REV: siba->siba_sprom.rev = value; break; case SIBA_SPROMVAR_MII_ETH0: siba->siba_sprom.mii_eth0 = value; break; case SIBA_SPROMVAR_MII_ETH1: siba->siba_sprom.mii_eth1 = value; break; case SIBA_SPROMVAR_MDIO_ETH0: siba->siba_sprom.mdio_eth0 = value; break; case SIBA_SPROMVAR_MDIO_ETH1: siba->siba_sprom.mdio_eth1 = value; break; case SIBA_SPROMVAR_BREV: siba->siba_sprom.brev = value; break; case SIBA_SPROMVAR_CCODE: siba->siba_sprom.ccode = value; break; case SIBA_SPROMVAR_ANT_A: siba->siba_sprom.ant_a = value; break; case SIBA_SPROMVAR_ANT_BG: siba->siba_sprom.ant_bg = value; break; case SIBA_SPROMVAR_PA0B0: siba->siba_sprom.pa0b0 = value; break; case SIBA_SPROMVAR_PA0B1: siba->siba_sprom.pa0b1 = value; break; case SIBA_SPROMVAR_PA0B2: siba->siba_sprom.pa0b2 = value; break; case SIBA_SPROMVAR_PA1B0: siba->siba_sprom.pa1b0 = value; break; case SIBA_SPROMVAR_PA1B1: siba->siba_sprom.pa1b1 = value; break; case SIBA_SPROMVAR_PA1B2: siba->siba_sprom.pa1b2 = value; break; case SIBA_SPROMVAR_PA1LOB0: siba->siba_sprom.pa1lob0 = value; break; case SIBA_SPROMVAR_PA1LOB1: siba->siba_sprom.pa1lob1 = value; break; case SIBA_SPROMVAR_PA1LOB2: siba->siba_sprom.pa1lob2 = value; break; case SIBA_SPROMVAR_PA1HIB0: siba->siba_sprom.pa1hib0 = value; break; case SIBA_SPROMVAR_PA1HIB1: siba->siba_sprom.pa1hib1 = value; break; case SIBA_SPROMVAR_PA1HIB2: siba->siba_sprom.pa1hib2 = value; break; case SIBA_SPROMVAR_GPIO0: siba->siba_sprom.gpio0 = value; break; case SIBA_SPROMVAR_GPIO1: siba->siba_sprom.gpio1 = value; break; case SIBA_SPROMVAR_GPIO2: siba->siba_sprom.gpio2 = value; break; case SIBA_SPROMVAR_GPIO3: siba->siba_sprom.gpio3 = value; break; case SIBA_SPROMVAR_MAXPWR_AL: siba->siba_sprom.maxpwr_al = value; break; case SIBA_SPROMVAR_MAXPWR_A: siba->siba_sprom.maxpwr_a = value; break; case SIBA_SPROMVAR_MAXPWR_AH: siba->siba_sprom.maxpwr_ah = value; break; case SIBA_SPROMVAR_MAXPWR_BG: siba->siba_sprom.maxpwr_bg = value; break; case SIBA_SPROMVAR_RXPO2G: siba->siba_sprom.rxpo2g = value; break; case SIBA_SPROMVAR_RXPO5G: siba->siba_sprom.rxpo5g = value; break; case SIBA_SPROMVAR_TSSI_A: siba->siba_sprom.tssi_a = value; break; case SIBA_SPROMVAR_TSSI_BG: siba->siba_sprom.tssi_bg = value; break; case SIBA_SPROMVAR_TRI2G: siba->siba_sprom.tri2g = value; break; case SIBA_SPROMVAR_TRI5GL: 
siba->siba_sprom.tri5gl = value; break; case SIBA_SPROMVAR_TRI5G: siba->siba_sprom.tri5g = value; break; case SIBA_SPROMVAR_TRI5GH: siba->siba_sprom.tri5gh = value; break; case SIBA_SPROMVAR_RSSISAV2G: siba->siba_sprom.rssisav2g = value; break; case SIBA_SPROMVAR_RSSISMC2G: siba->siba_sprom.rssismc2g = value; break; case SIBA_SPROMVAR_RSSISMF2G: siba->siba_sprom.rssismf2g = value; break; case SIBA_SPROMVAR_BXA2G: siba->siba_sprom.bxa2g = value; break; case SIBA_SPROMVAR_RSSISAV5G: siba->siba_sprom.rssisav5g = value; break; case SIBA_SPROMVAR_RSSISMC5G: siba->siba_sprom.rssismc5g = value; break; case SIBA_SPROMVAR_RSSISMF5G: siba->siba_sprom.rssismf5g = value; break; case SIBA_SPROMVAR_BXA5G: siba->siba_sprom.bxa5g = value; break; case SIBA_SPROMVAR_CCK2GPO: siba->siba_sprom.cck2gpo = value; break; case SIBA_SPROMVAR_OFDM2GPO: siba->siba_sprom.ofdm2gpo = value; break; case SIBA_SPROMVAR_OFDM5GLPO: siba->siba_sprom.ofdm5glpo = value; break; case SIBA_SPROMVAR_OFDM5GPO: siba->siba_sprom.ofdm5gpo = value; break; case SIBA_SPROMVAR_OFDM5GHPO: siba->siba_sprom.ofdm5ghpo = value; break; case SIBA_SPROMVAR_BF_LO: siba->siba_sprom.bf_lo = value; break; case SIBA_SPROMVAR_BF_HI: siba->siba_sprom.bf_hi = value; break; case SIBA_SPROMVAR_BF2_LO: siba->siba_sprom.bf2_lo = value; break; case SIBA_SPROMVAR_BF2_HI: siba->siba_sprom.bf2_hi = value; break; default: return (ENOENT); } return (0); } #define SIBA_GPIOCTL 0x06c uint32_t siba_gpio_get(device_t dev) { struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; struct siba_dev_softc *gpiodev, *pcidev = NULL; pcidev = siba->siba_pci.spc_dev; gpiodev = siba->siba_cc.scc_dev ? siba->siba_cc.scc_dev : pcidev; if (!gpiodev) return (-1); return (siba_read_4_sub(gpiodev, SIBA_GPIOCTL)); } void siba_gpio_set(device_t dev, uint32_t value) { struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; struct siba_dev_softc *gpiodev, *pcidev = NULL; pcidev = siba->siba_pci.spc_dev; gpiodev = siba->siba_cc.scc_dev ? siba->siba_cc.scc_dev : pcidev; if (!gpiodev) return; siba_write_4_sub(gpiodev, SIBA_GPIOCTL, value); } void siba_fix_imcfglobug(device_t dev) { struct siba_dev_softc *sd = device_get_ivars(dev); struct siba_softc *siba = sd->sd_bus; uint32_t tmp; if (siba->siba_pci.spc_dev == NULL) return; if (siba->siba_pci.spc_dev->sd_id.sd_device != SIBA_DEVID_PCI || siba->siba_pci.spc_dev->sd_id.sd_rev > 5) return; tmp = siba_read_4_sub(sd, SIBA_IMCFGLO) & ~(SIBA_IMCFGLO_REQTO | SIBA_IMCFGLO_SERTO); switch (siba->siba_type) { case SIBA_TYPE_PCI: case SIBA_TYPE_PCMCIA: tmp |= 0x32; break; case SIBA_TYPE_SSB: tmp |= 0x53; break; } siba_write_4_sub(sd, SIBA_IMCFGLO, tmp); } diff --git a/sys/dev/sound/usb/uaudio.c b/sys/dev/sound/usb/uaudio.c index c3baf7fca5e1..6329f8129d96 100644 --- a/sys/dev/sound/usb/uaudio.c +++ b/sys/dev/sound/usb/uaudio.c @@ -1,6121 +1,6121 @@ /* $NetBSD: uaudio.c,v 1.91 2004/11/05 17:46:14 kent Exp $ */ /* $FreeBSD$ */ /*- * Copyright (c) 1999 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (lennart@augustsson.net) at * Carlstedt Research & Technology. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * USB audio specs: http://www.usb.org/developers/devclass_docs/audio10.pdf * http://www.usb.org/developers/devclass_docs/frmts10.pdf * http://www.usb.org/developers/devclass_docs/termt10.pdf */ /* * Also merged: * $NetBSD: uaudio.c,v 1.94 2005/01/15 15:19:53 kent Exp $ * $NetBSD: uaudio.c,v 1.95 2005/01/16 06:02:19 dsainty Exp $ * $NetBSD: uaudio.c,v 1.96 2005/01/16 12:46:00 kent Exp $ * $NetBSD: uaudio.c,v 1.97 2005/02/24 08:19:38 martin Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #include #include #include #include #include #include #define USB_DEBUG_VAR uaudio_debug #include #include #include /* for bootverbose */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include "feeder_if.h" static int uaudio_default_rate = 0; /* use rate list */ static int uaudio_default_bits = 32; static int uaudio_default_channels = 0; /* use default */ #ifdef USB_DEBUG static int uaudio_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, uaudio, CTLFLAG_RW, 0, "USB uaudio"); SYSCTL_INT(_hw_usb_uaudio, OID_AUTO, debug, CTLFLAG_RWTUN, &uaudio_debug, 0, "uaudio debug level"); SYSCTL_INT(_hw_usb_uaudio, OID_AUTO, default_rate, CTLFLAG_RWTUN, &uaudio_default_rate, 0, "uaudio default sample rate"); SYSCTL_INT(_hw_usb_uaudio, OID_AUTO, default_bits, CTLFLAG_RWTUN, &uaudio_default_bits, 0, "uaudio default sample bits"); SYSCTL_INT(_hw_usb_uaudio, OID_AUTO, default_channels, CTLFLAG_RWTUN, &uaudio_default_channels, 0, "uaudio default sample channels"); #endif #define UAUDIO_IRQS (8000 / UAUDIO_NFRAMES) /* interrupts per second */ #define UAUDIO_NFRAMES 64 /* must be factor of 8 due HS-USB */ #define UAUDIO_NCHANBUFS 2 /* number of outstanding request */ #define UAUDIO_RECURSE_LIMIT 255 /* rounds */ #define UAUDIO_CHANNELS_MAX MIN(64, AFMT_CHANNEL_MAX) #define UAUDIO_MATRIX_MAX 8 /* channels */ #define MAKE_WORD(h,l) (((h) << 8) | (l)) #define BIT_TEST(bm,bno) (((bm)[(bno) / 8] >> (7 - ((bno) % 8))) & 1) #define UAUDIO_MAX_CHAN(x) (x) #define MIX(sc) ((sc)->sc_mixer_node) union uaudio_asid { const struct usb_audio_streaming_interface_descriptor *v1; const struct usb_audio20_streaming_interface_descriptor *v2; }; union uaudio_asf1d { const struct usb_audio_streaming_type1_descriptor *v1; const struct usb_audio20_streaming_type1_descriptor *v2; }; union uaudio_sed { const struct usb_audio_streaming_endpoint_descriptor 
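/* * The uaudio_asid, uaudio_asf1d and uaudio_sed unions alias the USB * Audio Class v1.0 and v2.0 descriptor layouts so that the stream * parsing code can share one code path; sc_audio_rev determines * which member is valid. */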
*v1; const struct usb_audio20_streaming_endpoint_descriptor *v2; }; struct uaudio_mixer_node { const char *name; int32_t minval; int32_t maxval; #define MIX_MAX_CHAN 16 int32_t wValue[MIX_MAX_CHAN]; /* using nchan */ uint32_t mul; uint32_t ctl; int wData[MIX_MAX_CHAN]; /* using nchan */ uint16_t wIndex; uint8_t update[(MIX_MAX_CHAN + 7) / 8]; uint8_t nchan; uint8_t type; #define MIX_ON_OFF 1 #define MIX_SIGNED_16 2 #define MIX_UNSIGNED_16 3 #define MIX_SIGNED_8 4 #define MIX_SELECTOR 5 #define MIX_UNKNOWN 6 #define MIX_SIZE(n) ((((n) == MIX_SIGNED_16) || \ ((n) == MIX_UNSIGNED_16)) ? 2 : 1) #define MIX_UNSIGNED(n) ((n) == MIX_UNSIGNED_16) #define MAX_SELECTOR_INPUT_PIN 256 uint8_t slctrtype[MAX_SELECTOR_INPUT_PIN]; uint8_t class; uint8_t val_default; uint8_t desc[64]; struct uaudio_mixer_node *next; }; struct uaudio_configure_msg { struct usb_proc_msg hdr; struct uaudio_softc *sc; }; #define CHAN_MAX_ALT 24 struct uaudio_chan_alt { union uaudio_asf1d p_asf1d; union uaudio_sed p_sed; const usb_endpoint_descriptor_audio_t *p_ed1; const struct uaudio_format *p_fmt; const struct usb_config *usb_cfg; uint32_t sample_rate; /* in Hz */ uint16_t sample_size; uint8_t iface_index; uint8_t iface_alt_index; uint8_t channels; }; struct uaudio_chan { struct pcmchan_caps pcm_cap; /* capabilities */ struct uaudio_chan_alt usb_alt[CHAN_MAX_ALT]; struct snd_dbuf *pcm_buf; struct mtx *pcm_mtx; /* lock protecting this structure */ struct uaudio_softc *priv_sc; struct pcm_channel *pcm_ch; struct usb_xfer *xfer[UAUDIO_NCHANBUFS + 1]; uint8_t *buf; /* pointer to buffer */ uint8_t *start; /* upper layer buffer start */ uint8_t *end; /* upper layer buffer end */ uint8_t *cur; /* current position in upper layer * buffer */ uint32_t intr_frames; /* in units */ uint32_t frames_per_second; uint32_t sample_rem; uint32_t sample_curr; uint32_t max_buf; int32_t jitter_rem; int32_t jitter_curr; int feedback_rate; uint32_t pcm_format[2]; uint16_t bytes_per_frame[2]; uint32_t intr_counter; uint32_t running; uint32_t num_alt; uint32_t cur_alt; uint32_t set_alt; uint32_t operation; #define CHAN_OP_NONE 0 #define CHAN_OP_START 1 #define CHAN_OP_STOP 2 #define CHAN_OP_DRAIN 3 }; #define UMIDI_EMB_JACK_MAX 16 /* units */ #define UMIDI_TX_FRAMES 256 /* units */ #define UMIDI_TX_BUFFER (UMIDI_TX_FRAMES * 4) /* bytes */ enum { UMIDI_TX_TRANSFER, UMIDI_RX_TRANSFER, UMIDI_N_TRANSFER, }; struct umidi_sub_chan { struct usb_fifo_sc fifo; uint8_t *temp_cmd; uint8_t temp_0[4]; uint8_t temp_1[4]; uint8_t state; #define UMIDI_ST_UNKNOWN 0 /* scan for command */ #define UMIDI_ST_1PARAM 1 #define UMIDI_ST_2PARAM_1 2 #define UMIDI_ST_2PARAM_2 3 #define UMIDI_ST_SYSEX_0 4 #define UMIDI_ST_SYSEX_1 5 #define UMIDI_ST_SYSEX_2 6 uint8_t read_open:1; uint8_t write_open:1; uint8_t unused:6; }; struct umidi_chan { struct umidi_sub_chan sub[UMIDI_EMB_JACK_MAX]; struct mtx mtx; struct usb_xfer *xfer[UMIDI_N_TRANSFER]; uint8_t iface_index; uint8_t iface_alt_index; uint8_t read_open_refcount; uint8_t write_open_refcount; uint8_t curr_cable; uint8_t max_emb_jack; uint8_t valid; uint8_t single_command; }; struct uaudio_search_result { uint8_t bit_input[(256 + 7) / 8]; uint8_t bit_output[(256 + 7) / 8]; uint8_t recurse_level; uint8_t id_max; uint8_t is_input; }; enum { UAUDIO_HID_RX_TRANSFER, UAUDIO_HID_N_TRANSFER, }; struct uaudio_hid { struct usb_xfer *xfer[UAUDIO_HID_N_TRANSFER]; struct hid_location volume_up_loc; struct hid_location volume_down_loc; struct hid_location mute_loc; uint32_t flags; #define UAUDIO_HID_VALID 0x0001 #define UAUDIO_HID_HAS_ID 
0x0002 #define UAUDIO_HID_HAS_VOLUME_UP 0x0004 #define UAUDIO_HID_HAS_VOLUME_DOWN 0x0008 #define UAUDIO_HID_HAS_MUTE 0x0010 uint8_t iface_index; uint8_t volume_up_id; uint8_t volume_down_id; uint8_t mute_id; }; struct uaudio_softc { struct sbuf sc_sndstat; struct sndcard_func sc_sndcard_func; struct uaudio_chan sc_rec_chan; struct uaudio_chan sc_play_chan; struct umidi_chan sc_midi_chan; struct uaudio_hid sc_hid; struct uaudio_search_result sc_mixer_clocks; struct uaudio_mixer_node sc_mixer_node; struct uaudio_configure_msg sc_config_msg[2]; struct mtx *sc_mixer_lock; struct snd_mixer *sc_mixer_dev; struct usb_device *sc_udev; struct usb_xfer *sc_mixer_xfer[1]; struct uaudio_mixer_node *sc_mixer_root; struct uaudio_mixer_node *sc_mixer_curr; uint32_t sc_mix_info; uint32_t sc_recsrc_info; uint16_t sc_audio_rev; uint16_t sc_mixer_count; uint8_t sc_sndstat_valid; uint8_t sc_mixer_iface_index; uint8_t sc_mixer_iface_no; uint8_t sc_mixer_chan; uint8_t sc_pcm_registered:1; uint8_t sc_mixer_init:1; uint8_t sc_uq_audio_swap_lr:1; uint8_t sc_uq_au_inp_async:1; uint8_t sc_uq_au_no_xu:1; uint8_t sc_uq_bad_adc:1; uint8_t sc_uq_au_vendor_class:1; uint8_t sc_pcm_bitperfect:1; }; struct uaudio_terminal_node { union { const struct usb_descriptor *desc; const struct usb_audio_input_terminal *it_v1; const struct usb_audio_output_terminal *ot_v1; const struct usb_audio_mixer_unit_0 *mu_v1; const struct usb_audio_selector_unit *su_v1; const struct usb_audio_feature_unit *fu_v1; const struct usb_audio_processing_unit_0 *pu_v1; const struct usb_audio_extension_unit_0 *eu_v1; const struct usb_audio20_clock_source_unit *csrc_v2; const struct usb_audio20_clock_selector_unit_0 *csel_v2; const struct usb_audio20_clock_multiplier_unit *cmul_v2; const struct usb_audio20_input_terminal *it_v2; const struct usb_audio20_output_terminal *ot_v2; const struct usb_audio20_mixer_unit_0 *mu_v2; const struct usb_audio20_selector_unit *su_v2; const struct usb_audio20_feature_unit *fu_v2; const struct usb_audio20_sample_rate_unit *ru_v2; const struct usb_audio20_processing_unit_0 *pu_v2; const struct usb_audio20_extension_unit_0 *eu_v2; const struct usb_audio20_effect_unit *ef_v2; } u; struct uaudio_search_result usr; struct uaudio_terminal_node *root; }; struct uaudio_format { uint16_t wFormat; uint8_t bPrecision; uint32_t freebsd_fmt; const char *description; }; static const struct uaudio_format uaudio10_formats[] = { {UA_FMT_PCM8, 8, AFMT_U8, "8-bit U-LE PCM"}, {UA_FMT_PCM8, 16, AFMT_U16_LE, "16-bit U-LE PCM"}, {UA_FMT_PCM8, 24, AFMT_U24_LE, "24-bit U-LE PCM"}, {UA_FMT_PCM8, 32, AFMT_U32_LE, "32-bit U-LE PCM"}, {UA_FMT_PCM, 8, AFMT_S8, "8-bit S-LE PCM"}, {UA_FMT_PCM, 16, AFMT_S16_LE, "16-bit S-LE PCM"}, {UA_FMT_PCM, 24, AFMT_S24_LE, "24-bit S-LE PCM"}, {UA_FMT_PCM, 32, AFMT_S32_LE, "32-bit S-LE PCM"}, {UA_FMT_ALAW, 8, AFMT_A_LAW, "8-bit A-Law"}, {UA_FMT_MULAW, 8, AFMT_MU_LAW, "8-bit mu-Law"}, {0, 0, 0, NULL} }; static const struct uaudio_format uaudio20_formats[] = { {UA20_FMT_PCM, 8, AFMT_S8, "8-bit S-LE PCM"}, {UA20_FMT_PCM, 16, AFMT_S16_LE, "16-bit S-LE PCM"}, {UA20_FMT_PCM, 24, AFMT_S24_LE, "24-bit S-LE PCM"}, {UA20_FMT_PCM, 32, AFMT_S32_LE, "32-bit S-LE PCM"}, {UA20_FMT_PCM8, 8, AFMT_U8, "8-bit U-LE PCM"}, {UA20_FMT_PCM8, 16, AFMT_U16_LE, "16-bit U-LE PCM"}, {UA20_FMT_PCM8, 24, AFMT_U24_LE, "24-bit U-LE PCM"}, {UA20_FMT_PCM8, 32, AFMT_U32_LE, "32-bit U-LE PCM"}, {UA20_FMT_ALAW, 8, AFMT_A_LAW, "8-bit A-Law"}, {UA20_FMT_MULAW, 8, AFMT_MU_LAW, "8-bit mu-Law"}, {0, 0, 0, NULL} }; #define UAC_OUTPUT 0 #define UAC_INPUT 1 
#define UAC_EQUAL 2 #define UAC_RECORD 3 #define UAC_NCLASSES 4 #ifdef USB_DEBUG static const char *uac_names[] = { "outputs", "inputs", "equalization", "record" }; #endif /* prototypes */ static device_probe_t uaudio_probe; static device_attach_t uaudio_attach; static device_detach_t uaudio_detach; static usb_callback_t uaudio_chan_play_callback; static usb_callback_t uaudio_chan_play_sync_callback; static usb_callback_t uaudio_chan_record_callback; static usb_callback_t uaudio_chan_record_sync_callback; static usb_callback_t uaudio_mixer_write_cfg_callback; static usb_callback_t umidi_bulk_read_callback; static usb_callback_t umidi_bulk_write_callback; static usb_callback_t uaudio_hid_rx_callback; static usb_proc_callback_t uaudio_configure_msg; /* ==== USB mixer ==== */ static int uaudio_mixer_sysctl_handler(SYSCTL_HANDLER_ARGS); static void uaudio_mixer_ctl_free(struct uaudio_softc *); static void uaudio_mixer_register_sysctl(struct uaudio_softc *, device_t); static void uaudio_mixer_reload_all(struct uaudio_softc *); static void uaudio_mixer_controls_create_ftu(struct uaudio_softc *); /* ==== USB audio v1.0 ==== */ static void uaudio_mixer_add_mixer(struct uaudio_softc *, const struct uaudio_terminal_node *, int); static void uaudio_mixer_add_selector(struct uaudio_softc *, const struct uaudio_terminal_node *, int); static uint32_t uaudio_mixer_feature_get_bmaControls( const struct usb_audio_feature_unit *, uint8_t); static void uaudio_mixer_add_feature(struct uaudio_softc *, const struct uaudio_terminal_node *, int); static void uaudio_mixer_add_processing_updown(struct uaudio_softc *, const struct uaudio_terminal_node *, int); static void uaudio_mixer_add_processing(struct uaudio_softc *, const struct uaudio_terminal_node *, int); static void uaudio_mixer_add_extension(struct uaudio_softc *, const struct uaudio_terminal_node *, int); static struct usb_audio_cluster uaudio_mixer_get_cluster(uint8_t, const struct uaudio_terminal_node *); static uint16_t uaudio_mixer_determine_class(const struct uaudio_terminal_node *, struct uaudio_mixer_node *); static uint16_t uaudio_mixer_feature_name(const struct uaudio_terminal_node *, struct uaudio_mixer_node *); static void uaudio_mixer_find_inputs_sub(struct uaudio_terminal_node *, const uint8_t *, uint8_t, struct uaudio_search_result *); static const void *uaudio_mixer_verify_desc(const void *, uint32_t); static usb_error_t uaudio_set_speed(struct usb_device *, uint8_t, uint32_t); static int uaudio_mixer_get(struct usb_device *, uint16_t, uint8_t, struct uaudio_mixer_node *); /* ==== USB audio v2.0 ==== */ static void uaudio20_mixer_add_mixer(struct uaudio_softc *, const struct uaudio_terminal_node *, int); static void uaudio20_mixer_add_selector(struct uaudio_softc *, const struct uaudio_terminal_node *, int); static void uaudio20_mixer_add_feature(struct uaudio_softc *, const struct uaudio_terminal_node *, int); static struct usb_audio20_cluster uaudio20_mixer_get_cluster(uint8_t, const struct uaudio_terminal_node *); static uint16_t uaudio20_mixer_determine_class(const struct uaudio_terminal_node *, struct uaudio_mixer_node *); static uint16_t uaudio20_mixer_feature_name(const struct uaudio_terminal_node *, struct uaudio_mixer_node *); static void uaudio20_mixer_find_inputs_sub(struct uaudio_terminal_node *, const uint8_t *, uint8_t, struct uaudio_search_result *); static const void *uaudio20_mixer_verify_desc(const void *, uint32_t); static usb_error_t uaudio20_set_speed(struct usb_device *, uint8_t, uint8_t, uint32_t); /* USB audio v1.0 
and v2.0 */ static void uaudio_chan_fill_info_sub(struct uaudio_softc *, struct usb_device *, uint32_t, uint8_t, uint8_t); static void uaudio_chan_fill_info(struct uaudio_softc *, struct usb_device *); static void uaudio_mixer_add_ctl_sub(struct uaudio_softc *, struct uaudio_mixer_node *); static void uaudio_mixer_add_ctl(struct uaudio_softc *, struct uaudio_mixer_node *); static void uaudio_mixer_fill_info(struct uaudio_softc *, struct usb_device *, void *); static void uaudio_mixer_ctl_set(struct uaudio_softc *, struct uaudio_mixer_node *, uint8_t, int32_t val); static int uaudio_mixer_signext(uint8_t, int); static int uaudio_mixer_bsd2value(struct uaudio_mixer_node *, int32_t val); static void uaudio_mixer_init(struct uaudio_softc *); static const struct uaudio_terminal_node *uaudio_mixer_get_input( const struct uaudio_terminal_node *, uint8_t); static const struct uaudio_terminal_node *uaudio_mixer_get_output( const struct uaudio_terminal_node *, uint8_t); static void uaudio_mixer_find_outputs_sub(struct uaudio_terminal_node *, uint8_t, uint8_t, struct uaudio_search_result *); static uint8_t umidi_convert_to_usb(struct umidi_sub_chan *, uint8_t, uint8_t); static struct umidi_sub_chan *umidi_sub_by_fifo(struct usb_fifo *); static void umidi_start_read(struct usb_fifo *); static void umidi_stop_read(struct usb_fifo *); static void umidi_start_write(struct usb_fifo *); static void umidi_stop_write(struct usb_fifo *); static int umidi_open(struct usb_fifo *, int); static int umidi_ioctl(struct usb_fifo *, u_long cmd, void *, int); static void umidi_close(struct usb_fifo *, int); static void umidi_init(device_t dev); static int umidi_probe(device_t dev); static int umidi_detach(device_t dev); static int uaudio_hid_probe(struct uaudio_softc *sc, struct usb_attach_arg *uaa); static void uaudio_hid_detach(struct uaudio_softc *sc); #ifdef USB_DEBUG static void uaudio_chan_dump_ep_desc( const usb_endpoint_descriptor_audio_t *); #endif static const struct usb_config uaudio_cfg_record[UAUDIO_NCHANBUFS + 1] = { [0] = { .type = UE_ISOCHRONOUS, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = UAUDIO_NFRAMES, .flags = {.short_xfer_ok = 1,}, .callback = &uaudio_chan_record_callback, }, [1] = { .type = UE_ISOCHRONOUS, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = UAUDIO_NFRAMES, .flags = {.short_xfer_ok = 1,}, .callback = &uaudio_chan_record_callback, }, [2] = { .type = UE_ISOCHRONOUS, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = 1, .flags = {.no_pipe_ok = 1,.short_xfer_ok = 1,}, .callback = &uaudio_chan_record_sync_callback, }, }; static const struct usb_config uaudio_cfg_play[UAUDIO_NCHANBUFS + 1] = { [0] = { .type = UE_ISOCHRONOUS, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = UAUDIO_NFRAMES, .flags = {.short_xfer_ok = 1,}, .callback = &uaudio_chan_play_callback, }, [1] = { .type = UE_ISOCHRONOUS, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = UAUDIO_NFRAMES, .flags = {.short_xfer_ok = 1,}, .callback = &uaudio_chan_play_callback, }, [2] = { .type = UE_ISOCHRONOUS, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 0, /* use "wMaxPacketSize * frames" */ .frames = 1, .flags = {.no_pipe_ok = 1,.short_xfer_ok = 1,}, .callback = &uaudio_chan_play_sync_callback, }, }; static const struct 
usb_config uaudio_mixer_config[1] = { [0] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = (sizeof(struct usb_device_request) + 4), .callback = &uaudio_mixer_write_cfg_callback, .timeout = 1000, /* 1 second */ }, }; static const uint8_t umidi_cmd_to_len[16] = { [0x0] = 0, /* reserved */ [0x1] = 0, /* reserved */ [0x2] = 2, /* bytes */ [0x3] = 3, /* bytes */ [0x4] = 3, /* bytes */ [0x5] = 1, /* bytes */ [0x6] = 2, /* bytes */ [0x7] = 3, /* bytes */ [0x8] = 3, /* bytes */ [0x9] = 3, /* bytes */ [0xA] = 3, /* bytes */ [0xB] = 3, /* bytes */ [0xC] = 2, /* bytes */ [0xD] = 2, /* bytes */ [0xE] = 3, /* bytes */ [0xF] = 1, /* bytes */ }; static const struct usb_config umidi_config[UMIDI_N_TRANSFER] = { [UMIDI_TX_TRANSFER] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = UMIDI_TX_BUFFER, .callback = &umidi_bulk_write_callback, }, [UMIDI_RX_TRANSFER] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 4, /* bytes */ .flags = {.short_xfer_ok = 1,.proxy_buffer = 1,}, .callback = &umidi_bulk_read_callback, }, }; static const struct usb_config uaudio_hid_config[UAUDIO_HID_N_TRANSFER] = { [UAUDIO_HID_RX_TRANSFER] = { .type = UE_INTERRUPT, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = 0, /* use wMaxPacketSize */ .flags = {.short_xfer_ok = 1,}, .callback = &uaudio_hid_rx_callback, }, }; static devclass_t uaudio_devclass; static device_method_t uaudio_methods[] = { DEVMETHOD(device_probe, uaudio_probe), DEVMETHOD(device_attach, uaudio_attach), DEVMETHOD(device_detach, uaudio_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static driver_t uaudio_driver = { .name = "uaudio", .methods = uaudio_methods, .size = sizeof(struct uaudio_softc), }; /* The following table is derived from Linux's quirks-table.h */ static const STRUCT_USB_HOST_ID uaudio_vendor_midi[] = { { USB_VPI(USB_VENDOR_YAMAHA, 0x1000, 0) }, /* UX256 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1001, 0) }, /* MU1000 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1002, 0) }, /* MU2000 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1003, 0) }, /* MU500 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1004, 3) }, /* UW500 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1005, 0) }, /* MOTIF6 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1006, 0) }, /* MOTIF7 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1007, 0) }, /* MOTIF8 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1008, 0) }, /* UX96 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1009, 0) }, /* UX16 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x100a, 3) }, /* EOS BX */ { USB_VPI(USB_VENDOR_YAMAHA, 0x100c, 0) }, /* UC-MX */ { USB_VPI(USB_VENDOR_YAMAHA, 0x100d, 0) }, /* UC-KX */ { USB_VPI(USB_VENDOR_YAMAHA, 0x100e, 0) }, /* S08 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x100f, 0) }, /* CLP-150 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1010, 0) }, /* CLP-170 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1011, 0) }, /* P-250 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1012, 0) }, /* TYROS */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1013, 0) }, /* PF-500 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1014, 0) }, /* S90 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1015, 0) }, /* MOTIF-R */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1016, 0) }, /* MDP-5 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1017, 0) }, /* CVP-204 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1018, 0) }, /* CVP-206 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1019, 0) }, /* CVP-208 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x101a, 0) }, /* CVP-210 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x101b, 0) }, /* PSR-1100 */ { USB_VPI(USB_VENDOR_YAMAHA, 
0x101c, 0) }, /* PSR-2100 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x101d, 0) }, /* CLP-175 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x101e, 0) }, /* PSR-K1 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x101f, 0) }, /* EZ-J24 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1020, 0) }, /* EZ-250i */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1021, 0) }, /* MOTIF ES 6 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1022, 0) }, /* MOTIF ES 7 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1023, 0) }, /* MOTIF ES 8 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1024, 0) }, /* CVP-301 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1025, 0) }, /* CVP-303 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1026, 0) }, /* CVP-305 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1027, 0) }, /* CVP-307 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1028, 0) }, /* CVP-309 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1029, 0) }, /* CVP-309GP */ { USB_VPI(USB_VENDOR_YAMAHA, 0x102a, 0) }, /* PSR-1500 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x102b, 0) }, /* PSR-3000 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x102e, 0) }, /* ELS-01/01C */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1030, 0) }, /* PSR-295/293 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1031, 0) }, /* DGX-205/203 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1032, 0) }, /* DGX-305 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1033, 0) }, /* DGX-505 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1034, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1035, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1036, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1037, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1038, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1039, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x103a, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x103b, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x103c, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x103d, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x103e, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x103f, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1040, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1041, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1042, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1043, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1044, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1045, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x104e, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x104f, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1050, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1051, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1052, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1053, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1054, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1055, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1056, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1057, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1058, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1059, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x105a, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x105b, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x105c, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x105d, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x1503, 3) }, /* MOX6/MOX8 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x2000, 0) }, /* DGP-7 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x2001, 0) }, /* DGP-5 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x2002, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x2003, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x5000, 0) }, /* CS1D */ { USB_VPI(USB_VENDOR_YAMAHA, 0x5001, 0) }, /* DSP1D */ { USB_VPI(USB_VENDOR_YAMAHA, 0x5002, 0) }, /* DME32 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x5003, 0) }, /* DM2000 */ { USB_VPI(USB_VENDOR_YAMAHA, 
0x5004, 0) }, /* 02R96 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x5005, 0) }, /* ACU16-C */ { USB_VPI(USB_VENDOR_YAMAHA, 0x5006, 0) }, /* NHB32-C */ { USB_VPI(USB_VENDOR_YAMAHA, 0x5007, 0) }, /* DM1000 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x5008, 0) }, /* 01V96 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x5009, 0) }, /* SPX2000 */ { USB_VPI(USB_VENDOR_YAMAHA, 0x500a, 0) }, /* PM5D */ { USB_VPI(USB_VENDOR_YAMAHA, 0x500b, 0) }, /* DME64N */ { USB_VPI(USB_VENDOR_YAMAHA, 0x500c, 0) }, /* DME24N */ { USB_VPI(USB_VENDOR_YAMAHA, 0x500d, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x500e, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x500f, 0) }, /* NULL */ { USB_VPI(USB_VENDOR_YAMAHA, 0x7000, 0) }, /* DTX */ { USB_VPI(USB_VENDOR_YAMAHA, 0x7010, 0) }, /* UB99 */ }; static const STRUCT_USB_HOST_ID __used uaudio_devs[] = { /* Generic USB audio class match */ {USB_IFACE_CLASS(UICLASS_AUDIO), USB_IFACE_SUBCLASS(UISUBCLASS_AUDIOCONTROL),}, /* Generic USB MIDI class match */ {USB_IFACE_CLASS(UICLASS_AUDIO), USB_IFACE_SUBCLASS(UISUBCLASS_MIDISTREAM),}, }; static int uaudio_probe(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); /* lookup non-standard device(s) */ if (usbd_lookup_id_by_uaa(uaudio_vendor_midi, sizeof(uaudio_vendor_midi), uaa) == 0) { return (BUS_PROBE_SPECIFIC); } if (uaa->info.bInterfaceClass != UICLASS_AUDIO) { if (uaa->info.bInterfaceClass != UICLASS_VENDOR || usb_test_quirk(uaa, UQ_AU_VENDOR_CLASS) == 0) return (ENXIO); } /* check for AUDIO control interface */ if (uaa->info.bInterfaceSubClass == UISUBCLASS_AUDIOCONTROL) { if (usb_test_quirk(uaa, UQ_BAD_AUDIO)) return (ENXIO); else return (BUS_PROBE_GENERIC); } /* check for MIDI stream */ if (uaa->info.bInterfaceSubClass == UISUBCLASS_MIDISTREAM) { if (usb_test_quirk(uaa, UQ_BAD_MIDI)) return (ENXIO); else return (BUS_PROBE_GENERIC); } return (ENXIO); } static int uaudio_attach(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); struct uaudio_softc *sc = device_get_softc(dev); struct usb_interface_descriptor *id; usb_error_t err; device_t child; sc->sc_play_chan.priv_sc = sc; sc->sc_rec_chan.priv_sc = sc; sc->sc_udev = uaa->device; sc->sc_mixer_iface_index = uaa->info.bIfaceIndex; sc->sc_mixer_iface_no = uaa->info.bIfaceNum; sc->sc_config_msg[0].hdr.pm_callback = &uaudio_configure_msg; sc->sc_config_msg[0].sc = sc; sc->sc_config_msg[1].hdr.pm_callback = &uaudio_configure_msg; sc->sc_config_msg[1].sc = sc; if (usb_test_quirk(uaa, UQ_AUDIO_SWAP_LR)) sc->sc_uq_audio_swap_lr = 1; if (usb_test_quirk(uaa, UQ_AU_INP_ASYNC)) sc->sc_uq_au_inp_async = 1; if (usb_test_quirk(uaa, UQ_AU_NO_XU)) sc->sc_uq_au_no_xu = 1; if (usb_test_quirk(uaa, UQ_BAD_ADC)) sc->sc_uq_bad_adc = 1; if (usb_test_quirk(uaa, UQ_AU_VENDOR_CLASS)) sc->sc_uq_au_vendor_class = 1; umidi_init(dev); device_set_usb_desc(dev); id = usbd_get_interface_descriptor(uaa->iface); /* must fill mixer info before channel info */ uaudio_mixer_fill_info(sc, uaa->device, id); /* fill channel info */ uaudio_chan_fill_info(sc, uaa->device); DPRINTF("audio rev %d.%02x\n", sc->sc_audio_rev >> 8, sc->sc_audio_rev & 0xff); if (sc->sc_mixer_count == 0) { if (uaa->info.idVendor == USB_VENDOR_MAUDIO && (uaa->info.idProduct == USB_PRODUCT_MAUDIO_FASTTRACKULTRA || uaa->info.idProduct == USB_PRODUCT_MAUDIO_FASTTRACKULTRA8R)) { DPRINTF("Generating mixer descriptors\n"); uaudio_mixer_controls_create_ftu(sc); } } DPRINTF("%d mixer controls\n", sc->sc_mixer_count); if (sc->sc_play_chan.num_alt > 0) { uint8_t x; /* * Need to set a default 
alternate interface, else * some USB audio devices might go into an infinite * re-enumeration loop: */ err = usbd_set_alt_interface_index(sc->sc_udev, sc->sc_play_chan.usb_alt[0].iface_index, sc->sc_play_chan.usb_alt[0].iface_alt_index); if (err) { DPRINTF("setting of alternate index failed: %s!\n", usbd_errstr(err)); } for (x = 0; x != sc->sc_play_chan.num_alt; x++) { device_printf(dev, "Play: %d Hz, %d ch, %s format, " "2x8ms buffer.\n", sc->sc_play_chan.usb_alt[x].sample_rate, sc->sc_play_chan.usb_alt[x].channels, sc->sc_play_chan.usb_alt[x].p_fmt->description); } } else { device_printf(dev, "No playback.\n"); } if (sc->sc_rec_chan.num_alt > 0) { uint8_t x; /* * Need to set a default alternate interface, else * some USB audio devices might go into an infinite * re-enumeration loop: */ err = usbd_set_alt_interface_index(sc->sc_udev, sc->sc_rec_chan.usb_alt[0].iface_index, sc->sc_rec_chan.usb_alt[0].iface_alt_index); if (err) { DPRINTF("setting of alternate index failed: %s!\n", usbd_errstr(err)); } for (x = 0; x != sc->sc_rec_chan.num_alt; x++) { device_printf(dev, "Record: %d Hz, %d ch, %s format, " "2x8ms buffer.\n", sc->sc_rec_chan.usb_alt[x].sample_rate, sc->sc_rec_chan.usb_alt[x].channels, sc->sc_rec_chan.usb_alt[x].p_fmt->description); } } else { device_printf(dev, "No recording.\n"); } if (sc->sc_midi_chan.valid == 0) { if (usbd_lookup_id_by_uaa(uaudio_vendor_midi, sizeof(uaudio_vendor_midi), uaa) == 0) { sc->sc_midi_chan.iface_index = (uint8_t)uaa->driver_info; sc->sc_midi_chan.iface_alt_index = 0; sc->sc_midi_chan.valid = 1; } } if (sc->sc_midi_chan.valid) { if (umidi_probe(dev)) { goto detach; } device_printf(dev, "MIDI sequencer.\n"); } else { device_printf(dev, "No MIDI sequencer.\n"); } DPRINTF("doing child attach\n"); /* attach the children */ sc->sc_sndcard_func.func = SCF_PCM; /* * Only attach a PCM device if we have a playback, recording * or mixer device present: */ if (sc->sc_play_chan.num_alt > 0 || sc->sc_rec_chan.num_alt > 0 || sc->sc_mix_info) { child = device_add_child(dev, "pcm", -1); if (child == NULL) { DPRINTF("out of memory\n"); goto detach; } device_set_ivars(child, &sc->sc_sndcard_func); } if (bus_generic_attach(dev)) { DPRINTF("child attach failed\n"); goto detach; } if (uaudio_hid_probe(sc, uaa) == 0) { device_printf(dev, "HID volume keys found.\n"); } else { device_printf(dev, "No HID volume keys found.\n"); } /* reload all mixer settings */ uaudio_mixer_reload_all(sc); return (0); /* success */ detach: uaudio_detach(dev); return (ENXIO); } static void uaudio_pcm_setflags(device_t dev, uint32_t flags) { pcm_setflags(dev, pcm_getflags(dev) | flags); } int uaudio_attach_sub(device_t dev, kobj_class_t mixer_class, kobj_class_t chan_class) { struct uaudio_softc *sc = device_get_softc(device_get_parent(dev)); char status[SND_STATUSLEN]; uaudio_mixer_init(sc); if (sc->sc_uq_audio_swap_lr) { DPRINTF("hardware has swapped left and right\n"); /* uaudio_pcm_setflags(dev, SD_F_PSWAPLR); */ } if (!(sc->sc_mix_info & SOUND_MASK_PCM)) { DPRINTF("emulating master volume\n"); /* * Emulate missing pcm mixer controller * through FEEDER_VOLUME */ uaudio_pcm_setflags(dev, SD_F_SOFTPCMVOL); } if (sc->sc_pcm_bitperfect) { DPRINTF("device needs bitperfect by default\n"); uaudio_pcm_setflags(dev, SD_F_BITPERFECT); } if (mixer_init(dev, mixer_class, sc)) goto detach; sc->sc_mixer_init = 1; mixer_hwvol_init(dev); snprintf(status, sizeof(status), "at ? %s", PCM_KLDSTRING(snd_uaudio)); if (pcm_register(dev, sc, (sc->sc_play_chan.num_alt > 0) ? 1 : 0, (sc->sc_rec_chan.num_alt > 0) ?
1 : 0)) { goto detach; } uaudio_pcm_setflags(dev, SD_F_MPSAFE); sc->sc_pcm_registered = 1; if (sc->sc_play_chan.num_alt > 0) { pcm_addchan(dev, PCMDIR_PLAY, chan_class, sc); } if (sc->sc_rec_chan.num_alt > 0) { pcm_addchan(dev, PCMDIR_REC, chan_class, sc); } pcm_setstatus(dev, status); uaudio_mixer_register_sysctl(sc, dev); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "feedback_rate", CTLFLAG_RD, &sc->sc_play_chan.feedback_rate, 0, "Feedback sample rate in Hz"); return (0); /* success */ detach: uaudio_detach_sub(dev); return (ENXIO); } int uaudio_detach_sub(device_t dev) { struct uaudio_softc *sc = device_get_softc(device_get_parent(dev)); int error = 0; repeat: if (sc->sc_pcm_registered) { error = pcm_unregister(dev); } else { if (sc->sc_mixer_init) { error = mixer_uninit(dev); } } if (error) { device_printf(dev, "Waiting for sound application to exit!\n"); usb_pause_mtx(NULL, 2 * hz); goto repeat; /* try again */ } return (0); /* success */ } static int uaudio_detach(device_t dev) { struct uaudio_softc *sc = device_get_softc(dev); /* * Stop USB transfers early so that any audio applications * will time out and close opened /dev/dspX.Y device(s), if * any. */ usb_proc_explore_lock(sc->sc_udev); sc->sc_play_chan.operation = CHAN_OP_DRAIN; sc->sc_rec_chan.operation = CHAN_OP_DRAIN; usb_proc_explore_mwait(sc->sc_udev, &sc->sc_config_msg[0], &sc->sc_config_msg[1]); usb_proc_explore_unlock(sc->sc_udev); usbd_transfer_unsetup(sc->sc_play_chan.xfer, UAUDIO_NCHANBUFS + 1); usbd_transfer_unsetup(sc->sc_rec_chan.xfer, UAUDIO_NCHANBUFS + 1); uaudio_hid_detach(sc); if (bus_generic_detach(dev) != 0) { DPRINTF("detach failed!\n"); } sbuf_delete(&sc->sc_sndstat); sc->sc_sndstat_valid = 0; umidi_detach(dev); /* free mixer data */ uaudio_mixer_ctl_free(sc); return (0); } static uint32_t uaudio_get_buffer_size(struct uaudio_chan *ch, uint8_t alt) { struct uaudio_chan_alt *chan_alt = &ch->usb_alt[alt]; /* We use 2 times 8ms of buffer */ - uint32_t buf_size = (((chan_alt->sample_rate * (UAUDIO_NFRAMES / 8)) + - 1000 - 1) / 1000) * chan_alt->sample_size; + uint32_t buf_size = chan_alt->sample_size * + howmany(chan_alt->sample_rate * (UAUDIO_NFRAMES / 8), 1000); return (buf_size); } static void uaudio_configure_msg_sub(struct uaudio_softc *sc, struct uaudio_chan *chan, int dir) { struct uaudio_chan_alt *chan_alt; uint32_t frames; uint32_t buf_size; uint16_t fps; uint8_t set_alt; uint8_t fps_shift; uint8_t operation; usb_error_t err; if (chan->num_alt <= 0) return; DPRINTF("\n"); usb_proc_explore_lock(sc->sc_udev); operation = chan->operation; chan->operation = CHAN_OP_NONE; usb_proc_explore_unlock(sc->sc_udev); mtx_lock(chan->pcm_mtx); if (chan->cur_alt != chan->set_alt) set_alt = chan->set_alt; else set_alt = CHAN_MAX_ALT; mtx_unlock(chan->pcm_mtx); if (set_alt >= chan->num_alt) goto done; chan_alt = chan->usb_alt + set_alt; usbd_transfer_unsetup(chan->xfer, UAUDIO_NCHANBUFS + 1); err = usbd_set_alt_interface_index(sc->sc_udev, chan_alt->iface_index, chan_alt->iface_alt_index); if (err) { DPRINTF("setting of alternate index failed: %s!\n", usbd_errstr(err)); goto error; } /* * Only set the sample rate if the channel reports that it * supports the frequency control. 
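* For UAC 2.0 hardware the rate is programmed per clock ID * through the AudioControl interface, for UAC 1.0 hardware per * endpoint when the UA_SED_FREQ_CONTROL attribute is present, * and for UAC 3.0 the rate setup is skipped here.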
*/ if (sc->sc_audio_rev >= UAUDIO_VERSION_30) { /* FALLTHROUGH */ } else if (sc->sc_audio_rev >= UAUDIO_VERSION_20) { unsigned int x; for (x = 0; x != 256; x++) { if (dir == PCMDIR_PLAY) { if (!(sc->sc_mixer_clocks.bit_output[x / 8] & (1 << (x % 8)))) { continue; } } else { if (!(sc->sc_mixer_clocks.bit_input[x / 8] & (1 << (x % 8)))) { continue; } } if (uaudio20_set_speed(sc->sc_udev, sc->sc_mixer_iface_no, x, chan_alt->sample_rate)) { /* * If the endpoint is adaptive setting * the speed may fail. */ DPRINTF("setting of sample rate failed! " "(continuing anyway)\n"); } } } else if (chan_alt->p_sed.v1->bmAttributes & UA_SED_FREQ_CONTROL) { if (uaudio_set_speed(sc->sc_udev, chan_alt->p_ed1->bEndpointAddress, chan_alt->sample_rate)) { /* * If the endpoint is adaptive setting the * speed may fail. */ DPRINTF("setting of sample rate failed! " "(continuing anyway)\n"); } } if (usbd_transfer_setup(sc->sc_udev, &chan_alt->iface_index, chan->xfer, chan_alt->usb_cfg, UAUDIO_NCHANBUFS + 1, chan, chan->pcm_mtx)) { DPRINTF("could not allocate USB transfers!\n"); goto error; } fps = usbd_get_isoc_fps(sc->sc_udev); if (fps < 8000) { /* FULL speed USB */ frames = 8; } else { /* HIGH speed USB */ frames = UAUDIO_NFRAMES; } fps_shift = usbd_xfer_get_fps_shift(chan->xfer[0]); /* down shift number of frames per second, if any */ fps >>= fps_shift; frames >>= fps_shift; /* bytes per frame should not be zero */ chan->bytes_per_frame[0] = ((chan_alt->sample_rate / fps) * chan_alt->sample_size); - chan->bytes_per_frame[1] = - (((chan_alt->sample_rate + fps - 1) / fps) * chan_alt->sample_size); + chan->bytes_per_frame[1] = howmany(chan_alt->sample_rate, fps) * + chan_alt->sample_size; /* setup data rate dithering, if any */ chan->frames_per_second = fps; chan->sample_rem = chan_alt->sample_rate % fps; chan->sample_curr = 0; /* compute required buffer size */ buf_size = (chan->bytes_per_frame[1] * frames); if (buf_size > (chan->end - chan->start)) { DPRINTF("buffer size is too big\n"); goto error; } chan->intr_frames = frames; DPRINTF("fps=%d sample_rem=%d\n", (int)fps, (int)chan->sample_rem); if (chan->intr_frames == 0) { DPRINTF("frame shift is too high!\n"); goto error; } mtx_lock(chan->pcm_mtx); chan->cur_alt = set_alt; mtx_unlock(chan->pcm_mtx); done: #if (UAUDIO_NCHANBUFS != 2) #error "please update code" #endif switch (operation) { case CHAN_OP_START: mtx_lock(chan->pcm_mtx); usbd_transfer_start(chan->xfer[0]); usbd_transfer_start(chan->xfer[1]); mtx_unlock(chan->pcm_mtx); break; case CHAN_OP_STOP: mtx_lock(chan->pcm_mtx); usbd_transfer_stop(chan->xfer[0]); usbd_transfer_stop(chan->xfer[1]); mtx_unlock(chan->pcm_mtx); break; default: break; } return; error: usbd_transfer_unsetup(chan->xfer, UAUDIO_NCHANBUFS + 1); mtx_lock(chan->pcm_mtx); chan->cur_alt = CHAN_MAX_ALT; mtx_unlock(chan->pcm_mtx); } static void uaudio_configure_msg(struct usb_proc_msg *pm) { struct uaudio_softc *sc = ((struct uaudio_configure_msg *)pm)->sc; usb_proc_explore_unlock(sc->sc_udev); uaudio_configure_msg_sub(sc, &sc->sc_play_chan, PCMDIR_PLAY); uaudio_configure_msg_sub(sc, &sc->sc_rec_chan, PCMDIR_REC); usb_proc_explore_lock(sc->sc_udev); } /*========================================================================* * AS - Audio Stream - routines *========================================================================*/ #ifdef USB_DEBUG static void uaudio_chan_dump_ep_desc(const usb_endpoint_descriptor_audio_t *ed) { if (ed) { DPRINTF("endpoint=%p bLength=%d bDescriptorType=%d \n" "bEndpointAddress=%d bmAttributes=0x%x \n" 
"wMaxPacketSize=%d bInterval=%d \n" "bRefresh=%d bSynchAddress=%d\n", ed, ed->bLength, ed->bDescriptorType, ed->bEndpointAddress, ed->bmAttributes, UGETW(ed->wMaxPacketSize), ed->bInterval, UEP_HAS_REFRESH(ed) ? ed->bRefresh : 0, UEP_HAS_SYNCADDR(ed) ? ed->bSynchAddress : 0); } } #endif /* * The following is a workaround for broken no-name USB audio devices * sold by dealextreme called "3D sound". The problem is that the * manufacturer computed wMaxPacketSize is too small to hold the * actual data sent. In other words the device sometimes sends more * data than it actually reports it can send in a single isochronous * packet. */ static void uaudio_record_fix_fs(usb_endpoint_descriptor_audio_t *ep, uint32_t xps, uint32_t add) { uint32_t mps; mps = UGETW(ep->wMaxPacketSize); /* * If the device indicates it can send more data than what the * sample rate indicates, we apply the workaround. */ if (mps > xps) { /* allow additional data */ xps += add; /* check against the maximum USB 1.x length */ if (xps > 1023) xps = 1023; /* check if we should do an update */ if (mps < xps) { /* simply update the wMaxPacketSize field */ USETW(ep->wMaxPacketSize, xps); DPRINTF("Workaround: Updated wMaxPacketSize " "from %d to %d bytes.\n", (int)mps, (int)xps); } } } static usb_error_t uaudio20_check_rate(struct usb_device *udev, uint8_t iface_no, uint8_t clockid, uint32_t rate) { struct usb_device_request req; usb_error_t error; uint8_t data[255]; uint16_t actlen; uint16_t rates; uint16_t x; DPRINTFN(6, "ifaceno=%d clockid=%d rate=%u\n", iface_no, clockid, rate); req.bmRequestType = UT_READ_CLASS_INTERFACE; req.bRequest = UA20_CS_RANGE; USETW2(req.wValue, UA20_CS_SAM_FREQ_CONTROL, 0); USETW2(req.wIndex, clockid, iface_no); USETW(req.wLength, 255); error = usbd_do_request_flags(udev, NULL, &req, data, USB_SHORT_XFER_OK, &actlen, USB_DEFAULT_TIMEOUT); if (error != 0 || actlen < 2) return (USB_ERR_INVAL); rates = data[0] | (data[1] << 8); actlen = (actlen - 2) / 12; if (rates > actlen) { DPRINTF("Too many rates\n"); rates = actlen; } for (x = 0; x != rates; x++) { uint32_t min = UGETDW(data + 2 + (12 * x)); uint32_t max = UGETDW(data + 6 + (12 * x)); uint32_t res = UGETDW(data + 10 + (12 * x)); if (res == 0) { DPRINTF("Zero residue\n"); res = 1; } if (min > max) { DPRINTF("Swapped max and min\n"); uint32_t temp; temp = min; min = max; max = temp; } if (rate >= min && rate <= max && (((rate - min) % res) == 0)) { return (0); } } return (USB_ERR_INVAL); } static void uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev, uint32_t rate, uint8_t channels, uint8_t bit_resolution) { struct usb_descriptor *desc = NULL; union uaudio_asid asid = { NULL }; union uaudio_asf1d asf1d = { NULL }; union uaudio_sed sed = { NULL }; struct usb_midi_streaming_endpoint_descriptor *msid = NULL; usb_endpoint_descriptor_audio_t *ed1 = NULL; const struct usb_audio_control_descriptor *acdp = NULL; struct usb_config_descriptor *cd = usbd_get_config_descriptor(udev); struct usb_interface_descriptor *id; const struct uaudio_format *p_fmt = NULL; struct uaudio_chan *chan; struct uaudio_chan_alt *chan_alt; uint32_t format; uint16_t curidx = 0xFFFF; uint16_t lastidx = 0xFFFF; uint16_t alt_index = 0; uint16_t audio_rev = 0; uint16_t x; uint8_t ep_dir; uint8_t bChannels; uint8_t bBitResolution; uint8_t audio_if = 0; uint8_t midi_if = 0; uint8_t uma_if_class; while ((desc = usb_desc_foreach(cd, desc))) { if ((desc->bDescriptorType == UDESC_INTERFACE) && (desc->bLength >= sizeof(*id))) { id = (void *)desc; if 
(id->bInterfaceNumber != lastidx) { lastidx = id->bInterfaceNumber; curidx++; alt_index = 0; } else { alt_index++; } if ((!(sc->sc_hid.flags & UAUDIO_HID_VALID)) && (id->bInterfaceClass == UICLASS_HID) && (id->bInterfaceSubClass == 0) && (id->bInterfaceProtocol == 0) && (alt_index == 0) && usbd_get_iface(udev, curidx) != NULL) { DPRINTF("Found HID interface at %d\n", curidx); sc->sc_hid.flags |= UAUDIO_HID_VALID; sc->sc_hid.iface_index = curidx; } uma_if_class = ((id->bInterfaceClass == UICLASS_AUDIO) || ((id->bInterfaceClass == UICLASS_VENDOR) && (sc->sc_uq_au_vendor_class != 0))); if ((uma_if_class != 0) && (id->bInterfaceSubClass == UISUBCLASS_AUDIOSTREAM)) { audio_if = 1; } else { audio_if = 0; } if ((uma_if_class != 0) && (id->bInterfaceSubClass == UISUBCLASS_MIDISTREAM)) { /* * XXX could allow multiple MIDI interfaces */ midi_if = 1; if ((sc->sc_midi_chan.valid == 0) && (usbd_get_iface(udev, curidx) != NULL)) { sc->sc_midi_chan.iface_index = curidx; sc->sc_midi_chan.iface_alt_index = alt_index; sc->sc_midi_chan.valid = 1; } } else { midi_if = 0; } asid.v1 = NULL; asf1d.v1 = NULL; ed1 = NULL; sed.v1 = NULL; /* * There can only be one USB audio instance * per USB device. Grab all USB audio * interfaces on this USB device so that we * don't attach USB audio twice: */ if (alt_index == 0 && curidx != sc->sc_mixer_iface_index && (id->bInterfaceClass == UICLASS_AUDIO || audio_if != 0 || midi_if != 0)) { usbd_set_parent_iface(sc->sc_udev, curidx, sc->sc_mixer_iface_index); } } if (audio_if == 0) { if (midi_if == 0) { if ((acdp == NULL) && (desc->bDescriptorType == UDESC_CS_INTERFACE) && (desc->bDescriptorSubtype == UDESCSUB_AC_HEADER) && (desc->bLength >= sizeof(*acdp))) { acdp = (void *)desc; audio_rev = UGETW(acdp->bcdADC); } } else { msid = (void *)desc; /* get the maximum number of embedded jacks in use, if any */ if (msid->bLength >= sizeof(*msid) && msid->bDescriptorType == UDESC_CS_ENDPOINT && msid->bDescriptorSubtype == MS_GENERAL && msid->bNumEmbMIDIJack > sc->sc_midi_chan.max_emb_jack) { sc->sc_midi_chan.max_emb_jack = msid->bNumEmbMIDIJack; } } /* * Don't collect any USB audio descriptors if * this is not a USB audio stream interface. */ continue; } if ((acdp != NULL) && (desc->bDescriptorType == UDESC_CS_INTERFACE) && (desc->bDescriptorSubtype == AS_GENERAL) && (asid.v1 == NULL)) { if (audio_rev >= UAUDIO_VERSION_30) { /* FALLTHROUGH */ } else if (audio_rev >= UAUDIO_VERSION_20) { if (desc->bLength >= sizeof(*asid.v2)) { asid.v2 = (void *)desc; } } else { if (desc->bLength >= sizeof(*asid.v1)) { asid.v1 = (void *)desc; } } } if ((acdp != NULL) && (desc->bDescriptorType == UDESC_CS_INTERFACE) && (desc->bDescriptorSubtype == FORMAT_TYPE) && (asf1d.v1 == NULL)) { if (audio_rev >= UAUDIO_VERSION_30) { /* FALLTHROUGH */ } else if (audio_rev >= UAUDIO_VERSION_20) { if (desc->bLength >= sizeof(*asf1d.v2)) asf1d.v2 = (void *)desc; } else { if (desc->bLength >= sizeof(*asf1d.v1)) { asf1d.v1 = (void *)desc; if (asf1d.v1->bFormatType != FORMAT_TYPE_I) { DPRINTFN(11, "ignored bFormatType = %d\n", asf1d.v1->bFormatType); asf1d.v1 = NULL; continue; } if (desc->bLength < (sizeof(*asf1d.v1) + ((asf1d.v1->bSamFreqType == 0) ?
6 : (asf1d.v1->bSamFreqType * 3)))) { DPRINTFN(11, "invalid descriptor, " "too short\n"); asf1d.v1 = NULL; continue; } } } if ((desc->bDescriptorType == UDESC_ENDPOINT) && (desc->bLength >= UEP_MINSIZE) && (ed1 == NULL)) { ed1 = (void *)desc; if (UE_GET_XFERTYPE(ed1->bmAttributes) != UE_ISOCHRONOUS) { ed1 = NULL; continue; } } if ((acdp != NULL) && (desc->bDescriptorType == UDESC_CS_ENDPOINT) && (desc->bDescriptorSubtype == AS_GENERAL) && (sed.v1 == NULL)) { if (audio_rev >= UAUDIO_VERSION_30) { /* FALLTHROUGH */ } else if (audio_rev >= UAUDIO_VERSION_20) { if (desc->bLength >= sizeof(*sed.v2)) sed.v2 = (void *)desc; } else { if (desc->bLength >= sizeof(*sed.v1)) sed.v1 = (void *)desc; } } if (asid.v1 == NULL || asf1d.v1 == NULL || ed1 == NULL || sed.v1 == NULL) { /* need more descriptors */ continue; } ep_dir = UE_GET_DIR(ed1->bEndpointAddress); /* We ignore sync endpoint information until further notice. */ if (audio_rev >= UAUDIO_VERSION_30) { goto next_ep; } else if (audio_rev >= UAUDIO_VERSION_20) { uint32_t dwFormat; dwFormat = UGETDW(asid.v2->bmFormats); bChannels = asid.v2->bNrChannels; bBitResolution = asf1d.v2->bSubslotSize * 8; if ((bChannels != channels) || (bBitResolution != bit_resolution)) { DPRINTF("Wrong number of channels\n"); goto next_ep; } for (p_fmt = uaudio20_formats; p_fmt->wFormat != 0; p_fmt++) { if ((p_fmt->wFormat & dwFormat) && (p_fmt->bPrecision == bBitResolution)) break; } if (p_fmt->wFormat == 0) { DPRINTF("Unsupported audio format\n"); goto next_ep; } for (x = 0; x != 256; x++) { if (ep_dir == UE_DIR_OUT) { if (!(sc->sc_mixer_clocks.bit_output[x / 8] & (1 << (x % 8)))) { continue; } } else { if (!(sc->sc_mixer_clocks.bit_input[x / 8] & (1 << (x % 8)))) { continue; } } DPRINTF("Checking clock ID=%d\n", x); if (uaudio20_check_rate(udev, sc->sc_mixer_iface_no, x, rate)) { DPRINTF("Unsupported sampling " "rate, id=%d\n", x); goto next_ep; } } } else { uint16_t wFormat; wFormat = UGETW(asid.v1->wFormatTag); bChannels = UAUDIO_MAX_CHAN(asf1d.v1->bNrChannels); bBitResolution = asf1d.v1->bSubFrameSize * 8; if (asf1d.v1->bSamFreqType == 0) { DPRINTFN(16, "Sample rate: %d-%dHz\n", UA_SAMP_LO(asf1d.v1), UA_SAMP_HI(asf1d.v1)); if ((rate >= UA_SAMP_LO(asf1d.v1)) && (rate <= UA_SAMP_HI(asf1d.v1))) goto found_rate; } else { for (x = 0; x < asf1d.v1->bSamFreqType; x++) { DPRINTFN(16, "Sample rate = %dHz\n", UA_GETSAMP(asf1d.v1, x)); if (rate == UA_GETSAMP(asf1d.v1, x)) goto found_rate; } } goto next_ep; found_rate: for (p_fmt = uaudio10_formats; p_fmt->wFormat != 0; p_fmt++) { if ((p_fmt->wFormat == wFormat) && (p_fmt->bPrecision == bBitResolution)) break; } if (p_fmt->wFormat == 0) { DPRINTF("Unsupported audio format\n"); goto next_ep; } if ((bChannels != channels) || (bBitResolution != bit_resolution)) { DPRINTF("Wrong number of channels\n"); goto next_ep; } } chan = (ep_dir == UE_DIR_IN) ?
&sc->sc_rec_chan : &sc->sc_play_chan; if (usbd_get_iface(udev, curidx) == NULL) { DPRINTF("Interface is not valid\n"); goto next_ep; } if (chan->num_alt == CHAN_MAX_ALT) { DPRINTF("Too many alternate settings\n"); goto next_ep; } chan->set_alt = 0; chan->cur_alt = CHAN_MAX_ALT; chan_alt = &chan->usb_alt[chan->num_alt++]; #ifdef USB_DEBUG uaudio_chan_dump_ep_desc(ed1); #endif DPRINTF("Sample rate = %dHz, channels = %d, " "bits = %d, format = %s\n", rate, channels, bit_resolution, p_fmt->description); chan_alt->sample_rate = rate; chan_alt->p_asf1d = asf1d; chan_alt->p_ed1 = ed1; chan_alt->p_fmt = p_fmt; chan_alt->p_sed = sed; chan_alt->iface_index = curidx; chan_alt->iface_alt_index = alt_index; if (ep_dir == UE_DIR_IN) chan_alt->usb_cfg = uaudio_cfg_record; else chan_alt->usb_cfg = uaudio_cfg_play; chan_alt->sample_size = (UAUDIO_MAX_CHAN(channels) * p_fmt->bPrecision) / 8; chan_alt->channels = channels; if (ep_dir == UE_DIR_IN && usbd_get_speed(udev) == USB_SPEED_FULL) { uaudio_record_fix_fs(ed1, chan_alt->sample_size * (rate / 1000), chan_alt->sample_size * (rate / 4000)); } /* setup play/record format */ format = chan_alt->p_fmt->freebsd_fmt; /* get default SND_FORMAT() */ format = SND_FORMAT(format, chan_alt->channels, 0); switch (chan_alt->channels) { uint32_t temp_fmt; case 1: case 2: /* mono and stereo */ break; default: /* surround and more */ temp_fmt = feeder_matrix_default_format(format); /* if multichannel, then format can be zero */ if (temp_fmt != 0) format = temp_fmt; break; } /* check if format is not supported */ if (format == 0) { DPRINTF("The selected audio format is not supported\n"); chan->num_alt--; goto next_ep; } if (chan->num_alt > 1) { /* we only accumulate a single format across different sample rates */ if (chan->pcm_format[0] != format) { DPRINTF("Multiple formats are not supported\n"); chan->num_alt--; goto next_ep; } /* ignore duplicate sample rate entries */ if (rate == chan->usb_alt[chan->num_alt - 2].sample_rate) { DPRINTF("Duplicate sample rate detected\n"); chan->num_alt--; goto next_ep; } } chan->pcm_cap.fmtlist = chan->pcm_format; chan->pcm_cap.fmtlist[0] = format; /* check if device needs bitperfect */ if (chan_alt->channels > UAUDIO_MATRIX_MAX) sc->sc_pcm_bitperfect = 1; if (rate < chan->pcm_cap.minspeed || chan->pcm_cap.minspeed == 0) chan->pcm_cap.minspeed = rate; if (rate > chan->pcm_cap.maxspeed || chan->pcm_cap.maxspeed == 0) chan->pcm_cap.maxspeed = rate; if (sc->sc_sndstat_valid != 0) { sbuf_printf(&sc->sc_sndstat, "\n\t" "mode %d.%d:(%s) %dch, %dbit, %s, %dHz", curidx, alt_index, (ep_dir == UE_DIR_IN) ? "input" : "output", channels, p_fmt->bPrecision, p_fmt->description, rate); } next_ep: sed.v1 = NULL; ed1 = NULL; } } /* This structure defines all the supported rates.
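* The list is ordered from the highest to the lowest rate and is * terminated by a zero entry, which ends the probe loop in * uaudio_chan_fill_info().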
*/ static const uint32_t uaudio_rate_list[CHAN_MAX_ALT] = { 384000, 352800, 192000, 176400, 96000, 88200, 88000, 80000, 72000, 64000, 56000, 48000, 44100, 40000, 32000, 24000, 22050, 16000, 11025, 8000, 0 }; static void uaudio_chan_fill_info(struct uaudio_softc *sc, struct usb_device *udev) { uint32_t rate = uaudio_default_rate; uint8_t z; uint8_t bits = uaudio_default_bits; uint8_t y; uint8_t channels = uaudio_default_channels; uint8_t x; bits -= (bits % 8); if ((bits == 0) || (bits > 32)) { /* set a valid value */ bits = 32; } if (channels == 0) { switch (usbd_get_speed(udev)) { case USB_SPEED_LOW: case USB_SPEED_FULL: /* * Due to high bandwidth usage and problems * with HIGH-speed split transactions, we * disable surround setups on FULL-speed USB * by default */ channels = 4; break; default: channels = UAUDIO_CHANNELS_MAX; break; } } else if (channels > UAUDIO_CHANNELS_MAX) channels = UAUDIO_CHANNELS_MAX; if (sbuf_new(&sc->sc_sndstat, NULL, 4096, SBUF_AUTOEXTEND)) sc->sc_sndstat_valid = 1; /* try to search for a valid config */ for (x = channels; x; x--) { for (y = bits; y; y -= 8) { /* try the user-defined rate, if any */ if (rate != 0) uaudio_chan_fill_info_sub(sc, udev, rate, x, y); /* try to find a matching rate, if any */ for (z = 0; uaudio_rate_list[z]; z++) uaudio_chan_fill_info_sub(sc, udev, uaudio_rate_list[z], x, y); } } if (sc->sc_sndstat_valid) sbuf_finish(&sc->sc_sndstat); } static void uaudio_chan_play_sync_callback(struct usb_xfer *xfer, usb_error_t error) { struct uaudio_chan *ch = usbd_xfer_softc(xfer); struct usb_page_cache *pc; uint64_t sample_rate; uint8_t buf[4]; uint64_t temp; int len; int actlen; int nframes; usbd_xfer_status(xfer, &actlen, NULL, NULL, &nframes); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(6, "transferred %d bytes\n", actlen); if (nframes == 0) break; len = usbd_xfer_frame_len(xfer, 0); if (len == 0) break; if (len > sizeof(buf)) len = sizeof(buf); memset(buf, 0, sizeof(buf)); pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, buf, len); temp = UGETDW(buf); DPRINTF("Value = 0x%08x\n", (int)temp); /* auto-detect SYNC format */ if (len == 4) temp &= 0x0fffffff; /* check for no data */ if (temp == 0) break; temp *= 125ULL; sample_rate = ch->usb_alt[ch->cur_alt].sample_rate; /* auto adjust */ while (temp < (sample_rate - (sample_rate / 4))) temp *= 2; while (temp > (sample_rate + (sample_rate / 2))) temp /= 2; DPRINTF("Comparing %d Hz :: %d Hz\n", (int)temp, (int)sample_rate); /* * Use feedback value as fallback when there is no * recording channel: */ if (ch->priv_sc->sc_rec_chan.num_alt == 0) ch->jitter_curr = temp - sample_rate; ch->feedback_rate = temp; break; case USB_ST_SETUP: usbd_xfer_set_frames(xfer, 1); usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_framelen(xfer)); usbd_transfer_submit(xfer); break; default: /* Error */ break; } } static int uaudio_chan_is_async(struct uaudio_chan *ch, uint8_t alt) { uint8_t attr = ch->usb_alt[alt].p_ed1->bmAttributes; return (UE_GET_ISO_TYPE(attr) == UE_ISO_ASYNC); } static void uaudio_chan_play_callback(struct usb_xfer *xfer, usb_error_t error) { struct uaudio_chan *ch = usbd_xfer_softc(xfer); struct uaudio_chan *ch_rec; struct usb_page_cache *pc; uint32_t mfl; uint32_t total; uint32_t blockcount; uint32_t n; uint32_t offset; int sample_size; int actlen; int sumlen; if (ch->running == 0 || ch->start == ch->end) { DPRINTF("not running or no buffer!\n"); return; } /* check if there is a record channel */ if (ch->priv_sc->sc_rec_chan.num_alt > 0) ch_rec = &ch->priv_sc->sc_rec_chan; else
ch_rec = NULL; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_SETUP: tr_setup: if (ch_rec != NULL) { /* reset receive jitter counters */ mtx_lock(ch_rec->pcm_mtx); ch_rec->jitter_curr = 0; ch_rec->jitter_rem = 0; mtx_unlock(ch_rec->pcm_mtx); } /* reset transmit jitter counters */ ch->jitter_curr = 0; ch->jitter_rem = 0; /* FALLTHROUGH */ case USB_ST_TRANSFERRED: if (actlen < sumlen) { DPRINTF("short transfer, " "%d of %d bytes\n", actlen, sumlen); } chn_intr(ch->pcm_ch); /* * Check for asynchronous playback endpoint and that * the playback endpoint is properly configured: */ if (ch_rec != NULL && uaudio_chan_is_async(ch, ch->cur_alt) != 0) { mtx_lock(ch_rec->pcm_mtx); if (ch_rec->cur_alt < ch_rec->num_alt) { int64_t tx_jitter; int64_t rx_rate; /* translate receive jitter into transmit jitter */ tx_jitter = ch->usb_alt[ch->cur_alt].sample_rate; tx_jitter = (tx_jitter * ch_rec->jitter_curr) + ch->jitter_rem; /* reset receive jitter counters */ ch_rec->jitter_curr = 0; ch_rec->jitter_rem = 0; /* compute exact number of transmit jitter samples */ rx_rate = ch_rec->usb_alt[ch_rec->cur_alt].sample_rate; ch->jitter_curr += tx_jitter / rx_rate; ch->jitter_rem = tx_jitter % rx_rate; } mtx_unlock(ch_rec->pcm_mtx); } /* start the SYNC transfer one time per second, if any */ if (++(ch->intr_counter) >= UAUDIO_IRQS) { ch->intr_counter = 0; usbd_transfer_start(ch->xfer[UAUDIO_NCHANBUFS]); } mfl = usbd_xfer_max_framelen(xfer); if (ch->bytes_per_frame[1] > mfl) { DPRINTF("bytes per transfer, %d, " "exceeds maximum, %d!\n", ch->bytes_per_frame[1], mfl); break; } blockcount = ch->intr_frames; /* setup number of frames */ usbd_xfer_set_frames(xfer, blockcount); /* get sample size */ sample_size = ch->usb_alt[ch->cur_alt].sample_size; /* reset total length */ total = 0; /* setup frame lengths */ for (n = 0; n != blockcount; n++) { uint32_t frame_len; ch->sample_curr += ch->sample_rem; if (ch->sample_curr >= ch->frames_per_second) { ch->sample_curr -= ch->frames_per_second; frame_len = ch->bytes_per_frame[1]; } else { frame_len = ch->bytes_per_frame[0]; } /* handle free running clock case */ if (ch->jitter_curr > 0 && (frame_len + sample_size) <= mfl) { DPRINTFN(6, "sending one sample more\n"); ch->jitter_curr--; frame_len += sample_size; } else if (ch->jitter_curr < 0 && frame_len >= sample_size) { DPRINTFN(6, "sending one sample less\n"); ch->jitter_curr++; frame_len -= sample_size; } usbd_xfer_set_frame_len(xfer, n, frame_len); total += frame_len; } DPRINTFN(6, "transferring %d bytes\n", total); offset = 0; pc = usbd_xfer_get_frame(xfer, 0); while (total > 0) { n = (ch->end - ch->cur); if (n > total) n = total; usbd_copy_in(pc, offset, ch->cur, n); total -= n; ch->cur += n; offset += n; if (ch->cur >= ch->end) ch->cur = ch->start; } usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) goto tr_setup; break; } } static void uaudio_chan_record_sync_callback(struct usb_xfer *xfer, usb_error_t error) { /* TODO */ } static void uaudio_chan_record_callback(struct usb_xfer *xfer, usb_error_t error) { struct uaudio_chan *ch = usbd_xfer_softc(xfer); struct usb_page_cache *pc; uint32_t offset0; uint32_t mfl; int m; int n; int len; int actlen; int nframes; int expected_bytes; int sample_size; if (ch->start == ch->end) { DPRINTF("no buffer!\n"); return; } usbd_xfer_status(xfer, &actlen, NULL, NULL, &nframes); mfl = usbd_xfer_max_framelen(xfer); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: offset0 = 0; pc = 
usbd_xfer_get_frame(xfer, 0); /* accumulate the fractional sample remainder for this transfer interval */ ch->sample_curr += (ch->sample_rem * ch->intr_frames); /* compute number of expected bytes */ expected_bytes = (ch->intr_frames * ch->bytes_per_frame[0]) + ((ch->sample_curr / ch->frames_per_second) * (ch->bytes_per_frame[1] - ch->bytes_per_frame[0])); /* keep remainder */ ch->sample_curr %= ch->frames_per_second; /* get current sample size */ sample_size = ch->usb_alt[ch->cur_alt].sample_size; for (n = 0; n != nframes; n++) { uint32_t offset1 = offset0; len = usbd_xfer_frame_len(xfer, n); /* make sure we only receive complete samples */ len = len - (len % sample_size); /* subtract bytes received from expected payload */ expected_bytes -= len; /* don't receive data when not ready */ if (ch->running == 0 || ch->cur_alt != ch->set_alt) continue; /* fill ring buffer with samples, if any */ while (len > 0) { m = (ch->end - ch->cur); if (m > len) m = len; usbd_copy_out(pc, offset1, ch->cur, m); len -= m; offset1 += m; ch->cur += m; if (ch->cur >= ch->end) ch->cur = ch->start; } offset0 += mfl; } /* update current jitter */ ch->jitter_curr -= (expected_bytes / sample_size); /* don't allow a huge amount of jitter to accumulate */ nframes = 2 * ch->intr_frames; /* range check current jitter */ if (ch->jitter_curr < -nframes) ch->jitter_curr = -nframes; else if (ch->jitter_curr > nframes) ch->jitter_curr = nframes; DPRINTFN(6, "transferred %d bytes, jitter %d samples\n", actlen, ch->jitter_curr); if (ch->running != 0) chn_intr(ch->pcm_ch); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: nframes = ch->intr_frames; usbd_xfer_set_frames(xfer, nframes); for (n = 0; n != nframes; n++) usbd_xfer_set_frame_len(xfer, n, mfl); usbd_transfer_submit(xfer); break; default: /* Error */ if (error != USB_ERR_CANCELLED) goto tr_setup; break; } } void * uaudio_chan_init(struct uaudio_softc *sc, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct uaudio_chan *ch = ((dir == PCMDIR_PLAY) ?
&sc->sc_play_chan : &sc->sc_rec_chan); uint32_t buf_size; uint8_t x; /* store mutex and PCM channel */ ch->pcm_ch = c; ch->pcm_mtx = c->lock; /* compute worst case buffer */ buf_size = 0; for (x = 0; x != ch->num_alt; x++) { uint32_t temp = uaudio_get_buffer_size(ch, x); if (temp > buf_size) buf_size = temp; } /* allow double buffering */ buf_size *= 2; DPRINTF("Worst case buffer is %d bytes\n", (int)buf_size); ch->buf = malloc(buf_size, M_DEVBUF, M_WAITOK | M_ZERO); if (ch->buf == NULL) goto error; if (sndbuf_setup(b, ch->buf, buf_size) != 0) goto error; ch->start = ch->buf; ch->end = ch->buf + buf_size; ch->cur = ch->buf; ch->pcm_buf = b; ch->max_buf = buf_size; if (ch->pcm_mtx == NULL) { DPRINTF("ERROR: PCM channel does not have a mutex!\n"); goto error; } return (ch); error: uaudio_chan_free(ch); return (NULL); } int uaudio_chan_free(struct uaudio_chan *ch) { if (ch->buf != NULL) { free(ch->buf, M_DEVBUF); ch->buf = NULL; } usbd_transfer_unsetup(ch->xfer, UAUDIO_NCHANBUFS + 1); ch->num_alt = 0; return (0); } int uaudio_chan_set_param_blocksize(struct uaudio_chan *ch, uint32_t blocksize) { uint32_t temp = 2 * uaudio_get_buffer_size(ch, ch->set_alt); sndbuf_setup(ch->pcm_buf, ch->buf, temp); return (temp / 2); } int uaudio_chan_set_param_fragments(struct uaudio_chan *ch, uint32_t blocksize, uint32_t blockcount) { return (1); } int uaudio_chan_set_param_speed(struct uaudio_chan *ch, uint32_t speed) { struct uaudio_softc *sc; uint8_t x; sc = ch->priv_sc; for (x = 0; x < ch->num_alt; x++) { if (ch->usb_alt[x].sample_rate < speed) { /* sample rate is too low */ break; } } if (x != 0) x--; usb_proc_explore_lock(sc->sc_udev); ch->set_alt = x; usb_proc_explore_unlock(sc->sc_udev); DPRINTF("Selecting alt %d\n", (int)x); return (ch->usb_alt[x].sample_rate); } int uaudio_chan_getptr(struct uaudio_chan *ch) { return (ch->cur - ch->start); } struct pcmchan_caps * uaudio_chan_getcaps(struct uaudio_chan *ch) { return (&ch->pcm_cap); } static struct pcmchan_matrix uaudio_chan_matrix_swap_2_0 = { .id = SND_CHN_MATRIX_DRV, .channels = 2, .ext = 0, .map = { /* Right */ [0] = { .type = SND_CHN_T_FR, .members = SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SR }, /* Left */ [1] = { .type = SND_CHN_T_FL, .members = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SL }, [2] = { .type = SND_CHN_T_MAX, .members = 0 } }, .mask = SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FL, .offset = { 1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 } }; struct pcmchan_matrix * uaudio_chan_getmatrix(struct uaudio_chan *ch, uint32_t format) { struct uaudio_softc *sc; sc = ch->priv_sc; if (sc != NULL && sc->sc_uq_audio_swap_lr != 0 && AFMT_CHANNEL(format) == 2) return (&uaudio_chan_matrix_swap_2_0); return (feeder_matrix_format_map(format)); } int uaudio_chan_set_param_format(struct uaudio_chan *ch, uint32_t format) { DPRINTF("Selecting format 0x%08x\n", (unsigned int)format); return (0); } static void uaudio_chan_start_sub(struct uaudio_chan *ch) { struct uaudio_softc *sc = ch->priv_sc; int do_start = 0; if (ch->operation != CHAN_OP_DRAIN) { if (ch->cur_alt == ch->set_alt && ch->operation == CHAN_OP_NONE && mtx_owned(ch->pcm_mtx) != 0) { /* save doing the explore task */ do_start = 1; } else { ch->operation = CHAN_OP_START; (void)usb_proc_explore_msignal(sc->sc_udev, &sc->sc_config_msg[0], &sc->sc_config_msg[1]); } } if (do_start) { usbd_transfer_start(ch->xfer[0]);
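/* also start the second transfer so that one is always queued * while the other completes; UAUDIO_NCHANBUFS is checked to be 2 * at compile time */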
usbd_transfer_start(ch->xfer[1]); } } static int uaudio_chan_need_both(struct uaudio_softc *sc) { return (sc->sc_play_chan.num_alt > 0 && sc->sc_play_chan.running != 0 && uaudio_chan_is_async(&sc->sc_play_chan, sc->sc_play_chan.set_alt) != 0 && sc->sc_rec_chan.num_alt > 0 && sc->sc_rec_chan.running == 0); } static int uaudio_chan_need_none(struct uaudio_softc *sc) { return (sc->sc_play_chan.num_alt > 0 && sc->sc_play_chan.running == 0 && sc->sc_rec_chan.num_alt > 0 && sc->sc_rec_chan.running == 0); } void uaudio_chan_start(struct uaudio_chan *ch) { struct uaudio_softc *sc = ch->priv_sc; /* make operation atomic */ usb_proc_explore_lock(sc->sc_udev); /* check if not running */ if (ch->running == 0) { uint32_t temp; /* get current buffer size */ temp = 2 * uaudio_get_buffer_size(ch, ch->set_alt); /* set running flag */ ch->running = 1; /* ensure the hardware buffer is reset */ ch->start = ch->buf; ch->end = ch->buf + temp; ch->cur = ch->buf; if (uaudio_chan_need_both(sc)) { /* * Start both endpoints because of need for * jitter information: */ uaudio_chan_start_sub(&sc->sc_rec_chan); uaudio_chan_start_sub(&sc->sc_play_chan); } else { uaudio_chan_start_sub(ch); } } /* exit atomic operation */ usb_proc_explore_unlock(sc->sc_udev); } static void uaudio_chan_stop_sub(struct uaudio_chan *ch) { struct uaudio_softc *sc = ch->priv_sc; int do_stop = 0; if (ch->operation != CHAN_OP_DRAIN) { if (ch->cur_alt == ch->set_alt && ch->operation == CHAN_OP_NONE && mtx_owned(ch->pcm_mtx) != 0) { /* save doing the explore task */ do_stop = 1; } else { ch->operation = CHAN_OP_STOP; (void)usb_proc_explore_msignal(sc->sc_udev, &sc->sc_config_msg[0], &sc->sc_config_msg[1]); } } if (do_stop) { usbd_transfer_stop(ch->xfer[0]); usbd_transfer_stop(ch->xfer[1]); } } void uaudio_chan_stop(struct uaudio_chan *ch) { struct uaudio_softc *sc = ch->priv_sc; /* make operation atomic */ usb_proc_explore_lock(sc->sc_udev); /* check if running */ if (ch->running != 0) { /* clear running flag */ ch->running = 0; if (uaudio_chan_need_both(sc)) { /* * Leave the endpoints running because we need * information about jitter! 
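* The asynchronous playback endpoint derives its rate correction * from the record stream, so the record transfers are kept alive * until playback has stopped as well.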
*/ } else if (uaudio_chan_need_none(sc)) { /* * Stop both endpoints in case the one was used for * jitter information: */ uaudio_chan_stop_sub(&sc->sc_rec_chan); uaudio_chan_stop_sub(&sc->sc_play_chan); } else { uaudio_chan_stop_sub(ch); } } /* exit atomic operation */ usb_proc_explore_unlock(sc->sc_udev); } /*========================================================================* * AC - Audio Controller - routines *========================================================================*/ static int uaudio_mixer_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct uaudio_softc *sc; struct uaudio_mixer_node *pmc; int hint; int error; int temp = 0; int chan = 0; sc = (struct uaudio_softc *)oidp->oid_arg1; hint = oidp->oid_arg2; if (sc->sc_mixer_lock == NULL) return (ENXIO); /* lookup mixer node */ mtx_lock(sc->sc_mixer_lock); for (pmc = sc->sc_mixer_root; pmc != NULL; pmc = pmc->next) { for (chan = 0; chan != (int)pmc->nchan; chan++) { if (pmc->wValue[chan] != -1 && pmc->wValue[chan] == hint) { temp = pmc->wData[chan]; goto found; } } } found: mtx_unlock(sc->sc_mixer_lock); error = sysctl_handle_int(oidp, &temp, 0, req); if (error != 0 || req->newptr == NULL) return (error); /* update mixer value */ mtx_lock(sc->sc_mixer_lock); if (pmc != NULL && temp >= pmc->minval && temp <= pmc->maxval) { pmc->wData[chan] = temp; pmc->update[(chan / 8)] |= (1 << (chan % 8)); /* start the transfer, if not already started */ usbd_transfer_start(sc->sc_mixer_xfer[0]); } mtx_unlock(sc->sc_mixer_lock); return (0); } static void uaudio_mixer_ctl_free(struct uaudio_softc *sc) { struct uaudio_mixer_node *p_mc; while ((p_mc = sc->sc_mixer_root) != NULL) { sc->sc_mixer_root = p_mc->next; free(p_mc, M_USBDEV); } } static void uaudio_mixer_register_sysctl(struct uaudio_softc *sc, device_t dev) { struct uaudio_mixer_node *pmc; struct sysctl_oid *mixer_tree; struct sysctl_oid *control_tree; char buf[32]; int chan; int n; mixer_tree = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "mixer", CTLFLAG_RD, NULL, ""); if (mixer_tree == NULL) return; for (n = 0, pmc = sc->sc_mixer_root; pmc != NULL; pmc = pmc->next, n++) { for (chan = 0; chan < pmc->nchan; chan++) { if (pmc->nchan > 1) { snprintf(buf, sizeof(buf), "%s_%d_%d", pmc->name, n, chan); } else { snprintf(buf, sizeof(buf), "%s_%d", pmc->name, n); } control_tree = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(mixer_tree), OID_AUTO, buf, CTLFLAG_RD, NULL, "Mixer control nodes"); if (control_tree == NULL) continue; SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(control_tree), OID_AUTO, "val", CTLTYPE_INT | CTLFLAG_RWTUN, sc, pmc->wValue[chan], uaudio_mixer_sysctl_handler, "I", "Current value"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(control_tree), OID_AUTO, "min", CTLFLAG_RD, 0, pmc->minval, "Minimum value"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(control_tree), OID_AUTO, "max", CTLFLAG_RD, 0, pmc->maxval, "Maximum value"); SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(control_tree), OID_AUTO, "desc", CTLFLAG_RD, pmc->desc, 0, "Description"); } } } /* M-Audio FastTrack Ultra Mixer Description */ /* Origin: Linux USB Audio driver */ static void uaudio_mixer_controls_create_ftu(struct uaudio_softc *sc) { int chx; int chy; memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(6, sc->sc_mixer_iface_no); MIX(sc).wValue[0] = MAKE_WORD(8, 0); MIX(sc).class = UAC_OUTPUT; MIX(sc).type = MIX_UNSIGNED_16; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; 
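/* SOUND_MIXER_NRDEVICES: not mapped to an OSS mixer device; * this control is only exposed through the sysctl tree */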
MIX(sc).name = "effect"; MIX(sc).minval = 0; MIX(sc).maxval = 7; MIX(sc).mul = 7; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; strlcpy(MIX(sc).desc, "Room1,2,3,Hall1,2,Plate,Delay,Echo", sizeof(MIX(sc).desc)); uaudio_mixer_add_ctl_sub(sc, &MIX(sc)); memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(5, sc->sc_mixer_iface_no); for (chx = 0; chx != 8; chx++) { for (chy = 0; chy != 8; chy++) { MIX(sc).wValue[0] = MAKE_WORD(chx + 1, chy + 1); MIX(sc).type = MIX_SIGNED_16; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).name = "mix_rec"; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; MIX(sc).val_default = 0; snprintf(MIX(sc).desc, sizeof(MIX(sc).desc), "AIn%d - Out%d Record Volume", chy + 1, chx + 1); uaudio_mixer_add_ctl(sc, &MIX(sc)); MIX(sc).wValue[0] = MAKE_WORD(chx + 1, chy + 1 + 8); MIX(sc).type = MIX_SIGNED_16; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).name = "mix_play"; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; MIX(sc).val_default = (chx == chy) ? 2 : 0; snprintf(MIX(sc).desc, sizeof(MIX(sc).desc), "DIn%d - Out%d Playback Volume", chy + 1, chx + 1); uaudio_mixer_add_ctl(sc, &MIX(sc)); } } memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(6, sc->sc_mixer_iface_no); MIX(sc).wValue[0] = MAKE_WORD(2, 0); MIX(sc).class = UAC_OUTPUT; MIX(sc).type = MIX_SIGNED_8; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).name = "effect_vol"; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; MIX(sc).minval = 0; MIX(sc).maxval = 0x7f; MIX(sc).mul = 0x7f; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; strlcpy(MIX(sc).desc, "Effect Volume", sizeof(MIX(sc).desc)); uaudio_mixer_add_ctl_sub(sc, &MIX(sc)); memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(6, sc->sc_mixer_iface_no); MIX(sc).wValue[0] = MAKE_WORD(3, 0); MIX(sc).class = UAC_OUTPUT; MIX(sc).type = MIX_SIGNED_16; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).name = "effect_dur"; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; MIX(sc).minval = 0; MIX(sc).maxval = 0x7f00; MIX(sc).mul = 0x7f00; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; strlcpy(MIX(sc).desc, "Effect Duration", sizeof(MIX(sc).desc)); uaudio_mixer_add_ctl_sub(sc, &MIX(sc)); memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(6, sc->sc_mixer_iface_no); MIX(sc).wValue[0] = MAKE_WORD(4, 0); MIX(sc).class = UAC_OUTPUT; MIX(sc).type = MIX_SIGNED_8; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).name = "effect_fb"; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; MIX(sc).minval = 0; MIX(sc).maxval = 0x7f; MIX(sc).mul = 0x7f; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; strlcpy(MIX(sc).desc, "Effect Feedback Volume", sizeof(MIX(sc).desc)); uaudio_mixer_add_ctl_sub(sc, &MIX(sc)); memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(7, sc->sc_mixer_iface_no); for (chy = 0; chy != 4; chy++) { MIX(sc).wValue[0] = MAKE_WORD(7, chy + 1); MIX(sc).type = MIX_SIGNED_16; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).name = "effect_ret"; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; snprintf(MIX(sc).desc, sizeof(MIX(sc).desc), "Effect Return %d Volume", chy + 1); uaudio_mixer_add_ctl(sc, &MIX(sc)); } memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(5, sc->sc_mixer_iface_no); for (chy = 0; chy != 8; chy++) { MIX(sc).wValue[0] = MAKE_WORD(9, chy + 1); MIX(sc).type = MIX_SIGNED_16; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).name = "effect_send"; MIX(sc).nchan = 1; MIX(sc).update[0] = 1; snprintf(MIX(sc).desc, sizeof(MIX(sc).desc), "Effect Send AIn%d Volume", chy + 1); uaudio_mixer_add_ctl(sc, &MIX(sc)); MIX(sc).wValue[0] = MAKE_WORD(9, chy + 1 + 8); MIX(sc).type = 
	memset(&MIX(sc), 0, sizeof(MIX(sc)));
	MIX(sc).wIndex = MAKE_WORD(5, sc->sc_mixer_iface_no);
	for (chx = 0; chx != 8; chx++) {
		for (chy = 0; chy != 8; chy++) {
			MIX(sc).wValue[0] = MAKE_WORD(chx + 1, chy + 1);
			MIX(sc).type = MIX_SIGNED_16;
			MIX(sc).ctl = SOUND_MIXER_NRDEVICES;
			MIX(sc).name = "mix_rec";
			MIX(sc).nchan = 1;
			MIX(sc).update[0] = 1;
			MIX(sc).val_default = 0;
			snprintf(MIX(sc).desc, sizeof(MIX(sc).desc),
			    "AIn%d - Out%d Record Volume", chy + 1, chx + 1);

			uaudio_mixer_add_ctl(sc, &MIX(sc));

			MIX(sc).wValue[0] = MAKE_WORD(chx + 1, chy + 1 + 8);
			MIX(sc).type = MIX_SIGNED_16;
			MIX(sc).ctl = SOUND_MIXER_NRDEVICES;
			MIX(sc).name = "mix_play";
			MIX(sc).nchan = 1;
			MIX(sc).update[0] = 1;
			MIX(sc).val_default = (chx == chy) ? 2 : 0;
			snprintf(MIX(sc).desc, sizeof(MIX(sc).desc),
			    "DIn%d - Out%d Playback Volume", chy + 1, chx + 1);

			uaudio_mixer_add_ctl(sc, &MIX(sc));
		}
	}

	memset(&MIX(sc), 0, sizeof(MIX(sc)));
	MIX(sc).wIndex = MAKE_WORD(6, sc->sc_mixer_iface_no);
	MIX(sc).wValue[0] = MAKE_WORD(2, 0);
	MIX(sc).class = UAC_OUTPUT;
	MIX(sc).type = MIX_SIGNED_8;
	MIX(sc).ctl = SOUND_MIXER_NRDEVICES;
	MIX(sc).name = "effect_vol";
	MIX(sc).nchan = 1;
	MIX(sc).update[0] = 1;
	MIX(sc).minval = 0;
	MIX(sc).maxval = 0x7f;
	MIX(sc).mul = 0x7f;
	strlcpy(MIX(sc).desc, "Effect Volume", sizeof(MIX(sc).desc));
	uaudio_mixer_add_ctl_sub(sc, &MIX(sc));

	memset(&MIX(sc), 0, sizeof(MIX(sc)));
	MIX(sc).wIndex = MAKE_WORD(6, sc->sc_mixer_iface_no);
	MIX(sc).wValue[0] = MAKE_WORD(3, 0);
	MIX(sc).class = UAC_OUTPUT;
	MIX(sc).type = MIX_SIGNED_16;
	MIX(sc).ctl = SOUND_MIXER_NRDEVICES;
	MIX(sc).name = "effect_dur";
	MIX(sc).nchan = 1;
	MIX(sc).update[0] = 1;
	MIX(sc).minval = 0;
	MIX(sc).maxval = 0x7f00;
	MIX(sc).mul = 0x7f00;
	strlcpy(MIX(sc).desc, "Effect Duration", sizeof(MIX(sc).desc));
	uaudio_mixer_add_ctl_sub(sc, &MIX(sc));

	memset(&MIX(sc), 0, sizeof(MIX(sc)));
	MIX(sc).wIndex = MAKE_WORD(6, sc->sc_mixer_iface_no);
	MIX(sc).wValue[0] = MAKE_WORD(4, 0);
	MIX(sc).class = UAC_OUTPUT;
	MIX(sc).type = MIX_SIGNED_8;
	MIX(sc).ctl = SOUND_MIXER_NRDEVICES;
	MIX(sc).name = "effect_fb";
	MIX(sc).nchan = 1;
	MIX(sc).update[0] = 1;
	MIX(sc).minval = 0;
	MIX(sc).maxval = 0x7f;
	MIX(sc).mul = 0x7f;
	strlcpy(MIX(sc).desc, "Effect Feedback Volume", sizeof(MIX(sc).desc));
	uaudio_mixer_add_ctl_sub(sc, &MIX(sc));

	memset(&MIX(sc), 0, sizeof(MIX(sc)));
	MIX(sc).wIndex = MAKE_WORD(7, sc->sc_mixer_iface_no);
	for (chy = 0; chy != 4; chy++) {
		MIX(sc).wValue[0] = MAKE_WORD(7, chy + 1);
		MIX(sc).type = MIX_SIGNED_16;
		MIX(sc).ctl = SOUND_MIXER_NRDEVICES;
		MIX(sc).name = "effect_ret";
		MIX(sc).nchan = 1;
		MIX(sc).update[0] = 1;
		snprintf(MIX(sc).desc, sizeof(MIX(sc).desc),
		    "Effect Return %d Volume", chy + 1);

		uaudio_mixer_add_ctl(sc, &MIX(sc));
	}

	memset(&MIX(sc), 0, sizeof(MIX(sc)));
	MIX(sc).wIndex = MAKE_WORD(5, sc->sc_mixer_iface_no);
	for (chy = 0; chy != 8; chy++) {
		MIX(sc).wValue[0] = MAKE_WORD(9, chy + 1);
		MIX(sc).type = MIX_SIGNED_16;
		MIX(sc).ctl = SOUND_MIXER_NRDEVICES;
		MIX(sc).name = "effect_send";
		MIX(sc).nchan = 1;
		MIX(sc).update[0] = 1;
		snprintf(MIX(sc).desc, sizeof(MIX(sc).desc),
		    "Effect Send AIn%d Volume", chy + 1);

		uaudio_mixer_add_ctl(sc, &MIX(sc));

		MIX(sc).wValue[0] = MAKE_WORD(9, chy + 1 + 8);
		MIX(sc).type = MIX_SIGNED_16;
		MIX(sc).ctl = SOUND_MIXER_NRDEVICES;
		MIX(sc).name = "effect_send";
		MIX(sc).nchan = 1;
		MIX(sc).update[0] = 1;
		snprintf(MIX(sc).desc, sizeof(MIX(sc).desc),
		    "Effect Send DIn%d Volume", chy + 1);

		uaudio_mixer_add_ctl(sc, &MIX(sc));
	}
}

static void
uaudio_mixer_reload_all(struct uaudio_softc *sc)
{
	struct uaudio_mixer_node *pmc;
	int chan;

	if (sc->sc_mixer_lock == NULL)
		return;

	mtx_lock(sc->sc_mixer_lock);
	for (pmc = sc->sc_mixer_root; pmc != NULL; pmc = pmc->next) {
		/* use reset defaults for non-oss controlled settings */
		if (pmc->ctl == SOUND_MIXER_NRDEVICES)
			continue;
		for (chan = 0; chan < pmc->nchan; chan++)
			pmc->update[chan / 8] |= (1 << (chan % 8));
	}
	usbd_transfer_start(sc->sc_mixer_xfer[0]);

	/* start HID volume keys, if any */
	usbd_transfer_start(sc->sc_hid.xfer[0]);
	mtx_unlock(sc->sc_mixer_lock);
}

static void
uaudio_mixer_add_ctl_sub(struct uaudio_softc *sc, struct uaudio_mixer_node *mc)
{
	struct uaudio_mixer_node *p_mc_new =
	    malloc(sizeof(*p_mc_new), M_USBDEV, M_WAITOK);
	int ch;

	/* an M_WAITOK allocation cannot fail */
	memcpy(p_mc_new, mc, sizeof(*p_mc_new));
	p_mc_new->next = sc->sc_mixer_root;
	sc->sc_mixer_root = p_mc_new;
	sc->sc_mixer_count++;

	/* set default value for all channels */
	for (ch = 0; ch < p_mc_new->nchan; ch++) {
		switch (p_mc_new->val_default) {
		case 1:
			/* 50% */
			p_mc_new->wData[ch] =
			    (p_mc_new->maxval + p_mc_new->minval) / 2;
			break;
		case 2:
			/* 100% */
			p_mc_new->wData[ch] = p_mc_new->maxval;
			break;
		default:
			/* 0% */
			p_mc_new->wData[ch] = p_mc_new->minval;
			break;
		}
	}
}

static void
uaudio_mixer_add_ctl(struct uaudio_softc *sc, struct uaudio_mixer_node *mc)
{
	int32_t res;

	if (mc->class < UAC_NCLASSES) {
		DPRINTF("adding %s.%d\n", uac_names[mc->class], mc->ctl);
	} else {
		DPRINTF("adding %d\n", mc->ctl);
	}

	if (mc->type == MIX_ON_OFF) {
		mc->minval = 0;
		mc->maxval = 1;
	} else if (mc->type == MIX_SELECTOR) {
	} else {
		/* determine min and max values */

		mc->minval = uaudio_mixer_get(sc->sc_udev,
		    sc->sc_audio_rev, GET_MIN, mc);
		mc->maxval = uaudio_mixer_get(sc->sc_udev,
		    sc->sc_audio_rev, GET_MAX, mc);

		/* check if max and min was swapped */

		if (mc->maxval < mc->minval) {
			res = mc->maxval;
			mc->maxval = mc->minval;
			mc->minval = res;
		}

		/* compute value range */
		mc->mul = mc->maxval - mc->minval;
		if (mc->mul == 0)
			mc->mul = 1;

		/* compute value alignment */
		res = uaudio_mixer_get(sc->sc_udev,
		    sc->sc_audio_rev, GET_RES, mc);

		DPRINTF("Resolution = %d\n", (int)res);
	}

	uaudio_mixer_add_ctl_sub(sc, mc);

#ifdef USB_DEBUG
	if (uaudio_debug > 2) {
		uint8_t i;

		for (i = 0; i < mc->nchan; i++) {
			DPRINTF("[mix] wValue=%04x\n", mc->wValue[i]);
		}
		DPRINTF("[mix] wIndex=%04x type=%d ctl='%d' "
		    "min=%d max=%d\n",
		    mc->wIndex, mc->type, mc->ctl,
		    mc->minval, mc->maxval);
	}
#endif
}

static void
uaudio_mixer_add_mixer(struct uaudio_softc *sc,
    const struct uaudio_terminal_node *iot, int id)
{
	const struct usb_audio_mixer_unit_0 *d0 = iot[id].u.mu_v1;
	const struct usb_audio_mixer_unit_1 *d1;

	uint32_t bno;			/* bit number */
	uint32_t p;			/* bit number accumulator */
	uint32_t mo;			/* matching outputs */
	uint32_t mc;			/* matching channels */
	uint32_t ichs;			/* input channels */
	uint32_t ochs;			/* output channels */
	uint32_t c;
	uint32_t chs;			/* channels */
	uint32_t i;
	uint32_t o;

	DPRINTFN(3, "bUnitId=%d bNrInPins=%d\n",
	    d0->bUnitId, d0->bNrInPins);

	/* compute the number of input channels */

	ichs = 0;
	for (i = 0; i < d0->bNrInPins; i++) {
		ichs += uaudio_mixer_get_cluster(
		    d0->baSourceId[i], iot).bNrChannels;
	}

	d1 = (const void *)(d0->baSourceId + d0->bNrInPins);

	/* and the number of
output channels */ ochs = d1->bNrChannels; DPRINTFN(3, "ichs=%d ochs=%d\n", ichs, ochs); memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(d0->bUnitId, sc->sc_mixer_iface_no); uaudio_mixer_determine_class(&iot[id], &MIX(sc)); MIX(sc).type = MIX_SIGNED_16; if (uaudio_mixer_verify_desc(d0, ((ichs * ochs) + 7) / 8) == NULL) return; for (p = i = 0; i < d0->bNrInPins; i++) { chs = uaudio_mixer_get_cluster( d0->baSourceId[i], iot).bNrChannels; mc = 0; for (c = 0; c < chs; c++) { mo = 0; for (o = 0; o < ochs; o++) { bno = ((p + c) * ochs) + o; if (BIT_TEST(d1->bmControls, bno)) mo++; } if (mo == 1) mc++; } if ((mc == chs) && (chs <= MIX_MAX_CHAN)) { /* repeat bit-scan */ mc = 0; for (c = 0; c < chs; c++) { for (o = 0; o < ochs; o++) { bno = ((p + c) * ochs) + o; if (BIT_TEST(d1->bmControls, bno)) MIX(sc).wValue[mc++] = MAKE_WORD(p + c + 1, o + 1); } } MIX(sc).nchan = chs; uaudio_mixer_add_ctl(sc, &MIX(sc)); } p += chs; } } static void uaudio20_mixer_add_mixer(struct uaudio_softc *sc, const struct uaudio_terminal_node *iot, int id) { const struct usb_audio20_mixer_unit_0 *d0 = iot[id].u.mu_v2; const struct usb_audio20_mixer_unit_1 *d1; uint32_t bno; /* bit number */ uint32_t p; /* bit number accumulator */ uint32_t mo; /* matching outputs */ uint32_t mc; /* matching channels */ uint32_t ichs; /* input channels */ uint32_t ochs; /* output channels */ uint32_t c; uint32_t chs; /* channels */ uint32_t i; uint32_t o; DPRINTFN(3, "bUnitId=%d bNrInPins=%d\n", d0->bUnitId, d0->bNrInPins); /* compute the number of input channels */ ichs = 0; for (i = 0; i < d0->bNrInPins; i++) { ichs += uaudio20_mixer_get_cluster( d0->baSourceId[i], iot).bNrChannels; } d1 = (const void *)(d0->baSourceId + d0->bNrInPins); /* and the number of output channels */ ochs = d1->bNrChannels; DPRINTFN(3, "ichs=%d ochs=%d\n", ichs, ochs); memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(d0->bUnitId, sc->sc_mixer_iface_no); uaudio20_mixer_determine_class(&iot[id], &MIX(sc)); MIX(sc).type = MIX_SIGNED_16; if (uaudio20_mixer_verify_desc(d0, ((ichs * ochs) + 7) / 8) == NULL) return; for (p = i = 0; i < d0->bNrInPins; i++) { chs = uaudio20_mixer_get_cluster( d0->baSourceId[i], iot).bNrChannels; mc = 0; for (c = 0; c < chs; c++) { mo = 0; for (o = 0; o < ochs; o++) { bno = ((p + c) * ochs) + o; if (BIT_TEST(d1->bmControls, bno)) mo++; } if (mo == 1) mc++; } if ((mc == chs) && (chs <= MIX_MAX_CHAN)) { /* repeat bit-scan */ mc = 0; for (c = 0; c < chs; c++) { for (o = 0; o < ochs; o++) { bno = ((p + c) * ochs) + o; if (BIT_TEST(d1->bmControls, bno)) MIX(sc).wValue[mc++] = MAKE_WORD(p + c + 1, o + 1); } } MIX(sc).nchan = chs; uaudio_mixer_add_ctl(sc, &MIX(sc)); } p += chs; } } static void uaudio_mixer_add_selector(struct uaudio_softc *sc, const struct uaudio_terminal_node *iot, int id) { const struct usb_audio_selector_unit *d = iot[id].u.su_v1; uint16_t i; DPRINTFN(3, "bUnitId=%d bNrInPins=%d\n", d->bUnitId, d->bNrInPins); if (d->bNrInPins == 0) return; memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(d->bUnitId, sc->sc_mixer_iface_no); MIX(sc).wValue[0] = MAKE_WORD(0, 0); uaudio_mixer_determine_class(&iot[id], &MIX(sc)); MIX(sc).nchan = 1; MIX(sc).type = MIX_SELECTOR; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).minval = 1; MIX(sc).maxval = d->bNrInPins; MIX(sc).name = "selector"; i = d->baSourceId[d->bNrInPins]; if (i == 0 || usbd_req_get_string_any(sc->sc_udev, NULL, MIX(sc).desc, sizeof(MIX(sc).desc), i) != 0) { MIX(sc).desc[0] = 0; } if (MIX(sc).maxval > MAX_SELECTOR_INPUT_PIN) { 
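/*
 * The device reports more selector inputs than the driver can
 * represent; clamp to MAX_SELECTOR_INPUT_PIN so that the
 * slctrtype[] table filled in below is not overrun.
 */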
MIX(sc).maxval = MAX_SELECTOR_INPUT_PIN; } MIX(sc).mul = (MIX(sc).maxval - MIX(sc).minval); for (i = 0; i < MAX_SELECTOR_INPUT_PIN; i++) { MIX(sc).slctrtype[i] = SOUND_MIXER_NRDEVICES; } for (i = 0; i < MIX(sc).maxval; i++) { MIX(sc).slctrtype[i] = uaudio_mixer_feature_name( &iot[d->baSourceId[i]], &MIX(sc)); } MIX(sc).class = 0; /* not used */ uaudio_mixer_add_ctl(sc, &MIX(sc)); } static void uaudio20_mixer_add_selector(struct uaudio_softc *sc, const struct uaudio_terminal_node *iot, int id) { const struct usb_audio20_selector_unit *d = iot[id].u.su_v2; uint16_t i; DPRINTFN(3, "bUnitId=%d bNrInPins=%d\n", d->bUnitId, d->bNrInPins); if (d->bNrInPins == 0) return; memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(d->bUnitId, sc->sc_mixer_iface_no); MIX(sc).wValue[0] = MAKE_WORD(0, 0); uaudio20_mixer_determine_class(&iot[id], &MIX(sc)); MIX(sc).nchan = 1; MIX(sc).type = MIX_SELECTOR; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).minval = 1; MIX(sc).maxval = d->bNrInPins; MIX(sc).name = "selector"; i = d->baSourceId[d->bNrInPins]; if (i == 0 || usbd_req_get_string_any(sc->sc_udev, NULL, MIX(sc).desc, sizeof(MIX(sc).desc), i) != 0) { MIX(sc).desc[0] = 0; } if (MIX(sc).maxval > MAX_SELECTOR_INPUT_PIN) MIX(sc).maxval = MAX_SELECTOR_INPUT_PIN; MIX(sc).mul = (MIX(sc).maxval - MIX(sc).minval); for (i = 0; i < MAX_SELECTOR_INPUT_PIN; i++) MIX(sc).slctrtype[i] = SOUND_MIXER_NRDEVICES; for (i = 0; i < MIX(sc).maxval; i++) { MIX(sc).slctrtype[i] = uaudio20_mixer_feature_name( &iot[d->baSourceId[i]], &MIX(sc)); } MIX(sc).class = 0; /* not used */ uaudio_mixer_add_ctl(sc, &MIX(sc)); } static uint32_t uaudio_mixer_feature_get_bmaControls(const struct usb_audio_feature_unit *d, uint8_t i) { uint32_t temp = 0; uint32_t offset = (i * d->bControlSize); if (d->bControlSize > 0) { temp |= d->bmaControls[offset]; if (d->bControlSize > 1) { temp |= d->bmaControls[offset + 1] << 8; if (d->bControlSize > 2) { temp |= d->bmaControls[offset + 2] << 16; if (d->bControlSize > 3) { temp |= d->bmaControls[offset + 3] << 24; } } } } return (temp); } static void uaudio_mixer_add_feature(struct uaudio_softc *sc, const struct uaudio_terminal_node *iot, int id) { const struct usb_audio_feature_unit *d = iot[id].u.fu_v1; uint32_t fumask; uint32_t mmask; uint32_t cmask; uint16_t mixernumber; uint8_t nchan; uint8_t chan; uint8_t ctl; uint8_t i; if (d->bControlSize == 0) return; memset(&MIX(sc), 0, sizeof(MIX(sc))); nchan = (d->bLength - 7) / d->bControlSize; mmask = uaudio_mixer_feature_get_bmaControls(d, 0); cmask = 0; if (nchan == 0) return; /* figure out what we can control */ for (chan = 1; chan < nchan; chan++) { DPRINTFN(10, "chan=%d mask=%x\n", chan, uaudio_mixer_feature_get_bmaControls(d, chan)); cmask |= uaudio_mixer_feature_get_bmaControls(d, chan); } if (nchan > MIX_MAX_CHAN) { nchan = MIX_MAX_CHAN; } MIX(sc).wIndex = MAKE_WORD(d->bUnitId, sc->sc_mixer_iface_no); i = d->bmaControls[d->bControlSize]; if (i == 0 || usbd_req_get_string_any(sc->sc_udev, NULL, MIX(sc).desc, sizeof(MIX(sc).desc), i) != 0) { MIX(sc).desc[0] = 0; } for (ctl = 1; ctl <= LOUDNESS_CONTROL; ctl++) { fumask = FU_MASK(ctl); DPRINTFN(5, "ctl=%d fumask=0x%04x\n", ctl, fumask); if (mmask & fumask) { MIX(sc).nchan = 1; MIX(sc).wValue[0] = MAKE_WORD(ctl, 0); } else if (cmask & fumask) { MIX(sc).nchan = nchan - 1; for (i = 1; i < nchan; i++) { if (uaudio_mixer_feature_get_bmaControls(d, i) & fumask) MIX(sc).wValue[i - 1] = MAKE_WORD(ctl, i); else MIX(sc).wValue[i - 1] = -1; } } else { continue; } mixernumber = 
uaudio_mixer_feature_name(&iot[id], &MIX(sc)); switch (ctl) { case MUTE_CONTROL: MIX(sc).type = MIX_ON_OFF; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).name = "mute"; break; case VOLUME_CONTROL: MIX(sc).type = MIX_SIGNED_16; MIX(sc).ctl = mixernumber; MIX(sc).name = "vol"; break; case BASS_CONTROL: MIX(sc).type = MIX_SIGNED_8; MIX(sc).ctl = SOUND_MIXER_BASS; MIX(sc).name = "bass"; break; case MID_CONTROL: MIX(sc).type = MIX_SIGNED_8; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; /* XXXXX */ MIX(sc).name = "mid"; break; case TREBLE_CONTROL: MIX(sc).type = MIX_SIGNED_8; MIX(sc).ctl = SOUND_MIXER_TREBLE; MIX(sc).name = "treble"; break; case GRAPHIC_EQUALIZER_CONTROL: continue; /* XXX don't add anything */ case AGC_CONTROL: MIX(sc).type = MIX_ON_OFF; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; /* XXXXX */ MIX(sc).name = "agc"; break; case DELAY_CONTROL: MIX(sc).type = MIX_UNSIGNED_16; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; /* XXXXX */ MIX(sc).name = "delay"; break; case BASS_BOOST_CONTROL: MIX(sc).type = MIX_ON_OFF; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; /* XXXXX */ MIX(sc).name = "boost"; break; case LOUDNESS_CONTROL: MIX(sc).type = MIX_ON_OFF; MIX(sc).ctl = SOUND_MIXER_LOUD; /* Is this correct ? */ MIX(sc).name = "loudness"; break; default: MIX(sc).type = MIX_UNKNOWN; break; } if (MIX(sc).type != MIX_UNKNOWN) uaudio_mixer_add_ctl(sc, &MIX(sc)); } } static void uaudio20_mixer_add_feature(struct uaudio_softc *sc, const struct uaudio_terminal_node *iot, int id) { const struct usb_audio20_feature_unit *d = iot[id].u.fu_v2; uint32_t ctl; uint32_t mmask; uint32_t cmask; uint16_t mixernumber; uint8_t nchan; uint8_t chan; uint8_t i; uint8_t what; if (UGETDW(d->bmaControls[0]) == 0) return; memset(&MIX(sc), 0, sizeof(MIX(sc))); nchan = (d->bLength - 6) / 4; mmask = UGETDW(d->bmaControls[0]); cmask = 0; if (nchan == 0) return; /* figure out what we can control */ for (chan = 1; chan < nchan; chan++) cmask |= UGETDW(d->bmaControls[chan]); if (nchan > MIX_MAX_CHAN) nchan = MIX_MAX_CHAN; MIX(sc).wIndex = MAKE_WORD(d->bUnitId, sc->sc_mixer_iface_no); i = d->bmaControls[nchan][0]; if (i == 0 || usbd_req_get_string_any(sc->sc_udev, NULL, MIX(sc).desc, sizeof(MIX(sc).desc), i) != 0) { MIX(sc).desc[0] = 0; } for (ctl = 3; ctl != 0; ctl <<= 2) { mixernumber = uaudio20_mixer_feature_name(&iot[id], &MIX(sc)); switch (ctl) { case (3 << 0): MIX(sc).type = MIX_ON_OFF; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; MIX(sc).name = "mute"; what = MUTE_CONTROL; break; case (3 << 2): MIX(sc).type = MIX_SIGNED_16; MIX(sc).ctl = mixernumber; MIX(sc).name = "vol"; what = VOLUME_CONTROL; break; case (3 << 4): MIX(sc).type = MIX_SIGNED_8; MIX(sc).ctl = SOUND_MIXER_BASS; MIX(sc).name = "bass"; what = BASS_CONTROL; break; case (3 << 6): MIX(sc).type = MIX_SIGNED_8; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; /* XXXXX */ MIX(sc).name = "mid"; what = MID_CONTROL; break; case (3 << 8): MIX(sc).type = MIX_SIGNED_8; MIX(sc).ctl = SOUND_MIXER_TREBLE; MIX(sc).name = "treble"; what = TREBLE_CONTROL; break; case (3 << 12): MIX(sc).type = MIX_ON_OFF; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; /* XXXXX */ MIX(sc).name = "agc"; what = AGC_CONTROL; break; case (3 << 14): MIX(sc).type = MIX_UNSIGNED_16; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; /* XXXXX */ MIX(sc).name = "delay"; what = DELAY_CONTROL; break; case (3 << 16): MIX(sc).type = MIX_ON_OFF; MIX(sc).ctl = SOUND_MIXER_NRDEVICES; /* XXXXX */ MIX(sc).name = "boost"; what = BASS_BOOST_CONTROL; break; case (3 << 18): MIX(sc).type = MIX_ON_OFF; MIX(sc).ctl = SOUND_MIXER_LOUD; /* Is this correct ? 
*/ MIX(sc).name = "loudness"; what = LOUDNESS_CONTROL; break; case (3 << 20): MIX(sc).type = MIX_SIGNED_16; MIX(sc).ctl = mixernumber; MIX(sc).name = "igain"; what = INPUT_GAIN_CONTROL; break; case (3 << 22): MIX(sc).type = MIX_SIGNED_16; MIX(sc).ctl = mixernumber; MIX(sc).name = "igainpad"; what = INPUT_GAIN_PAD_CONTROL; break; default: continue; } if ((mmask & ctl) == ctl) { MIX(sc).nchan = 1; MIX(sc).wValue[0] = MAKE_WORD(what, 0); } else if ((cmask & ctl) == ctl) { MIX(sc).nchan = nchan - 1; for (i = 1; i < nchan; i++) { if ((UGETDW(d->bmaControls[i]) & ctl) == ctl) MIX(sc).wValue[i - 1] = MAKE_WORD(what, i); else MIX(sc).wValue[i - 1] = -1; } } else { continue; } if (MIX(sc).type != MIX_UNKNOWN) uaudio_mixer_add_ctl(sc, &MIX(sc)); } } static void uaudio_mixer_add_processing_updown(struct uaudio_softc *sc, const struct uaudio_terminal_node *iot, int id) { const struct usb_audio_processing_unit_0 *d0 = iot[id].u.pu_v1; const struct usb_audio_processing_unit_1 *d1 = (const void *)(d0->baSourceId + d0->bNrInPins); const struct usb_audio_processing_unit_updown *ud = (const void *)(d1->bmControls + d1->bControlSize); uint8_t i; if (uaudio_mixer_verify_desc(d0, sizeof(*ud)) == NULL) { return; } if (uaudio_mixer_verify_desc(d0, sizeof(*ud) + (2 * ud->bNrModes)) == NULL) { return; } DPRINTFN(3, "bUnitId=%d bNrModes=%d\n", d0->bUnitId, ud->bNrModes); if (!(d1->bmControls[0] & UA_PROC_MASK(UD_MODE_SELECT_CONTROL))) { DPRINTF("no mode select\n"); return; } memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(d0->bUnitId, sc->sc_mixer_iface_no); MIX(sc).nchan = 1; MIX(sc).wValue[0] = MAKE_WORD(UD_MODE_SELECT_CONTROL, 0); uaudio_mixer_determine_class(&iot[id], &MIX(sc)); MIX(sc).type = MIX_ON_OFF; /* XXX */ for (i = 0; i < ud->bNrModes; i++) { DPRINTFN(3, "i=%d bm=0x%x\n", i, UGETW(ud->waModes[i])); /* XXX */ } uaudio_mixer_add_ctl(sc, &MIX(sc)); } static void uaudio_mixer_add_processing(struct uaudio_softc *sc, const struct uaudio_terminal_node *iot, int id) { const struct usb_audio_processing_unit_0 *d0 = iot[id].u.pu_v1; const struct usb_audio_processing_unit_1 *d1 = (const void *)(d0->baSourceId + d0->bNrInPins); uint16_t ptype; memset(&MIX(sc), 0, sizeof(MIX(sc))); ptype = UGETW(d0->wProcessType); DPRINTFN(3, "wProcessType=%d bUnitId=%d " "bNrInPins=%d\n", ptype, d0->bUnitId, d0->bNrInPins); if (d1->bControlSize == 0) { return; } if (d1->bmControls[0] & UA_PROC_ENABLE_MASK) { MIX(sc).wIndex = MAKE_WORD(d0->bUnitId, sc->sc_mixer_iface_no); MIX(sc).nchan = 1; MIX(sc).wValue[0] = MAKE_WORD(XX_ENABLE_CONTROL, 0); uaudio_mixer_determine_class(&iot[id], &MIX(sc)); MIX(sc).type = MIX_ON_OFF; uaudio_mixer_add_ctl(sc, &MIX(sc)); } switch (ptype) { case UPDOWNMIX_PROCESS: uaudio_mixer_add_processing_updown(sc, iot, id); break; case DOLBY_PROLOGIC_PROCESS: case P3D_STEREO_EXTENDER_PROCESS: case REVERBATION_PROCESS: case CHORUS_PROCESS: case DYN_RANGE_COMP_PROCESS: default: DPRINTF("unit %d, type=%d is not implemented\n", d0->bUnitId, ptype); break; } } static void uaudio_mixer_add_extension(struct uaudio_softc *sc, const struct uaudio_terminal_node *iot, int id) { const struct usb_audio_extension_unit_0 *d0 = iot[id].u.eu_v1; const struct usb_audio_extension_unit_1 *d1 = (const void *)(d0->baSourceId + d0->bNrInPins); DPRINTFN(3, "bUnitId=%d bNrInPins=%d\n", d0->bUnitId, d0->bNrInPins); if (sc->sc_uq_au_no_xu) { return; } if (d1->bControlSize == 0) { return; } if (d1->bmControls[0] & UA_EXT_ENABLE_MASK) { memset(&MIX(sc), 0, sizeof(MIX(sc))); MIX(sc).wIndex = MAKE_WORD(d0->bUnitId, 
sc->sc_mixer_iface_no); MIX(sc).nchan = 1; MIX(sc).wValue[0] = MAKE_WORD(UA_EXT_ENABLE, 0); uaudio_mixer_determine_class(&iot[id], &MIX(sc)); MIX(sc).type = MIX_ON_OFF; uaudio_mixer_add_ctl(sc, &MIX(sc)); } } static const void * uaudio_mixer_verify_desc(const void *arg, uint32_t len) { const struct usb_audio_mixer_unit_1 *d1; const struct usb_audio_extension_unit_1 *e1; const struct usb_audio_processing_unit_1 *u1; union { const struct usb_descriptor *desc; const struct usb_audio_input_terminal *it; const struct usb_audio_output_terminal *ot; const struct usb_audio_mixer_unit_0 *mu; const struct usb_audio_selector_unit *su; const struct usb_audio_feature_unit *fu; const struct usb_audio_processing_unit_0 *pu; const struct usb_audio_extension_unit_0 *eu; } u; u.desc = arg; if (u.desc == NULL) { goto error; } if (u.desc->bDescriptorType != UDESC_CS_INTERFACE) { goto error; } switch (u.desc->bDescriptorSubtype) { case UDESCSUB_AC_INPUT: len += sizeof(*u.it); break; case UDESCSUB_AC_OUTPUT: len += sizeof(*u.ot); break; case UDESCSUB_AC_MIXER: len += sizeof(*u.mu); if (u.desc->bLength < len) { goto error; } len += u.mu->bNrInPins; if (u.desc->bLength < len) { goto error; } d1 = (const void *)(u.mu->baSourceId + u.mu->bNrInPins); len += sizeof(*d1); break; case UDESCSUB_AC_SELECTOR: len += sizeof(*u.su); if (u.desc->bLength < len) { goto error; } len += u.su->bNrInPins + 1; break; case UDESCSUB_AC_FEATURE: len += sizeof(*u.fu) + 1; if (u.desc->bLength < len) goto error; len += u.fu->bControlSize; break; case UDESCSUB_AC_PROCESSING: len += sizeof(*u.pu); if (u.desc->bLength < len) { goto error; } len += u.pu->bNrInPins; if (u.desc->bLength < len) { goto error; } u1 = (const void *)(u.pu->baSourceId + u.pu->bNrInPins); len += sizeof(*u1); if (u.desc->bLength < len) { goto error; } len += u1->bControlSize; break; case UDESCSUB_AC_EXTENSION: len += sizeof(*u.eu); if (u.desc->bLength < len) { goto error; } len += u.eu->bNrInPins; if (u.desc->bLength < len) { goto error; } e1 = (const void *)(u.eu->baSourceId + u.eu->bNrInPins); len += sizeof(*e1); if (u.desc->bLength < len) { goto error; } len += e1->bControlSize; break; default: goto error; } if (u.desc->bLength < len) { goto error; } return (u.desc); error: if (u.desc) { DPRINTF("invalid descriptor, type=%d, " "sub_type=%d, len=%d of %d bytes\n", u.desc->bDescriptorType, u.desc->bDescriptorSubtype, u.desc->bLength, len); } return (NULL); } static const void * uaudio20_mixer_verify_desc(const void *arg, uint32_t len) { const struct usb_audio20_mixer_unit_1 *d1; const struct usb_audio20_extension_unit_1 *e1; const struct usb_audio20_processing_unit_1 *u1; const struct usb_audio20_clock_selector_unit_1 *c1; union { const struct usb_descriptor *desc; const struct usb_audio20_clock_source_unit *csrc; const struct usb_audio20_clock_selector_unit_0 *csel; const struct usb_audio20_clock_multiplier_unit *cmul; const struct usb_audio20_input_terminal *it; const struct usb_audio20_output_terminal *ot; const struct usb_audio20_mixer_unit_0 *mu; const struct usb_audio20_selector_unit *su; const struct usb_audio20_feature_unit *fu; const struct usb_audio20_sample_rate_unit *ru; const struct usb_audio20_processing_unit_0 *pu; const struct usb_audio20_extension_unit_0 *eu; const struct usb_audio20_effect_unit *ef; } u; u.desc = arg; if (u.desc == NULL) goto error; if (u.desc->bDescriptorType != UDESC_CS_INTERFACE) goto error; switch (u.desc->bDescriptorSubtype) { case UDESCSUB_AC_INPUT: len += sizeof(*u.it); break; case UDESCSUB_AC_OUTPUT: len += sizeof(*u.ot); 
break; case UDESCSUB_AC_MIXER: len += sizeof(*u.mu); if (u.desc->bLength < len) goto error; len += u.mu->bNrInPins; if (u.desc->bLength < len) goto error; d1 = (const void *)(u.mu->baSourceId + u.mu->bNrInPins); len += sizeof(*d1) + d1->bNrChannels; break; case UDESCSUB_AC_SELECTOR: len += sizeof(*u.su); if (u.desc->bLength < len) goto error; len += u.su->bNrInPins + 1; break; case UDESCSUB_AC_FEATURE: len += sizeof(*u.fu) + 1; break; case UDESCSUB_AC_EFFECT: len += sizeof(*u.ef) + 4; break; case UDESCSUB_AC_PROCESSING_V2: len += sizeof(*u.pu); if (u.desc->bLength < len) goto error; len += u.pu->bNrInPins; if (u.desc->bLength < len) goto error; u1 = (const void *)(u.pu->baSourceId + u.pu->bNrInPins); len += sizeof(*u1); break; case UDESCSUB_AC_EXTENSION_V2: len += sizeof(*u.eu); if (u.desc->bLength < len) goto error; len += u.eu->bNrInPins; if (u.desc->bLength < len) goto error; e1 = (const void *)(u.eu->baSourceId + u.eu->bNrInPins); len += sizeof(*e1); break; case UDESCSUB_AC_CLOCK_SRC: len += sizeof(*u.csrc); break; case UDESCSUB_AC_CLOCK_SEL: len += sizeof(*u.csel); if (u.desc->bLength < len) goto error; len += u.csel->bNrInPins; if (u.desc->bLength < len) goto error; c1 = (const void *)(u.csel->baCSourceId + u.csel->bNrInPins); len += sizeof(*c1); break; case UDESCSUB_AC_CLOCK_MUL: len += sizeof(*u.cmul); break; case UDESCSUB_AC_SAMPLE_RT: len += sizeof(*u.ru); break; default: goto error; } if (u.desc->bLength < len) goto error; return (u.desc); error: if (u.desc) { DPRINTF("invalid descriptor, type=%d, " "sub_type=%d, len=%d of %d bytes\n", u.desc->bDescriptorType, u.desc->bDescriptorSubtype, u.desc->bLength, len); } return (NULL); } static struct usb_audio_cluster uaudio_mixer_get_cluster(uint8_t id, const struct uaudio_terminal_node *iot) { struct usb_audio_cluster r; const struct usb_descriptor *dp; uint8_t i; for (i = 0; i < UAUDIO_RECURSE_LIMIT; i++) { /* avoid infinite loops */ dp = iot[id].u.desc; if (dp == NULL) { goto error; } switch (dp->bDescriptorSubtype) { case UDESCSUB_AC_INPUT: r.bNrChannels = iot[id].u.it_v1->bNrChannels; r.wChannelConfig[0] = iot[id].u.it_v1->wChannelConfig[0]; r.wChannelConfig[1] = iot[id].u.it_v1->wChannelConfig[1]; r.iChannelNames = iot[id].u.it_v1->iChannelNames; goto done; case UDESCSUB_AC_OUTPUT: id = iot[id].u.ot_v1->bSourceId; break; case UDESCSUB_AC_MIXER: r = *(const struct usb_audio_cluster *) &iot[id].u.mu_v1->baSourceId[ iot[id].u.mu_v1->bNrInPins]; goto done; case UDESCSUB_AC_SELECTOR: if (iot[id].u.su_v1->bNrInPins > 0) { /* XXX This is not really right */ id = iot[id].u.su_v1->baSourceId[0]; } break; case UDESCSUB_AC_FEATURE: id = iot[id].u.fu_v1->bSourceId; break; case UDESCSUB_AC_PROCESSING: r = *((const struct usb_audio_cluster *) &iot[id].u.pu_v1->baSourceId[ iot[id].u.pu_v1->bNrInPins]); goto done; case UDESCSUB_AC_EXTENSION: r = *((const struct usb_audio_cluster *) &iot[id].u.eu_v1->baSourceId[ iot[id].u.eu_v1->bNrInPins]); goto done; default: goto error; } } error: DPRINTF("bad data\n"); memset(&r, 0, sizeof(r)); done: return (r); } static struct usb_audio20_cluster uaudio20_mixer_get_cluster(uint8_t id, const struct uaudio_terminal_node *iot) { struct usb_audio20_cluster r; const struct usb_descriptor *dp; uint8_t i; for (i = 0; i < UAUDIO_RECURSE_LIMIT; i++) { /* avoid infinite loops */ dp = iot[id].u.desc; if (dp == NULL) goto error; switch (dp->bDescriptorSubtype) { case UDESCSUB_AC_INPUT: r.bNrChannels = iot[id].u.it_v2->bNrChannels; r.bmChannelConfig[0] = iot[id].u.it_v2->bmChannelConfig[0]; r.bmChannelConfig[1] = 
iot[id].u.it_v2->bmChannelConfig[1]; r.bmChannelConfig[2] = iot[id].u.it_v2->bmChannelConfig[2]; r.bmChannelConfig[3] = iot[id].u.it_v2->bmChannelConfig[3]; r.iChannelNames = iot[id].u.it_v2->iTerminal; goto done; case UDESCSUB_AC_OUTPUT: id = iot[id].u.ot_v2->bSourceId; break; case UDESCSUB_AC_MIXER: r = *(const struct usb_audio20_cluster *) &iot[id].u.mu_v2->baSourceId[ iot[id].u.mu_v2->bNrInPins]; goto done; case UDESCSUB_AC_SELECTOR: if (iot[id].u.su_v2->bNrInPins > 0) { /* XXX This is not really right */ id = iot[id].u.su_v2->baSourceId[0]; } break; case UDESCSUB_AC_SAMPLE_RT: id = iot[id].u.ru_v2->bSourceId; break; case UDESCSUB_AC_EFFECT: id = iot[id].u.ef_v2->bSourceId; break; case UDESCSUB_AC_FEATURE: id = iot[id].u.fu_v2->bSourceId; break; case UDESCSUB_AC_PROCESSING_V2: r = *((const struct usb_audio20_cluster *) &iot[id].u.pu_v2->baSourceId[ iot[id].u.pu_v2->bNrInPins]); goto done; case UDESCSUB_AC_EXTENSION_V2: r = *((const struct usb_audio20_cluster *) &iot[id].u.eu_v2->baSourceId[ iot[id].u.eu_v2->bNrInPins]); goto done; default: goto error; } } error: DPRINTF("Bad data!\n"); memset(&r, 0, sizeof(r)); done: return (r); } static uint16_t uaudio_mixer_determine_class(const struct uaudio_terminal_node *iot, struct uaudio_mixer_node *mix) { uint16_t terminal_type = 0x0000; const struct uaudio_terminal_node *input[2]; const struct uaudio_terminal_node *output[2]; input[0] = uaudio_mixer_get_input(iot, 0); input[1] = uaudio_mixer_get_input(iot, 1); output[0] = uaudio_mixer_get_output(iot, 0); output[1] = uaudio_mixer_get_output(iot, 1); /* * check if there is only * one output terminal: */ if (output[0] && (!output[1])) { terminal_type = UGETW(output[0]->u.ot_v1->wTerminalType); } /* * If the only output terminal is USB, * the class is UAC_RECORD. */ if ((terminal_type & 0xff00) == (UAT_UNDEFINED & 0xff00)) { mix->class = UAC_RECORD; if (input[0] && (!input[1])) { terminal_type = UGETW(input[0]->u.it_v1->wTerminalType); } else { terminal_type = 0; } goto done; } /* * if the unit is connected to just * one input terminal, the * class is UAC_INPUT: */ if (input[0] && (!input[1])) { mix->class = UAC_INPUT; terminal_type = UGETW(input[0]->u.it_v1->wTerminalType); goto done; } /* * Otherwise, the class is UAC_OUTPUT. */ mix->class = UAC_OUTPUT; done: return (terminal_type); } static uint16_t uaudio20_mixer_determine_class(const struct uaudio_terminal_node *iot, struct uaudio_mixer_node *mix) { uint16_t terminal_type = 0x0000; const struct uaudio_terminal_node *input[2]; const struct uaudio_terminal_node *output[2]; input[0] = uaudio_mixer_get_input(iot, 0); input[1] = uaudio_mixer_get_input(iot, 1); output[0] = uaudio_mixer_get_output(iot, 0); output[1] = uaudio_mixer_get_output(iot, 1); /* * check if there is only * one output terminal: */ if (output[0] && (!output[1])) terminal_type = UGETW(output[0]->u.ot_v2->wTerminalType); /* * If the only output terminal is USB, * the class is UAC_RECORD. */ if ((terminal_type & 0xff00) == (UAT_UNDEFINED & 0xff00)) { mix->class = UAC_RECORD; if (input[0] && (!input[1])) { terminal_type = UGETW(input[0]->u.it_v2->wTerminalType); } else { terminal_type = 0; } goto done; } /* * if the unit is connected to just * one input terminal, the * class is UAC_INPUT: */ if (input[0] && (!input[1])) { mix->class = UAC_INPUT; terminal_type = UGETW(input[0]->u.it_v2->wTerminalType); goto done; } /* * Otherwise, the class is UAC_OUTPUT. 
	 */
	mix->class = UAC_OUTPUT;
done:
	return (terminal_type);
}

struct uaudio_tt_to_feature {
	uint16_t terminal_type;
	uint16_t feature;
};

static const struct uaudio_tt_to_feature uaudio_tt_to_feature[] = {

	{UAT_STREAM, SOUND_MIXER_PCM},

	{UATI_MICROPHONE, SOUND_MIXER_MIC},
	{UATI_DESKMICROPHONE, SOUND_MIXER_MIC},
	{UATI_PERSONALMICROPHONE, SOUND_MIXER_MIC},
	{UATI_OMNIMICROPHONE, SOUND_MIXER_MIC},
	{UATI_MICROPHONEARRAY, SOUND_MIXER_MIC},
	{UATI_PROCMICROPHONEARR, SOUND_MIXER_MIC},

	{UATO_SPEAKER, SOUND_MIXER_SPEAKER},
	{UATO_DESKTOPSPEAKER, SOUND_MIXER_SPEAKER},
	{UATO_ROOMSPEAKER, SOUND_MIXER_SPEAKER},
	{UATO_COMMSPEAKER, SOUND_MIXER_SPEAKER},

	{UATE_ANALOGCONN, SOUND_MIXER_LINE},
	{UATE_LINECONN, SOUND_MIXER_LINE},
	{UATE_LEGACYCONN, SOUND_MIXER_LINE},

	{UATE_DIGITALAUIFC, SOUND_MIXER_ALTPCM},
	{UATE_SPDIF, SOUND_MIXER_ALTPCM},
	{UATE_1394DA, SOUND_MIXER_ALTPCM},
	{UATE_1394DV, SOUND_MIXER_ALTPCM},

	{UATF_CDPLAYER, SOUND_MIXER_CD},

	{UATF_SYNTHESIZER, SOUND_MIXER_SYNTH},

	{UATF_VIDEODISCAUDIO, SOUND_MIXER_VIDEO},
	{UATF_DVDAUDIO, SOUND_MIXER_VIDEO},
	{UATF_TVTUNERAUDIO, SOUND_MIXER_VIDEO},

	/* telephony terminal types */
	{UATT_UNDEFINED, SOUND_MIXER_PHONEIN},		/* SOUND_MIXER_PHONEOUT */
	{UATT_PHONELINE, SOUND_MIXER_PHONEIN},		/* SOUND_MIXER_PHONEOUT */
	{UATT_TELEPHONE, SOUND_MIXER_PHONEIN},		/* SOUND_MIXER_PHONEOUT */
	{UATT_DOWNLINEPHONE, SOUND_MIXER_PHONEIN},	/* SOUND_MIXER_PHONEOUT */

	{UATF_RADIORECV, SOUND_MIXER_RADIO},
	{UATF_RADIOXMIT, SOUND_MIXER_RADIO},

	{UAT_UNDEFINED, SOUND_MIXER_VOLUME},
	{UAT_VENDOR, SOUND_MIXER_VOLUME},
	{UATI_UNDEFINED, SOUND_MIXER_VOLUME},

	/* output terminal types */
	{UATO_UNDEFINED, SOUND_MIXER_VOLUME},
	{UATO_DISPLAYAUDIO, SOUND_MIXER_VOLUME},
	{UATO_SUBWOOFER, SOUND_MIXER_VOLUME},
	{UATO_HEADPHONES, SOUND_MIXER_VOLUME},

	/* bidir terminal types */
	{UATB_UNDEFINED, SOUND_MIXER_VOLUME},
	{UATB_HANDSET, SOUND_MIXER_VOLUME},
	{UATB_HEADSET, SOUND_MIXER_VOLUME},
	{UATB_SPEAKERPHONE, SOUND_MIXER_VOLUME},
	{UATB_SPEAKERPHONEESUP, SOUND_MIXER_VOLUME},
	{UATB_SPEAKERPHONEECANC, SOUND_MIXER_VOLUME},

	/* external terminal types */
	{UATE_UNDEFINED, SOUND_MIXER_VOLUME},

	/* embedded function terminal types */
	{UATF_UNDEFINED, SOUND_MIXER_VOLUME},
	{UATF_CALIBNOISE, SOUND_MIXER_VOLUME},
	{UATF_EQUNOISE, SOUND_MIXER_VOLUME},
	{UATF_DAT, SOUND_MIXER_VOLUME},
	{UATF_DCC, SOUND_MIXER_VOLUME},
	{UATF_MINIDISK, SOUND_MIXER_VOLUME},
	{UATF_ANALOGTAPE, SOUND_MIXER_VOLUME},
	{UATF_PHONOGRAPH, SOUND_MIXER_VOLUME},
	{UATF_VCRAUDIO, SOUND_MIXER_VOLUME},
	{UATF_SATELLITE, SOUND_MIXER_VOLUME},
	{UATF_CABLETUNER, SOUND_MIXER_VOLUME},
	{UATF_DSS, SOUND_MIXER_VOLUME},
	{UATF_MULTITRACK, SOUND_MIXER_VOLUME},
	{0xffff, SOUND_MIXER_VOLUME},

	/* default */
	{0x0000, SOUND_MIXER_VOLUME},
};

static uint16_t
uaudio_mixer_feature_name(const struct uaudio_terminal_node *iot,
    struct uaudio_mixer_node *mix)
{
	const struct uaudio_tt_to_feature *uat = uaudio_tt_to_feature;
	uint16_t terminal_type = uaudio_mixer_determine_class(iot, mix);

	if ((mix->class == UAC_RECORD) && (terminal_type == 0)) {
		return (SOUND_MIXER_IMIX);
	}
	while (uat->terminal_type) {
		if (uat->terminal_type == terminal_type) {
			break;
		}
		uat++;
	}

	DPRINTF("terminal_type=0x%04x -> %d\n",
	    terminal_type, uat->feature);

	return (uat->feature);
}

static uint16_t
uaudio20_mixer_feature_name(const struct uaudio_terminal_node *iot,
    struct uaudio_mixer_node *mix)
{
	const struct uaudio_tt_to_feature *uat;
	uint16_t terminal_type = uaudio20_mixer_determine_class(iot, mix);

	if ((mix->class == UAC_RECORD) && (terminal_type == 0))
		return (SOUND_MIXER_IMIX);

	for (uat = uaudio_tt_to_feature;
uat->terminal_type != 0; uat++) { if (uat->terminal_type == terminal_type) break; } DPRINTF("terminal_type=0x%04x -> %d\n", terminal_type, uat->feature); return (uat->feature); } static const struct uaudio_terminal_node * uaudio_mixer_get_input(const struct uaudio_terminal_node *iot, uint8_t i) { struct uaudio_terminal_node *root = iot->root; uint8_t n; n = iot->usr.id_max; do { if (iot->usr.bit_input[n / 8] & (1 << (n % 8))) { if (!i--) return (root + n); } } while (n--); return (NULL); } static const struct uaudio_terminal_node * uaudio_mixer_get_output(const struct uaudio_terminal_node *iot, uint8_t i) { struct uaudio_terminal_node *root = iot->root; uint8_t n; n = iot->usr.id_max; do { if (iot->usr.bit_output[n / 8] & (1 << (n % 8))) { if (!i--) return (root + n); } } while (n--); return (NULL); } static void uaudio_mixer_find_inputs_sub(struct uaudio_terminal_node *root, const uint8_t *p_id, uint8_t n_id, struct uaudio_search_result *info) { struct uaudio_terminal_node *iot; uint8_t n; uint8_t i; uint8_t is_last; top: for (n = 0; n < n_id; n++) { i = p_id[n]; if (info->recurse_level == UAUDIO_RECURSE_LIMIT) { DPRINTF("avoided going into a circle at id=%d!\n", i); return; } info->recurse_level++; iot = (root + i); if (iot->u.desc == NULL) continue; is_last = ((n + 1) == n_id); switch (iot->u.desc->bDescriptorSubtype) { case UDESCSUB_AC_INPUT: info->bit_input[i / 8] |= (1 << (i % 8)); break; case UDESCSUB_AC_FEATURE: if (is_last) { p_id = &iot->u.fu_v1->bSourceId; n_id = 1; goto top; } uaudio_mixer_find_inputs_sub( root, &iot->u.fu_v1->bSourceId, 1, info); break; case UDESCSUB_AC_OUTPUT: if (is_last) { p_id = &iot->u.ot_v1->bSourceId; n_id = 1; goto top; } uaudio_mixer_find_inputs_sub( root, &iot->u.ot_v1->bSourceId, 1, info); break; case UDESCSUB_AC_MIXER: if (is_last) { p_id = iot->u.mu_v1->baSourceId; n_id = iot->u.mu_v1->bNrInPins; goto top; } uaudio_mixer_find_inputs_sub( root, iot->u.mu_v1->baSourceId, iot->u.mu_v1->bNrInPins, info); break; case UDESCSUB_AC_SELECTOR: if (is_last) { p_id = iot->u.su_v1->baSourceId; n_id = iot->u.su_v1->bNrInPins; goto top; } uaudio_mixer_find_inputs_sub( root, iot->u.su_v1->baSourceId, iot->u.su_v1->bNrInPins, info); break; case UDESCSUB_AC_PROCESSING: if (is_last) { p_id = iot->u.pu_v1->baSourceId; n_id = iot->u.pu_v1->bNrInPins; goto top; } uaudio_mixer_find_inputs_sub( root, iot->u.pu_v1->baSourceId, iot->u.pu_v1->bNrInPins, info); break; case UDESCSUB_AC_EXTENSION: if (is_last) { p_id = iot->u.eu_v1->baSourceId; n_id = iot->u.eu_v1->bNrInPins; goto top; } uaudio_mixer_find_inputs_sub( root, iot->u.eu_v1->baSourceId, iot->u.eu_v1->bNrInPins, info); break; default: break; } } } static void uaudio20_mixer_find_inputs_sub(struct uaudio_terminal_node *root, const uint8_t *p_id, uint8_t n_id, struct uaudio_search_result *info) { struct uaudio_terminal_node *iot; uint8_t n; uint8_t i; uint8_t is_last; top: for (n = 0; n < n_id; n++) { i = p_id[n]; if (info->recurse_level == UAUDIO_RECURSE_LIMIT) { DPRINTF("avoided going into a circle at id=%d!\n", i); return; } info->recurse_level++; iot = (root + i); if (iot->u.desc == NULL) continue; is_last = ((n + 1) == n_id); switch (iot->u.desc->bDescriptorSubtype) { case UDESCSUB_AC_INPUT: info->bit_input[i / 8] |= (1 << (i % 8)); break; case UDESCSUB_AC_OUTPUT: if (is_last) { p_id = &iot->u.ot_v2->bSourceId; n_id = 1; goto top; } uaudio20_mixer_find_inputs_sub( root, &iot->u.ot_v2->bSourceId, 1, info); break; case UDESCSUB_AC_MIXER: if (is_last) { p_id = iot->u.mu_v2->baSourceId; n_id = 
iot->u.mu_v2->bNrInPins; goto top; } uaudio20_mixer_find_inputs_sub( root, iot->u.mu_v2->baSourceId, iot->u.mu_v2->bNrInPins, info); break; case UDESCSUB_AC_SELECTOR: if (is_last) { p_id = iot->u.su_v2->baSourceId; n_id = iot->u.su_v2->bNrInPins; goto top; } uaudio20_mixer_find_inputs_sub( root, iot->u.su_v2->baSourceId, iot->u.su_v2->bNrInPins, info); break; case UDESCSUB_AC_SAMPLE_RT: if (is_last) { p_id = &iot->u.ru_v2->bSourceId; n_id = 1; goto top; } uaudio20_mixer_find_inputs_sub( root, &iot->u.ru_v2->bSourceId, 1, info); break; case UDESCSUB_AC_EFFECT: if (is_last) { p_id = &iot->u.ef_v2->bSourceId; n_id = 1; goto top; } uaudio20_mixer_find_inputs_sub( root, &iot->u.ef_v2->bSourceId, 1, info); break; case UDESCSUB_AC_FEATURE: if (is_last) { p_id = &iot->u.fu_v2->bSourceId; n_id = 1; goto top; } uaudio20_mixer_find_inputs_sub( root, &iot->u.fu_v2->bSourceId, 1, info); break; case UDESCSUB_AC_PROCESSING_V2: if (is_last) { p_id = iot->u.pu_v2->baSourceId; n_id = iot->u.pu_v2->bNrInPins; goto top; } uaudio20_mixer_find_inputs_sub( root, iot->u.pu_v2->baSourceId, iot->u.pu_v2->bNrInPins, info); break; case UDESCSUB_AC_EXTENSION_V2: if (is_last) { p_id = iot->u.eu_v2->baSourceId; n_id = iot->u.eu_v2->bNrInPins; goto top; } uaudio20_mixer_find_inputs_sub( root, iot->u.eu_v2->baSourceId, iot->u.eu_v2->bNrInPins, info); break; default: break; } } } static void uaudio20_mixer_find_clocks_sub(struct uaudio_terminal_node *root, const uint8_t *p_id, uint8_t n_id, struct uaudio_search_result *info) { struct uaudio_terminal_node *iot; uint8_t n; uint8_t i; uint8_t is_last; uint8_t id; top: for (n = 0; n < n_id; n++) { i = p_id[n]; if (info->recurse_level == UAUDIO_RECURSE_LIMIT) { DPRINTF("avoided going into a circle at id=%d!\n", i); return; } info->recurse_level++; iot = (root + i); if (iot->u.desc == NULL) continue; is_last = ((n + 1) == n_id); switch (iot->u.desc->bDescriptorSubtype) { case UDESCSUB_AC_INPUT: info->is_input = 1; if (is_last) { p_id = &iot->u.it_v2->bCSourceId; n_id = 1; goto top; } uaudio20_mixer_find_clocks_sub(root, &iot->u.it_v2->bCSourceId, 1, info); break; case UDESCSUB_AC_OUTPUT: info->is_input = 0; if (is_last) { p_id = &iot->u.ot_v2->bCSourceId; n_id = 1; goto top; } uaudio20_mixer_find_clocks_sub(root, &iot->u.ot_v2->bCSourceId, 1, info); break; case UDESCSUB_AC_CLOCK_SEL: if (is_last) { p_id = iot->u.csel_v2->baCSourceId; n_id = iot->u.csel_v2->bNrInPins; goto top; } uaudio20_mixer_find_clocks_sub(root, iot->u.csel_v2->baCSourceId, iot->u.csel_v2->bNrInPins, info); break; case UDESCSUB_AC_CLOCK_MUL: if (is_last) { p_id = &iot->u.cmul_v2->bCSourceId; n_id = 1; goto top; } uaudio20_mixer_find_clocks_sub(root, &iot->u.cmul_v2->bCSourceId, 1, info); break; case UDESCSUB_AC_CLOCK_SRC: id = iot->u.csrc_v2->bClockId; switch (info->is_input) { case 0: info->bit_output[id / 8] |= (1 << (id % 8)); break; case 1: info->bit_input[id / 8] |= (1 << (id % 8)); break; default: break; } break; default: break; } } } static void uaudio_mixer_find_outputs_sub(struct uaudio_terminal_node *root, uint8_t id, uint8_t n_id, struct uaudio_search_result *info) { struct uaudio_terminal_node *iot = (root + id); uint8_t j; j = n_id; do { if ((j != id) && ((root + j)->u.desc) && ((root + j)->u.desc->bDescriptorSubtype == UDESCSUB_AC_OUTPUT)) { /* * "j" (output) <--- virtual wire <--- "id" (input) * * if "j" has "id" on the input, then "id" have "j" on * the output, because they are connected: */ if ((root + j)->usr.bit_input[id / 8] & (1 << (id % 8))) { iot->usr.bit_output[j / 8] |= (1 << (j % 
			    8));
			}
		}
	} while (j--);
}

static void
uaudio_mixer_fill_info(struct uaudio_softc *sc,
    struct usb_device *udev, void *desc)
{
	const struct usb_audio_control_descriptor *acdp;
	struct usb_config_descriptor *cd = usbd_get_config_descriptor(udev);
	const struct usb_descriptor *dp;
	const struct usb_audio_unit *au;
	struct uaudio_terminal_node *iot = NULL;
	uint16_t wTotalLen;
	uint8_t ID_max = 0;		/* inclusive */
	uint8_t i;

	desc = usb_desc_foreach(cd, desc);
	if (desc == NULL) {
		DPRINTF("no Audio Control header\n");
		goto done;
	}
	acdp = desc;
	if ((acdp->bLength < sizeof(*acdp)) ||
	    (acdp->bDescriptorType != UDESC_CS_INTERFACE) ||
	    (acdp->bDescriptorSubtype != UDESCSUB_AC_HEADER)) {
		DPRINTF("invalid Audio Control header\n");
		goto done;
	}
	/* "wTotalLen" is allowed to be corrupt */
	wTotalLen = UGETW(acdp->wTotalLength) - acdp->bLength;

	/* get USB audio revision */
	sc->sc_audio_rev = UGETW(acdp->bcdADC);

	DPRINTFN(3, "found AC header, vers=%03x, len=%d\n",
	    sc->sc_audio_rev, wTotalLen);

	/* an M_WAITOK allocation cannot fail */
	iot = malloc(sizeof(struct uaudio_terminal_node) * 256,
	    M_TEMP, M_WAITOK | M_ZERO);

	while ((desc = usb_desc_foreach(cd, desc))) {
		dp = desc;

		if (dp->bLength > wTotalLen) {
			break;
		} else {
			wTotalLen -= dp->bLength;
		}

		if (sc->sc_audio_rev >= UAUDIO_VERSION_30)
			au = NULL;
		else if (sc->sc_audio_rev >= UAUDIO_VERSION_20)
			au = uaudio20_mixer_verify_desc(dp, 0);
		else
			au = uaudio_mixer_verify_desc(dp, 0);

		if (au) {
			iot[au->bUnitId].u.desc = (const void *)au;
			if (au->bUnitId > ID_max)
				ID_max = au->bUnitId;
		}
	}

	DPRINTF("Maximum ID=%d\n", ID_max);

	/*
	 * determine sourcing inputs for
	 * all nodes in the tree:
	 */
	i = ID_max;
	do {
		if (sc->sc_audio_rev >= UAUDIO_VERSION_30) {
			/* FALLTHROUGH */
		} else if (sc->sc_audio_rev >= UAUDIO_VERSION_20) {
			uaudio20_mixer_find_inputs_sub(iot,
			    &i, 1, &((iot + i)->usr));

			sc->sc_mixer_clocks.is_input = 255;
			sc->sc_mixer_clocks.recurse_level = 0;

			uaudio20_mixer_find_clocks_sub(iot,
			    &i, 1, &sc->sc_mixer_clocks);
		} else {
			uaudio_mixer_find_inputs_sub(iot,
			    &i, 1, &((iot + i)->usr));
		}
	} while (i--);

	/*
	 * determine outputs for
	 * all nodes in the tree:
	 */
	i = ID_max;
	do {
		uaudio_mixer_find_outputs_sub(iot,
		    i, ID_max, &((iot + i)->usr));
	} while (i--);

	/* set "id_max" and "root" */

	i = ID_max;
	do {
		(iot + i)->usr.id_max = ID_max;
		(iot + i)->root = iot;
	} while (i--);

	/*
	 * Scan the config to create a linked list of "mixer" nodes:
	 */
	i = ID_max;
	do {
		dp = iot[i].u.desc;

		if (dp == NULL)
			continue;

		DPRINTFN(11, "id=%d subtype=%d\n",
		    i, dp->bDescriptorSubtype);

		if (sc->sc_audio_rev >= UAUDIO_VERSION_30) {
			continue;
		} else if (sc->sc_audio_rev >= UAUDIO_VERSION_20) {
			switch (dp->bDescriptorSubtype) {
			case UDESCSUB_AC_HEADER:
				DPRINTF("unexpected AC header\n");
				break;

			case UDESCSUB_AC_INPUT:
			case UDESCSUB_AC_OUTPUT:
			case UDESCSUB_AC_PROCESSING_V2:
			case UDESCSUB_AC_EXTENSION_V2:
			case UDESCSUB_AC_EFFECT:
			case UDESCSUB_AC_CLOCK_SRC:
			case UDESCSUB_AC_CLOCK_SEL:
			case UDESCSUB_AC_CLOCK_MUL:
			case UDESCSUB_AC_SAMPLE_RT:
				break;

			case UDESCSUB_AC_MIXER:
				uaudio20_mixer_add_mixer(sc, iot, i);
				break;

			case UDESCSUB_AC_SELECTOR:
				uaudio20_mixer_add_selector(sc, iot, i);
				break;

			case UDESCSUB_AC_FEATURE:
				uaudio20_mixer_add_feature(sc, iot, i);
				break;

			default:
				DPRINTF("bad AC desc subtype=0x%02x\n",
				    dp->bDescriptorSubtype);
				break;
			}
			continue;
		}

		switch (dp->bDescriptorSubtype) {
		case UDESCSUB_AC_HEADER:
			DPRINTF("unexpected AC header\n");
			break;

		case UDESCSUB_AC_INPUT:
		case UDESCSUB_AC_OUTPUT:
			break;

		case UDESCSUB_AC_MIXER:
			uaudio_mixer_add_mixer(sc, iot, i);
			break;

		case
UDESCSUB_AC_SELECTOR: uaudio_mixer_add_selector(sc, iot, i); break; case UDESCSUB_AC_FEATURE: uaudio_mixer_add_feature(sc, iot, i); break; case UDESCSUB_AC_PROCESSING: uaudio_mixer_add_processing(sc, iot, i); break; case UDESCSUB_AC_EXTENSION: uaudio_mixer_add_extension(sc, iot, i); break; default: DPRINTF("bad AC desc subtype=0x%02x\n", dp->bDescriptorSubtype); break; } } while (i--); done: free(iot, M_TEMP); } static int uaudio_mixer_get(struct usb_device *udev, uint16_t audio_rev, uint8_t what, struct uaudio_mixer_node *mc) { struct usb_device_request req; int val; uint8_t data[2 + (2 * 3)]; usb_error_t err; if (mc->wValue[0] == -1) return (0); if (audio_rev >= UAUDIO_VERSION_30) return (0); else if (audio_rev >= UAUDIO_VERSION_20) { if (what == GET_CUR) { req.bRequest = UA20_CS_CUR; USETW(req.wLength, 2); } else { req.bRequest = UA20_CS_RANGE; USETW(req.wLength, 8); } } else { uint16_t len = MIX_SIZE(mc->type); req.bRequest = what; USETW(req.wLength, len); } req.bmRequestType = UT_READ_CLASS_INTERFACE; USETW(req.wValue, mc->wValue[0]); USETW(req.wIndex, mc->wIndex); memset(data, 0, sizeof(data)); err = usbd_do_request(udev, NULL, &req, data); if (err) { DPRINTF("err=%s\n", usbd_errstr(err)); return (0); } if (audio_rev >= UAUDIO_VERSION_30) { val = 0; } else if (audio_rev >= UAUDIO_VERSION_20) { switch (what) { case GET_CUR: val = (data[0] | (data[1] << 8)); break; case GET_MIN: val = (data[2] | (data[3] << 8)); break; case GET_MAX: val = (data[4] | (data[5] << 8)); break; case GET_RES: val = (data[6] | (data[7] << 8)); break; default: val = 0; break; } } else { val = (data[0] | (data[1] << 8)); } if (what == GET_CUR || what == GET_MIN || what == GET_MAX) val = uaudio_mixer_signext(mc->type, val); DPRINTFN(3, "val=%d\n", val); return (val); } static void uaudio_mixer_write_cfg_callback(struct usb_xfer *xfer, usb_error_t error) { struct usb_device_request req; struct uaudio_softc *sc = usbd_xfer_softc(xfer); struct uaudio_mixer_node *mc = sc->sc_mixer_curr; struct usb_page_cache *pc; uint16_t len; uint8_t repeat = 1; uint8_t update; uint8_t chan; uint8_t buf[2]; DPRINTF("\n"); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: tr_transferred: case USB_ST_SETUP: tr_setup: if (mc == NULL) { mc = sc->sc_mixer_root; sc->sc_mixer_curr = mc; sc->sc_mixer_chan = 0; repeat = 0; } while (mc) { while (sc->sc_mixer_chan < mc->nchan) { chan = sc->sc_mixer_chan; sc->sc_mixer_chan++; update = ((mc->update[chan / 8] & (1 << (chan % 8))) && (mc->wValue[chan] != -1)); mc->update[chan / 8] &= ~(1 << (chan % 8)); if (update) { req.bmRequestType = UT_WRITE_CLASS_INTERFACE; USETW(req.wValue, mc->wValue[chan]); USETW(req.wIndex, mc->wIndex); if (sc->sc_audio_rev >= UAUDIO_VERSION_30) { return; } else if (sc->sc_audio_rev >= UAUDIO_VERSION_20) { len = 2; req.bRequest = UA20_CS_CUR; USETW(req.wLength, len); } else { len = MIX_SIZE(mc->type); req.bRequest = SET_CUR; USETW(req.wLength, len); } buf[0] = (mc->wData[chan] & 0xFF); buf[1] = (mc->wData[chan] >> 8) & 0xFF; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &req, sizeof(req)); pc = usbd_xfer_get_frame(xfer, 1); usbd_copy_in(pc, 0, buf, len); usbd_xfer_set_frame_len(xfer, 0, sizeof(req)); usbd_xfer_set_frame_len(xfer, 1, len); usbd_xfer_set_frames(xfer, len ? 
2 : 1); usbd_transfer_submit(xfer); return; } } mc = mc->next; sc->sc_mixer_curr = mc; sc->sc_mixer_chan = 0; } if (repeat) { goto tr_setup; } break; default: /* Error */ DPRINTF("error=%s\n", usbd_errstr(error)); if (error == USB_ERR_CANCELLED) { /* do nothing - we are detaching */ break; } goto tr_transferred; } } static usb_error_t uaudio_set_speed(struct usb_device *udev, uint8_t endpt, uint32_t speed) { struct usb_device_request req; uint8_t data[3]; DPRINTFN(6, "endpt=%d speed=%u\n", endpt, speed); req.bmRequestType = UT_WRITE_CLASS_ENDPOINT; req.bRequest = SET_CUR; USETW2(req.wValue, SAMPLING_FREQ_CONTROL, 0); USETW(req.wIndex, endpt); USETW(req.wLength, 3); data[0] = speed; data[1] = speed >> 8; data[2] = speed >> 16; return (usbd_do_request(udev, NULL, &req, data)); } static usb_error_t uaudio20_set_speed(struct usb_device *udev, uint8_t iface_no, uint8_t clockid, uint32_t speed) { struct usb_device_request req; uint8_t data[4]; DPRINTFN(6, "ifaceno=%d clockid=%d speed=%u\n", iface_no, clockid, speed); req.bmRequestType = UT_WRITE_CLASS_INTERFACE; req.bRequest = UA20_CS_CUR; USETW2(req.wValue, UA20_CS_SAM_FREQ_CONTROL, 0); USETW2(req.wIndex, clockid, iface_no); USETW(req.wLength, 4); data[0] = speed; data[1] = speed >> 8; data[2] = speed >> 16; data[3] = speed >> 24; return (usbd_do_request(udev, NULL, &req, data)); } static int uaudio_mixer_signext(uint8_t type, int val) { if (!MIX_UNSIGNED(type)) { if (MIX_SIZE(type) == 2) { val = (int16_t)val; } else { val = (int8_t)val; } } return (val); } static int uaudio_mixer_bsd2value(struct uaudio_mixer_node *mc, int32_t val) { if (mc->type == MIX_ON_OFF) { val = (val != 0); } else if (mc->type == MIX_SELECTOR) { if ((val < mc->minval) || (val > mc->maxval)) { val = mc->minval; } } else { /* compute actual volume */ val = (val * mc->mul) / 255; /* add lower offset */ val = val + mc->minval; /* make sure we don't write a value out of range */ if (val > mc->maxval) val = mc->maxval; else if (val < mc->minval) val = mc->minval; } DPRINTFN(6, "type=0x%03x val=%d min=%d max=%d val=%d\n", mc->type, val, mc->minval, mc->maxval, val); return (val); } static void uaudio_mixer_ctl_set(struct uaudio_softc *sc, struct uaudio_mixer_node *mc, uint8_t chan, int32_t val) { val = uaudio_mixer_bsd2value(mc, val); mc->update[chan / 8] |= (1 << (chan % 8)); mc->wData[chan] = val; /* start the transfer, if not already started */ usbd_transfer_start(sc->sc_mixer_xfer[0]); } static void uaudio_mixer_init(struct uaudio_softc *sc) { struct uaudio_mixer_node *mc; int32_t i; for (mc = sc->sc_mixer_root; mc; mc = mc->next) { if (mc->ctl != SOUND_MIXER_NRDEVICES) { /* * Set device mask bits. 
See * /usr/include/machine/soundcard.h */ sc->sc_mix_info |= (1 << mc->ctl); } if ((mc->ctl == SOUND_MIXER_NRDEVICES) && (mc->type == MIX_SELECTOR)) { for (i = mc->minval; (i > 0) && (i <= mc->maxval); i++) { if (mc->slctrtype[i - 1] == SOUND_MIXER_NRDEVICES) { continue; } sc->sc_recsrc_info |= 1 << mc->slctrtype[i - 1]; } } } } int uaudio_mixer_init_sub(struct uaudio_softc *sc, struct snd_mixer *m) { DPRINTF("\n"); sc->sc_mixer_lock = mixer_get_lock(m); sc->sc_mixer_dev = m; if (usbd_transfer_setup(sc->sc_udev, &sc->sc_mixer_iface_index, sc->sc_mixer_xfer, uaudio_mixer_config, 1, sc, sc->sc_mixer_lock)) { DPRINTFN(0, "could not allocate USB " "transfer for audio mixer!\n"); return (ENOMEM); } if (!(sc->sc_mix_info & SOUND_MASK_VOLUME)) { mix_setparentchild(m, SOUND_MIXER_VOLUME, SOUND_MASK_PCM); mix_setrealdev(m, SOUND_MIXER_VOLUME, SOUND_MIXER_NONE); } mix_setdevs(m, sc->sc_mix_info); mix_setrecdevs(m, sc->sc_recsrc_info); return (0); } int uaudio_mixer_uninit_sub(struct uaudio_softc *sc) { DPRINTF("\n"); usbd_transfer_unsetup(sc->sc_mixer_xfer, 1); sc->sc_mixer_lock = NULL; return (0); } void uaudio_mixer_set(struct uaudio_softc *sc, unsigned type, unsigned left, unsigned right) { struct uaudio_mixer_node *mc; int chan; for (mc = sc->sc_mixer_root; mc != NULL; mc = mc->next) { if (mc->ctl == type) { for (chan = 0; chan < mc->nchan; chan++) { uaudio_mixer_ctl_set(sc, mc, chan, (int)((chan == 0 ? left : right) * 255) / 100); } } } } uint32_t uaudio_mixer_setrecsrc(struct uaudio_softc *sc, uint32_t src) { struct uaudio_mixer_node *mc; uint32_t mask; uint32_t temp; int32_t i; for (mc = sc->sc_mixer_root; mc; mc = mc->next) { if ((mc->ctl == SOUND_MIXER_NRDEVICES) && (mc->type == MIX_SELECTOR)) { /* compute selector mask */ mask = 0; for (i = mc->minval; (i > 0) && (i <= mc->maxval); i++) { mask |= (1 << mc->slctrtype[i - 1]); } temp = mask & src; if (temp == 0) { continue; } /* find the first set bit */ temp = (-temp) & temp; /* update "src" */ src &= ~mask; src |= temp; for (i = mc->minval; (i > 0) && (i <= mc->maxval); i++) { if (temp != (1 << mc->slctrtype[i - 1])) { continue; } uaudio_mixer_ctl_set(sc, mc, 0, i); break; } } } return (src); } /*========================================================================* * MIDI support routines *========================================================================*/ static void umidi_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct umidi_chan *chan = usbd_xfer_softc(xfer); struct umidi_sub_chan *sub; struct usb_page_cache *pc; uint8_t buf[4]; uint8_t cmd_len; uint8_t cn; uint16_t pos; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTF("actlen=%d bytes\n", actlen); pos = 0; pc = usbd_xfer_get_frame(xfer, 0); while (actlen >= 4) { /* copy out the MIDI data */ usbd_copy_out(pc, pos, buf, 4); /* command length */ cmd_len = umidi_cmd_to_len[buf[0] & 0xF]; /* cable number */ cn = buf[0] >> 4; /* * Lookup sub-channel. The index is range * checked below. 
			 */
			sub = &chan->sub[cn];

			if ((cmd_len != 0) && (cn < chan->max_emb_jack) &&
			    (sub->read_open != 0)) {
				/* Send data to the application */
				usb_fifo_put_data_linear(
				    sub->fifo.fp[USB_FIFO_RX],
				    buf + 1, cmd_len, 1);
			}
			actlen -= 4;
			pos += 4;
		}
		/* FALLTHROUGH */
	case USB_ST_SETUP:
		DPRINTF("start\n");
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;

	default:
		DPRINTF("error=%s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}

/*
 * The following state machine, which converts MIDI commands to
 * USB MIDI packets, was derived from Linux's usbmidi.c, which
 * was written by "Clemens Ladisch":
 *
 * Returns:
 *    0: No command
 * Else: Command is complete
 */
static uint8_t
umidi_convert_to_usb(struct umidi_sub_chan *sub, uint8_t cn, uint8_t b)
{
	uint8_t p0 = (cn << 4);

	if (b >= 0xf8) {
		sub->temp_0[0] = p0 | 0x0f;
		sub->temp_0[1] = b;
		sub->temp_0[2] = 0;
		sub->temp_0[3] = 0;
		sub->temp_cmd = sub->temp_0;
		return (1);

	} else if (b >= 0xf0) {
		switch (b) {
		case 0xf0:		/* system exclusive begin */
			sub->temp_1[1] = b;
			sub->state = UMIDI_ST_SYSEX_1;
			break;
		case 0xf1:		/* MIDI time code */
		case 0xf3:		/* song select */
			sub->temp_1[1] = b;
			sub->state = UMIDI_ST_1PARAM;
			break;
		case 0xf2:		/* song position pointer */
			sub->temp_1[1] = b;
			sub->state = UMIDI_ST_2PARAM_1;
			break;
		case 0xf4:		/* unknown */
		case 0xf5:		/* unknown */
			sub->state = UMIDI_ST_UNKNOWN;
			break;
		case 0xf6:		/* tune request */
			sub->temp_1[0] = p0 | 0x05;
			sub->temp_1[1] = 0xf6;
			sub->temp_1[2] = 0;
			sub->temp_1[3] = 0;
			sub->temp_cmd = sub->temp_1;
			sub->state = UMIDI_ST_UNKNOWN;
			return (1);
		case 0xf7:		/* system exclusive end */
			switch (sub->state) {
			case UMIDI_ST_SYSEX_0:
				sub->temp_1[0] = p0 | 0x05;
				sub->temp_1[1] = 0xf7;
				sub->temp_1[2] = 0;
				sub->temp_1[3] = 0;
				sub->temp_cmd = sub->temp_1;
				sub->state = UMIDI_ST_UNKNOWN;
				return (1);
			case UMIDI_ST_SYSEX_1:
				sub->temp_1[0] = p0 | 0x06;
				sub->temp_1[2] = 0xf7;
				sub->temp_1[3] = 0;
				sub->temp_cmd = sub->temp_1;
				sub->state = UMIDI_ST_UNKNOWN;
				return (1);
			case UMIDI_ST_SYSEX_2:
				sub->temp_1[0] = p0 | 0x07;
				sub->temp_1[3] = 0xf7;
				sub->temp_cmd = sub->temp_1;
				sub->state = UMIDI_ST_UNKNOWN;
				return (1);
			}
			sub->state = UMIDI_ST_UNKNOWN;
			break;
		}
	} else if (b >= 0x80) {
		sub->temp_1[1] = b;
		if ((b >= 0xc0) && (b <= 0xdf)) {
			sub->state = UMIDI_ST_1PARAM;
		} else {
			sub->state = UMIDI_ST_2PARAM_1;
		}
	} else {		/* b < 0x80 */
		switch (sub->state) {
		case UMIDI_ST_1PARAM:
			if (sub->temp_1[1] < 0xf0) {
				p0 |= sub->temp_1[1] >> 4;
			} else {
				p0 |= 0x02;
				sub->state = UMIDI_ST_UNKNOWN;
			}
			sub->temp_1[0] = p0;
			sub->temp_1[2] = b;
			sub->temp_1[3] = 0;
			sub->temp_cmd = sub->temp_1;
			return (1);
		case UMIDI_ST_2PARAM_1:
			sub->temp_1[2] = b;
			sub->state = UMIDI_ST_2PARAM_2;
			break;
		case UMIDI_ST_2PARAM_2:
			if (sub->temp_1[1] < 0xf0) {
				p0 |= sub->temp_1[1] >> 4;
				sub->state = UMIDI_ST_2PARAM_1;
			} else {
				p0 |= 0x03;
				sub->state = UMIDI_ST_UNKNOWN;
			}
			sub->temp_1[0] = p0;
			sub->temp_1[3] = b;
			sub->temp_cmd = sub->temp_1;
			return (1);
		case UMIDI_ST_SYSEX_0:
			sub->temp_1[1] = b;
			sub->state = UMIDI_ST_SYSEX_1;
			break;
		case UMIDI_ST_SYSEX_1:
			sub->temp_1[2] = b;
			sub->state = UMIDI_ST_SYSEX_2;
			break;
		case UMIDI_ST_SYSEX_2:
			sub->temp_1[0] = p0 | 0x04;
			sub->temp_1[3] = b;
			sub->temp_cmd = sub->temp_1;
			sub->state = UMIDI_ST_SYSEX_0;
			return (1);
		default:
			break;
		}
	}
	return (0);
}
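/*
 * Example of the 32-bit USB MIDI event packets produced above: a
 * note-on message 0x90 0x3c 0x7f arriving on embedded cable 0 is
 * emitted as the 4-byte packet 0x09 0x90 0x3c 0x7f. The first byte
 * combines the cable number (high nibble) with the code index
 * number (low nibble; here 0x9 = note-on, implying a three byte
 * MIDI command in the remaining bytes).
 */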
static void
umidi_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct umidi_chan *chan = usbd_xfer_softc(xfer);
	struct umidi_sub_chan *sub;
	struct usb_page_cache *pc;
	uint32_t actlen;
	uint16_t nframes;
	uint8_t buf;
	uint8_t start_cable;
	uint8_t tr_any;
	int len;

	usbd_xfer_status(xfer, &len, NULL, NULL, NULL);

	/*
	 * NOTE: Some MIDI devices only accept 4 bytes of data per
	 * short terminated USB transfer.
	 */
	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTF("actlen=%d bytes\n", len);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		DPRINTF("start\n");

		nframes = 0;	/* reset */
		start_cable = chan->curr_cable;
		tr_any = 0;
		pc = usbd_xfer_get_frame(xfer, 0);

		while (1) {
			/* round robin de-queueing */

			sub = &chan->sub[chan->curr_cable];

			if (sub->write_open) {
				usb_fifo_get_data_linear(
				    sub->fifo.fp[USB_FIFO_TX],
				    &buf, 1, &actlen, 0);
			} else {
				actlen = 0;
			}

			if (actlen) {
				tr_any = 1;

				DPRINTF("byte=0x%02x from FIFO %u\n", buf,
				    (unsigned int)chan->curr_cable);

				if (umidi_convert_to_usb(sub,
				    chan->curr_cable, buf)) {
					DPRINTF("sub=0x%02x 0x%02x 0x%02x 0x%02x\n",
					    sub->temp_cmd[0], sub->temp_cmd[1],
					    sub->temp_cmd[2], sub->temp_cmd[3]);

					usbd_copy_in(pc, nframes * 4,
					    sub->temp_cmd, 4);
					nframes++;

					if ((nframes >= UMIDI_TX_FRAMES) ||
					    (chan->single_command != 0))
						break;
				} else {
					continue;
				}
			}

			chan->curr_cable++;
			if (chan->curr_cable >= chan->max_emb_jack)
				chan->curr_cable = 0;

			if (chan->curr_cable == start_cable) {
				if (tr_any == 0)
					break;
				tr_any = 0;
			}
		}

		if (nframes != 0) {
			DPRINTF("Transferring %d frames\n", (int)nframes);
			usbd_xfer_set_frame_len(xfer, 0, 4 * nframes);
			usbd_transfer_submit(xfer);
		}
		break;

	default:		/* Error */
		DPRINTF("error=%s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}

static struct umidi_sub_chan *
umidi_sub_by_fifo(struct usb_fifo *fifo)
{
	struct umidi_chan *chan = usb_fifo_softc(fifo);
	struct umidi_sub_chan *sub;
	uint32_t n;

	for (n = 0; n < UMIDI_EMB_JACK_MAX; n++) {
		sub = &chan->sub[n];
		if ((sub->fifo.fp[USB_FIFO_RX] == fifo) ||
		    (sub->fifo.fp[USB_FIFO_TX] == fifo)) {
			return (sub);
		}
	}

	panic("%s:%d cannot find usb_fifo!\n", __FILE__, __LINE__);

	return (NULL);
}

static void
umidi_start_read(struct usb_fifo *fifo)
{
	struct umidi_chan *chan = usb_fifo_softc(fifo);

	usbd_transfer_start(chan->xfer[UMIDI_RX_TRANSFER]);
}

static void
umidi_stop_read(struct usb_fifo *fifo)
{
	struct umidi_chan *chan = usb_fifo_softc(fifo);
	struct umidi_sub_chan *sub = umidi_sub_by_fifo(fifo);

	DPRINTF("\n");

	sub->read_open = 0;

	if (--(chan->read_open_refcount) == 0) {
		/*
		 * XXX don't stop the read transfer here, since that
		 * causes problems with some MIDI adapters
		 */
		DPRINTF("(stopping read transfer)\n");
	}
}

static void
umidi_start_write(struct usb_fifo *fifo)
{
	struct umidi_chan *chan = usb_fifo_softc(fifo);

	usbd_transfer_start(chan->xfer[UMIDI_TX_TRANSFER]);
}

static void
umidi_stop_write(struct usb_fifo *fifo)
{
	struct umidi_chan *chan = usb_fifo_softc(fifo);
	struct umidi_sub_chan *sub = umidi_sub_by_fifo(fifo);

	DPRINTF("\n");

	sub->write_open = 0;

	if (--(chan->write_open_refcount) == 0) {
		DPRINTF("(stopping write transfer)\n");
		usbd_transfer_stop(chan->xfer[UMIDI_TX_TRANSFER]);
	}
}
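/*
 * The read side below allocates its FIFO in 4-byte cells, matching
 * the fixed size of a USB MIDI event packet, while the write side
 * uses larger 32-byte cells for buffering raw MIDI bytes from the
 * application. The 1024-byte totals are a driver sizing choice, not
 * a protocol requirement.
 */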
        sub->write_open = 1;
        /* reset */
        sub->state = UMIDI_ST_UNKNOWN;
        mtx_unlock(&chan->mtx);
    }
    return (0);			/* success */
}

static void
umidi_close(struct usb_fifo *fifo, int fflags)
{
    if (fflags & FREAD) {
        usb_fifo_free_buffer(fifo);
    }
    if (fflags & FWRITE) {
        usb_fifo_free_buffer(fifo);
    }
}

static int
umidi_ioctl(struct usb_fifo *fifo, u_long cmd, void *data,
    int fflags)
{
    return (ENODEV);
}

static void
umidi_init(device_t dev)
{
    struct uaudio_softc *sc = device_get_softc(dev);
    struct umidi_chan *chan = &sc->sc_midi_chan;

    mtx_init(&chan->mtx, "umidi lock", NULL, MTX_DEF | MTX_RECURSE);
}

static struct usb_fifo_methods umidi_fifo_methods = {
    .f_start_read = &umidi_start_read,
    .f_start_write = &umidi_start_write,
    .f_stop_read = &umidi_stop_read,
    .f_stop_write = &umidi_stop_write,
    .f_open = &umidi_open,
    .f_close = &umidi_close,
    .f_ioctl = &umidi_ioctl,
    .basename[0] = "umidi",
};

static int
umidi_probe(device_t dev)
{
    struct uaudio_softc *sc = device_get_softc(dev);
    struct usb_attach_arg *uaa = device_get_ivars(dev);
    struct umidi_chan *chan = &sc->sc_midi_chan;
    struct umidi_sub_chan *sub;
    int unit = device_get_unit(dev);
    int error;
    uint32_t n;

    if (usb_test_quirk(uaa, UQ_SINGLE_CMD_MIDI))
        chan->single_command = 1;

    if (usbd_set_alt_interface_index(sc->sc_udev, chan->iface_index,
        chan->iface_alt_index)) {
        DPRINTF("setting of alternate index failed!\n");
        goto detach;
    }
    usbd_set_parent_iface(sc->sc_udev, chan->iface_index,
        sc->sc_mixer_iface_index);

    error = usbd_transfer_setup(uaa->device, &chan->iface_index,
        chan->xfer, umidi_config, UMIDI_N_TRANSFER,
        chan, &chan->mtx);
    if (error) {
        DPRINTF("error=%s\n", usbd_errstr(error));
        goto detach;
    }
    /*
     * Some USB MIDI device makers couldn't resist using
     * wMaxPacketSize = 4 for RX and TX BULK endpoints, although
     * that size is an unsupported value for FULL speed BULK
     * endpoints. The same applies to some HIGH speed MIDI devices
     * which use a wMaxPacketSize different from 512 bytes.
     *
     * Refer to section 5.8.3 in USB 2.0 PDF: Cite: "All Host
     * Controllers are required to have support for 8-, 16-, 32-,
     * and 64-byte maximum packet sizes for full-speed bulk
     * endpoints and 512 bytes for high-speed bulk endpoints."
     */
    if (usbd_xfer_maxp_was_clamped(chan->xfer[UMIDI_TX_TRANSFER]))
        chan->single_command = 1;

    if (chan->single_command != 0)
        device_printf(dev, "Single command MIDI quirk enabled\n");

    if ((chan->max_emb_jack == 0) ||
        (chan->max_emb_jack > UMIDI_EMB_JACK_MAX)) {
        chan->max_emb_jack = UMIDI_EMB_JACK_MAX;
    }

    for (n = 0; n < chan->max_emb_jack; n++) {
        sub = &chan->sub[n];

        error = usb_fifo_attach(sc->sc_udev, chan, &chan->mtx,
            &umidi_fifo_methods, &sub->fifo, unit, n,
            chan->iface_index,
            UID_ROOT, GID_OPERATOR, 0644);
        if (error) {
            goto detach;
        }
    }

    mtx_lock(&chan->mtx);

    /*
     * NOTE: At least one device will not work properly unless the
     * BULK IN pipe is open all the time. This is probably because
     * the internal queues of the device overflow if we don't read
     * them regularly.
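     * (umidi_stop_read() above deliberately leaves the read
     * transfer running for the same reason.)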
*/ usbd_transfer_start(chan->xfer[UMIDI_RX_TRANSFER]); mtx_unlock(&chan->mtx); return (0); /* success */ detach: return (ENXIO); /* failure */ } static int umidi_detach(device_t dev) { struct uaudio_softc *sc = device_get_softc(dev); struct umidi_chan *chan = &sc->sc_midi_chan; uint32_t n; for (n = 0; n < UMIDI_EMB_JACK_MAX; n++) usb_fifo_detach(&chan->sub[n].fifo); mtx_lock(&chan->mtx); usbd_transfer_stop(chan->xfer[UMIDI_RX_TRANSFER]); mtx_unlock(&chan->mtx); usbd_transfer_unsetup(chan->xfer, UMIDI_N_TRANSFER); mtx_destroy(&chan->mtx); return (0); } static void uaudio_hid_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct uaudio_softc *sc = usbd_xfer_softc(xfer); const uint8_t *buffer = usbd_xfer_get_frame_buffer(xfer, 0); struct snd_mixer *m; uint8_t id; int actlen; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTF("actlen=%d\n", actlen); if (actlen != 0 && (sc->sc_hid.flags & UAUDIO_HID_HAS_ID)) { id = *buffer; buffer++; actlen--; } else { id = 0; } m = sc->sc_mixer_dev; if ((sc->sc_hid.flags & UAUDIO_HID_HAS_MUTE) && (sc->sc_hid.mute_id == id) && hid_get_data(buffer, actlen, &sc->sc_hid.mute_loc)) { DPRINTF("Mute toggle\n"); mixer_hwvol_mute_locked(m); } if ((sc->sc_hid.flags & UAUDIO_HID_HAS_VOLUME_UP) && (sc->sc_hid.volume_up_id == id) && hid_get_data(buffer, actlen, &sc->sc_hid.volume_up_loc)) { DPRINTF("Volume Up\n"); mixer_hwvol_step_locked(m, 1, 1); } if ((sc->sc_hid.flags & UAUDIO_HID_HAS_VOLUME_DOWN) && (sc->sc_hid.volume_down_id == id) && hid_get_data(buffer, actlen, &sc->sc_hid.volume_down_loc)) { DPRINTF("Volume Down\n"); mixer_hwvol_step_locked(m, -1, -1); } case USB_ST_SETUP: tr_setup: /* check if we can put more data into the FIFO */ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); break; default: /* Error */ DPRINTF("error=%s\n", usbd_errstr(error)); if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static int uaudio_hid_probe(struct uaudio_softc *sc, struct usb_attach_arg *uaa) { void *d_ptr; uint32_t flags; uint16_t d_len; uint8_t id; int error; if (!(sc->sc_hid.flags & UAUDIO_HID_VALID)) return (-1); if (sc->sc_mixer_lock == NULL) return (-1); /* Get HID descriptor */ error = usbd_req_get_hid_desc(uaa->device, NULL, &d_ptr, &d_len, M_TEMP, sc->sc_hid.iface_index); if (error) { DPRINTF("error reading report description\n"); return (-1); } /* check if there is an ID byte */ hid_report_size(d_ptr, d_len, hid_input, &id); if (id != 0) sc->sc_hid.flags |= UAUDIO_HID_HAS_ID; if (hid_locate(d_ptr, d_len, HID_USAGE2(HUP_CONSUMER, 0xE9 /* Volume Increment */), hid_input, 0, &sc->sc_hid.volume_up_loc, &flags, &sc->sc_hid.volume_up_id)) { if (flags & HIO_VARIABLE) sc->sc_hid.flags |= UAUDIO_HID_HAS_VOLUME_UP; DPRINTFN(1, "Found Volume Up key\n"); } if (hid_locate(d_ptr, d_len, HID_USAGE2(HUP_CONSUMER, 0xEA /* Volume Decrement */), hid_input, 0, &sc->sc_hid.volume_down_loc, &flags, &sc->sc_hid.volume_down_id)) { if (flags & HIO_VARIABLE) sc->sc_hid.flags |= UAUDIO_HID_HAS_VOLUME_DOWN; DPRINTFN(1, "Found Volume Down key\n"); } if (hid_locate(d_ptr, d_len, HID_USAGE2(HUP_CONSUMER, 0xE2 /* Mute */), hid_input, 0, &sc->sc_hid.mute_loc, &flags, &sc->sc_hid.mute_id)) { if (flags & HIO_VARIABLE) sc->sc_hid.flags |= UAUDIO_HID_HAS_MUTE; DPRINTFN(1, "Found Mute key\n"); } free(d_ptr, M_TEMP); if (!(sc->sc_hid.flags & (UAUDIO_HID_HAS_VOLUME_UP | UAUDIO_HID_HAS_VOLUME_DOWN | UAUDIO_HID_HAS_MUTE))) { 
DPRINTFN(1, "Did not find any volume related keys\n"); return (-1); } /* prevent the uhid driver from attaching */ usbd_set_parent_iface(uaa->device, sc->sc_hid.iface_index, sc->sc_mixer_iface_index); /* allocate USB transfers */ error = usbd_transfer_setup(uaa->device, &sc->sc_hid.iface_index, sc->sc_hid.xfer, uaudio_hid_config, UAUDIO_HID_N_TRANSFER, sc, sc->sc_mixer_lock); if (error) { DPRINTF("error=%s\n", usbd_errstr(error)); return (-1); } return (0); } static void uaudio_hid_detach(struct uaudio_softc *sc) { usbd_transfer_unsetup(sc->sc_hid.xfer, UAUDIO_HID_N_TRANSFER); } DRIVER_MODULE_ORDERED(uaudio, uhub, uaudio_driver, uaudio_devclass, NULL, 0, SI_ORDER_ANY); MODULE_DEPEND(uaudio, usb, 1, 1, 1); MODULE_DEPEND(uaudio, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(uaudio, 1); USB_PNP_HOST_INFO(uaudio_devs); diff --git a/sys/dev/sym/sym_hipd.c b/sys/dev/sym/sym_hipd.c index 0d0afdf3a377..53c66bbbf085 100644 --- a/sys/dev/sym/sym_hipd.c +++ b/sys/dev/sym/sym_hipd.c @@ -1,9623 +1,9623 @@ /*- * Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010 * PCI-SCSI controllers. * * Copyright (C) 1999-2001 Gerard Roudier * * This driver also supports the following Symbios/LSI PCI-SCSI chips: * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895, * 53C810, 53C815, 53C825 and the 53C1510D is 53C8XX mode. * * * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-1999 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier * Stefan Esser * Copyright (C) 1994 Wolfgang Stanglmeier * * The initialisation code, and part of the code that addresses * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM * written by Justin T. Gibbs. * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham * *----------------------------------------------------------------------------- * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #define SYM_DRIVER_NAME "sym-1.6.5-20000902" /* #define SYM_DEBUG_GENERIC_SUPPORT */ #include /* * Driver configuration options. */ #include "opt_sym.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __sparc64__ #include #include #endif #include #include #include #include #include #include #include #include /* Short and quite clear integer types */ typedef int8_t s8; typedef int16_t s16; typedef int32_t s32; typedef u_int8_t u8; typedef u_int16_t u16; typedef u_int32_t u32; /* * Driver definitions. */ #include #include /* * IA32 architecture does not reorder STORES and prevents * LOADS from passing STORES. It is called `program order' * by Intel and allows device drivers to deal with memory * ordering by only ensuring that the code is not reordered * by the compiler when ordering is required. * Other architectures implement a weaker ordering that * requires memory barriers (and also IO barriers when they * make sense) to be used. */ #if defined __i386__ || defined __amd64__ #define MEMORY_BARRIER() do { ; } while(0) #elif defined __powerpc__ #define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory") #elif defined __sparc64__ #define MEMORY_BARRIER() __asm__ volatile("membar #Sync" : : : "memory") #elif defined __arm__ #define MEMORY_BARRIER() dmb() #elif defined __aarch64__ #define MEMORY_BARRIER() dmb(sy) #else #error "Not supported platform" #endif /* * A la VMS/CAM-3 queue management. */ typedef struct sym_quehead { struct sym_quehead *flink; /* Forward pointer */ struct sym_quehead *blink; /* Backward pointer */ } SYM_QUEHEAD; #define sym_que_init(ptr) do { \ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \ } while (0) static __inline void __sym_que_add(struct sym_quehead * new, struct sym_quehead * blink, struct sym_quehead * flink) { flink->blink = new; new->flink = flink; new->blink = blink; blink->flink = new; } static __inline void __sym_que_del(struct sym_quehead * blink, struct sym_quehead * flink) { flink->blink = blink; blink->flink = flink; } static __inline int sym_que_empty(struct sym_quehead *head) { return head->flink == head; } static __inline void sym_que_splice(struct sym_quehead *list, struct sym_quehead *head) { struct sym_quehead *first = list->flink; if (first != list) { struct sym_quehead *last = list->blink; struct sym_quehead *at = head->flink; first->blink = head; head->flink = first; last->flink = at; at->blink = last; } } #define sym_que_entry(ptr, type, member) \ ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member))) #define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink) #define sym_remque(el) __sym_que_del((el)->blink, (el)->flink) #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink) static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head) { struct sym_quehead *elem = head->flink; if (elem != head) __sym_que_del(head, elem->flink); else elem = NULL; return elem; } #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head) /* * This one may be useful. */ #define FOR_EACH_QUEUED_ELEMENT(head, qp) \ for (qp = (head)->flink; qp != (head); qp = qp->flink) /* * FreeBSD does not offer our kind of queue in the CAM CCB. * So, we have to cast. */ #define sym_qptr(p) ((struct sym_quehead *) (p)) /* * Simple bitmap operations. 
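 *
 * Editor's sketch of their intended use (illustrative only), with
 * a map dimensioned the way struct sym_tcb sizes lun_map[] further
 * down in this file:
 *
 *	u32 map[(SYM_CONF_MAX_LUN+31)/32];
 *	sym_set_bit(map, lun);		mark LUN as seen
 *	if (sym_is_bit(map, lun))	test it
 *	sym_clr_bit(map, lun);		clear it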
 */
#define sym_set_bit(p, n)	(((u32 *)(p))[(n)>>5] |=  (1<<((n)&0x1f)))
#define sym_clr_bit(p, n)	(((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
#define sym_is_bit(p, n)	(((u32 *)(p))[(n)>>5] &   (1<<((n)&0x1f)))

/*
 * Number of tasks per device we want to handle.
 */
#if SYM_CONF_MAX_TAG_ORDER > 8
#error "more than 256 tags per logical unit not allowed."
#endif
#define SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)
#if SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
#undef SYM_CONF_MAX_TAG
#define SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif

/*
 * This one means 'NO TAG for this job'
 */
#define NO_TAG	(256)

/*
 * Number of SCSI targets.
 */
#if SYM_CONF_MAX_TARGET > 16
#error "more than 16 targets not allowed."
#endif

/*
 * Number of logical units per target.
 */
#if SYM_CONF_MAX_LUN > 64
#error "more than 64 logical units per target not allowed."
#endif

/*
 * Asynchronous pre-scaler (ns). Shall be 40 for
 * the SCSI timings to be compliant.
 */
#define SYM_CONF_MIN_ASYNC (40)

/*
 * Number of entries in the START and DONE queues.
 *
 * We limit each to 1 PAGE so that their allocation
 * succeeds. Each entry is 8 bytes long (2 DWORDS).
 */
#ifdef SYM_CONF_MAX_START
#define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
#else
#define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
#define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

#if SYM_CONF_MAX_QUEUE > PAGE_SIZE/8
#undef SYM_CONF_MAX_QUEUE
#define SYM_CONF_MAX_QUEUE PAGE_SIZE/8
#undef SYM_CONF_MAX_START
#define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

/*
 * For this one, we want a short name :-)
 */
#define MAX_QUEUE	SYM_CONF_MAX_QUEUE

/*
 * Active debugging tags and verbosity.
 */
#define DEBUG_ALLOC	(0x0001)
#define DEBUG_PHASE	(0x0002)
#define DEBUG_POLL	(0x0004)
#define DEBUG_QUEUE	(0x0008)
#define DEBUG_RESULT	(0x0010)
#define DEBUG_SCATTER	(0x0020)
#define DEBUG_SCRIPT	(0x0040)
#define DEBUG_TINY	(0x0080)
#define DEBUG_TIMING	(0x0100)
#define DEBUG_NEGO	(0x0200)
#define DEBUG_TAGS	(0x0400)
#define DEBUG_POINTER	(0x0800)

#if 0
static int sym_debug = 0;
#define DEBUG_FLAGS sym_debug
#else
/* #define DEBUG_FLAGS (0x0631) */
#define DEBUG_FLAGS	(0x0000)
#endif
#define sym_verbose	(np->verbose)

/*
 * Insert a delay in micro-seconds and milli-seconds.
 */
static void UDELAY(int us) { DELAY(us); }
static void MDELAY(int ms) { while (ms--) UDELAY(1000); }

/*
 * Simple power of two buddy-like allocator.
 *
 * This simple code is not intended to be fast, but to
 * provide power of 2 aligned memory allocations.
 * Since the SCRIPTS processor only supplies 8 bit arithmetic,
 * this allocator allows simple and fast address calculations
 * from the SCRIPTS code. In addition, cache line alignment
 * is guaranteed for power of 2 cache line size.
 *
 * This allocator has been developed for the Linux sym53c8xx
 * driver, since this O/S does not provide naturally aligned
 * allocations.
 * It has the advantage of allowing the driver to use private
 * pages of memory that will be useful if we ever need to deal
 * with IO MMUs for PCI.
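 *
 * A worked example (editor's illustration, assuming 4 KiB pages):
 * a request for 24 bytes is rounded up to the power of two s = 32.
 * If only a whole 4096 byte cluster is free, ___sym_malloc() below
 * splits it 4096 -> 2048 -> ... -> 32, pushing one buddy of each
 * intermediate size onto its free list h[i]. ___sym_mfree() walks
 * the other way: a chunk at address a of size s merges with its
 * buddy at (a ^ s) whenever that buddy is also free, coalescing
 * fragments back toward whole clusters.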
*/ #define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */ #define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */ #if 0 #define MEMO_FREE_UNUSED /* Free unused pages immediately */ #endif #define MEMO_WARN 1 #define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER) #define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT) #define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1) #define get_pages() malloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_NOWAIT) #define free_pages(p) free((p), M_DEVBUF) typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */ typedef struct m_link { /* Link between free memory chunks */ struct m_link *next; } m_link_s; typedef struct m_vtob { /* Virtual to Bus address translation */ struct m_vtob *next; bus_dmamap_t dmamap; /* Map for this chunk */ m_addr_t vaddr; /* Virtual address */ m_addr_t baddr; /* Bus physical address */ } m_vtob_s; /* Hash this stuff a bit to speed up translations */ #define VTOB_HASH_SHIFT 5 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT) #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1) #define VTOB_HASH_CODE(m) \ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK) typedef struct m_pool { /* Memory pool of a given kind */ bus_dma_tag_t dev_dmat; /* Identifies the pool */ bus_dma_tag_t dmat; /* Tag for our fixed allocations */ m_addr_t (*getp)(struct m_pool *); #ifdef MEMO_FREE_UNUSED void (*freep)(struct m_pool *, m_addr_t); #endif #define M_GETP() mp->getp(mp) #define M_FREEP(p) mp->freep(mp, p) int nump; m_vtob_s *(vtob[VTOB_HASH_SIZE]); struct m_pool *next; struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1]; } m_pool_s; static void *___sym_malloc(m_pool_s *mp, int size) { int i = 0; int s = (1 << MEMO_SHIFT); int j; m_addr_t a; m_link_s *h = mp->h; if (size > MEMO_CLUSTER_SIZE) return NULL; while (size > s) { s <<= 1; ++i; } j = i; while (!h[j].next) { if (s == MEMO_CLUSTER_SIZE) { h[j].next = (m_link_s *) M_GETP(); if (h[j].next) h[j].next->next = NULL; break; } ++j; s <<= 1; } a = (m_addr_t) h[j].next; if (a) { h[j].next = h[j].next->next; while (j > i) { j -= 1; s >>= 1; h[j].next = (m_link_s *) (a+s); h[j].next->next = NULL; } } #ifdef DEBUG printf("___sym_malloc(%d) = %p\n", size, (void *) a); #endif return (void *) a; } static void ___sym_mfree(m_pool_s *mp, void *ptr, int size) { int i = 0; int s = (1 << MEMO_SHIFT); m_link_s *q; m_addr_t a, b; m_link_s *h = mp->h; #ifdef DEBUG printf("___sym_mfree(%p, %d)\n", ptr, size); #endif if (size > MEMO_CLUSTER_SIZE) return; while (size > s) { s <<= 1; ++i; } a = (m_addr_t) ptr; while (1) { #ifdef MEMO_FREE_UNUSED if (s == MEMO_CLUSTER_SIZE) { M_FREEP(a); break; } #endif b = a ^ s; q = &h[i]; while (q->next && q->next != (m_link_s *) b) { q = q->next; } if (!q->next) { ((m_link_s *) a)->next = h[i].next; h[i].next = (m_link_s *) a; break; } q->next = q->next->next; a = a & b; s <<= 1; ++i; } } static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags) { void *p; p = ___sym_malloc(mp, size); if (DEBUG_FLAGS & DEBUG_ALLOC) printf ("new %-10s[%4d] @%p.\n", name, size, p); if (p) bzero(p, size); else if (uflags & MEMO_WARN) printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size); return p; } #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, MEMO_WARN) static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name) { if (DEBUG_FLAGS & DEBUG_ALLOC) printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr); ___sym_mfree(mp, ptr, size); } /* * Default memory pool we donnot need to involve in DMA. 
*/ /* * With the `bus dma abstraction', we use a separate pool for * memory we donnot need to involve in DMA. */ static m_addr_t ___mp0_getp(m_pool_s *mp) { m_addr_t m = (m_addr_t) get_pages(); if (m) ++mp->nump; return m; } #ifdef MEMO_FREE_UNUSED static void ___mp0_freep(m_pool_s *mp, m_addr_t m) { free_pages(m); --mp->nump; } #endif #ifdef MEMO_FREE_UNUSED static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep}; #else static m_pool_s mp0 = {0, 0, ___mp0_getp}; #endif /* * Actual memory allocation routine for non-DMAed memory. */ static void *sym_calloc(int size, char *name) { void *m; /* Lock */ m = __sym_calloc(&mp0, size, name); /* Unlock */ return m; } /* * Actual memory allocation routine for non-DMAed memory. */ static void sym_mfree(void *ptr, int size, char *name) { /* Lock */ __sym_mfree(&mp0, ptr, size, name); /* Unlock */ } /* * DMAable pools. */ /* * With `bus dma abstraction', we use a separate pool per parent * BUS handle. A reverse table (hashed) is maintained for virtual * to BUS address translation. */ static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg __unused, int error) { bus_addr_t *baddr; KASSERT(nseg == 1, ("%s: too many DMA segments (%d)", __func__, nseg)); baddr = (bus_addr_t *)arg; if (error) *baddr = 0; else *baddr = segs->ds_addr; } static m_addr_t ___dma_getp(m_pool_s *mp) { m_vtob_s *vbp; void *vaddr = NULL; bus_addr_t baddr = 0; vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB"); if (!vbp) goto out_err; if (bus_dmamem_alloc(mp->dmat, &vaddr, BUS_DMA_COHERENT | BUS_DMA_WAITOK, &vbp->dmamap)) goto out_err; bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr, MEMO_CLUSTER_SIZE, getbaddrcb, &baddr, BUS_DMA_NOWAIT); if (baddr) { int hc = VTOB_HASH_CODE(vaddr); vbp->vaddr = (m_addr_t) vaddr; vbp->baddr = (m_addr_t) baddr; vbp->next = mp->vtob[hc]; mp->vtob[hc] = vbp; ++mp->nump; return (m_addr_t) vaddr; } out_err: if (baddr) bus_dmamap_unload(mp->dmat, vbp->dmamap); if (vaddr) bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap); if (vbp) __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); return 0; } #ifdef MEMO_FREE_UNUSED static void ___dma_freep(m_pool_s *mp, m_addr_t m) { m_vtob_s **vbpp, *vbp; int hc = VTOB_HASH_CODE(m); vbpp = &mp->vtob[hc]; while (*vbpp && (*vbpp)->vaddr != m) vbpp = &(*vbpp)->next; if (*vbpp) { vbp = *vbpp; *vbpp = (*vbpp)->next; bus_dmamap_unload(mp->dmat, vbp->dmamap); bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap); __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); --mp->nump; } } #endif static __inline m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat) { m_pool_s *mp; for (mp = mp0.next; mp && mp->dev_dmat != dev_dmat; mp = mp->next); return mp; } static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat) { m_pool_s *mp = NULL; mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL"); if (mp) { mp->dev_dmat = dev_dmat; if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MEMO_CLUSTER_SIZE, 1, MEMO_CLUSTER_SIZE, 0, NULL, NULL, &mp->dmat)) { mp->getp = ___dma_getp; #ifdef MEMO_FREE_UNUSED mp->freep = ___dma_freep; #endif mp->next = mp0.next; mp0.next = mp; return mp; } } if (mp) __sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL"); return NULL; } #ifdef MEMO_FREE_UNUSED static void ___del_dma_pool(m_pool_s *p) { struct m_pool **pp = &mp0.next; while (*pp && *pp != p) pp = &(*pp)->next; if (*pp) { *pp = (*pp)->next; bus_dma_tag_destroy(p->dmat); __sym_mfree(&mp0, p, sizeof(*p), "MPOOL"); } } #endif static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name) { struct m_pool *mp; void 
*m = NULL; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (!mp) mp = ___cre_dma_pool(dev_dmat); if (mp) m = __sym_calloc(mp, size, name); #ifdef MEMO_FREE_UNUSED if (mp && !mp->nump) ___del_dma_pool(mp); #endif /* Unlock */ return m; } static void __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name) { struct m_pool *mp; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (mp) __sym_mfree(mp, m, size, name); #ifdef MEMO_FREE_UNUSED if (mp && !mp->nump) ___del_dma_pool(mp); #endif /* Unlock */ } static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m) { m_pool_s *mp; int hc = VTOB_HASH_CODE(m); m_vtob_s *vp = NULL; m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (mp) { vp = mp->vtob[hc]; while (vp && (m_addr_t) vp->vaddr != a) vp = vp->next; } /* Unlock */ if (!vp) panic("sym: VTOBUS FAILED!\n"); return vp ? vp->baddr + (((m_addr_t) m) - a) : 0; } /* * Verbs for DMAable memory handling. * The _uvptv_ macro avoids a nasty warning about pointer to volatile * being discarded. */ #define _uvptv_(p) ((void *)((vm_offset_t)(p))) #define _sym_calloc_dma(np, s, n) __sym_calloc_dma(np->bus_dmat, s, n) #define _sym_mfree_dma(np, p, s, n) \ __sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n) #define sym_calloc_dma(s, n) _sym_calloc_dma(np, s, n) #define sym_mfree_dma(p, s, n) _sym_mfree_dma(np, p, s, n) #define _vtobus(np, p) __vtobus(np->bus_dmat, _uvptv_(p)) #define vtobus(p) _vtobus(np, p) /* * Print a buffer in hexadecimal format. */ static void sym_printb_hex (u_char *p, int n) { while (n-- > 0) printf (" %x", *p++); } /* * Same with a label at beginning and .\n at end. */ static void sym_printl_hex (char *label, u_char *p, int n) { printf ("%s", label); sym_printb_hex (p, n); printf (".\n"); } /* * Return a string for SCSI BUS mode. */ static const char *sym_scsi_bus_mode(int mode) { switch(mode) { case SMODE_HVD: return "HVD"; case SMODE_SE: return "SE"; case SMODE_LVD: return "LVD"; } return "??"; } /* * Some poor and bogus sync table that refers to Tekram NVRAM layout. */ #ifdef SYM_CONF_NVRAM_SUPPORT static const u_char Tekram_sync[16] = {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10}; #endif /* * Union of supported NVRAM formats. */ struct sym_nvram { int type; #define SYM_SYMBIOS_NVRAM (1) #define SYM_TEKRAM_NVRAM (2) #ifdef SYM_CONF_NVRAM_SUPPORT union { Symbios_nvram Symbios; Tekram_nvram Tekram; } data; #endif }; /* * This one is hopefully useless, but actually useful. :-) */ #ifndef assert #define assert(expression) { \ if (!(expression)) { \ (void)panic( \ "assertion \"%s\" failed: file \"%s\", line %d\n", \ #expression, \ __FILE__, __LINE__); \ } \ } #endif /* * Some provision for a possible big endian mode supported by * Symbios chips (never seen, by the way). * For now, this stuff does not deserve any comments. :) */ #define sym_offb(o) (o) #define sym_offw(o) (o) /* * Some provision for support for BIG ENDIAN CPU. */ #define cpu_to_scr(dw) htole32(dw) #define scr_to_cpu(dw) le32toh(dw) /* * Access to the chip IO registers and on-chip RAM. * We use the `bus space' interface under FreeBSD-4 and * later kernel versions. 
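 *
 * Editor's note: each macro below expands to a single bus space
 * access; e.g. with memory mapped IO,
 *
 *	OUTL(nc_dsp, v);
 *
 * becomes
 *
 *	bus_write_4(np->mmio_res, offsetof(struct sym_reg, nc_dsp), (v));
 *
 * so a host control block pointer `np' must be in scope at every
 * use site.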
*/ #if defined(SYM_CONF_IOMAPPED) #define INB_OFF(o) bus_read_1(np->io_res, (o)) #define INW_OFF(o) bus_read_2(np->io_res, (o)) #define INL_OFF(o) bus_read_4(np->io_res, (o)) #define OUTB_OFF(o, v) bus_write_1(np->io_res, (o), (v)) #define OUTW_OFF(o, v) bus_write_2(np->io_res, (o), (v)) #define OUTL_OFF(o, v) bus_write_4(np->io_res, (o), (v)) #else /* Memory mapped IO */ #define INB_OFF(o) bus_read_1(np->mmio_res, (o)) #define INW_OFF(o) bus_read_2(np->mmio_res, (o)) #define INL_OFF(o) bus_read_4(np->mmio_res, (o)) #define OUTB_OFF(o, v) bus_write_1(np->mmio_res, (o), (v)) #define OUTW_OFF(o, v) bus_write_2(np->mmio_res, (o), (v)) #define OUTL_OFF(o, v) bus_write_4(np->mmio_res, (o), (v)) #endif /* SYM_CONF_IOMAPPED */ #define OUTRAM_OFF(o, a, l) \ bus_write_region_1(np->ram_res, (o), (a), (l)) /* * Common definitions for both bus space and legacy IO methods. */ #define INB(r) INB_OFF(offsetof(struct sym_reg,r)) #define INW(r) INW_OFF(offsetof(struct sym_reg,r)) #define INL(r) INL_OFF(offsetof(struct sym_reg,r)) #define OUTB(r, v) OUTB_OFF(offsetof(struct sym_reg,r), (v)) #define OUTW(r, v) OUTW_OFF(offsetof(struct sym_reg,r), (v)) #define OUTL(r, v) OUTL_OFF(offsetof(struct sym_reg,r), (v)) #define OUTONB(r, m) OUTB(r, INB(r) | (m)) #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) #define OUTONW(r, m) OUTW(r, INW(r) | (m)) #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) #define OUTONL(r, m) OUTL(r, INL(r) | (m)) #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) /* * We normally want the chip to have a consistent view * of driver internal data structures when we restart it. * Thus these macros. */ #define OUTL_DSP(v) \ do { \ MEMORY_BARRIER(); \ OUTL (nc_dsp, (v)); \ } while (0) #define OUTONB_STD() \ do { \ MEMORY_BARRIER(); \ OUTONB (nc_dcntl, (STD|NOCOM)); \ } while (0) /* * Command control block states. */ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_WAIT (4) /* waiting for resource */ #define HS_DONEMASK (0x80) #define HS_COMPLETE (4|HS_DONEMASK) #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */ #define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */ #define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */ /* * Software Interrupt Codes */ #define SIR_BAD_SCSI_STATUS (1) #define SIR_SEL_ATN_NO_MSG_OUT (2) #define SIR_MSG_RECEIVED (3) #define SIR_MSG_WEIRD (4) #define SIR_NEGO_FAILED (5) #define SIR_NEGO_PROTO (6) #define SIR_SCRIPT_STOPPED (7) #define SIR_REJECT_TO_SEND (8) #define SIR_SWIDE_OVERRUN (9) #define SIR_SODL_UNDERRUN (10) #define SIR_RESEL_NO_MSG_IN (11) #define SIR_RESEL_NO_IDENTIFY (12) #define SIR_RESEL_BAD_LUN (13) #define SIR_TARGET_SELECTED (14) #define SIR_RESEL_BAD_I_T_L (15) #define SIR_RESEL_BAD_I_T_L_Q (16) #define SIR_ABORT_SENT (17) #define SIR_RESEL_ABORTED (18) #define SIR_MSG_OUT_DONE (19) #define SIR_COMPLETE_ERROR (20) #define SIR_DATA_OVERRUN (21) #define SIR_BAD_PHASE (22) #define SIR_MAX (22) /* * Extended error bit codes. * xerr_status field of struct sym_ccb. */ #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */ #define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */ #define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */ #define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */ /* * Negotiation status. * nego_status field of struct sym_ccb. 
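 * A zero value means no negotiation is pending; otherwise the field
 * records which message (SDTR, WDTR or PPR) was sent with the
 * command, so the interrupt code knows how to interpret the
 * target's answer (editor's summary of the usage below).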
*/ #define NS_SYNC (1) #define NS_WIDE (2) #define NS_PPR (3) /* * A CCB hashed table is used to retrieve CCB address * from DSA value. */ #define CCB_HASH_SHIFT 8 #define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT) #define CCB_HASH_MASK (CCB_HASH_SIZE-1) #define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK) /* * Device flags. */ #define SYM_DISC_ENABLED (1) #define SYM_TAGS_ENABLED (1<<1) #define SYM_SCAN_BOOT_DISABLED (1<<2) #define SYM_SCAN_LUNS_DISABLED (1<<3) /* * Host adapter miscellaneous flags. */ #define SYM_AVOID_BUS_RESET (1) #define SYM_SCAN_TARGETS_HILO (1<<1) /* * Device quirks. * Some devices, for example the CHEETAH 2 LVD, disconnects without * saving the DATA POINTER then reselects and terminates the IO. * On reselection, the automatic RESTORE DATA POINTER makes the * CURRENT DATA POINTER not point at the end of the IO. * This behaviour just breaks our calculation of the residual. * For now, we just force an AUTO SAVE on disconnection and will * fix that in a further driver version. */ #define SYM_QUIRK_AUTOSAVE 1 /* * Misc. */ #define SYM_LOCK() mtx_lock(&np->mtx) #define SYM_LOCK_ASSERT(_what) mtx_assert(&np->mtx, (_what)) #define SYM_LOCK_DESTROY() mtx_destroy(&np->mtx) #define SYM_LOCK_INIT() mtx_init(&np->mtx, "sym_lock", NULL, MTX_DEF) #define SYM_LOCK_INITIALIZED() mtx_initialized(&np->mtx) #define SYM_UNLOCK() mtx_unlock(&np->mtx) #define SYM_SNOOP_TIMEOUT (10000000) #define SYM_PCI_IO PCIR_BAR(0) #define SYM_PCI_MMIO PCIR_BAR(1) #define SYM_PCI_RAM PCIR_BAR(2) #define SYM_PCI_RAM64 PCIR_BAR(3) /* * Back-pointer from the CAM CCB to our data structures. */ #define sym_hcb_ptr spriv_ptr0 /* #define sym_ccb_ptr spriv_ptr1 */ /* * We mostly have to deal with pointers. * Thus these typedef's. */ typedef struct sym_tcb *tcb_p; typedef struct sym_lcb *lcb_p; typedef struct sym_ccb *ccb_p; typedef struct sym_hcb *hcb_p; /* * Gather negotiable parameters value */ struct sym_trans { u8 scsi_version; u8 spi_version; u8 period; u8 offset; u8 width; u8 options; /* PPR options */ }; struct sym_tinfo { struct sym_trans current; struct sym_trans goal; struct sym_trans user; }; #define BUS_8_BIT MSG_EXT_WDTR_BUS_8_BIT #define BUS_16_BIT MSG_EXT_WDTR_BUS_16_BIT /* * Global TCB HEADER. * * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the TCB to a global * address after selection. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_tcbh { /* * Scripts bus addresses of LUN table accessed from scripts. * LUN #0 is a special case, since multi-lun devices are rare, * and we we want to speed-up the general case and not waste * resources. */ u32 luntbl_sa; /* bus address of this table */ u32 lun0_sa; /* bus address of LCB #0 */ /* * Actual SYNC/WIDE IO registers value for this target. * 'sval', 'wval' and 'uval' are read from SCRIPTS and * so have alignment constraints. */ /*0*/ u_char uval; /* -> SCNTL4 register */ /*1*/ u_char sval; /* -> SXFER io register */ /*2*/ u_char filler1; /*3*/ u_char wval; /* -> SCNTL3 io register */ }; /* * Target Control Block */ struct sym_tcb { /* * TCB header. * Assumed at offset 0. */ /*0*/ struct sym_tcbh head; /* * LUN table used by the SCRIPTS processor. * An array of bus addresses is used on reselection. */ u32 *luntbl; /* LCBs bus address table */ /* * LUN table used by the C code. 
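     * LUN #0 is reachable through the direct lun0p pointer below;
     * any other LUN goes through the lunmp[] array, which is only
     * allocated on demand (see the sym_lp() lookup macro further
     * down).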
*/ lcb_p lun0p; /* LCB of LUN #0 (usual case) */ #if SYM_CONF_MAX_LUN > 1 lcb_p *lunmp; /* Other LCBs [1..MAX_LUN] */ #endif /* * Bitmap that tells about LUNs that succeeded at least * 1 IO and therefore assumed to be a real device. * Avoid useless allocation of the LCB structure. */ u32 lun_map[(SYM_CONF_MAX_LUN+31)/32]; /* * Bitmap that tells about LUNs that haven't yet an LCB * allocated (not discovered or LCB allocation failed). */ u32 busy0_map[(SYM_CONF_MAX_LUN+31)/32]; /* * Transfer capabilities (SIP) */ struct sym_tinfo tinfo; /* * Keep track of the CCB used for the negotiation in order * to ensure that only 1 negotiation is queued at a time. */ ccb_p nego_cp; /* CCB used for the nego */ /* * Set when we want to reset the device. */ u_char to_reset; /* * Other user settable limits and options. * These limits are read from the NVRAM if present. */ u_char usrflags; u_short usrtags; }; /* * Assert some alignments required by the chip. */ CTASSERT(((offsetof(struct sym_reg, nc_sxfer) ^ offsetof(struct sym_tcb, head.sval)) &3) == 0); CTASSERT(((offsetof(struct sym_reg, nc_scntl3) ^ offsetof(struct sym_tcb, head.wval)) &3) == 0); /* * Global LCB HEADER. * * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the LCB to a global * address after selection. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_lcbh { /* * SCRIPTS address jumped by SCRIPTS on reselection. * For not probed logical units, this address points to * SCRIPTS that deal with bad LU handling (must be at * offset zero of the LCB for that reason). */ /*0*/ u32 resel_sa; /* * Task (bus address of a CCB) read from SCRIPTS that points * to the unique ITL nexus allowed to be disconnected. */ u32 itl_task_sa; /* * Task table bus address (read from SCRIPTS). */ u32 itlq_tbl_sa; }; /* * Logical Unit Control Block */ struct sym_lcb { /* * TCB header. * Assumed at offset 0. */ /*0*/ struct sym_lcbh head; /* * Task table read from SCRIPTS that contains pointers to * ITLQ nexuses. The bus address read from SCRIPTS is * inside the header. */ u32 *itlq_tbl; /* Kernel virtual address */ /* * Busy CCBs management. */ u_short busy_itlq; /* Number of busy tagged CCBs */ u_short busy_itl; /* Number of busy untagged CCBs */ /* * Circular tag allocation buffer. */ u_short ia_tag; /* Tag allocation index */ u_short if_tag; /* Tag release index */ u_char *cb_tags; /* Circular tags buffer */ /* * Set when we want to clear all tasks. */ u_char to_clear; /* * Capabilities. */ u_char user_flags; u_char current_flags; }; /* * Action from SCRIPTS on a task. * Is part of the CCB, but is also used separately to plug * error handling action to perform from SCRIPTS. */ struct sym_actscr { u32 start; /* Jumped by SCRIPTS after selection */ u32 restart; /* Jumped by SCRIPTS on relection */ }; /* * Phase mismatch context. * * It is part of the CCB and is used as parameters for the * DATA pointer. We need two contexts to handle correctly the * SAVED DATA POINTER. */ struct sym_pmc { struct sym_tblmove sg; /* Updated interrupted SG block */ u32 ret; /* SCRIPT return address */ }; /* * LUN control block lookup. * We use a direct pointer for LUN #0, and a table of * pointers which is only allocated for devices that support * LUN(s) > 0. */ #if SYM_CONF_MAX_LUN <= 1 #define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : 0 #else #define sym_lp(tp, lun) \ (!lun) ? (tp)->lun0p : (tp)->lunmp ? 
(tp)->lunmp[(lun)] : 0 #endif /* * Status are used by the host and the script processor. * * The last four bytes (status[4]) are copied to the * scratchb register (declared as scr0..scr3) just after the * select/reselect, and copied back just after disconnecting. * Inside the script the XX_REG are used. */ /* * Last four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define SS_PRT nc_scr2 #define HF_REG scr3 #define HF_PRT nc_scr3 /* * Last four bytes (host) */ #define actualquirks phys.head.status[0] #define host_status phys.head.status[1] #define ssss_status phys.head.status[2] #define host_flags phys.head.status[3] /* * Host flags */ #define HF_IN_PM0 1u #define HF_IN_PM1 (1u<<1) #define HF_ACT_PM (1u<<2) #define HF_DP_SAVED (1u<<3) #define HF_SENSE (1u<<4) #define HF_EXT_ERR (1u<<5) #define HF_DATA_IN (1u<<6) #ifdef SYM_CONF_IARB_SUPPORT #define HF_HINT_IARB (1u<<7) #endif /* * Global CCB HEADER. * * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the ccb to a global * address after selection (or reselection) and copied back * before disconnect. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_ccbh { /* * Start and restart SCRIPTS addresses (must be at 0). */ /*0*/ struct sym_actscr go; /* * SCRIPTS jump address that deal with data pointers. * 'savep' points to the position in the script responsible * for the actual transfer of data. * It's written on reception of a SAVE_DATA_POINTER message. */ u32 savep; /* Jump address to saved data pointer */ u32 lastp; /* SCRIPTS address at end of data */ u32 goalp; /* Not accessed for now from SCRIPTS */ /* * Status fields. */ u8 status[4]; }; /* * Data Structure Block * * During execution of a ccb by the script processor, the * DSA (data structure address) register points to this * substructure of the ccb. */ struct sym_dsb { /* * CCB header. * Also assumed at offset 0 of the sym_ccb structure. */ /*0*/ struct sym_ccbh head; /* * Phase mismatch contexts. * We need two to handle correctly the SAVED DATA POINTER. * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic * for address calculation from SCRIPTS. */ struct sym_pmc pm0; struct sym_pmc pm1; /* * Table data for Script */ struct sym_tblsel select; struct sym_tblmove smsg; struct sym_tblmove smsg_ext; struct sym_tblmove cmd; struct sym_tblmove sense; struct sym_tblmove wresid; struct sym_tblmove data [SYM_CONF_MAX_SG]; }; /* * Our Command Control Block */ struct sym_ccb { /* * This is the data structure which is pointed by the DSA * register when it is executed by the script processor. * It must be the first entry. */ struct sym_dsb phys; /* * Pointer to CAM ccb and related stuff. */ struct callout ch; /* callout handle */ union ccb *cam_ccb; /* CAM scsiio ccb */ u8 cdb_buf[16]; /* Copy of CDB */ u8 *sns_bbuf; /* Bounce buffer for sense data */ #define SYM_SNS_BBUF_LEN sizeof(struct scsi_sense_data) int data_len; /* Total data length */ int segments; /* Number of SG segments */ /* * Miscellaneous status'. */ u_char nego_status; /* Negotiation status */ u_char xerr_status; /* Extended error flags */ u32 extra_bytes; /* Extraneous bytes transferred */ /* * Message areas. * We prepare a message to be sent after selection. * We may use a second one if the command is rescheduled * due to CHECK_CONDITION or COMMAND TERMINATED. * Contents are IDENTIFY and SIMPLE_TAG. 
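     * (For reference, the SCSI-2 encodings: an IDENTIFY byte is
     * 0x80 | (disc_priv << 6) | lun, and SIMPLE_TAG is the two
     * byte sequence 0x20, tag.)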
* While negotiating sync or wide transfer, * a SDTR or WDTR message is appended. */ u_char scsi_smsg [12]; u_char scsi_smsg2[12]; /* * Auto request sense related fields. */ u_char sensecmd[6]; /* Request Sense command */ u_char sv_scsi_status; /* Saved SCSI status */ u_char sv_xerr_status; /* Saved extended status */ int sv_resid; /* Saved residual */ /* * Map for the DMA of user data. */ void *arg; /* Argument for some callback */ bus_dmamap_t dmamap; /* DMA map for user data */ u_char dmamapped; #define SYM_DMA_NONE 0 #define SYM_DMA_READ 1 #define SYM_DMA_WRITE 2 /* * Other fields. */ u32 ccb_ba; /* BUS address of this CCB */ u_short tag; /* Tag for this transfer */ /* NO_TAG means no tag */ u_char target; u_char lun; ccb_p link_ccbh; /* Host adapter CCB hash chain */ SYM_QUEHEAD link_ccbq; /* Link to free/busy CCB queue */ u32 startp; /* Initial data pointer */ int ext_sg; /* Extreme data pointer, used */ int ext_ofs; /* to calculate the residual. */ u_char to_abort; /* Want this IO to be aborted */ }; #define CCB_BA(cp,lbl) (cp->ccb_ba + offsetof(struct sym_ccb, lbl)) /* * Host Control Block */ struct sym_hcb { struct mtx mtx; /* * Global headers. * Due to poorness of addressing capabilities, earlier * chips (810, 815, 825) copy part of the data structures * (CCB, TCB and LCB) in fixed areas. */ #ifdef SYM_CONF_GENERIC_SUPPORT struct sym_ccbh ccb_head; struct sym_tcbh tcb_head; struct sym_lcbh lcb_head; #endif /* * Idle task and invalid task actions and * their bus addresses. */ struct sym_actscr idletask, notask, bad_itl, bad_itlq; vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba; /* * Dummy lun table to protect us against target * returning bad lun number on reselection. */ u32 *badluntbl; /* Table physical address */ u32 badlun_sa; /* SCRIPT handler BUS address */ /* * Bus address of this host control block. */ u32 hcb_ba; /* * Bit 32-63 of the on-chip RAM bus address in LE format. * The START_RAM64 script loads the MMRS and MMWS from this * field. */ u32 scr_ram_seg; /* * Chip and controller indentification. */ device_t device; /* * Initial value of some IO register bits. * These values are assumed to have been set by BIOS, and may * be used to probe adapter implementation differences. */ u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4, sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4, sv_stest1; /* * Actual initial value of IO register bits used by the * driver. They are loaded at initialisation according to * features that are to be enabled/disabled. */ u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4, rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4; /* * Target data. */ #ifdef __amd64__ struct sym_tcb *target; #else struct sym_tcb target[SYM_CONF_MAX_TARGET]; #endif /* * Target control block bus address array used by the SCRIPT * on reselection. */ u32 *targtbl; u32 targtbl_ba; /* * CAM SIM information for this instance. */ struct cam_sim *sim; struct cam_path *path; /* * Allocated hardware resources. */ struct resource *irq_res; struct resource *io_res; struct resource *mmio_res; struct resource *ram_res; int ram_id; void *intr; /* * Bus stuff. * * My understanding of PCI is that all agents must share the * same addressing range and model. * But some hardware architecture guys provide complex and * brain-deaded stuff that makes shit. * This driver only support PCI compliant implementations and * deals with part of the BUS stuff complexity only to fit O/S * requirements. */ /* * DMA stuff. 
*/ bus_dma_tag_t bus_dmat; /* DMA tag from parent BUS */ bus_dma_tag_t data_dmat; /* DMA tag for user data */ /* * BUS addresses of the chip */ vm_offset_t mmio_ba; /* MMIO BUS address */ int mmio_ws; /* MMIO Window size */ vm_offset_t ram_ba; /* RAM BUS address */ int ram_ws; /* RAM window size */ /* * SCRIPTS virtual and physical bus addresses. * 'script' is loaded in the on-chip RAM if present. * 'scripth' stays in main memory for all chips except the * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM. */ u_char *scripta0; /* Copies of script and scripth */ u_char *scriptb0; /* Copies of script and scripth */ vm_offset_t scripta_ba; /* Actual script and scripth */ vm_offset_t scriptb_ba; /* bus addresses. */ vm_offset_t scriptb0_ba; u_short scripta_sz; /* Actual size of script A */ u_short scriptb_sz; /* Actual size of script B */ /* * Bus addresses, setup and patch methods for * the selected firmware. */ struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */ struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */ void (*fw_setup)(hcb_p np, const struct sym_fw *fw); void (*fw_patch)(hcb_p np); const char *fw_name; /* * General controller parameters and configuration. */ u_short device_id; /* PCI device id */ u_char revision_id; /* PCI device revision id */ u_int features; /* Chip features map */ u_char myaddr; /* SCSI id of the adapter */ u_char maxburst; /* log base 2 of dwords burst */ u_char maxwide; /* Maximum transfer width */ u_char minsync; /* Min sync period factor (ST) */ u_char maxsync; /* Max sync period factor (ST) */ u_char maxoffs; /* Max scsi offset (ST) */ u_char minsync_dt; /* Min sync period factor (DT) */ u_char maxsync_dt; /* Max sync period factor (DT) */ u_char maxoffs_dt; /* Max scsi offset (DT) */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char clock_divn; /* Number of clock divisors */ u32 clock_khz; /* SCSI clock frequency in KHz */ u32 pciclk_khz; /* Estimated PCI clock in KHz */ /* * Start queue management. * It is filled up by the host processor and accessed by the * SCRIPTS processor in order to start SCSI commands. */ volatile /* Prevent code optimizations */ u32 *squeue; /* Start queue virtual address */ u32 squeue_ba; /* Start queue BUS address */ u_short squeueput; /* Next free slot of the queue */ u_short actccbs; /* Number of allocated CCBs */ /* * Command completion queue. * It is the same size as the start queue to avoid overflow. */ u_short dqueueget; /* Next position to scan */ volatile /* Prevent code optimizations */ u32 *dqueue; /* Completion (done) queue */ u32 dqueue_ba; /* Done queue BUS address */ /* * Miscellaneous buffers accessed by the scripts-processor. * They shall be DWORD aligned, because they may be read or * written with a script command. */ u_char msgout[8]; /* Buffer for MESSAGE OUT */ u_char msgin [8]; /* Buffer for MESSAGE IN */ u32 lastmsg; /* Last SCSI message sent */ u_char scratch; /* Scratch for SCSI receive */ /* * Miscellaneous configuration and status parameters. */ u_char usrflags; /* Miscellaneous user flags */ u_char scsi_mode; /* Current SCSI BUS mode */ u_char verbose; /* Verbosity for this controller*/ u32 cache; /* Used for cache test at init. */ /* * CCB lists and queue. 
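     *
     * Editor's sketch of the DSA to CCB lookup these fields allow
     * (illustrative; the driver's own helper may differ in detail):
     *
     *	ccb_p cp = np->ccbh[CCB_HASH_CODE(dsa)];
     *	while (cp && cp->ccb_ba != dsa)
     *		cp = cp->link_ccbh;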
*/ ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */ SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */ SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */ /* * During error handling and/or recovery, * active CCBs that are to be completed with * error or requeued are moved from the busy_ccbq * to the comp_ccbq prior to completion. */ SYM_QUEHEAD comp_ccbq; /* * CAM CCB pending queue. */ SYM_QUEHEAD cam_ccbq; /* * IMMEDIATE ARBITRATION (IARB) control. * * We keep track in 'last_cp' of the last CCB that has been * queued to the SCRIPTS processor and clear 'last_cp' when * this CCB completes. If last_cp is not zero at the moment * we queue a new CCB, we set a flag in 'last_cp' that is * used by the SCRIPTS as a hint for setting IARB. * We donnot set more than 'iarb_max' consecutive hints for * IARB in order to leave devices a chance to reselect. * By the way, any non zero value of 'iarb_max' is unfair. :) */ #ifdef SYM_CONF_IARB_SUPPORT u_short iarb_max; /* Max. # consecutive IARB hints*/ u_short iarb_count; /* Actual # of these hints */ ccb_p last_cp; #endif /* * Command abort handling. * We need to synchronize tightly with the SCRIPTS * processor in order to handle things correctly. */ u_char abrt_msg[4]; /* Message to send buffer */ struct sym_tblmove abrt_tbl; /* Table for the MOV of it */ struct sym_tblsel abrt_sel; /* Sync params for selection */ u_char istat_sem; /* Tells the chip to stop (SEM) */ }; #define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl)) /* * Return the name of the controller. */ static __inline const char *sym_name(hcb_p np) { return device_get_nameunit(np->device); } /*--------------------------------------------------------------------------*/ /*------------------------------ FIRMWARES ---------------------------------*/ /*--------------------------------------------------------------------------*/ /* * This stuff will be moved to a separate source file when * the driver will be broken into several source modules. */ /* * Macros used for all firmwares. */ #define SYM_GEN_A(s, label) ((short) offsetof(s, label)), #define SYM_GEN_B(s, label) ((short) offsetof(s, label)), #define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label) #define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label) #ifdef SYM_CONF_GENERIC_SUPPORT /* * Allocate firmware #1 script area. */ #define SYM_FWA_SCR sym_fw1a_scr #define SYM_FWB_SCR sym_fw1b_scr #include static const struct sym_fwa_ofs sym_fw1a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; static const struct sym_fwb_ofs sym_fw1b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Allocate firmware #2 script area. */ #define SYM_FWA_SCR sym_fw2a_scr #define SYM_FWB_SCR sym_fw2b_scr #include static const struct sym_fwa_ofs sym_fw2a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; static const struct sym_fwb_ofs sym_fw2b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) SYM_GEN_B(struct SYM_FWB_SCR, start64) SYM_GEN_B(struct SYM_FWB_SCR, pm_handle) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #undef SYM_GEN_A #undef SYM_GEN_B #undef PADDR_A #undef PADDR_B #ifdef SYM_CONF_GENERIC_SUPPORT /* * Patch routine for firmware #1. */ static void sym_fw1_patch(hcb_p np) { struct sym_fw1a_scr *scripta0; struct sym_fw1b_scr *scriptb0; scripta0 = (struct sym_fw1a_scr *) np->scripta0; scriptb0 = (struct sym_fw1b_scr *) np->scriptb0; /* * Remove LED support if not needed. 
*/ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some data in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); } #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Patch routine for firmware #2. */ static void sym_fw2_patch(hcb_p np) { struct sym_fw2a_scr *scripta0; struct sym_fw2b_scr *scriptb0; scripta0 = (struct sym_fw2a_scr *) np->scripta0; scriptb0 = (struct sym_fw2b_scr *) np->scriptb0; /* * Remove LED support if not needed. */ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some variable in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); /* * Remove the load of SCNTL4 on reselection if not a C10. */ if (!(np->features & FE_C10)) { scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP); scripta0->resel_scntl4[1] = cpu_to_scr(0); } /* * Remove a couple of work-arounds specific to C1010 if * they are not desirable. See `sym_fw2.h' for more details. */ if (!(np->device_id == PCI_ID_LSI53C1010_2 && np->revision_id < 0x1 && np->pciclk_khz < 60000)) { scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP); scripta0->datao_phase[1] = cpu_to_scr(0); } if (!(np->device_id == PCI_ID_LSI53C1010 && /* np->revision_id < 0xff */ 1)) { scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP); scripta0->sel_done[1] = cpu_to_scr(0); } /* * Patch some other variables in SCRIPTS. * These ones are loaded by the SCRIPTS processor. */ scriptb0->pm0_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm0_data)); scriptb0->pm1_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm1_data)); } /* * Fill the data area in scripts. * To be done for all firmwares. */ static void sym_fw_fill_data (u32 *in, u32 *out) { int i; for (i = 0; i < SYM_CONF_MAX_SG; i++) { *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN; *in++ = offsetof (struct sym_dsb, data[i]); *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT; *out++ = offsetof (struct sym_dsb, data[i]); } } /* * Setup useful script bus addresses. * To be done for all firmwares. */ static void sym_fw_setup_bus_addresses(hcb_p np, const struct sym_fw *fw) { u32 *pa; const u_short *po; int i; /* * Build the bus address table for script A * from the script A offset table. */ po = (const u_short *) fw->a_ofs; pa = (u32 *) &np->fwa_bas; for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++) pa[i] = np->scripta_ba + po[i]; /* * Same for script B. 
*/ po = (const u_short *) fw->b_ofs; pa = (u32 *) &np->fwb_bas; for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++) pa[i] = np->scriptb_ba + po[i]; } #ifdef SYM_CONF_GENERIC_SUPPORT /* * Setup routine for firmware #1. */ static void sym_fw1_setup(hcb_p np, const struct sym_fw *fw) { struct sym_fw1a_scr *scripta0; scripta0 = (struct sym_fw1a_scr *) np->scripta0; /* * Fill variable parts in scripts. */ sym_fw_fill_data(scripta0->data_in, scripta0->data_out); /* * Setup bus addresses used from the C code.. */ sym_fw_setup_bus_addresses(np, fw); } #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Setup routine for firmware #2. */ static void sym_fw2_setup(hcb_p np, const struct sym_fw *fw) { struct sym_fw2a_scr *scripta0; scripta0 = (struct sym_fw2a_scr *) np->scripta0; /* * Fill variable parts in scripts. */ sym_fw_fill_data(scripta0->data_in, scripta0->data_out); /* * Setup bus addresses used from the C code.. */ sym_fw_setup_bus_addresses(np, fw); } /* * Allocate firmware descriptors. */ #ifdef SYM_CONF_GENERIC_SUPPORT static const struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic"); #endif /* SYM_CONF_GENERIC_SUPPORT */ static const struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based"); /* * Find the most appropriate firmware for a chip. */ static const struct sym_fw * sym_find_firmware(const struct sym_pci_chip *chip) { if (chip->features & FE_LDSTR) return &sym_fw2; #ifdef SYM_CONF_GENERIC_SUPPORT else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC))) return &sym_fw1; #endif else return NULL; } /* * Bind a script to physical addresses. */ static void sym_fw_bind_script (hcb_p np, u32 *start, int len) { u32 opcode, new, old, tmp1, tmp2; u32 *end, *cur; int relocs; cur = start; end = start + len/4; while (cur < end) { opcode = *cur; /* * If we forget to change the length * in scripts, a field will be * padded with 0. This is an illegal * command. */ if (opcode == 0) { printf ("%s: ERROR0 IN SCRIPT at %d.\n", sym_name(np), (int) (cur-start)); MDELAY (10000); ++cur; continue; } /* * We use the bogus value 0xf00ff00f ;-) * to reserve data area in SCRIPTS. */ if (opcode == SCR_DATA_ZERO) { *cur++ = 0; continue; } if (DEBUG_FLAGS & DEBUG_SCRIPT) printf ("%d: <%x>\n", (int) (cur-start), (unsigned)opcode); /* * We don't have to decode ALL commands */ switch (opcode >> 28) { case 0xf: /* * LOAD / STORE DSA relative, don't relocate. */ relocs = 0; break; case 0xe: /* * LOAD / STORE absolute. */ relocs = 1; break; case 0xc: /* * COPY has TWO arguments. */ relocs = 2; tmp1 = cur[1]; tmp2 = cur[2]; if ((tmp1 ^ tmp2) & 3) { printf ("%s: ERROR1 IN SCRIPT at %d.\n", sym_name(np), (int) (cur-start)); MDELAY (10000); } /* * If PREFETCH feature not enabled, remove * the NO FLUSH bit if present. */ if ((opcode & SCR_NO_FLUSH) && !(np->features & FE_PFEN)) { opcode = (opcode & ~SCR_NO_FLUSH); } break; case 0x0: /* * MOVE/CHMOV (absolute address) */ if (!(np->features & FE_WIDE)) opcode = (opcode | OPC_MOVE); relocs = 1; break; case 0x1: /* * MOVE/CHMOV (table indirect) */ if (!(np->features & FE_WIDE)) opcode = (opcode | OPC_MOVE); relocs = 0; break; case 0x8: /* * JUMP / CALL * dont't relocate if relative :-) */ if (opcode & 0x00800000) relocs = 0; else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/ relocs = 2; else relocs = 1; break; case 0x4: case 0x5: case 0x6: case 0x7: relocs = 1; break; default: relocs = 0; break; } /* * Scriptify:) the opcode. */ *cur++ = cpu_to_scr(opcode); /* * If no relocation, assume 1 argument * and just scriptize:) it. 
*/ if (!relocs) { *cur = cpu_to_scr(*cur); ++cur; continue; } /* * Otherwise performs all needed relocations. */ while (relocs--) { old = *cur; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + np->mmio_ba; break; case RELOC_LABEL_A: new = (old & ~RELOC_MASK) + np->scripta_ba; break; case RELOC_LABEL_B: new = (old & ~RELOC_MASK) + np->scriptb_ba; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + np->hcb_ba; break; case 0: /* * Don't relocate a 0 address. * They are mostly used for patched or * script self-modified areas. */ if (old == 0) { new = old; break; } /* fall through */ default: new = 0; panic("sym_fw_bind_script: " "weird relocation %x\n", old); break; } *cur++ = cpu_to_scr(new); } } } /*---------------------------------------------------------------------------*/ /*--------------------------- END OF FIRMWARES -----------------------------*/ /*---------------------------------------------------------------------------*/ /* * Function prototypes. */ static void sym_save_initial_setting (hcb_p np); static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram); static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr); static void sym_put_start_queue (hcb_p np, ccb_p cp); static void sym_chip_reset (hcb_p np); static void sym_soft_reset (hcb_p np); static void sym_start_reset (hcb_p np); static int sym_reset_scsi_bus (hcb_p np, int enab_int); static int sym_wakeup_done (hcb_p np); static void sym_flush_busy_queue (hcb_p np, int cam_status); static void sym_flush_comp_queue (hcb_p np, int cam_status); static void sym_init (hcb_p np, int reason); static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp); static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak); static void sym_setwide (hcb_p np, ccb_p cp, u_char wide); static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak); static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak); static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat); static void sym_intr (void *arg); static void sym_poll (struct cam_sim *sim); static void sym_recover_scsi_int (hcb_p np, u_char hsts); static void sym_int_sto (hcb_p np); static void sym_int_udc (hcb_p np); static void sym_int_sbmc (hcb_p np); static void sym_int_par (hcb_p np, u_short sist); static void sym_int_ma (hcb_p np); static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task); static void sym_sir_bad_scsi_status (hcb_p np, ccb_p cp); static int sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task); static void sym_sir_task_recovery (hcb_p np, int num); static int sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs); static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs); static int sym_compute_residual (hcb_p np, ccb_p cp); static int sym_show_msg (u_char * msg); static void sym_print_msg (ccb_p cp, char *label, u_char *msg); static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp); static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp); static void sym_int_sir (hcb_p np); static void sym_free_ccb (hcb_p np, ccb_p cp); static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order); static ccb_p sym_alloc_ccb (hcb_p 
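/*
 * The relocation step above works because every address argument in
 * the unbound SCRIPTS carries a tag in its top bits (RELOC_MASK)
 * naming the address space it refers to. As a worked example under
 * that scheme, a word tagged RELOC_LABEL_A with offset 0x100 binds to
 *
 *	new = (old & ~RELOC_MASK) + np->scripta_ba;
 *
 * i.e. 0x100 past the bus address of script A, and is then stored
 * back with cpu_to_scr() like every other SCRIPTS word.
 */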
np); static ccb_p sym_ccb_from_dsa (hcb_p np, u32 dsa); static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln); static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln); static int sym_snooptest (hcb_p np); static void sym_selectclock(hcb_p np, u_char scntl3); static void sym_getclock (hcb_p np, int mult); static int sym_getpciclock (hcb_p np); static void sym_complete_ok (hcb_p np, ccb_p cp); static void sym_complete_error (hcb_p np, ccb_p cp); static void sym_callout (void *arg); static int sym_abort_scsiio (hcb_p np, union ccb *ccb, int timed_out); static void sym_reset_dev (hcb_p np, union ccb *ccb); static void sym_action (struct cam_sim *sim, union ccb *ccb); static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs); static int sym_scatter_sg_physical (hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs); static void sym_action2 (struct cam_sim *sim, union ccb *ccb); static void sym_update_trans(hcb_p np, struct sym_trans *tip, struct ccb_trans_settings *cts); static void sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts); static const struct sym_pci_chip *sym_find_pci_chip (device_t dev); static int sym_pci_probe (device_t dev); static int sym_pci_attach (device_t dev); static void sym_pci_free (hcb_p np); static int sym_cam_attach (hcb_p np); static void sym_cam_free (hcb_p np); static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram); static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp); static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp); /* * Print something which allows to retrieve the controller type, * unit, target, lun concerned by a kernel message. */ static void PRINT_TARGET (hcb_p np, int target) { printf ("%s:%d:", sym_name(np), target); } static void PRINT_LUN(hcb_p np, int target, int lun) { printf ("%s:%d:%d:", sym_name(np), target, lun); } static void PRINT_ADDR (ccb_p cp) { if (cp && cp->cam_ccb) xpt_print_path(cp->cam_ccb->ccb_h.path); } /* * Take into account this ccb in the freeze count. */ static void sym_freeze_cam_ccb(union ccb *ccb) { if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) { if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } } } /* * Set the status field of a CAM CCB. */ static __inline void sym_set_cam_status(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } /* * Get the status field of a CAM CCB. */ static __inline int sym_get_cam_status(union ccb *ccb) { return ccb->ccb_h.status & CAM_STATUS_MASK; } /* * Enqueue a CAM CCB. */ static void sym_enqueue_cam_ccb(ccb_p cp) { hcb_p np; union ccb *ccb; ccb = cp->cam_ccb; np = (hcb_p) cp->arg; assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED)); ccb->ccb_h.status = CAM_REQ_INPROG; callout_reset_sbt(&cp->ch, SBT_1MS * ccb->ccb_h.timeout, 0, sym_callout, (caddr_t)ccb, 0); ccb->ccb_h.status |= CAM_SIM_QUEUED; ccb->ccb_h.sym_hcb_ptr = np; sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq); } /* * Complete a pending CAM CCB. 
*/ static void sym_xpt_done(hcb_p np, union ccb *ccb, ccb_p cp) { SYM_LOCK_ASSERT(MA_OWNED); if (ccb->ccb_h.status & CAM_SIM_QUEUED) { callout_stop(&cp->ch); sym_remque(sym_qptr(&ccb->ccb_h.sim_links)); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.sym_hcb_ptr = NULL; } xpt_done(ccb); } static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status) { SYM_LOCK_ASSERT(MA_OWNED); sym_set_cam_status(ccb, cam_status); xpt_done(ccb); } /* * SYMBIOS chip clock divisor table. * * Divisors are multiplied by 10,000,000 in order to make * calculations more simple. */ #define _5M 5000000 static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /* * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, * 128 transfers. All chips support at least 16 transfers * bursts. The 825A, 875 and 895 chips support bursts of up * to 128 transfers and the 895A and 896 support bursts of up * to 64 transfers. All other chips support up to 16 * transfers bursts. * * For PCI 32 bit data transfers each transfer is a DWORD. * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. * * We use log base 2 (burst length) as internal code, with * value 0 meaning "burst disabled". */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. */ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static __inline void sym_init_burst(hcb_p np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /* * Print out the list of targets that have some flag disabled by user. */ static void sym_print_targets_flag(hcb_p np, int mask, char *msg) { int cnt; int i; for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { if (i == np->myaddr) continue; if (np->target[i].usrflags & mask) { if (!cnt++) printf("%s: %s disabled for targets", sym_name(np), msg); printf(" %d", i); } } if (cnt) printf(".\n"); } /* * Save initial settings of some IO registers. * Assumed to have been set by BIOS. * We cannot reset the chip prior to reading the * IO registers, since informations will be lost. * Since the SCRIPTS processor may be running, this * is not safe on paper, but it seems to work quite * well. :) */ static void sym_save_initial_setting (hcb_p np) { np->sv_scntl0 = INB(nc_scntl0) & 0x0a; np->sv_scntl3 = INB(nc_scntl3) & 0x07; np->sv_dmode = INB(nc_dmode) & 0xce; np->sv_dcntl = INB(nc_dcntl) & 0xa8; np->sv_ctest3 = INB(nc_ctest3) & 0x01; np->sv_ctest4 = INB(nc_ctest4) & 0x80; np->sv_gpcntl = INB(nc_gpcntl); np->sv_stest1 = INB(nc_stest1); np->sv_stest2 = INB(nc_stest2) & 0x20; np->sv_stest4 = INB(nc_stest4); if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ np->sv_scntl4 = INB(nc_scntl4); np->sv_ctest5 = INB(nc_ctest5) & 0x04; } else np->sv_ctest5 = INB(nc_ctest5) & 0x24; } /* * Prepare io register values used by sym_init() according * to selected and supported features. */ static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram) { u_char burst_max; u32 period; int i; /* * Wide ? */ np->maxwide = (np->features & FE_WIDE)? 1 : 0; /* * Get the frequency of the chip's clock. */ if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; np->clock_khz = (np->features & FE_CLK80)? 
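/*
 * (As a worked example of the burst encoding defined above, numbers
 * only: with CTEST4 bit 7 clear, DMODE = 0x80 and CTEST5 bit 2 clear,
 * burst_code() yields ((0x80 & 0xc0) >> 6) + 0 + 1 = 3, and
 * burst_length(3) = 1 << 3 = 8 transfers per burst. Writing the same
 * code back, sym_init_burst(np, 3) stores bc - 1 = 2: the low two
 * bits go to DMODE bits 6-7, bit 2 would go to CTEST5 bit 2.)
 */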
80000 : 40000; np->clock_khz *= np->multiplier; if (np->clock_khz != 40000) sym_getclock(np, np->multiplier); /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * The C1010 uses hardwired divisors for async. * So, we just throw away, the async. divisor.:-) */ if (np->features & FE_C10) np->rv_scntl3 = 0; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. */ - period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; + period = howmany(4 * div_10M[0], np->clock_khz); if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; - else np->minsync = (period + 40 - 1) / 40; + else np->minsync = howmany(period, 40); /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & (FE_ULTRA2|FE_ULTRA3))) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * If chip is a C1010, guess the sync limits in DT mode. */ if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { if (np->clock_khz == 160000) { np->minsync_dt = 9; np->maxsync_dt = 50; np->maxoffs_dt = 62; } } /* * 64 bit addressing (895A/896/1010) ? */ if (np->features & FE_DAC) #ifdef __LP64__ np->rv_ccntl1 |= (XTIMOD | EXTIBMV); #else np->rv_ccntl1 |= (DDAC); #endif /* * Phase mismatch handled by SCRIPTS (895A/896/1010) ? */ if (np->features & FE_NOPM) np->rv_ccntl0 |= (ENPMJ); /* * C1010 Errata. * In dual channel mode, contention occurs if internal cycles * are used. Disable internal cycles. */ if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x2) np->rv_ccntl0 |= DILS; /* * Select burst length (dwords) */ burst_max = SYM_SETUP_BURST_ORDER; if (burst_max == 255) burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5); if (burst_max > 7) burst_max = 7; if (burst_max > np->maxburst) burst_max = np->maxburst; /* * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. * This chip and the 860 Rev 1 may wrongly use PCI cache line * based transactions on LOAD/STORE instructions. So we have * to prevent these chips from using such PCI transactions in * this driver. The generic ncr driver that does not use * LOAD/STORE instructions does not need this work-around. */ if ((np->device_id == PCI_ID_SYM53C810 && np->revision_id >= 0x10 && np->revision_id <= 0x11) || (np->device_id == PCI_ID_SYM53C860 && np->revision_id <= 0x1)) np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); /* * Select all supported special features. * If we are using on-board RAM for scripts, prefetch (PFEN) * does not help, but burst op fetch (BOF) does. * Disabling PFEN makes sure BOF will be used. 
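 *
 * (As a worked example of the minimum-period math earlier in this
 * function, numbers only: with a plain 40 MHz clock,
 * period = howmany(4 * 10000000, 40000) = 1000 tenths of ns, which
 * falls through to minsync = howmany(1000, 40) = 25, i.e. a
 * factor-25, 100 ns, Fast-10 floor. With the clock quadrupled to
 * 160 MHz, period = 250 and minsync = 10 instead.)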
*/ if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ #if 1 if ((np->features & FE_PFEN) && !np->ram_ba) #else if (np->features & FE_PFEN) #endif np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ /* * Select some other */ if (SYM_SETUP_PCI_PARITY) np->rv_ctest4 |= MPEE; /* Master parity checking */ if (SYM_SETUP_SCSI_PARITY) np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ /* * Get parity checking, host ID and verbose mode from NVRAM */ np->myaddr = 255; sym_nvram_setup_host (np, nvram); #ifdef __sparc64__ np->myaddr = OF_getscsinitid(np->device); #endif /* * Get SCSI addr of host adapter (set by bios?). */ if (np->myaddr == 255) { np->myaddr = INB(nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SYM_SETUP_HOST_ID; } /* * Prepare initial io register bits for burst length */ sym_init_burst(np, burst_max); /* * Set SCSI BUS mode. * - LVD capable chips (895/895A/896/1010) report the * current BUS mode through the STEST4 IO register. * - For previous generation chips (825/825A/875), * user has to tell us how to check against HVD, * since a 100% safe algorithm is not possible. */ np->scsi_mode = SMODE_SE; if (np->features & (FE_ULTRA2|FE_ULTRA3)) np->scsi_mode = (np->sv_stest4 & SMODE); else if (np->features & FE_DIFF) { if (SYM_SETUP_SCSI_DIFF == 1) { if (np->sv_scntl3) { if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; } else if (nvram->type == SYM_SYMBIOS_NVRAM) { if (!(INB(nc_gpreg) & 0x08)) np->scsi_mode = SMODE_HVD; } } else if (SYM_SETUP_SCSI_DIFF == 2) np->scsi_mode = SMODE_HVD; } if (np->scsi_mode == SMODE_HVD) np->rv_stest2 |= 0x20; /* * Set LED support from SCRIPTS. * Ignore this feature for boards known to use a * specific GPIO wiring and for the 895A, 896 * and 1010 that drive the LED directly. */ if ((SYM_SETUP_SCSI_LED || (nvram->type == SYM_SYMBIOS_NVRAM || (nvram->type == SYM_TEKRAM_NVRAM && np->device_id == PCI_ID_SYM53C895))) && !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) np->features |= FE_LED0; /* * Set irq mode. */ switch(SYM_SETUP_IRQ_MODE & 3) { case 2: np->rv_dcntl |= IRQM; break; case 1: np->rv_dcntl |= (np->sv_dcntl & IRQM); break; default: break; } /* * Configure targets according to driver setup. * If NVRAM present get targets setup from NVRAM. */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { tcb_p tp = &np->target[i]; tp->tinfo.user.scsi_version = tp->tinfo.current.scsi_version= 2; tp->tinfo.user.spi_version = tp->tinfo.current.spi_version = 2; tp->tinfo.user.period = np->minsync; if (np->features & FE_ULTRA3) tp->tinfo.user.period = np->minsync_dt; tp->tinfo.user.offset = np->maxoffs; tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT; tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); tp->usrtags = SYM_SETUP_MAX_TAG; sym_nvram_setup_target (np, i, nvram); /* * For now, guess PPR/DT support from the period * and BUS width. */ if (np->features & FE_ULTRA3) { if (tp->tinfo.user.period <= 9 && tp->tinfo.user.width == BUS_16_BIT) { tp->tinfo.user.options |= PPR_OPT_DT; tp->tinfo.user.offset = np->maxoffs_dt; tp->tinfo.user.spi_version = 3; } } if (!tp->usrtags) tp->usrflags &= ~SYM_TAGS_ENABLED; } /* * Let user know about the settings. 
*/ i = nvram->type; printf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np), i == SYM_SYMBIOS_NVRAM ? "Symbios" : (i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"), np->myaddr, (np->features & FE_ULTRA3) ? 80 : (np->features & FE_ULTRA2) ? 40 : (np->features & FE_ULTRA) ? 20 : 10, sym_scsi_bus_mode(np->scsi_mode), (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); /* * Tell him more on demand. */ if (sym_verbose) { printf("%s: %s IRQ line driver%s\n", sym_name(np), np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->ram_ba ? ", using on-chip SRAM" : ""); printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); if (np->features & FE_NOPM) printf("%s: handling phase mismatch from SCRIPTS.\n", sym_name(np)); } /* * And still more. */ if (sym_verbose > 1) { printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } /* * Let user be aware of targets that have some disable flags set. */ sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT"); if (sym_verbose) sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED, "SCAN FOR LUNS"); return 0; } /* * Prepare the next negotiation message if needed. * * Fill in the part of message buffer that contains the * negotiation and the nego_status field of the CCB. * Returns the size of the message in bytes. */ static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr) { tcb_p tp = &np->target[cp->target]; int msglen = 0; /* * Early C1010 chips need a work-around for DT * data transfer to work. */ if (!(np->features & FE_U3EN)) tp->tinfo.goal.options = 0; /* * negotiate using PPR ? */ if (tp->tinfo.goal.options & PPR_OPT_MASK) nego = NS_PPR; /* * negotiate wide transfers ? */ else if (tp->tinfo.current.width != tp->tinfo.goal.width) nego = NS_WIDE; /* * negotiate synchronous transfers? */ else if (tp->tinfo.current.period != tp->tinfo.goal.period || tp->tinfo.current.offset != tp->tinfo.goal.offset) nego = NS_SYNC; switch (nego) { case NS_SYNC: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 3; msgptr[msglen++] = M_X_SYNC_REQ; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = tp->tinfo.goal.offset; break; case NS_WIDE: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 2; msgptr[msglen++] = M_X_WIDE_REQ; msgptr[msglen++] = tp->tinfo.goal.width; break; case NS_PPR: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 6; msgptr[msglen++] = M_X_PPR_REQ; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = 0; msgptr[msglen++] = tp->tinfo.goal.offset; msgptr[msglen++] = tp->tinfo.goal.width; msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT; break; } cp->nego_status = nego; if (nego) { tp->nego_cp = cp; /* Keep track a nego will be performed */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" : nego == NS_WIDE ? "wide msgout" : "ppr msgout", msgptr); } } return msglen; } /* * Insert a job into the start queue. */ static void sym_put_start_queue(hcb_p np, ccb_p cp) { u_short qidx; #ifdef SYM_CONF_IARB_SUPPORT /* * If the previously queued CCB is not yet done, * set the IARB hint. The SCRIPTS will go with IARB * for this job when starting the previous one. 
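 *
 * (For reference, the negotiation messages built by sym_prepare_nego()
 * just above follow the SPI extended message layouts:
 *
 *	SDTR:	M_EXTENDED, 3, M_X_SYNC_REQ, period, offset
 *	WDTR:	M_EXTENDED, 2, M_X_WIDE_REQ, width
 *	PPR:	M_EXTENDED, 6, M_X_PPR_REQ, period, 0, offset,
 *		width, options & PPR_OPT_DT
 *
 * all taken from tp->tinfo.goal.)
 *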
* We leave devices a chance to win arbitration by * not using more than 'iarb_max' consecutive * immediate arbitrations. */ if (np->last_cp && np->iarb_count < np->iarb_max) { np->last_cp->host_flags |= HF_HINT_IARB; ++np->iarb_count; } else np->iarb_count = 0; np->last_cp = cp; #endif /* * Insert first the idle task and then our job. * The MB should ensure proper ordering. */ qidx = np->squeueput + 2; if (qidx >= MAX_QUEUE*2) qidx = 0; np->squeue [qidx] = cpu_to_scr(np->idletask_ba); MEMORY_BARRIER(); np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); np->squeueput = qidx; if (DEBUG_FLAGS & DEBUG_QUEUE) printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput); /* * Script processor may be waiting for reselect. * Wake it up. */ MEMORY_BARRIER(); OUTB (nc_istat, SIGP|np->istat_sem); } /* * Soft reset the chip. * * Raising SRST when the chip is running may cause * problems on dual function chips (see below). * On the other hand, LVD devices need some delay * to settle and report actual BUS mode in STEST4. */ static void sym_chip_reset (hcb_p np) { OUTB (nc_istat, SRST); UDELAY (10); OUTB (nc_istat, 0); UDELAY(2000); /* For BUS MODE to settle */ } /* * Soft reset the chip. * * Some 896 and 876 chip revisions may hang-up if we set * the SRST (soft reset) bit at the wrong time when SCRIPTS * are running. * So, we need to abort the current operation prior to * soft resetting the chip. */ static void sym_soft_reset (hcb_p np) { u_char istat; int i; OUTB (nc_istat, CABRT); for (i = 1000000 ; i ; --i) { istat = INB (nc_istat); if (istat & SIP) { INW (nc_sist); continue; } if (istat & DIP) { OUTB (nc_istat, 0); INB (nc_dstat); break; } } if (!i) printf("%s: unable to abort current chip operation.\n", sym_name(np)); sym_chip_reset (np); } /* * Start reset process. * * The interrupt handler will reinitialize the chip. */ static void sym_start_reset(hcb_p np) { (void) sym_reset_scsi_bus(np, 1); } static int sym_reset_scsi_bus(hcb_p np, int enab_int) { u32 term; int retv = 0; sym_soft_reset(np); /* Soft reset the chip */ if (enab_int) OUTW (nc_sien, RST); /* * Enable Tolerant, reset IRQD if present and * properly set IRQ mode, prior to resetting the bus. */ OUTB (nc_stest3, TE); OUTB (nc_dcntl, (np->rv_dcntl & IRQM)); OUTB (nc_scntl1, CRST); UDELAY (200); if (!SYM_SETUP_SCSI_BUS_CHECK) goto out; /* * Check for no terminators or SCSI bus shorts to ground. * Read SCSI data bus, data parity bits and control signals. * We are expecting RESET to be TRUE and other signals to be * FALSE. */ term = INB(nc_sstat0); term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */ ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */ ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */ INB(nc_sbcl); /* req ack bsy sel atn msg cd io */ if (!(np->features & FE_WIDE)) term &= 0x3ffff; if (term != (2<<7)) { printf("%s: suspicious SCSI data while resetting the BUS.\n", sym_name(np)); printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " "0x%lx, expecting 0x%lx\n", sym_name(np), (np->features & FE_WIDE) ? "dp1,d15-8," : "", (u_long)term, (u_long)(2<<7)); if (SYM_SETUP_SCSI_BUS_CHECK == 1) retv = 1; } out: OUTB (nc_scntl1, 0); /* MDELAY(100); */ return retv; } /* * The chip may have completed jobs. Look at the DONE QUEUE. * * On architectures that may reorder LOAD/STORE operations, * a memory barrier may be needed after the reading of the * so-called `flag' and prior to dealing with the data. 
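 *
 * Concretely, the consumer pattern in sym_wakeup_done() below is:
 *
 *	dsa = scr_to_cpu(np->dqueue[i]);	read the flag word
 *	if (!dsa)
 *		break;				nothing more completed
 *	np->dqueue[i] = 0;			consume the entry
 *	cp = sym_ccb_from_dsa(np, dsa);
 *	MEMORY_BARRIER();			flag read before data
 *	sym_complete_ok(np, cp);		now safe to use the data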
 */
static int sym_wakeup_done (hcb_p np)
{
	ccb_p cp;
	int i, n;
	u32 dsa;

	SYM_LOCK_ASSERT(MA_OWNED);

	n = 0;
	i = np->dqueueget;
	while (1) {
		dsa = scr_to_cpu(np->dqueue[i]);
		if (!dsa)
			break;
		np->dqueue[i] = 0;
		if ((i = i+2) >= MAX_QUEUE*2)
			i = 0;

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp) {
			MEMORY_BARRIER();
			sym_complete_ok (np, cp);
			++n;
		}
		else
			printf ("%s: bad DSA (%x) in done queue.\n",
				sym_name(np), (u_int) dsa);
	}
	np->dqueueget = i;

	return n;
}

/*
 * Complete all active CCBs with error.
 * Used on CHIP/SCSI RESET.
 */
static void sym_flush_busy_queue (hcb_p np, int cam_status)
{
	/*
	 * Move all active CCBs to the COMP queue
	 * and flush this queue.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_flush_comp_queue(np, cam_status);
}

/*
 * Start chip.
 *
 * 'reason' means:
 *   0: initialisation.
 *   1: SCSI BUS RESET delivered or received.
 *   2: SCSI BUS MODE changed.
 */
static void sym_init (hcb_p np, int reason)
{
	int i;
	u32 phys;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Reset chip if asked, otherwise just clear fifos.
	 */
	if (reason == 1)
		sym_soft_reset(np);
	else {
		OUTB (nc_stest3, TE|CSF);
		OUTONB (nc_ctest3, CLF);
	}

	/*
	 * Clear Start Queue
	 */
	phys = np->squeue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->squeue[i]   = cpu_to_scr(np->idletask_ba);
		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 * Start at first entry.
	 */
	np->squeueput = 0;

	/*
	 * Clear Done Queue
	 */
	phys = np->dqueue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->dqueue[i]   = 0;
		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 * Start at first entry.
	 */
	np->dqueueget = 0;

	/*
	 * Install patches in scripts.
	 * This also points the start and done queue pointers
	 * used from SCRIPTS at their first entry.
	 */
	np->fw_patch(np);

	/*
	 * Wake up all pending jobs.
	 */
	sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET);

	/*
	 * Init chip.
	 */
	OUTB (nc_istat,  0x00);			/* Remove Reset, abort */
	UDELAY (2000);	/* The 895 needs time for the bus mode to settle */
	OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
					/* full arb., ena parity, par->ATN */
	OUTB (nc_scntl1, 0x00);		/* odd parity, and remove CRST!! */
	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */
	OUTB (nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
	OUTW (nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
	OUTB (nc_istat , SIGP);			/* Signal Process */
	OUTB (nc_dmode , np->rv_dmode);		/* Burst length, dma mode */
	OUTB (nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */
	OUTB (nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
	OUTB (nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
	OUTB (nc_ctest4, np->rv_ctest4);	/* Master parity checking */

	/* Extended Sreq/Sack filtering not supported on the C10 */
	if (np->features & FE_C10)
		OUTB (nc_stest2, np->rv_stest2);
	else
		OUTB (nc_stest2, EXT|np->rv_stest2);

	OUTB (nc_stest3, TE);			/* TolerANT enable */
	OUTB (nc_stime0, 0x0c);			/* HTH disabled, STO 0.25 sec */

	/*
	 * For now, disable AIP generation on C1010-66.
	 */
	if (np->device_id == PCI_ID_LSI53C1010_2)
		OUTB (nc_aipcntl1, DISAIP);

	/*
	 * C1010 Errata.
	 * Errant SGEs when in narrow. Write bits 4 & 5 of
	 * STEST1 register to disable SGE. We probably should do
	 * that from SCRIPTS for each selection/reselection, but
	 * I just don't want. :)
	 */
	if (np->device_id == PCI_ID_LSI53C1010 &&
	    /* np->revision_id < 0xff */ 1)
		OUTB (nc_stest1, INB(nc_stest1) | 0x30);

	/*
	 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
 * Disable overlapped arbitration for some dual function devices,
 * regardless of revision id (kind of post-chip-design feature. ;-))
 */
	if (np->device_id == PCI_ID_SYM53C875)
		OUTB (nc_ctest0, (1<<5));
	else if (np->device_id == PCI_ID_SYM53C896)
		np->rv_ccntl0 |= DPR;

	/*
	 * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
	 * and/or hardware phase mismatch, since only such chips
	 * seem to support those IO registers.
	 */
	if (np->features & (FE_DAC|FE_NOPM)) {
		OUTB (nc_ccntl0, np->rv_ccntl0);
		OUTB (nc_ccntl1, np->rv_ccntl1);
	}

	/*
	 * If phase mismatch handled by scripts (895A/896/1010),
	 * set PM jump addresses.
	 */
	if (np->features & FE_NOPM) {
		OUTL (nc_pmjad1, SCRIPTB_BA (np, pm_handle));
		OUTL (nc_pmjad2, SCRIPTB_BA (np, pm_handle));
	}

	/*
	 * Enable GPIO0 pin for writing if LED support from SCRIPTS.
	 * Also set GPIO5 and clear GPIO6 if hardware LED control.
	 */
	if (np->features & FE_LED0)
		OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
	else if (np->features & FE_LEDC)
		OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);

	/*
	 * Enable interrupts.
	 */
	OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
	OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);

	/*
	 * For 895/6 enable SBMC interrupt and save current SCSI bus mode.
	 * Try to eat the spurious SBMC interrupt that may occur when
	 * we reset the chip but not the SCSI BUS (at initialization).
	 */
	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
		OUTONW (nc_sien, SBMC);
		if (reason == 0) {
			MDELAY(100);
			INW (nc_sist);
		}
		np->scsi_mode = INB (nc_stest4) & SMODE;
	}

	/*
	 * Fill in target structure.
	 * Reinitialize usrsync.
	 * Reinitialize usrwide.
	 * Prepare sync negotiation according to actual SCSI bus mode.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		tcb_p tp = &np->target[i];

		tp->to_reset  = 0;
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;

		tp->tinfo.current.period  = 0;
		tp->tinfo.current.offset  = 0;
		tp->tinfo.current.width   = BUS_8_BIT;
		tp->tinfo.current.options = 0;
	}

	/*
	 * Download SCSI SCRIPTS to on-chip RAM if present,
	 * and start script processor.
	 */
	if (np->ram_ba) {
		if (sym_verbose > 1)
			printf ("%s: Downloading SCSI SCRIPTS.\n",
				sym_name(np));
		if (np->ram_ws == 8192) {
			OUTRAM_OFF(4096, np->scriptb0, np->scriptb_sz);
			OUTL (nc_mmws, np->scr_ram_seg);
			OUTL (nc_mmrs, np->scr_ram_seg);
			OUTL (nc_sfs,  np->scr_ram_seg);
			phys = SCRIPTB_BA (np, start64);
		}
		else
			phys = SCRIPTA_BA (np, init);
		OUTRAM_OFF(0, np->scripta0, np->scripta_sz);
	}
	else
		phys = SCRIPTA_BA (np, init);

	np->istat_sem = 0;

	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (phys);

	/*
	 * Notify the XPT about the RESET condition.
	 */
	if (reason != 0)
		xpt_async(AC_BUS_RESET, np->path, NULL);
}

/*
 * Get clock factor and sync divisor for a given
 * synchronous factor period.
 */
static int
sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
{
	u32	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
	int	div = np->clock_divn;	/* Number of divisors supported	*/
	u32	fak;			/* Sync factor in sxfer		*/
	u32	per;			/* Period in tenths of ns	*/
	u32	kpc;			/* (per * clk)			*/
	int	ret;

	/*
	 * Compute the synchronous period in tenths of nano-seconds
	 */
	if (dt && sfac <= 9)	per = 125;
	else if	(sfac <= 10)	per = 250;
	else if	(sfac == 11)	per = 303;
	else if	(sfac == 12)	per = 500;
	else			per = 40 * sfac;
	ret = per;

	kpc = per * clk;
	if (dt)
		kpc <<= 1;

	/*
	 * For the earliest C10 revision 0, we cannot use extra
	 * clocks for the setting of the SCSI clocking.
	 * Note that this limits the lowest sync data transfer
	 * to 5 Mega-transfers per second and may result in
	 * using higher clock divisors.
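 *
 * As a worked example of this revision 0 path, numbers only: a DT
 * factor of 9 on a 160000 kHz clock gives per = 125 and
 * kpc = 2 * 125 * 160000 = 40000000, so the scan below settles on
 * div = 0 (the divide-by-1 entry, div_10M[0]) with fak = 0, and
 * (2 + 0) * 10000000 / 160000 = 125 tenths of ns, i.e. exactly the
 * requested 12.5 ns DT period.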
*/ #if 1 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { /* * Look for the lowest clock divisor that allows an * output speed not faster than the period. */ while (div > 0) { --div; if (kpc > (div_10M[div] << 2)) { ++div; break; } } fak = 0; /* No extra clocks */ if (div == np->clock_divn) { /* Are we too fast ? */ ret = -1; } *divp = div; *fakp = fak; return ret; } #endif /* * Look for the greatest clock divisor that allows an * input speed faster than the period. */ while (div-- > 0) if (kpc >= (div_10M[div] << 2)) break; /* * Calculate the lowest clock factor that allows an output * speed not faster than the period, and the max output speed. * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. */ if (dt) { fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ } else { fak = (kpc - 1) / div_10M[div] + 1 - 4; /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ } /* * Check against our hardware limits, or bugs :). */ if (fak > 2) {fak = 2; ret = -1;} /* * Compute and return sync parameters. */ *divp = div; *fakp = fak; return ret; } /* * Tell the SCSI layer about the new transfer parameters. */ static void sym_xpt_async_transfer_neg(hcb_p np, int target, u_int spi_valid) { struct ccb_trans_settings cts; struct cam_path *path; int sts; tcb_p tp = &np->target[target]; sts = xpt_create_path(&path, NULL, cam_sim_path(np->sim), target, CAM_LUN_WILDCARD); if (sts != CAM_REQ_CMP) return; bzero(&cts, sizeof(cts)); #define cts__scsi (cts.proto_specific.scsi) #define cts__spi (cts.xport_specific.spi) cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.protocol = PROTO_SCSI; cts.transport = XPORT_SPI; cts.protocol_version = tp->tinfo.current.scsi_version; cts.transport_version = tp->tinfo.current.spi_version; cts__spi.valid = spi_valid; if (spi_valid & CTS_SPI_VALID_SYNC_RATE) cts__spi.sync_period = tp->tinfo.current.period; if (spi_valid & CTS_SPI_VALID_SYNC_OFFSET) cts__spi.sync_offset = tp->tinfo.current.offset; if (spi_valid & CTS_SPI_VALID_BUS_WIDTH) cts__spi.bus_width = tp->tinfo.current.width; if (spi_valid & CTS_SPI_VALID_PPR_OPTIONS) cts__spi.ppr_options = tp->tinfo.current.options; #undef cts__spi #undef cts__scsi xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, path, &cts); xpt_free_path(path); } #define SYM_SPI_VALID_WDTR \ CTS_SPI_VALID_BUS_WIDTH | \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET #define SYM_SPI_VALID_SDTR \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET #define SYM_SPI_VALID_PPR \ CTS_SPI_VALID_PPR_OPTIONS | \ CTS_SPI_VALID_BUS_WIDTH | \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET /* * We received a WDTR. * Let everything be aware of the changes. */ static void sym_setwide(hcb_p np, ccb_p cp, u_char wide) { tcb_p tp = &np->target[cp->target]; sym_settrans(np, cp, 0, 0, 0, wide, 0, 0); /* * Tell the SCSI layer about the new transfer parameters. */ tp->tinfo.goal.width = tp->tinfo.current.width = wide; tp->tinfo.current.offset = 0; tp->tinfo.current.period = 0; tp->tinfo.current.options = 0; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_WDTR); } /* * We received a SDTR. * Let everything be aware of the changes. */ static void sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak) { tcb_p tp = &np->target[cp->target]; u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0; sym_settrans(np, cp, 0, ofs, per, wide, div, fak); /* * Tell the SCSI layer about the new transfer parameters. 
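 *
 * (The div/fak pair received by this routine is typically computed
 * by sym_getsync() above. As a worked example, numbers only: an ST
 * factor of 10 on a 160000 kHz clock gives per = 250 and
 * kpc = 40000000, which selects div = 0 and
 * fak = (kpc - 1) / div_10M[0] + 1 - 4 = 0, reproducing
 * (4 + 0) * 10000000 / 160000 = 250 tenths of ns, i.e. the 25 ns
 * Ultra2 period.)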
*/ tp->tinfo.goal.period = tp->tinfo.current.period = per; tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; tp->tinfo.goal.options = tp->tinfo.current.options = 0; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_SDTR); } /* * We received a PPR. * Let everything be aware of the changes. */ static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { tcb_p tp = &np->target[cp->target]; sym_settrans(np, cp, dt, ofs, per, wide, div, fak); /* * Tell the SCSI layer about the new transfer parameters. */ tp->tinfo.goal.width = tp->tinfo.current.width = wide; tp->tinfo.goal.period = tp->tinfo.current.period = per; tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; tp->tinfo.goal.options = tp->tinfo.current.options = dt; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_PPR); } /* * Switch trans mode for current job and it's target. */ static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { SYM_QUEHEAD *qp; union ccb *ccb; tcb_p tp; u_char target = INB (nc_sdid) & 0x0f; u_char sval, wval, uval; assert (cp); if (!cp) return; ccb = cp->cam_ccb; assert (ccb); if (!ccb) return; assert (target == (cp->target & 0xf)); tp = &np->target[target]; sval = tp->head.sval; wval = tp->head.wval; uval = tp->head.uval; #if 0 printf("XXXX sval=%x wval=%x uval=%x (%x)\n", sval, wval, uval, np->rv_scntl3); #endif /* * Set the offset. */ if (!(np->features & FE_C10)) sval = (sval & ~0x1f) | ofs; else sval = (sval & ~0x3f) | ofs; /* * Set the sync divisor and extra clock factor. */ if (ofs != 0) { wval = (wval & ~0x70) | ((div+1) << 4); if (!(np->features & FE_C10)) sval = (sval & ~0xe0) | (fak << 5); else { uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT); if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT); if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT); } } /* * Set the bus width. */ wval = wval & ~EWS; if (wide != 0) wval |= EWS; /* * Set misc. ultra enable bits. */ if (np->features & FE_C10) { uval = uval & ~(U3EN|AIPCKEN); if (dt) { assert(np->features & FE_U3EN); uval |= U3EN; } } else { wval = wval & ~ULTRA; if (per <= 12) wval |= ULTRA; } /* * Stop there if sync parameters are unchanged. */ if (tp->head.sval == sval && tp->head.wval == wval && tp->head.uval == uval) return; tp->head.sval = sval; tp->head.wval = wval; tp->head.uval = uval; /* * Disable extended Sreq/Sack filtering if per < 50. * Not supported on the C1010. */ if (per < 50 && !(np->features & FE_C10)) OUTOFFB (nc_stest2, EXT); /* * set actual value and sync_status */ OUTB (nc_sxfer, tp->head.sval); OUTB (nc_scntl3, tp->head.wval); if (np->features & FE_C10) { OUTB (nc_scntl4, tp->head.uval); } /* * patch ALL busy ccbs of this target. */ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->target != target) continue; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; if (np->features & FE_C10) { cp->phys.select.sel_scntl4 = tp->head.uval; } } } /* * log message for real hard errors * * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc). * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. * * exception register: * ds: dstat * si: sist * * SCSI bus lines: * so: control lines as driven by chip. * si: control lines as seen by chip. * sd: scsi data lines as seen by chip. * * wide/fastmode: * sxfer: (see the manual) * scntl3: (see the manual) * * current script command: * dsp: script address (relative to start of script). 
 * dbc: first word of script command.
 *
 * First 24 registers of the chip:
 * r0..rf
 */
static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
{
	u32	dsp;
	int	script_ofs;
	int	script_size;
	char	*script_name;
	u_char	*script_base;
	int	i;

	dsp = INL (nc_dsp);

	if (dsp > np->scripta_ba &&
	    dsp <= np->scripta_ba + np->scripta_sz) {
		script_ofs	= dsp - np->scripta_ba;
		script_size	= np->scripta_sz;
		script_base	= (u_char *) np->scripta0;
		script_name	= "scripta";
	}
	else if (np->scriptb_ba < dsp &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		script_ofs	= dsp - np->scriptb_ba;
		script_size	= np->scriptb_sz;
		script_base	= (u_char *) np->scriptb0;
		script_name	= "scriptb";
	} else {
		script_ofs	= dsp;
		script_size	= 0;
		script_base	= 0;
		script_name	= "mem";
	}

	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
		sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
		(unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl),
		(unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),
		(unsigned)INB (nc_scntl3), script_name, script_ofs,
		(unsigned)INL (nc_dbc));

	if (((script_ofs & 3) == 0) &&
	    (unsigned)script_ofs < script_size) {
		printf ("%s: script cmd = %08x\n", sym_name(np),
			scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
	}

	printf ("%s: regdump:", sym_name(np));
	for (i = 0; i < 24; i++)
		printf (" %02x", (unsigned)INB_OFF(i));
	printf (".\n");

	/*
	 * PCI BUS error, read the PCI status register.
	 */
	if (dstat & (MDPE|BF)) {
		u_short pci_sts;
		pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
		if (pci_sts & 0xf900) {
			pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
			printf("%s: PCI STATUS = 0x%04x\n",
				sym_name(np), pci_sts & 0xf900);
		}
	}
}

/*
 * Chip interrupt handler.
 *
 * In normal situations, interrupt conditions occur one at
 * a time. But when something bad happens on the SCSI BUS,
 * the chip may raise several interrupt flags before
 * stopping and interrupting the CPU. The additional
 * interrupt flags are stacked in some extra registers
 * after the SIP and/or DIP flag has been raised in the
 * ISTAT. After the CPU has read the interrupt condition
 * flag from SIST or DSTAT, the chip unstacks the other
 * interrupt flags and sets the corresponding bits in
 * SIST or DSTAT. Since the chip starts stacking once the
 * SIP or DIP flag is set, there is a small window of time
 * where the stacking does not occur.
 *
 * Typically, multiple interrupt conditions may happen in
 * the following situations:
 *
 * - SCSI parity error + Phase mismatch (PAR|MA)
 *   When a parity error is detected in input phase
 *   and the device switches to msg-in phase inside a
 *   block MOV.
 * - SCSI parity error + Unexpected disconnect (PAR|UDC)
 *   When a stupid device does not want to handle the
 *   recovery of a SCSI parity error.
 * - Some combinations of STO, PAR, UDC, ...
 *   When using non-compliant SCSI stuff, when the user is
 *   doing non-compliant hot tampering on the BUS, when
 *   something really bad happens to a device, etc ...
 *
 * The heuristic suggested by SYMBIOS to handle
 * multiple interrupts is to try unstacking all
 * interrupt conditions and to handle them with a
 * priority based on error severity.
 * This will work when the unstacking has been
 * successful, but we cannot be 100% sure of that,
 * since the CPU may have been faster to unstack than
 * the chip is able to stack. Hmmm ... But it seems that
 * such a situation is very unlikely to happen.
 *
 * If this happens, for example an STO caught by the CPU
 * and then a UDC happening before the CPU has restarted
 * the SCRIPTS, the driver may wrongly complete the
 * same command on UDC, since the SCRIPTS didn't restart
 * and the DSA still points to the same command.
 * We avoid this situation by setting the DSA to an
 * invalid value when the CCB is completed and before
 * restarting the SCRIPTS.
 *
 * Another issue is that we need some section of our
 * recovery procedures to be somehow uninterruptible but
 * the SCRIPTS processor does not provide such a
 * feature. For this reason, we prefer to handle recovery
 * from the C code and check against some SCRIPTS
 * critical sections from the C code.
 *
 * Hopefully, the interrupt handling of the driver is now
 * able to withstand weird BUS error conditions, but do not
 * ask me for any guarantee that it will never fail. :-)
 * Use at your own risk.
 */
static void sym_intr1 (hcb_p np)
{
	u_char	istat, istatc;
	u_char	dstat;
	u_short	sist;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * interrupt on the fly ?
	 *
	 * A `dummy read' is needed to ensure that the
	 * clear of the INTF flag reaches the device
	 * before the scanning of the DONE queue.
	 */
	istat = INB (nc_istat);
	if (istat & INTF) {
		OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
		istat = INB (nc_istat);		/* DUMMY READ */
		if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
		(void)sym_wakeup_done (np);
	}

	if (!(istat & (SIP|DIP)))
		return;

#if 0	/* We should never get this one */
	if (istat & CABRT)
		OUTB (nc_istat, CABRT);
#endif

	/*
	 * PAR and MA interrupts may occur at the same time,
	 * and we need to know of both in order to handle
	 * this situation properly. We try to unstack SCSI
	 * interrupts for that reason. BTW, I dislike a LOT
	 * such a loop inside the interrupt routine.
	 * Even if DMA interrupt stacking is very unlikely to
	 * happen, we also try unstacking these ones, since
	 * this has no performance impact.
	 */
	sist	= 0;
	dstat	= 0;
	istatc	= istat;
	do {
		if (istatc & SIP)
			sist  |= INW (nc_sist);
		if (istatc & DIP)
			dstat |= INB (nc_dstat);
		istatc = INB (nc_istat);
		istat |= istatc;
	} while (istatc & (SIP|DIP));

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("<%d|%x:%x|%x:%x>",
			(int)INB(nc_scr0),
			dstat, sist,
			(unsigned)INL(nc_dsp),
			(unsigned)INL(nc_dbc));

	/*
	 * On paper, a memory barrier may be needed here.
	 * And since we are paranoid ... :)
	 */
	MEMORY_BARRIER();

	/*
	 * First, interrupts we want to service cleanly.
	 *
	 * Phase mismatch (MA) is the most frequent interrupt
	 * for chips earlier than the 896 and so we have to service
	 * it as quickly as possible.
	 * A SCSI parity error (PAR) may be combined with a phase
	 * mismatch condition (MA).
	 * Programmed interrupts (SIR) are used to call the C code
	 * from SCRIPTS.
	 * The single step interrupt (SSI) is not used in this
	 * driver.
	 */
	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & PAR)	sym_int_par (np, sist);
		else if (sist & MA)	sym_int_ma (np);
		else if (dstat & SIR)	sym_int_sir (np);
		else if (dstat & SSI)	OUTONB_STD ();
		else			goto unknown_int;
		return;
	}

	/*
	 * Now, interrupts that do not happen in normal
	 * situations and that we may need to recover from.
	 *
	 * On SCSI RESET (RST), we reset everything.
	 * On SCSI BUS MODE CHANGE (SBMC), we complete all
	 * active CCBs with RESET status, prepare all devices
	 * for negotiating again and restart the SCRIPTS.
	 * On STO and UDC, we complete the CCB with the corres-
	 * ponding status and restart the SCRIPTS.
*/ if (sist & RST) { xpt_print_path(np->path); printf("SCSI BUS reset detected.\n"); sym_init (np, 1); return; } OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ if (!(sist & (GEN|HTH|SGE)) && !(dstat & (MDPE|BF|ABRT|IID))) { if (sist & SBMC) sym_int_sbmc (np); else if (sist & STO) sym_int_sto (np); else if (sist & UDC) sym_int_udc (np); else goto unknown_int; return; } /* * Now, interrupts we are not able to recover cleanly. * * Log message for hard errors. * Reset everything. */ sym_log_hard_error(np, sist, dstat); if ((sist & (GEN|HTH|SGE)) || (dstat & (MDPE|BF|ABRT|IID))) { sym_start_reset(np); return; } unknown_int: /* * We just miss the cause of the interrupt. :( * Print a message. The timeout will do the real work. */ printf( "%s: unknown interrupt(s) ignored, " "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", sym_name(np), istat, dstat, sist); } static void sym_intr(void *arg) { hcb_p np = arg; SYM_LOCK(); if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); sym_intr1((hcb_p) arg); if (DEBUG_FLAGS & DEBUG_TINY) printf ("]"); SYM_UNLOCK(); } static void sym_poll(struct cam_sim *sim) { sym_intr1(cam_sim_softc(sim)); } /* * generic recovery from scsi interrupt * * The doc says that when the chip gets an SCSI interrupt, * it tries to stop in an orderly fashion, by completing * an instruction fetch that had started or by flushing * the DMA fifo for a write to memory that was executing. * Such a fashion is not enough to know if the instruction * that was just before the current DSP value has been * executed or not. * * There are some small SCRIPTS sections that deal with * the start queue and the done queue that may break any * assomption from the C code if we are interrupted * inside, so we reset if this happens. Btw, since these * SCRIPTS sections are executed while the SCRIPTS hasn't * started SCSI operations, it is very unlikely to happen. * * All the driver data structures are supposed to be * allocated from the same 4 GB memory window, so there * is a 1 to 1 relationship between DSA and driver data * structures. Since we are careful :) to invalidate the * DSA when we complete a command or when the SCRIPTS * pushes a DSA into a queue, we can trust it when it * points to a CCB. */ static void sym_recover_scsi_int (hcb_p np, u_char hsts) { u32 dsp = INL (nc_dsp); u32 dsa = INL (nc_dsa); ccb_p cp = sym_ccb_from_dsa(np, dsa); /* * If we haven't been interrupted inside the SCRIPTS * critical pathes, we can safely restart the SCRIPTS * and trust the DSA value if it matches a CCB. */ if ((!(dsp > SCRIPTA_BA (np, getjob_begin) && dsp < SCRIPTA_BA (np, getjob_end) + 1)) && (!(dsp > SCRIPTA_BA (np, ungetjob) && dsp < SCRIPTA_BA (np, reselect) + 1)) && (!(dsp > SCRIPTB_BA (np, sel_for_abort) && dsp < SCRIPTB_BA (np, sel_for_abort_1) + 1)) && (!(dsp > SCRIPTA_BA (np, done) && dsp < SCRIPTA_BA (np, done_end) + 1))) { OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* * If we have a CCB, let the SCRIPTS call us back for * the handling of the error with SCRATCHA filled with * STARTPOS. This way, we will be able to freeze the * device queue and requeue awaiting IOs. */ if (cp) { cp->host_status = hsts; OUTL_DSP (SCRIPTA_BA (np, complete_error)); } /* * Otherwise just restart the SCRIPTS. 
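 *
 * (Loading DSA with an invalid value first, as done below, is the
 * trick described above sym_intr1(): a stale DSA can then never be
 * matched back to a CCB and completed twice. As a sketch of the
 * critical-region test at the top of this function, written as a
 * hypothetical helper:
 *
 *	static int sym_dsp_inside(u32 dsp, u32 beg, u32 end)
 *	{
 *		return dsp > beg && dsp < end + 1;
 *	}
 *
 * applied to the getjob, ungetjob/reselect, sel_for_abort and done
 * SCRIPTS regions.)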
*/ else { OUTL (nc_dsa, 0xffffff); OUTL_DSP (SCRIPTA_BA (np, start)); } } else goto reset_all; return; reset_all: sym_start_reset(np); } /* * chip exception handler for selection timeout */ static void sym_int_sto (hcb_p np) { u32 dsp = INL (nc_dsp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); if (dsp == SCRIPTA_BA (np, wf_sel_done) + 8) sym_recover_scsi_int(np, HS_SEL_TIMEOUT); else sym_start_reset(np); } /* * chip exception handler for unexpected disconnect */ static void sym_int_udc (hcb_p np) { printf ("%s: unexpected disconnect\n", sym_name(np)); sym_recover_scsi_int(np, HS_UNEXPECTED); } /* * chip exception handler for SCSI bus mode change * * spi2-r12 11.2.3 says a transceiver mode change must * generate a reset event and a device that detects a reset * event shall initiate a hard reset. It says also that a * device that detects a mode change shall set data transfer * mode to eight bit asynchronous, etc... * So, just reinitializing all except chip should be enough. */ static void sym_int_sbmc (hcb_p np) { u_char scsi_mode = INB (nc_stest4) & SMODE; /* * Notify user. */ xpt_print_path(np->path); printf("SCSI BUS mode change from %s to %s.\n", sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode)); /* * Should suspend command processing for a few seconds and * reinitialize all except the chip. */ sym_init (np, 2); } /* * chip exception handler for SCSI parity error. * * When the chip detects a SCSI parity error and is * currently executing a (CH)MOV instruction, it does * not interrupt immediately, but tries to finish the * transfer of the current scatter entry before * interrupting. The following situations may occur: * * - The complete scatter entry has been transferred * without the device having changed phase. * The chip will then interrupt with the DSP pointing * to the instruction that follows the MOV. * * - A phase mismatch occurs before the MOV finished * and phase errors are to be handled by the C code. * The chip will then interrupt with both PAR and MA * conditions set. * * - A phase mismatch occurs before the MOV finished and * phase errors are to be handled by SCRIPTS. * The chip will load the DSP with the phase mismatch * JUMP address and interrupt the host processor. */ static void sym_int_par (hcb_p np, u_short sist) { u_char hsts = INB (HS_PRT); u32 dsp = INL (nc_dsp); u32 dbc = INL (nc_dbc); u32 dsa = INL (nc_dsa); u_char sbcl = INB (nc_sbcl); u_char cmd = dbc >> 24; int phase = cmd & 7; ccb_p cp = sym_ccb_from_dsa(np, dsa); printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", sym_name(np), hsts, dbc, sbcl); /* * Check that the chip is connected to the SCSI BUS. */ if (!(INB (nc_scntl1) & ISCON)) { sym_recover_scsi_int(np, HS_UNEXPECTED); return; } /* * If the nexus is not clearly identified, reset the bus. * We will try to do better later. */ if (!cp) goto reset_all; /* * Check instruction was a MOV, direction was INPUT and * ATN is asserted. */ if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8)) goto reset_all; /* * Keep track of the parity error. */ OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_PARITY_ERR; /* * Prepare the message to send to the device. */ np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR; /* * If the old phase was DATA IN phase, we have to deal with * the 3 situations described above. * For other input phases (MSG IN and STATUS), the device * must resend the whole thing that failed parity checking * or signal error. So, jumping to dispatcher should be OK. 
*/ if (phase == 1 || phase == 5) { /* Phase mismatch handled by SCRIPTS */ if (dsp == SCRIPTB_BA (np, pm_handle)) OUTL_DSP (dsp); /* Phase mismatch handled by the C code */ else if (sist & MA) sym_int_ma (np); /* No phase mismatch occurred */ else { OUTL (nc_temp, dsp); OUTL_DSP (SCRIPTA_BA (np, dispatch)); } } else OUTL_DSP (SCRIPTA_BA (np, clrack)); return; reset_all: sym_start_reset(np); } /* * chip exception handler for phase errors. * * We have to construct a new transfer descriptor, * to transfer the rest of the current block. */ static void sym_int_ma (hcb_p np) { u32 dbc; u32 rest; u32 dsp; u32 dsa; u32 nxtdsp; u32 *vdsp; u32 oadr, olen; u32 *tblp; u32 newcmd; u_int delta; u_char cmd; u_char hflags, hflags0; struct sym_pmc *pm; ccb_p cp; dsp = INL (nc_dsp); dbc = INL (nc_dbc); dsa = INL (nc_dsa); cmd = dbc >> 24; rest = dbc & 0xffffff; delta = 0; /* * locate matching cp if any. */ cp = sym_ccb_from_dsa(np, dsa); /* * Donnot take into account dma fifo and various buffers in * INPUT phase since the chip flushes everything before * raising the MA interrupt for interrupted INPUT phases. * For DATA IN phase, we will check for the SWIDE later. */ if ((cmd & 7) != 1 && (cmd & 7) != 5) { u_char ss0, ss2; if (np->features & FE_DFBC) delta = INW (nc_dfbc); else { u32 dfifo; /* * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership. */ dfifo = INL(nc_dfifo); /* * Calculate remaining bytes in DMA fifo. * (CTEST5 = dfifo >> 16) */ if (dfifo & (DFS << 16)) delta = ((((dfifo >> 8) & 0x300) | (dfifo & 0xff)) - rest) & 0x3ff; else delta = ((dfifo & 0xff) - rest) & 0x7f; } /* * The data in the dma fifo has not been transferred to * the target -> add the amount to the rest * and clear the data. * Check the sstat2 register in case of wide transfer. */ rest += delta; ss0 = INB (nc_sstat0); if (ss0 & OLF) rest++; if (!(np->features & FE_C10)) if (ss0 & ORF) rest++; if (cp && (cp->phys.select.sel_scntl3 & EWS)) { ss2 = INB (nc_sstat2); if (ss2 & OLF1) rest++; if (!(np->features & FE_C10)) if (ss2 & ORF1) rest++; } /* * Clear fifos. */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */ OUTB (nc_stest3, TE|CSF); /* scsi fifo */ } /* * log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7, (unsigned) rest, (unsigned) delta); /* * try to find the interrupted script command, * and the address at which to continue. */ vdsp = 0; nxtdsp = 0; if (dsp > np->scripta_ba && dsp <= np->scripta_ba + np->scripta_sz) { vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8)); nxtdsp = dsp; } else if (dsp > np->scriptb_ba && dsp <= np->scriptb_ba + np->scriptb_sz) { vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8)); nxtdsp = dsp; } /* * log the information */ if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); } if (!vdsp) { printf ("%s: interrupted SCRIPT address not found.\n", sym_name (np)); goto reset_all; } if (!cp) { printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", sym_name (np)); goto reset_all; } /* * get old startaddress and old length. 
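 *
 * (Byte accounting so far: 'rest' started as the residual count from
 * DBC, then grew by the 'delta' bytes still sitting in the DMA FIFO
 * and by one byte for each latched OLF/ORF flag. Once the old
 * address and length are known, the phase-mismatch context is just
 *
 *	pm->sg.addr = oadr + olen - rest;	restart address
 *	pm->sg.size = rest;			bytes still to move
 *
 * which is exactly what gets stored a few lines below.)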
*/ oadr = scr_to_cpu(vdsp[1]); if (cmd & 0x10) { /* Table indirect */ tblp = (u32 *) ((char*) &cp->phys + oadr); olen = scr_to_cpu(tblp[0]); oadr = scr_to_cpu(tblp[1]); } else { tblp = (u32 *) 0; olen = scr_to_cpu(vdsp[0]) & 0xffffff; } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", (unsigned) (scr_to_cpu(vdsp[0]) >> 24), tblp, (unsigned) olen, (unsigned) oadr); } /* * check cmd against assumed interrupted script command. * If dt data phase, the MOVE instruction hasn't bit 4 of * the phase. */ if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) { PRINT_ADDR(cp); printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24); goto reset_all; } /* * if old phase not dataphase, leave here. */ if (cmd & 2) { PRINT_ADDR(cp); printf ("phase change %x-%x %d@%08x resid=%d.\n", cmd&7, INB(nc_sbcl)&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); goto unexpected_phase; } /* * Choose the correct PM save area. * * Look at the PM_SAVE SCRIPT if you want to understand * this stuff. The equivalent code is implemented in * SCRIPTS for the 895A, 896 and 1010 that are able to * handle PM from the SCRIPTS processor. */ hflags0 = INB (HF_PRT); hflags = hflags0; if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) { if (hflags & HF_IN_PM0) nxtdsp = scr_to_cpu(cp->phys.pm0.ret); else if (hflags & HF_IN_PM1) nxtdsp = scr_to_cpu(cp->phys.pm1.ret); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; } if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; newcmd = SCRIPTA_BA (np, pm0_data); } else { pm = &cp->phys.pm1; newcmd = SCRIPTA_BA (np, pm1_data); } hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED); if (hflags != hflags0) OUTB (HF_PRT, hflags); /* * fillin the phase mismatch context */ pm->sg.addr = cpu_to_scr(oadr + olen - rest); pm->sg.size = cpu_to_scr(rest); pm->ret = cpu_to_scr(nxtdsp); /* * If we have a SWIDE, * - prepare the address to write the SWIDE from SCRIPTS, * - compute the SCRIPTS address to restart from, * - move current data pointer context by one byte. */ nxtdsp = SCRIPTA_BA (np, dispatch); if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) && (INB (nc_scntl2) & WSR)) { u32 tmp; /* * Set up the table indirect for the MOVE * of the residual byte and adjust the data * pointer context. */ tmp = scr_to_cpu(pm->sg.addr); cp->phys.wresid.addr = cpu_to_scr(tmp); pm->sg.addr = cpu_to_scr(tmp + 1); tmp = scr_to_cpu(pm->sg.size); cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1); pm->sg.size = cpu_to_scr(tmp - 1); /* * If only the residual byte is to be moved, * no PM context is needed. */ if ((tmp&0xffffff) == 1) newcmd = pm->ret; /* * Prepare the address of SCRIPTS that will * move the residual byte to memory. */ nxtdsp = SCRIPTB_BA (np, wsr_ma_helper); } if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp); printf ("PM %x %x %x / %x %x %x.\n", hflags0, hflags, newcmd, (unsigned)scr_to_cpu(pm->sg.addr), (unsigned)scr_to_cpu(pm->sg.size), (unsigned)scr_to_cpu(pm->ret)); } /* * Restart the SCRIPTS processor. */ OUTL (nc_temp, newcmd); OUTL_DSP (nxtdsp); return; /* * Unexpected phase changes that occurs when the current phase * is not a DATA IN or DATA OUT phase are due to error conditions. * Such event may only happen when the SCRIPTS is using a * multibyte SCSI MOVE. * * Phase change Some possible cause * * COMMAND --> MSG IN SCSI parity error detected by target. * COMMAND --> STATUS Bad command or refused by target. * MSG OUT --> MSG IN Message rejected by target. 
* MSG OUT --> COMMAND Bogus target that discards extended * negotiation messages. * * The code below does not care of the new phase and so * trusts the target. Why to annoy it ? * If the interrupted phase is COMMAND phase, we restart at * dispatcher. * If a target does not get all the messages after selection, * the code assumes blindly that the target discards extended * messages and clears the negotiation status. * If the target does not want all our response to negotiation, * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids * bloat for such a should_not_happen situation). * In all other situation, we reset the BUS. * Are these assumptions reasonnable ? (Wait and see ...) */ unexpected_phase: dsp -= 8; nxtdsp = 0; switch (cmd & 7) { case 2: /* COMMAND phase */ nxtdsp = SCRIPTA_BA (np, dispatch); break; #if 0 case 3: /* STATUS phase */ nxtdsp = SCRIPTA_BA (np, dispatch); break; #endif case 6: /* MSG OUT phase */ /* * If the device may want to use untagged when we want * tagged, we prepare an IDENTIFY without disc. granted, * since we will not be able to handle reselect. * Otherwise, we just don't care. */ if (dsp == SCRIPTA_BA (np, send_ident)) { if (cp->tag != NO_TAG && olen - rest <= 3) { cp->host_status = HS_BUSY; np->msgout[0] = M_IDENTIFY | cp->lun; nxtdsp = SCRIPTB_BA (np, ident_break_atn); } else nxtdsp = SCRIPTB_BA (np, ident_break); } else if (dsp == SCRIPTB_BA (np, send_wdtr) || dsp == SCRIPTB_BA (np, send_sdtr) || dsp == SCRIPTB_BA (np, send_ppr)) { nxtdsp = SCRIPTB_BA (np, nego_bad_phase); } break; #if 0 case 7: /* MSG IN phase */ nxtdsp = SCRIPTA_BA (np, clrack); break; #endif } if (nxtdsp) { OUTL_DSP (nxtdsp); return; } reset_all: sym_start_reset(np); } /* * Dequeue from the START queue all CCBs that match * a given target/lun/task condition (-1 means all), * and move them from the BUSY queue to the COMP queue * with CAM_REQUEUE_REQ status condition. * This function is used during error handling/recovery. * It is called with SCRIPTS not running. */ static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task) { int j; ccb_p cp; /* * Make sure the starting index is within range. */ assert((i >= 0) && (i < 2*MAX_QUEUE)); /* * Walk until end of START queue and dequeue every job * that matches the target/lun/task condition. */ j = i; while (i != np->squeueput) { cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i])); assert(cp); #ifdef SYM_CONF_IARB_SUPPORT /* Forget hints for IARB, they may be no longer relevant */ cp->host_flags &= ~HF_HINT_IARB; #endif if ((target == -1 || cp->target == target) && (lun == -1 || cp->lun == lun) && (task == -1 || cp->tag == task)) { sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ); sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); } else { if (i != j) np->squeue[j] = np->squeue[i]; if ((j += 2) >= MAX_QUEUE*2) j = 0; } if ((i += 2) >= MAX_QUEUE*2) i = 0; } if (i != j) /* Copy back the idle task if needed */ np->squeue[j] = np->squeue[i]; np->squeueput = j; /* Update our current start queue pointer */ return (i - j) / 2; } /* * Complete all CCBs queued to the COMP queue. * * These CCBs are assumed: * - Not to be referenced either by devices or * SCRIPTS-related queues and datas. * - To have to be completed with an error condition * or requeued. * * The device queue freeze count is incremented * for each CCB that does not prevent this. * This function is called when all CCBs involved * in error handling/recovery have been reaped. 
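 *
 * An aside on the START queue walked by sym_dequeue_from_squeue()
 * above: each queue slot is two 32-bit words (the CCB bus address
 * followed by a companion word), which is why both indices step by
 * 2 and wrap at MAX_QUEUE*2. A sketch of the compaction performed,
 * with hypothetical slot contents:
 *
 *	before:	[cpA][--][cpB][--][cpC][--]	squeueput past cpC
 *	match:	cpB only (target/lun/task)
 *	after:	[cpA][--][cpC][--]		cpB moved to COMP queue
 *
 * Non-matching entries slide down so the SCRIPTS processor still
 * sees a dense queue when it is restarted.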
 */
static void sym_flush_comp_queue(hcb_p np, int cam_status)
{
	SYM_QUEHEAD *qp;
	ccb_p cp;

	while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
		union ccb *ccb;
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
		/* Leave quiet CCBs waiting for resources */
		if (cp->host_status == HS_WAIT)
			continue;
		ccb = cp->cam_ccb;
		if (cam_status)
			sym_set_cam_status(ccb, cam_status);
		sym_freeze_cam_ccb(ccb);
		sym_xpt_done(np, ccb, cp);
		sym_free_ccb(np, cp);
	}
}

/*
 * chip handler for bad SCSI status condition
 *
 * In case of bad SCSI status, we unqueue all the tasks
 * currently queued to the controller but not yet started
 * and then restart the SCRIPTS processor immediately.
 *
 * QUEUE FULL and BUSY conditions are handled the same way.
 * Basically all the not yet started tasks are requeued in
 * the device queue and the queue is frozen until a completion.
 *
 * For CHECK CONDITION and COMMAND TERMINATED status, we use
 * the CCB of the failed command to prepare a REQUEST SENSE
 * SCSI command and queue it to the controller queue.
 *
 * SCRATCHA is assumed to have been loaded with STARTPOS
 * before the SCRIPTS called the C code.
 */
static void sym_sir_bad_scsi_status(hcb_p np, ccb_p cp)
{
	tcb_p tp = &np->target[cp->target];
	u32 startp;
	u_char s_status = cp->ssss_status;
	u_char h_flags = cp->host_flags;
	int msglen;
	int nego;
	int i;

	SYM_LOCK_ASSERT(MA_OWNED);

	/*
	 * Compute the index of the next job to start from SCRIPTS.
	 */
	i = (INL (nc_scratcha) - np->squeue_ba) / 4;

	/*
	 * The last CCB queued used for IARB hint may be
	 * no longer relevant. Forget it.
	 */
#ifdef SYM_CONF_IARB_SUPPORT
	if (np->last_cp)
		np->last_cp = NULL;
#endif

	/*
	 * Now deal with the SCSI status.
	 */
	switch(s_status) {
	case S_BUSY:
	case S_QUEUE_FULL:
		if (sym_verbose >= 2) {
			PRINT_ADDR(cp);
			printf (s_status == S_BUSY ?
				"BUSY\n" : "QUEUE FULL\n");
		}
		/* FALLTHROUGH */
	default:	/* S_INT, S_INT_COND_MET, S_CONFLICT */
		sym_complete_error (np, cp);
		break;
	case S_TERMINATED:
	case S_CHECK_COND:
		/*
		 * If we get a SCSI error when requesting sense, give up.
		 */
		if (h_flags & HF_SENSE) {
			sym_complete_error (np, cp);
			break;
		}

		/*
		 * Dequeue all queued CCBs for that device not yet started,
		 * and restart the SCRIPTS processor immediately.
		 */
		(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
		OUTL_DSP (SCRIPTA_BA (np, start));

		/*
		 * Save some info about the actual IO.
		 * Compute the data residual.
		 */
		cp->sv_scsi_status = cp->ssss_status;
		cp->sv_xerr_status = cp->xerr_status;
		cp->sv_resid = sym_compute_residual(np, cp);

		/*
		 * Prepare all needed data structures for
		 * requesting sense data.
		 */

		/*
		 * identify message
		 */
		cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun;
		msglen = 1;

		/*
		 * If we are currently using anything different from
		 * async. 8 bit data transfers with that target,
		 * start a negotiation, since the device may want
		 * to report us a UNIT ATTENTION condition due to
		 * a cause we currently ignore, and we do not want
		 * to be stuck with WIDE and/or SYNC data transfer.
		 *
		 * cp->nego_status is filled by sym_prepare_nego().
		 */
		cp->nego_status = 0;
		nego = 0;
		if (tp->tinfo.current.options & PPR_OPT_MASK)
			nego = NS_PPR;
		else if (tp->tinfo.current.width != BUS_8_BIT)
			nego = NS_WIDE;
		else if (tp->tinfo.current.offset != 0)
			nego = NS_SYNC;
		if (nego)
			msglen += sym_prepare_nego (np, cp, nego,
			    &cp->scsi_smsg2[msglen]);

		/*
		 * Message table indirect structure.
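 *
 * For clarity, the auto-sense request built below boils down to a
 * plain 6-byte REQUEST SENSE CDB bounced through cp->sns_bbuf (a
 * sketch of the bytes, mirroring the sensecmd[] setup that follows;
 * nothing here is in addition to the driver code):
 *
 *	cdb[0] = 0x03;			REQUEST SENSE opcode
 *	cdb[1] = lun << 5;		pre-SCSI-3 LUN encoding, zeroed
 *					for SCSI-3 devices or lun > 7
 *	cdb[2] = cdb[3] = 0;
 *	cdb[4] = SYM_SNS_BBUF_LEN;	allocation length
 *	cdb[5] = 0;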
*/ cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg2)); cp->phys.smsg.size = cpu_to_scr(msglen); /* * sense command */ cp->phys.cmd.addr = cpu_to_scr(CCB_BA (cp, sensecmd)); cp->phys.cmd.size = cpu_to_scr(6); /* * patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = cp->lun << 5; if (tp->tinfo.current.scsi_version > 2 || cp->lun > 7) cp->sensecmd[1] = 0; cp->sensecmd[4] = SYM_SNS_BBUF_LEN; cp->data_len = SYM_SNS_BBUF_LEN; /* * sense data */ bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN); cp->phys.sense.addr = cpu_to_scr(vtobus(cp->sns_bbuf)); cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); /* * requeue the command. */ startp = SCRIPTB_BA (np, sdata_in); cp->phys.head.savep = cpu_to_scr(startp); cp->phys.head.goalp = cpu_to_scr(startp + 16); cp->phys.head.lastp = cpu_to_scr(startp); cp->startp = cpu_to_scr(startp); cp->actualquirks = SYM_QUIRK_AUTOSAVE; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->host_flags = (HF_SENSE|HF_DATA_IN); cp->xerr_status = 0; cp->extra_bytes = 0; cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); /* * Requeue the command. */ sym_put_start_queue(np, cp); /* * Give back to upper layer everything we have dequeued. */ sym_flush_comp_queue(np, 0); break; } } /* * After a device has accepted some management message * as BUS DEVICE RESET, ABORT TASK, etc ..., or when * a device signals a UNIT ATTENTION condition, some * tasks are thrown away by the device. We are required * to reflect that on our tasks list since the device * will never complete these tasks. * * This function move from the BUSY queue to the COMP * queue all disconnected CCBs for a given target that * match the following criteria: * - lun=-1 means any logical UNIT otherwise a given one. * - task=-1 means any task, otherwise a given one. */ static int sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task) { SYM_QUEHEAD qtmp, *qp; int i = 0; ccb_p cp; /* * Move the entire BUSY queue to our temporary queue. */ sym_que_init(&qtmp); sym_que_splice(&np->busy_ccbq, &qtmp); sym_que_init(&np->busy_ccbq); /* * Put all CCBs that matches our criteria into * the COMP queue and put back other ones into * the BUSY queue. */ while ((qp = sym_remque_head(&qtmp)) != NULL) { union ccb *ccb; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); ccb = cp->cam_ccb; if (cp->host_status != HS_DISCONNECT || cp->target != target || (lun != -1 && cp->lun != lun) || (task != -1 && (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); continue; } sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); /* Preserve the software timeout condition */ if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT) sym_set_cam_status(ccb, cam_status); ++i; #if 0 printf("XXXX TASK @%p CLEARED\n", cp); #endif } return i; } /* * chip handler for TASKS recovery * * We cannot safely abort a command, while the SCRIPTS * processor is running, since we just would be in race * with it. * * As long as we have tasks to abort, we keep the SEM * bit set in the ISTAT. When this bit is set, the * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) * each time it enters the scheduler. * * If we have to reset a target, clear tasks of a unit, * or to perform the abort of a disconnected job, we * restart the SCRIPTS for selecting the target. Once * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). * If it loses arbitration, the SCRIPTS will interrupt again * the next time it will enter its scheduler, and so on ... 
* * On SIR_TARGET_SELECTED, we scan for the more * appropriate thing to do: * * - If nothing, we just sent a M_ABORT message to the * target to get rid of the useless SCSI bus ownership. * According to the specs, no tasks shall be affected. * - If the target is to be reset, we send it a M_RESET * message. * - If a logical UNIT is to be cleared , we send the * IDENTIFY(lun) + M_ABORT. * - If an untagged task is to be aborted, we send the * IDENTIFY(lun) + M_ABORT. * - If a tagged task is to be aborted, we send the * IDENTIFY(lun) + task attributes + M_ABORT_TAG. * * Once our 'kiss of death' :) message has been accepted * by the target, the SCRIPTS interrupts again * (SIR_ABORT_SENT). On this interrupt, we complete * all the CCBs that should have been aborted by the * target according to our message. */ static void sym_sir_task_recovery(hcb_p np, int num) { SYM_QUEHEAD *qp; ccb_p cp; tcb_p tp; int target=-1, lun=-1, task; int i, k; switch(num) { /* * The SCRIPTS processor stopped before starting * the next command in order to allow us to perform * some task recovery. */ case SIR_SCRIPT_STOPPED: /* * Do we have any target to reset or unit to clear ? */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { tp = &np->target[i]; if (tp->to_reset || (tp->lun0p && tp->lun0p->to_clear)) { target = i; break; } if (!tp->lunmp) continue; for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { target = i; break; } } if (target != -1) break; } /* * If not, walk the busy queue for any * disconnected CCB to be aborted. */ if (target == -1) { FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); if (cp->host_status != HS_DISCONNECT) continue; if (cp->to_abort) { target = cp->target; break; } } } /* * If some target is to be selected, * prepare and start the selection. */ if (target != -1) { tp = &np->target[target]; np->abrt_sel.sel_id = target; np->abrt_sel.sel_scntl3 = tp->head.wval; np->abrt_sel.sel_sxfer = tp->head.sval; OUTL(nc_dsa, np->hcb_ba); OUTL_DSP (SCRIPTB_BA (np, sel_for_abort)); return; } /* * Now look for a CCB to abort that haven't started yet. * Btw, the SCRIPTS processor is still stopped, so * we are not in race. */ i = 0; cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->host_status != HS_BUSY && cp->host_status != HS_NEGOTIATE) continue; if (!cp->to_abort) continue; #ifdef SYM_CONF_IARB_SUPPORT /* * If we are using IMMEDIATE ARBITRATION, we donnot * want to cancel the last queued CCB, since the * SCRIPTS may have anticipated the selection. */ if (cp == np->last_cp) { cp->to_abort = 0; continue; } #endif i = 1; /* Means we have found some */ break; } if (!i) { /* * We are done, so we donnot need * to synchronize with the SCRIPTS anylonger. * Remove the SEM flag from the ISTAT. */ np->istat_sem = 0; OUTB (nc_istat, SIGP); break; } /* * Compute index of next position in the start * queue the SCRIPTS intends to start and dequeue * all CCBs for that device that haven't been started. */ i = (INL (nc_scratcha) - np->squeue_ba) / 4; i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); /* * Make sure at least our IO to abort has been dequeued. */ assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ); /* * Keep track in cam status of the reason of the abort. */ if (cp->to_abort == 2) sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT); else sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED); /* * Complete with error everything that we have dequeued. 
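 *
 * The synchronization used throughout this recovery path, condensed
 * (illustrative only; these are the exact accessors the driver uses
 * elsewhere in this file):
 *
 *	np->istat_sem = SEM;
 *	OUTB (nc_istat, SIGP|SEM);	ask SCRIPTS to stop at its
 *					scheduler and interrupt us
 *	... SIR_SCRIPT_STOPPED fires, the C code runs here ...
 *	np->istat_sem = 0;
 *	OUTB (nc_istat, SIGP);		done, let SCRIPTS run freely
 *
 * SIGP wakes the SCRIPTS processor if it is waiting; SEM makes it
 * interrupt again each time it re-enters the scheduler.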
*/ sym_flush_comp_queue(np, 0); break; /* * The SCRIPTS processor has selected a target * we may have some manual recovery to perform for. */ case SIR_TARGET_SELECTED: target = (INB (nc_sdid) & 0xf); tp = &np->target[target]; np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg)); /* * If the target is to be reset, prepare a * M_RESET message and clear the to_reset flag * since we donnot expect this operation to fail. */ if (tp->to_reset) { np->abrt_msg[0] = M_RESET; np->abrt_tbl.size = 1; tp->to_reset = 0; break; } /* * Otherwise, look for some logical unit to be cleared. */ if (tp->lun0p && tp->lun0p->to_clear) lun = 0; else if (tp->lunmp) { for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { lun = k; break; } } } /* * If a logical unit is to be cleared, prepare * an IDENTIFY(lun) + ABORT MESSAGE. */ if (lun != -1) { lcb_p lp = sym_lp(tp, lun); lp->to_clear = 0; /* We donnot expect to fail here */ np->abrt_msg[0] = M_IDENTIFY | lun; np->abrt_msg[1] = M_ABORT; np->abrt_tbl.size = 2; break; } /* * Otherwise, look for some disconnected job to * abort for this target. */ i = 0; cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->host_status != HS_DISCONNECT) continue; if (cp->target != target) continue; if (!cp->to_abort) continue; i = 1; /* Means we have some */ break; } /* * If we have none, probably since the device has * completed the command before we won abitration, * send a M_ABORT message without IDENTIFY. * According to the specs, the device must just * disconnect the BUS and not abort any task. */ if (!i) { np->abrt_msg[0] = M_ABORT; np->abrt_tbl.size = 1; break; } /* * We have some task to abort. * Set the IDENTIFY(lun) */ np->abrt_msg[0] = M_IDENTIFY | cp->lun; /* * If we want to abort an untagged command, we * will send an IDENTIFY + M_ABORT. * Otherwise (tagged command), we will send * an IDENTIFY + task attributes + ABORT TAG. */ if (cp->tag == NO_TAG) { np->abrt_msg[1] = M_ABORT; np->abrt_tbl.size = 2; } else { np->abrt_msg[1] = cp->scsi_smsg[1]; np->abrt_msg[2] = cp->scsi_smsg[2]; np->abrt_msg[3] = M_ABORT_TAG; np->abrt_tbl.size = 4; } /* * Keep track of software timeout condition, since the * peripheral driver may not count retries on abort * conditions not due to timeout. */ if (cp->to_abort == 2) sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT); cp->to_abort = 0; /* We donnot expect to fail here */ break; /* * The target has accepted our message and switched * to BUS FREE phase as we expected. */ case SIR_ABORT_SENT: target = (INB (nc_sdid) & 0xf); tp = &np->target[target]; /* ** If we didn't abort anything, leave here. */ if (np->abrt_msg[0] == M_ABORT) break; /* * If we sent a M_RESET, then a hardware reset has * been performed by the target. * - Reset everything to async 8 bit * - Tell ourself to negotiate next time :-) * - Prepare to clear all disconnected CCBs for * this target from our task list (lun=task=-1) */ lun = -1; task = -1; if (np->abrt_msg[0] == M_RESET) { tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; tp->tinfo.current.width = BUS_8_BIT; tp->tinfo.current.options = 0; } /* * Otherwise, check for the LUN and TASK(s) * concerned by the cancelation. 
 * If it is not ABORT_TAG then it is CLEAR_QUEUE
 * or an ABORT message :-)
 */
		else {
			lun = np->abrt_msg[0] & 0x3f;
			if (np->abrt_msg[1] == M_ABORT_TAG)
				task = np->abrt_msg[2];
		}

		/*
		 * Complete all the CCBs the device should have
		 * aborted due to our 'kiss of death' message.
		 */
		i = (INL (nc_scratcha) - np->squeue_ba) / 4;
		(void) sym_dequeue_from_squeue(np, i, target, lun, -1);
		(void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task);
		sym_flush_comp_queue(np, 0);

		/*
		 * If we sent a BDR, make the upper layer aware of that.
		 */
		if (np->abrt_msg[0] == M_RESET)
			xpt_async(AC_SENT_BDR, np->path, NULL);
		break;
	}

	/*
	 * Print to the log the message we intend to send.
	 */
	if (num == SIR_TARGET_SELECTED) {
		PRINT_TARGET(np, target);
		sym_printl_hex("control msgout:", np->abrt_msg,
			      np->abrt_tbl.size);
		np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
	}

	/*
	 * Let the SCRIPTS processor continue.
	 */
	OUTONB_STD ();
}

/*
 * Gerard's alchemy:) that deals with the data
 * pointer for both MDP and the residual calculation.
 *
 * I didn't want to bloat the code by more than 200
 * lines for the handling of both MDP and the residual.
 * This has been achieved by using a data pointer
 * representation consisting of an index in the data
 * array (dp_sg) and a negative offset (dp_ofs) that
 * have the following meaning:
 *
 * - dp_sg = SYM_CONF_MAX_SG
 *   we are at the end of the data script.
 * - dp_sg < SYM_CONF_MAX_SG
 *   dp_sg points to the next entry of the scatter array
 *   we want to transfer.
 * - dp_ofs < 0
 *   dp_ofs represents the residual of bytes of the
 *   previous scatter entry we will send first.
 * - dp_ofs = 0
 *   no residual to send first.
 *
 * The function sym_evaluate_dp() accepts an arbitrary
 * offset (basically from the MDP message) and returns
 * the corresponding values of dp_sg and dp_ofs.
 */
static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs)
{
	u32	dp_scr;
	int	dp_ofs, dp_sg, dp_sgmin;
	int	tmp;
	struct sym_pmc *pm;

	/*
	 * Compute the resulting data pointer in terms of a script
	 * address within some DATA script and a signed byte offset.
	 */
	dp_scr = scr;
	dp_ofs = *ofs;
	if (dp_scr == SCRIPTA_BA (np, pm0_data))
		pm = &cp->phys.pm0;
	else if (dp_scr == SCRIPTA_BA (np, pm1_data))
		pm = &cp->phys.pm1;
	else
		pm = NULL;

	if (pm) {
		dp_scr  = scr_to_cpu(pm->ret);
		dp_ofs -= scr_to_cpu(pm->sg.size);
	}

	/*
	 * If we are auto-sensing, then we are done.
	 */
	if (cp->host_flags & HF_SENSE) {
		*ofs = dp_ofs;
		return 0;
	}

	/*
	 * Deduce the index of the sg entry.
	 * Keep track of the index of the first valid entry.
	 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
	 * end of the data.
	 */
	tmp = scr_to_cpu(cp->phys.head.goalp);
	dp_sg = SYM_CONF_MAX_SG;
	if (dp_scr != tmp)
		dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
	dp_sgmin = SYM_CONF_MAX_SG - cp->segments;

	/*
	 * Move to the sg entry the data pointer belongs to.
	 *
	 * If we are inside the data area, we expect the result to be:
	 *
	 * Either,
	 *     dp_ofs = 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to (or the end of the data)
	 * Or,
	 *     dp_ofs < 0 and dp_sg is the index of the sg entry
	 *     the data pointer belongs to + 1.
	 */
	if (dp_ofs < 0) {
		int n;
		while (dp_sg > dp_sgmin) {
			--dp_sg;
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			n = dp_ofs + (tmp & 0xffffff);
			if (n > 0) {
				++dp_sg;
				break;
			}
			dp_ofs = n;
		}
	}
	else if (dp_ofs > 0) {
		while (dp_sg < SYM_CONF_MAX_SG) {
			tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
			dp_ofs -= (tmp & 0xffffff);
			++dp_sg;
			if (dp_ofs <= 0)
				break;
		}
	}

	/*
	 * Make sure the data pointer is inside the data area.
	 * If not, return some error.
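 *
 * A worked example of this representation (numbers hypothetical):
 * with SYM_CONF_MAX_SG = 96 and a 3-segment transfer of
 * 2048 + 2048 + 512 bytes, the segments occupy indices 93..95 and
 * dp_sgmin = 93. A saved pointer 2560 bytes into the transfer
 * resolves to
 *
 *	dp_sg = 95, dp_ofs = -1536
 *
 * i.e. send the last 1536 bytes of segment 94 first, then continue
 * with segment 95. A pointer exactly 4096 bytes in (a segment
 * boundary) gives dp_sg = 95, dp_ofs = 0.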
*/ if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0)) goto out_err; else if (dp_sg > SYM_CONF_MAX_SG || (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0)) goto out_err; /* * Save the extreme pointer if needed. */ if (dp_sg > cp->ext_sg || (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { cp->ext_sg = dp_sg; cp->ext_ofs = dp_ofs; } /* * Return data. */ *ofs = dp_ofs; return dp_sg; out_err: return -1; } /* * chip handler for MODIFY DATA POINTER MESSAGE * * We also call this function on IGNORE WIDE RESIDUE * messages that do not match a SWIDE full condition. * Btw, we assume in that situation that such a message * is equivalent to a MODIFY DATA POINTER (offset=-1). */ static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs) { int dp_ofs = ofs; u32 dp_scr = INL (nc_temp); u32 dp_ret; u32 tmp; u_char hflags; int dp_sg; struct sym_pmc *pm; /* * Not supported for auto-sense. */ if (cp->host_flags & HF_SENSE) goto out_reject; /* * Apply our alchemy:) (see comments in sym_evaluate_dp()), * to the resulted data pointer. */ dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs); if (dp_sg < 0) goto out_reject; /* * And our alchemy:) allows to easily calculate the data * script address we want to return for the next data phase. */ dp_ret = cpu_to_scr(cp->phys.head.goalp); dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4); /* * If offset / scatter entry is zero we donnot need * a context for the new current data pointer. */ if (dp_ofs == 0) { dp_scr = dp_ret; goto out_ok; } /* * Get a context for the new current data pointer. */ hflags = INB (HF_PRT); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; dp_scr = SCRIPTA_BA (np, pm0_data); } else { pm = &cp->phys.pm1; dp_scr = SCRIPTA_BA (np, pm1_data); } hflags &= ~(HF_DP_SAVED); OUTB (HF_PRT, hflags); /* * Set up the new current data pointer. * ofs < 0 there, and for the next data phase, we * want to transfer part of the data of the sg entry * corresponding to index dp_sg-1 prior to returning * to the main data script. */ pm->ret = cpu_to_scr(dp_ret); tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr); tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs; pm->sg.addr = cpu_to_scr(tmp); pm->sg.size = cpu_to_scr(-dp_ofs); out_ok: OUTL (nc_temp, dp_scr); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; out_reject: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * chip calculation of the data residual. * * As I used to say, the requirement of data residual * in SCSI is broken, useless and cannot be achieved * without huge complexity. * But most OSes and even the official CAM require it. * When stupidity happens to be so widely spread inside * a community, it gets hard to convince. * * Anyway, I don't care, since I am not going to use * any software that considers this data residual as * a relevant information. :) */ static int sym_compute_residual(hcb_p np, ccb_p cp) { int dp_sg, dp_sgmin, resid = 0; int dp_ofs = 0; /* * Check for some data lost or just thrown away. * We are not required to be quite accurate in this * situation. Btw, if we are odd for output and the * device claims some more data, it may well happen * than our residual be zero. :-) */ if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { if (cp->xerr_status & XE_EXTRA_DATA) resid -= cp->extra_bytes; if (cp->xerr_status & XE_SODL_UNRUN) ++resid; if (cp->xerr_status & XE_SWIDE_OVRUN) --resid; } /* * If all data has been transferred, * there is no residual. 
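 *
 * Continuing the hypothetical example given above sym_evaluate_dp():
 * if the extreme pointer ended at ext_sg = 95, ext_ofs = -1536 for
 * segments of 2048 + 2048 + 512 bytes, the summation below yields
 *
 *	resid = 1536 + 512 = 2048
 *
 * which is exactly the number of bytes that never moved.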
*/ if (cp->phys.head.lastp == cp->phys.head.goalp) return resid; /* * If no data transfer occurs, or if the data * pointer is weird, return full residual. */ if (cp->startp == cp->phys.head.lastp || sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), &dp_ofs) < 0) { return cp->data_len; } /* * If we were auto-sensing, then we are done. */ if (cp->host_flags & HF_SENSE) { return -dp_ofs; } /* * We are now full comfortable in the computation * of the data residual (2's complement). */ dp_sgmin = SYM_CONF_MAX_SG - cp->segments; resid = -cp->ext_ofs; for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); resid += (tmp & 0xffffff); } /* * Hopefully, the result is not too wrong. */ return resid; } /* * Print out the content of a SCSI message. */ static int sym_show_msg (u_char * msg) { u_char i; printf ("%x",*msg); if (*msg==M_EXTENDED) { for (i=1;i<8;i++) { if (i-1>msg[1]) break; printf ("-%x",msg[i]); } return (i+1); } else if ((*msg & 0xf0) == 0x20) { printf ("-%x",msg[1]); return (2); } return (1); } static void sym_print_msg (ccb_p cp, char *label, u_char *msg) { PRINT_ADDR(cp); if (label) printf ("%s: ", label); (void) sym_show_msg (msg); printf (".\n"); } /* * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. * * When we try to negotiate, we append the negotiation message * to the identify and (maybe) simple tag message. * The host status field is set to HS_NEGOTIATE to mark this * situation. * * If the target doesn't answer this message immediately * (as required by the standard), the SIR_NEGO_FAILED interrupt * will be raised eventually. * The handler removes the HS_NEGOTIATE status, and sets the * negotiated value to the default (async / nowide). * * If we receive a matching answer immediately, we check it * for validity, and set the values. * * If we receive a Reject message immediately, we assume the * negotiation has failed, and fall back to standard values. * * If we receive a negotiation message while not in HS_NEGOTIATE * state, it's a target initiated negotiation. We prepare a * (hopefully) valid answer, set our parameters, and send back * this answer to the target. * * If the target doesn't fetch the answer (no message out phase), * we assume the negotiation has failed, and fall back to default * settings (SIR_NEGO_PROTO interrupt). * * When we set the values, we adjust them in all ccbs belonging * to this target, in the controller's register, and in the "phys" * field of the controller's struct sym_hcb. */ /* * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message. */ static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, ofs, per, fak, div; int req = 1; /* * Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgin", np->msgin); } /* * request or answer ? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_SYNC) goto reject_it; req = 0; } /* * get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; /* * check values against our limits. 
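 *
 * For reference, the SDTR message being parsed and answered here is
 * 5 bytes; the values shown are a plausible Fast-10 example, not
 * anything this driver hardcodes:
 *
 *	msg[0] = M_EXTENDED
 *	msg[1] = 3		extended message length
 *	msg[2] = M_X_SYNC_REQ	SDTR
 *	msg[3] = 25		period factor (25 * 4ns = 100ns)
 *	msg[4] = 15		REQ/ACK offset
 *
 * np->msgin[3] and np->msgin[4] read above are thus the target's
 * requested period and offset, clipped below against our chip and
 * user limits.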
*/ if (ofs) { if (ofs > np->maxoffs) {chg = 1; ofs = np->maxoffs;} if (req) { if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} } } if (ofs) { if (per < np->minsync) {chg = 1; per = np->minsync;} if (req) { if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} } } div = fak = 0; if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0) goto reject_it; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n", ofs, per, div, fak, chg); } /* * This was an answer message */ if (req == 0) { if (chg) /* Answer wasn't acceptable. */ goto reject_it; sym_setsync (np, cp, ofs, per, div, fak); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request. Set value and * prepare an answer message */ sym_setsync (np, cp, ofs, per, div, fak); np->msgout[0] = M_EXTENDED; np->msgout[1] = 3; np->msgout[2] = M_X_SYNC_REQ; np->msgout[3] = per; np->msgout[4] = ofs; cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgout", np->msgout); } np->msgin [0] = M_NOOP; OUTL_DSP (SCRIPTB_BA (np, sdtr_resp)); return; reject_it: sym_setsync (np, cp, 0, 0, 0, 0); OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message. */ static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, ofs, per, fak, dt, div, wide; int req = 1; /* * Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "ppr msgin", np->msgin); } /* * get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[5]; wide = np->msgin[6]; dt = np->msgin[7] & PPR_OPT_DT; /* * request or answer ? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_PPR) goto reject_it; req = 0; } /* * check values against our limits. */ if (wide > np->maxwide) {chg = 1; wide = np->maxwide;} if (!wide || !(np->features & FE_ULTRA3)) dt &= ~PPR_OPT_DT; if (req) { if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} } if (!(np->features & FE_U3EN)) /* Broken U3EN bit not supported */ dt &= ~PPR_OPT_DT; if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1; if (ofs) { if (dt) { if (ofs > np->maxoffs_dt) {chg = 1; ofs = np->maxoffs_dt;} } else if (ofs > np->maxoffs) {chg = 1; ofs = np->maxoffs;} if (req) { if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} } } if (ofs) { if (dt) { if (per < np->minsync_dt) {chg = 1; per = np->minsync_dt;} } else if (per < np->minsync) {chg = 1; per = np->minsync;} if (req) { if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} } } div = fak = 0; if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0) goto reject_it; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("ppr: " "dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n", dt, ofs, per, wide, div, fak, chg); } /* * It was an answer. */ if (req == 0) { if (chg) /* Answer wasn't acceptable */ goto reject_it; sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request. 
Set value and * prepare an answer message */ sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); np->msgout[0] = M_EXTENDED; np->msgout[1] = 6; np->msgout[2] = M_X_PPR_REQ; np->msgout[3] = per; np->msgout[4] = 0; np->msgout[5] = ofs; np->msgout[6] = wide; np->msgout[7] = dt; cp->nego_status = NS_PPR; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "ppr msgout", np->msgout); } np->msgin [0] = M_NOOP; OUTL_DSP (SCRIPTB_BA (np, ppr_resp)); return; reject_it: sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0); OUTL_DSP (SCRIPTB_BA (np, msg_bad)); /* * If it was a device response that should result in * ST, we may want to try a legacy negotiation later. */ if (!req && !dt) { tp->tinfo.goal.options = 0; tp->tinfo.goal.width = wide; tp->tinfo.goal.period = per; tp->tinfo.goal.offset = ofs; } } /* * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message. */ static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, wide; int req = 1; /* * Wide request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "wide msgin", np->msgin); } /* * Is it a request from the device? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_WIDE) goto reject_it; req = 0; } /* * get requested values. */ chg = 0; wide = np->msgin[3]; /* * check values against driver limits. */ if (wide > np->maxwide) {chg = 1; wide = np->maxwide;} if (req) { if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} } if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("wdtr: wide=%d chg=%d.\n", wide, chg); } /* * This was an answer message */ if (req == 0) { if (chg) /* Answer wasn't acceptable. */ goto reject_it; sym_setwide (np, cp, wide); /* * Negotiate for SYNC immediately after WIDE response. * This allows to negotiate for both WIDE and SYNC on * a single SCSI command (Suggested by Justin Gibbs). */ if (tp->tinfo.goal.offset) { np->msgout[0] = M_EXTENDED; np->msgout[1] = 3; np->msgout[2] = M_X_SYNC_REQ; np->msgout[3] = tp->tinfo.goal.period; np->msgout[4] = tp->tinfo.goal.offset; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgout", np->msgout); } cp->nego_status = NS_SYNC; OUTB (HS_PRT, HS_NEGOTIATE); OUTL_DSP (SCRIPTB_BA (np, sdtr_resp)); return; } OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request, set value and * prepare an answer message */ sym_setwide (np, cp, wide); np->msgout[0] = M_EXTENDED; np->msgout[1] = 2; np->msgout[2] = M_X_WIDE_REQ; np->msgout[3] = wide; np->msgin [0] = M_NOOP; cp->nego_status = NS_WIDE; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "wide msgout", np->msgout); } OUTL_DSP (SCRIPTB_BA (np, wdtr_resp)); return; reject_it: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * Reset SYNC or WIDE to default settings. * * Called when a negotiation does not succeed either * on rejection or on protocol error. * * If it was a PPR that made problems, we may want to * try a legacy negotiation later. */ static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp) { /* * any error in negotiation: * fall back to default mode. 
*/ switch (cp->nego_status) { case NS_PPR: #if 0 sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0); #else tp->tinfo.goal.options = 0; if (tp->tinfo.goal.period < np->minsync) tp->tinfo.goal.period = np->minsync; if (tp->tinfo.goal.offset > np->maxoffs) tp->tinfo.goal.offset = np->maxoffs; #endif break; case NS_SYNC: sym_setsync (np, cp, 0, 0, 0, 0); break; case NS_WIDE: sym_setwide (np, cp, 0); break; } np->msgin [0] = M_NOOP; np->msgout[0] = M_NOOP; cp->nego_status = 0; } /* * chip handler for MESSAGE REJECT received in response to * a WIDE or SYNCHRONOUS negotiation. */ static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp) { sym_nego_default(np, tp, cp); OUTB (HS_PRT, HS_BUSY); } /* * chip exception handler for programmed interrupts. */ static void sym_int_sir (hcb_p np) { u_char num = INB (nc_dsps); u32 dsa = INL (nc_dsa); ccb_p cp = sym_ccb_from_dsa(np, dsa); u_char target = INB (nc_sdid) & 0x0f; tcb_p tp = &np->target[target]; int tmp; SYM_LOCK_ASSERT(MA_OWNED); if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); switch (num) { /* * Command has been completed with error condition * or has been auto-sensed. */ case SIR_COMPLETE_ERROR: sym_complete_error(np, cp); return; /* * The C code is currently trying to recover from something. * Typically, user want to abort some command. */ case SIR_SCRIPT_STOPPED: case SIR_TARGET_SELECTED: case SIR_ABORT_SENT: sym_sir_task_recovery(np, num); return; /* * The device didn't go to MSG OUT phase after having * been selected with ATN. We donnot want to handle * that. */ case SIR_SEL_ATN_NO_MSG_OUT: printf ("%s:%d: No MSG OUT phase after selection with ATN.\n", sym_name (np), target); goto out_stuck; /* * The device didn't switch to MSG IN phase after * having reseleted the initiator. */ case SIR_RESEL_NO_MSG_IN: printf ("%s:%d: No MSG IN phase after reselection.\n", sym_name (np), target); goto out_stuck; /* * After reselection, the device sent a message that wasn't * an IDENTIFY. */ case SIR_RESEL_NO_IDENTIFY: printf ("%s:%d: No IDENTIFY after reselection.\n", sym_name (np), target); goto out_stuck; /* * The device reselected a LUN we donnot know about. */ case SIR_RESEL_BAD_LUN: np->msgout[0] = M_RESET; goto out; /* * The device reselected for an untagged nexus and we * haven't any. */ case SIR_RESEL_BAD_I_T_L: np->msgout[0] = M_ABORT; goto out; /* * The device reselected for a tagged nexus that we donnot * have. */ case SIR_RESEL_BAD_I_T_L_Q: np->msgout[0] = M_ABORT_TAG; goto out; /* * The SCRIPTS let us know that the device has grabbed * our message and will abort the job. */ case SIR_RESEL_ABORTED: np->lastmsg = np->msgout[0]; np->msgout[0] = M_NOOP; printf ("%s:%d: message %x sent on bad reselection.\n", sym_name (np), target, np->lastmsg); goto out; /* * The SCRIPTS let us know that a message has been * successfully sent to the device. */ case SIR_MSG_OUT_DONE: np->lastmsg = np->msgout[0]; np->msgout[0] = M_NOOP; /* Should we really care of that */ if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) { if (cp) { cp->xerr_status &= ~XE_PARITY_ERR; if (!cp->xerr_status) OUTOFFB (HF_PRT, HF_EXT_ERR); } } goto out; /* * The device didn't send a GOOD SCSI status. * We may have some work to do prior to allow * the SCRIPTS processor to continue. */ case SIR_BAD_SCSI_STATUS: if (!cp) goto out; sym_sir_bad_scsi_status(np, cp); return; /* * We are asked by the SCRIPTS to prepare a * REJECT message. 
*/ case SIR_REJECT_TO_SEND: sym_print_msg(cp, "M_REJECT to send for ", np->msgin); np->msgout[0] = M_REJECT; goto out; /* * We have been ODD at the end of a DATA IN * transfer and the device didn't send a * IGNORE WIDE RESIDUE message. * It is a data overrun condition. */ case SIR_SWIDE_OVERRUN: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_SWIDE_OVRUN; } goto out; /* * We have been ODD at the end of a DATA OUT * transfer. * It is a data underrun condition. */ case SIR_SODL_UNDERRUN: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_SODL_UNRUN; } goto out; /* * The device wants us to tranfer more data than * expected or in the wrong direction. * The number of extra bytes is in scratcha. * It is a data overrun condition. */ case SIR_DATA_OVERRUN: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_EXTRA_DATA; cp->extra_bytes += INL (nc_scratcha); } goto out; /* * The device switched to an illegal phase (4/5). */ case SIR_BAD_PHASE: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_BAD_PHASE; } goto out; /* * We received a message. */ case SIR_MSG_RECEIVED: if (!cp) goto out_stuck; switch (np->msgin [0]) { /* * We received an extended message. * We handle MODIFY DATA POINTER, SDTR, WDTR * and reject all other extended messages. */ case M_EXTENDED: switch (np->msgin [2]) { case M_X_MODIFY_DP: if (DEBUG_FLAGS & DEBUG_POINTER) sym_print_msg(cp,"modify DP",np->msgin); tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + (np->msgin[5]<<8) + (np->msgin[6]); sym_modify_dp(np, cp, tmp); return; case M_X_SYNC_REQ: sym_sync_nego(np, tp, cp); return; case M_X_PPR_REQ: sym_ppr_nego(np, tp, cp); return; case M_X_WIDE_REQ: sym_wide_nego(np, tp, cp); return; default: goto out_reject; } break; /* * We received a 1/2 byte message not handled from SCRIPTS. * We are only expecting MESSAGE REJECT and IGNORE WIDE * RESIDUE messages that haven't been anticipated by * SCRIPTS on SWIDE full condition. Unanticipated IGNORE * WIDE RESIDUE messages are aliased as MODIFY DP (-1). */ case M_IGN_RESIDUE: if (DEBUG_FLAGS & DEBUG_POINTER) sym_print_msg(cp,"ign wide residue", np->msgin); sym_modify_dp(np, cp, -1); return; case M_REJECT: if (INB (HS_PRT) == HS_NEGOTIATE) sym_nego_rejected(np, tp, cp); else { PRINT_ADDR(cp); printf ("M_REJECT received (%x:%x).\n", scr_to_cpu(np->lastmsg), np->msgout[0]); } goto out_clrack; break; default: goto out_reject; } break; /* * We received an unknown message. * Ignore all MSG IN phases and reject it. */ case SIR_MSG_WEIRD: sym_print_msg(cp, "WEIRD message received", np->msgin); OUTL_DSP (SCRIPTB_BA (np, msg_weird)); return; /* * Negotiation failed. * Target does not send us the reply. * Remove the HS_NEGOTIATE status. */ case SIR_NEGO_FAILED: OUTB (HS_PRT, HS_BUSY); /* * Negotiation failed. * Target does not want answer message. 
*/ case SIR_NEGO_PROTO: sym_nego_default(np, tp, cp); goto out; } out: OUTONB_STD (); return; out_reject: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); return; out_clrack: OUTL_DSP (SCRIPTA_BA (np, clrack)); return; out_stuck: return; } /* * Acquire a control block */ static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order) { tcb_p tp = &np->target[tn]; lcb_p lp = sym_lp(tp, ln); u_short tag = NO_TAG; SYM_QUEHEAD *qp; ccb_p cp = (ccb_p) NULL; /* * Look for a free CCB */ if (sym_que_empty(&np->free_ccbq)) goto out; qp = sym_remque_head(&np->free_ccbq); if (!qp) goto out; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); /* * If the LCB is not yet available and the LUN * has been probed ok, try to allocate the LCB. */ if (!lp && sym_is_bit(tp->lun_map, ln)) { lp = sym_alloc_lcb(np, tn, ln); if (!lp) goto out_free; } /* * If the LCB is not available here, then the * logical unit is not yet discovered. For those * ones only accept 1 SCSI IO per logical unit, * since we cannot allow disconnections. */ if (!lp) { if (!sym_is_bit(tp->busy0_map, ln)) sym_set_bit(tp->busy0_map, ln); else goto out_free; } else { /* * If we have been asked for a tagged command. */ if (tag_order) { /* * Debugging purpose. */ assert(lp->busy_itl == 0); /* * Allocate resources for tags if not yet. */ if (!lp->cb_tags) { sym_alloc_lcb_tags(np, tn, ln); if (!lp->cb_tags) goto out_free; } /* * Get a tag for this SCSI IO and set up * the CCB bus address for reselection, * and count it for this LUN. * Toggle reselect path to tagged. */ if (lp->busy_itlq < SYM_CONF_MAX_TASK) { tag = lp->cb_tags[lp->ia_tag]; if (++lp->ia_tag == SYM_CONF_MAX_TASK) lp->ia_tag = 0; lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba); ++lp->busy_itlq; lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA (np, resel_tag)); } else goto out_free; } /* * This command will not be tagged. * If we already have either a tagged or untagged * one, refuse to overlap this untagged one. */ else { /* * Debugging purpose. */ assert(lp->busy_itl == 0 && lp->busy_itlq == 0); /* * Count this nexus for this LUN. * Set up the CCB bus address for reselection. * Toggle reselect path to untagged. */ if (++lp->busy_itl == 1) { lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA (np, resel_no_tag)); } else goto out_free; } } /* * Put the CCB into the busy queue. */ sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); /* * Remember all informations needed to free this CCB. */ cp->to_abort = 0; cp->tag = tag; cp->target = tn; cp->lun = ln; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_LUN(np, tn, ln); printf ("ccb @%p using tag %d.\n", cp, tag); } out: return cp; out_free: sym_insque_head(&cp->link_ccbq, &np->free_ccbq); return NULL; } /* * Release one control block */ static void sym_free_ccb(hcb_p np, ccb_p cp) { tcb_p tp = &np->target[cp->target]; lcb_p lp = sym_lp(tp, cp->lun); if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_LUN(np, cp->target, cp->lun); printf ("ccb @%p freeing tag %d.\n", cp, cp->tag); } /* * If LCB available, */ if (lp) { /* * If tagged, release the tag, set the relect path */ if (cp->tag != NO_TAG) { /* * Free the tag value. */ lp->cb_tags[lp->if_tag] = cp->tag; if (++lp->if_tag == SYM_CONF_MAX_TASK) lp->if_tag = 0; /* * Make the reselect path invalid, * and uncount this CCB. */ lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba); --lp->busy_itlq; } else { /* Untagged */ /* * Make the reselect path invalid, * and uncount this CCB. 
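 *
 * A note on the tag bookkeeping seen in sym_get_ccb() and just
 * above: cb_tags[] acts as a circular free list. Tags are handed
 * out at ia_tag and returned at if_tag, both wrapping at
 * SYM_CONF_MAX_TASK (a condensed sketch, wrap checks omitted):
 *
 *	alloc:	tag = lp->cb_tags[lp->ia_tag++];
 *	free:	lp->cb_tags[lp->if_tag++] = tag;
 *
 * Allocations and frees are balanced, so the window between if_tag
 * and ia_tag always holds the currently free tag values.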
*/ lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); --lp->busy_itl; } /* * If no JOB active, make the LUN reselect path invalid. */ if (lp->busy_itlq == 0 && lp->busy_itl == 0) lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun)); } /* * Otherwise, we only accept 1 IO per LUN. * Clear the bit that keeps track of this IO. */ else sym_clr_bit(tp->busy0_map, cp->lun); /* * We donnot queue more than 1 ccb per target * with negotiation at any time. If this ccb was * used for negotiation, clear this info in the tcb. */ if (cp == tp->nego_cp) tp->nego_cp = NULL; #ifdef SYM_CONF_IARB_SUPPORT /* * If we just complete the last queued CCB, * clear this info that is no longer relevant. */ if (cp == np->last_cp) np->last_cp = NULL; #endif /* * Unmap user data from DMA map if needed. */ if (cp->dmamapped) { bus_dmamap_unload(np->data_dmat, cp->dmamap); cp->dmamapped = 0; } /* * Make this CCB available. */ cp->cam_ccb = NULL; cp->host_status = HS_IDLE; sym_remque(&cp->link_ccbq); sym_insque_head(&cp->link_ccbq, &np->free_ccbq); } /* * Allocate a CCB from memory and initialize its fixed part. */ static ccb_p sym_alloc_ccb(hcb_p np) { ccb_p cp = NULL; int hcode; SYM_LOCK_ASSERT(MA_NOTOWNED); /* * Prevent from allocating more CCBs than we can * queue to the controller. */ if (np->actccbs >= SYM_CONF_MAX_START) return NULL; /* * Allocate memory for this CCB. */ cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB"); if (!cp) return NULL; /* * Allocate a bounce buffer for sense data. */ cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF"); if (!cp->sns_bbuf) goto out_free; /* * Allocate a map for the DMA of user data. */ if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap)) goto out_free; /* * Count it. */ np->actccbs++; /* * Initialize the callout. */ callout_init(&cp->ch, 1); /* * Compute the bus address of this ccb. */ cp->ccb_ba = vtobus(cp); /* * Insert this ccb into the hashed list. */ hcode = CCB_HASH_CODE(cp->ccb_ba); cp->link_ccbh = np->ccbh[hcode]; np->ccbh[hcode] = cp; /* * Initialize the start and restart actions. */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, idle)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); /* * Initilialyze some other fields. */ cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2])); /* * Chain into free ccb queue. */ sym_insque_head(&cp->link_ccbq, &np->free_ccbq); return cp; out_free: if (cp->sns_bbuf) sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF"); sym_mfree_dma(cp, sizeof(*cp), "CCB"); return NULL; } /* * Look up a CCB from a DSA value. */ static ccb_p sym_ccb_from_dsa(hcb_p np, u32 dsa) { int hcode; ccb_p cp; hcode = CCB_HASH_CODE(dsa); cp = np->ccbh[hcode]; while (cp) { if (cp->ccb_ba == dsa) break; cp = cp->link_ccbh; } return cp; } /* * Lun control block allocation and initialization. */ static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln) { tcb_p tp = &np->target[tn]; lcb_p lp = sym_lp(tp, ln); /* * Already done, just return. */ if (lp) return lp; /* * Check against some race. */ assert(!sym_is_bit(tp->busy0_map, ln)); /* * Allocate the LCB bus address array. * Compute the bus address of this table. */ if (ln && !tp->luntbl) { int i; tp->luntbl = sym_calloc_dma(256, "LUNTBL"); if (!tp->luntbl) goto fail; for (i = 0 ; i < 64 ; i++) tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl)); } /* * Allocate the table of pointers for LUN(s) > 0, if needed. 
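 *
 * Layout reminder for the two tables involved (sizes are from the
 * allocations above, the values shown are what the code below
 * stores): luntbl is a DMA-able array of 64 SCRIPTS bus addresses
 * indexed by LUN and read by the chip on reselection, while lunmp
 * is a host-only array of lcb_p pointers:
 *
 *	tp->luntbl[ln] = cpu_to_scr(vtobus(lp));	seen by SCRIPTS
 *	tp->lunmp[ln]  = lp;				seen by C code
 *
 * LUN 0 is special-cased through tp->lun0p and tp->head.lun0_sa so
 * the common single-LUN case needs no table at all.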
 */
	if (ln && !tp->lunmp) {
		tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p),
				   "LUNMP");
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 * Allocate the lcb.
	 * Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}

	/*
	 * Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 * Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));

	/*
	 * Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

fail:
	return lp;
}

/*
 * Allocate LCB resources for tagged command queuing.
 */
static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(tp, ln);
	int i;

	/*
	 * If LCB not available, try to allocate it.
	 */
	if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
		return;

	/*
	 * Allocate the task table and the tag allocation
	 * circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		return;
	lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
	if (!lp->cb_tags) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		lp->itlq_tbl = 0;
		return;
	}

	/*
	 * Initialize the task table with invalid entries.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 * Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 * Make the task table available to SCRIPTS,
	 * and accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
}

/*
 * Test the pci bus snoop logic :-(
 *
 * Has to be called with interrupts disabled.
 */
#ifndef SYM_CONF_IOMAPPED
static int sym_regtest (hcb_p np)
{
	register volatile u32 data;
	/*
	 * chip registers may NOT be cached.
	 * write 0xffffffff to a read only register area,
	 * and try to read it back.
	 */
	data = 0xffffffff;
	OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data);
	data = INL_OFF(offsetof(struct sym_reg, nc_dstat));
#if 1
	if (data == 0xffffffff) {
#else
	if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
			(unsigned) data);
		return (0x10);
	}
	return (0);
}
#endif

static int sym_snooptest (hcb_p np)
{
	u32	sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
	int	i, err=0;
#ifndef SYM_CONF_IOMAPPED
	err |= sym_regtest (np);
	if (err)
		return (err);
#endif
restart_test:
	/*
	 * Enable Master Parity Checking as we intend
	 * to enable it for normal operations.
	 */
	OUTB (nc_ctest4, (np->rv_ctest4 & MPEE));
	/*
	 * init
	 */
	pc  = SCRIPTB0_BA (np, snooptest);
	host_wr = 1;
	sym_wr  = 2;
	/*
	 * Set memory and register.
	 */
	np->cache = cpu_to_scr(host_wr);
	OUTL (nc_temp, sym_wr);
	/*
	 * Start script (exchange values)
	 */
	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (pc);
	/*
	 * Wait 'til done (with timeout)
	 */
	for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
		if (INB(nc_istat) & (INTF|SIP|DIP))
			break;
	if (i>=SYM_SNOOP_TIMEOUT) {
		printf ("CACHE TEST FAILED: timeout.\n");
		return (0x20);
	}
	/*
	 * Check for fatal DMA errors.
*/ dstat = INB (nc_dstat); #if 1 /* Band aiding for broken hardwares that fail PCI parity */ if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) { printf ("%s: PCI DATA PARITY ERROR DETECTED - " "DISABLING MASTER DATA PARITY CHECKING.\n", sym_name(np)); np->rv_ctest4 &= ~MPEE; goto restart_test; } #endif if (dstat & (MDPE|BF|IID)) { printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat); return (0x80); } /* * Save termination position. */ pc = INL (nc_dsp); /* * Read memory and register. */ host_rd = scr_to_cpu(np->cache); sym_rd = INL (nc_scratcha); sym_bk = INL (nc_temp); /* * Check termination position. */ if (pc != SCRIPTB0_BA (np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) SCRIPTB0_BA (np, snooptest), (u_long) pc, (u_long) SCRIPTB0_BA (np, snoopend) +8); return (0x40); } /* * Show results. */ if (host_wr != sym_rd) { printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", (int) host_wr, (int) sym_rd); err |= 1; } if (host_rd != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", (int) sym_wr, (int) host_rd); err |= 2; } if (sym_bk != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", (int) sym_wr, (int) sym_bk); err |= 4; } return (err); } /* * Determine the chip's clock frequency. * * This is essential for the negotiation of the synchronous * transfer rate. * * Note: we have to return the correct value. * THERE IS NO SAFE DEFAULT VALUE. * * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. * 53C860 and 53C875 rev. 1 support fast20 transfers but * do not have a clock doubler and so are provided with a * 80 MHz clock. All other fast20 boards incorporate a doubler * and so should be delivered with a 40 MHz clock. * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base * clock and provide a clock quadrupler (160 Mhz). */ /* * Select SCSI clock frequency */ static void sym_selectclock(hcb_p np, u_char scntl3) { /* * If multiplier not present or not selected, leave here. */ if (np->multiplier <= 1) { OUTB(nc_scntl3, scntl3); return; } if (sym_verbose >= 2) printf ("%s: enabling clock multiplier\n", sym_name(np)); OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */ /* * Wait for the LCKFRQ bit to be set if supported by the chip. * Otherwise wait 20 micro-seconds. */ if (np->features & FE_LCKFRQ) { int i = 20; while (!(INB(nc_stest4) & LCKFRQ) && --i > 0) UDELAY (20); if (!i) printf("%s: the chip cannot lock the frequency\n", sym_name(np)); } else UDELAY (20); OUTB(nc_stest3, HSC); /* Halt the scsi clock */ OUTB(nc_scntl3, scntl3); OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(nc_stest3, 0x00); /* Restart scsi clock */ } /* * calculate SCSI clock frequency (in KHz) */ static unsigned getfreq (hcb_p np, int gen) { unsigned int ms = 0; unsigned int f; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned). 
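 *
 * Back-of-envelope example of the measurement below (numbers are
 * illustrative, not measured): with gen = 11 the general purpose
 * timer expires after 1<<11 = 2048 ticks of nominally 125us, and
 * the elapsed time is counted in milliseconds. A 40 MHz SCSI clock
 * would measure roughly ms = 222, and the conversion
 *
 *	f = (1 << gen) * 4340 / ms = 2048 * 4340 / 222 = 40038 KHz
 *
 * lands close to 40000 KHz; sym_getclock() then snaps the result to
 * the nearest supported frequency (40/50/80 MHz).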
 */
	OUTW (nc_sien , 0);	/* mask all scsi interrupts */
	(void) INW (nc_sist);	/* clear pending scsi interrupt */
	OUTB (nc_dien , 0);	/* mask all dma interrupts */
	(void) INW (nc_sist);	/* another one, just to be sure :) */
	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
	while (!(INW(nc_sist) & GEN) && ms++ < 100000)
		UDELAY (1000);	/* count ms */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	/*
	 * set prescaler to divide by whatever 0 means
	 * 0 ought to choose divide by 2, but appears
	 * to set divide by 3.5 mode in my 53c810 ...
	 */
	OUTB (nc_scntl3, 0);

	/*
	 * adjust for prescaler, and convert into KHz
	 */
	f = ms ? ((1 << gen) * 4340) / ms : 0;

	if (sym_verbose >= 2)
		printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
			sym_name(np), gen, ms, f);

	return f;
}

static unsigned sym_getfreq (hcb_p np)
{
	u_int f1, f2;
	int gen = 11;

	(void) getfreq (np, gen);	/* throw away first result */
	f1 = getfreq (np, gen);
	f2 = getfreq (np, gen);
	if (f1 > f2) f1 = f2;		/* trust lower result */
	return f1;
}

/*
 * Get/probe chip SCSI clock frequency
 */
static void sym_getclock (hcb_p np, int mult)
{
	unsigned char scntl3 = np->sv_scntl3;
	unsigned char stest1 = np->sv_stest1;
	unsigned f1;

	/*
	 * For the C10 core, assume 40 MHz.
	 */
	if (np->features & FE_C10) {
		np->multiplier = mult;
		np->clock_khz = 40000 * mult;
		return;
	}

	np->multiplier = 1;
	f1 = 40000;
	/*
	 * True with 875/895/896/895A with clock multiplier selected
	 */
	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
		if (sym_verbose >= 2)
			printf ("%s: clock multiplier found\n", sym_name(np));
		np->multiplier = mult;
	}

	/*
	 * If multiplier not found or scntl3 not 7,5,3,
	 * reset chip and get frequency from general purpose timer.
	 * Otherwise trust scntl3 BIOS setting.
	 */
	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
		f1 = sym_getfreq (np);

		if (sym_verbose)
			printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);

		if	(f1 < 45000)	f1 = 40000;
		else if (f1 < 55000)	f1 = 50000;
		else			f1 = 80000;

		if (f1 < 80000 && mult > 1) {
			if (sym_verbose >= 2)
				printf ("%s: clock multiplier assumed\n",
					sym_name(np));
			np->multiplier = mult;
		}
	} else {
		if	((scntl3 & 7) == 3)	f1 =  40000;
		else if	((scntl3 & 7) == 5)	f1 =  80000;
		else				f1 = 160000;

		f1 /= np->multiplier;
	}

	/*
	 * Compute controller synchronous parameters.
	 */
	f1 *= np->multiplier;
	np->clock_khz = f1;
}

/*
 * Get/probe PCI clock frequency
 */
static int sym_getpciclock (hcb_p np)
{
	int f = 0;

	/*
	 * For the C1010-33, this doesn't work.
	 * For the C1010-66, this will be tested when I have
	 * such a beast to play with.
	 */
	if (!(np->features & FE_C10)) {
		OUTB (nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
		f = (int) sym_getfreq (np);
		OUTB (nc_stest1, 0);
	}
	np->pciclk_khz = f;

	return f;
}

/*============= DRIVER ACTION/COMPLETION ====================*/

/*
 * Print something that tells about extended errors.
 */
static void sym_print_xerr(ccb_p cp, int x_status)
{
	if (x_status & XE_PARITY_ERR) {
		PRINT_ADDR(cp);
		printf ("unrecovered SCSI parity error.\n");
	}
	if (x_status & XE_EXTRA_DATA) {
		PRINT_ADDR(cp);
		printf ("extraneous data discarded.\n");
	}
	if (x_status & XE_BAD_PHASE) {
		PRINT_ADDR(cp);
		printf ("illegal scsi phase (4/5).\n");
	}
	if (x_status & XE_SODL_UNRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA OUT phase.\n");
	}
	if (x_status & XE_SWIDE_OVRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA IN phase.\n");
	}
}

/*
 * Choose the most appropriate CAM status if
 * the IO encountered an extended error.
*/ static int sym_xerr_cam_status(int cam_status, int x_status) { if (x_status) { if (x_status & XE_PARITY_ERR) cam_status = CAM_UNCOR_PARITY; else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) cam_status = CAM_DATA_RUN_ERR; else if (x_status & XE_BAD_PHASE) cam_status = CAM_REQ_CMP_ERR; else cam_status = CAM_REQ_CMP_ERR; } return cam_status; } /* * Complete execution of a SCSI command with extented * error, SCSI status error, or having been auto-sensed. * * The SCRIPTS processor is not running there, so we * can safely access IO registers and remove JOBs from * the START queue. * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. */ static void sym_complete_error (hcb_p np, ccb_p cp) { struct ccb_scsiio *csio; u_int cam_status; int i, sense_returned; SYM_LOCK_ASSERT(MA_OWNED); /* * Paranoid check. :) */ if (!cp || !cp->cam_ccb) return; if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) { printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp, cp->host_status, cp->ssss_status, cp->host_flags, cp->target, cp->lun); MDELAY(100); } /* * Get CAM command pointer. */ csio = &cp->cam_ccb->csio; /* * Check for extended errors. */ if (cp->xerr_status) { if (sym_verbose) sym_print_xerr(cp, cp->xerr_status); if (cp->host_status == HS_COMPLETE) cp->host_status = HS_COMP_ERR; } /* * Calculate the residual. */ csio->sense_resid = 0; csio->resid = sym_compute_residual(np, cp); if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */ csio->resid = 0; /* throw them away. :) */ cp->sv_resid = 0; } if (cp->host_flags & HF_SENSE) { /* Auto sense */ csio->scsi_status = cp->sv_scsi_status; /* Restore status */ csio->sense_resid = csio->resid; /* Swap residuals */ csio->resid = cp->sv_resid; cp->sv_resid = 0; if (sym_verbose && cp->sv_xerr_status) sym_print_xerr(cp, cp->sv_xerr_status); if (cp->host_status == HS_COMPLETE && cp->ssss_status == S_GOOD && cp->xerr_status == 0) { cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR, cp->sv_xerr_status); cam_status |= CAM_AUTOSNS_VALID; /* * Bounce back the sense data to user and * fix the residual. */ bzero(&csio->sense_data, sizeof(csio->sense_data)); sense_returned = SYM_SNS_BBUF_LEN - csio->sense_resid; if (sense_returned < csio->sense_len) csio->sense_resid = csio->sense_len - sense_returned; else csio->sense_resid = 0; bcopy(cp->sns_bbuf, &csio->sense_data, MIN(csio->sense_len, sense_returned)); #if 0 /* * If the device reports a UNIT ATTENTION condition * due to a RESET condition, we should consider all * disconnect CCBs for this unit as aborted. */ if (1) { u_char *p; p = (u_char *) csio->sense_data; if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29) sym_clear_tasks(np, CAM_REQ_ABORTED, cp->target,cp->lun, -1); } #endif } else cam_status = CAM_AUTOSENSE_FAIL; } else if (cp->host_status == HS_COMPLETE) { /* Bad SCSI status */ csio->scsi_status = cp->ssss_status; cam_status = CAM_SCSI_STATUS_ERROR; } else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ cam_status = CAM_SEL_TIMEOUT; else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ cam_status = CAM_UNEXP_BUSFREE; else { /* Extended error */ if (sym_verbose) { PRINT_ADDR(cp); printf ("COMMAND FAILED (%x %x %x).\n", cp->host_status, cp->ssss_status, cp->xerr_status); } csio->scsi_status = cp->ssss_status; /* * Set the most appropriate value for CAM status. */ cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR, cp->xerr_status); } /* * Dequeue all queued CCBs for that device * not yet started by SCRIPTS. 
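 *
 * The index computation below (the same one used by the other
 * recovery paths in this file) just converts the SCRIPTS start
 * queue pointer saved in SCRATCHA back into an array index. With a
 * hypothetical squeue_ba of 0x10000 and SCRATCHA = 0x10018:
 *
 *	i = (0x10018 - 0x10000) / 4 = 6
 *
 * i.e. the next job SCRIPTS would have started sits at squeue[6],
 * queue slot 3, since each slot is two 32-bit words.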
*/ i = (INL (nc_scratcha) - np->squeue_ba) / 4; (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); /* * Restart the SCRIPTS processor. */ OUTL_DSP (SCRIPTA_BA (np, start)); /* * Synchronize DMA map if needed. */ if (cp->dmamapped) { bus_dmamap_sync(np->data_dmat, cp->dmamap, (cp->dmamapped == SYM_DMA_READ ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)); } /* * Add this one to the COMP queue. * Complete all those commands with either error * or requeue condition. */ sym_set_cam_status((union ccb *) csio, cam_status); sym_remque(&cp->link_ccbq); sym_insque_head(&cp->link_ccbq, &np->comp_ccbq); sym_flush_comp_queue(np, 0); } /* * Complete execution of a successful SCSI command. * * Only successful commands go to the DONE queue, * since we need to have the SCRIPTS processor * stopped on any error condition. * The SCRIPTS processor is running while we are * completing successful commands. */ static void sym_complete_ok (hcb_p np, ccb_p cp) { struct ccb_scsiio *csio; tcb_p tp; lcb_p lp; SYM_LOCK_ASSERT(MA_OWNED); /* * Paranoid check. :) */ if (!cp || !cp->cam_ccb) return; assert (cp->host_status == HS_COMPLETE); /* * Get command, target and lun pointers. */ csio = &cp->cam_ccb->csio; tp = &np->target[cp->target]; lp = sym_lp(tp, cp->lun); /* * Assume device discovered on first success. */ if (!lp) sym_set_bit(tp->lun_map, cp->lun); /* * If all data have been transferred, given than no * extended error did occur, there is no residual. */ csio->resid = 0; if (cp->phys.head.lastp != cp->phys.head.goalp) csio->resid = sym_compute_residual(np, cp); /* * Wrong transfer residuals may be worse than just always * returning zero. User can disable this feature from * sym_conf.h. Residual support is enabled by default. */ if (!SYM_CONF_RESIDUAL_SUPPORT) csio->resid = 0; /* * Synchronize DMA map if needed. */ if (cp->dmamapped) { bus_dmamap_sync(np->data_dmat, cp->dmamap, (cp->dmamapped == SYM_DMA_READ ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)); } /* * Set status and complete the command. */ csio->scsi_status = cp->ssss_status; sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP); sym_xpt_done(np, (union ccb *) csio, cp); sym_free_ccb(np, cp); } /* * Our callout handler */ static void sym_callout(void *arg) { union ccb *ccb = (union ccb *) arg; hcb_p np = ccb->ccb_h.sym_hcb_ptr; /* * Check that the CAM CCB is still queued. */ if (!np) return; SYM_LOCK(); switch(ccb->ccb_h.func_code) { case XPT_SCSI_IO: (void) sym_abort_scsiio(np, ccb, 1); break; default: break; } SYM_UNLOCK(); } /* * Abort an SCSI IO. */ static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out) { ccb_p cp; SYM_QUEHEAD *qp; SYM_LOCK_ASSERT(MA_OWNED); /* * Look up our CCB control block. */ cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp2->cam_ccb == ccb) { cp = cp2; break; } } if (!cp || cp->host_status == HS_WAIT) return -1; /* * If a previous abort didn't succeed in time, * perform a BUS reset. */ if (cp->to_abort) { sym_reset_scsi_bus(np, 1); return 0; } /* * Mark the CCB for abort and allow time for. */ cp->to_abort = timed_out ? 2 : 1; callout_reset(&cp->ch, 10 * hz, sym_callout, (caddr_t) ccb); /* * Tell the SCRIPTS processor to stop and synchronize with us. */ np->istat_sem = SEM; OUTB (nc_istat, SIGP|SEM); return 0; } /* * Reset a SCSI device (all LUNs of a target). 
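 *
 * Editor's note: the reset is asynchronous. The routine below only
 * validates the address, flags the target (tp->to_reset = 1) and
 * kicks the SCRIPTS processor with SIGP|SEM; the BUS DEVICE RESET
 * message itself is sent later, from the task recovery code, once
 * SCRIPTS has synchronized with the driver.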
*/ static void sym_reset_dev(hcb_p np, union ccb *ccb) { tcb_p tp; struct ccb_hdr *ccb_h = &ccb->ccb_h; SYM_LOCK_ASSERT(MA_OWNED); if (ccb_h->target_id == np->myaddr || ccb_h->target_id >= SYM_CONF_MAX_TARGET || ccb_h->target_lun >= SYM_CONF_MAX_LUN) { sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE); return; } tp = &np->target[ccb_h->target_id]; tp->to_reset = 1; sym_xpt_done2(np, ccb, CAM_REQ_CMP); np->istat_sem = SEM; OUTB (nc_istat, SIGP|SEM); } /* * SIM action entry point. */ static void sym_action(struct cam_sim *sim, union ccb *ccb) { hcb_p np; tcb_p tp; lcb_p lp; ccb_p cp; int tmp; u_char idmsg, *msgptr; u_int msglen; struct ccb_scsiio *csio; struct ccb_hdr *ccb_h; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n")); /* * Retrieve our controller data structure. */ np = (hcb_p) cam_sim_softc(sim); SYM_LOCK_ASSERT(MA_OWNED); /* * The common case is SCSI IO. * We deal with other ones elsewhere. */ if (ccb->ccb_h.func_code != XPT_SCSI_IO) { sym_action2(sim, ccb); return; } csio = &ccb->csio; ccb_h = &csio->ccb_h; /* * Work around races. */ if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(ccb); return; } /* * Minimal checkings, so that we will not * go outside our tables. */ if (ccb_h->target_id == np->myaddr || ccb_h->target_id >= SYM_CONF_MAX_TARGET || ccb_h->target_lun >= SYM_CONF_MAX_LUN) { sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE); return; } /* * Retrieve the target and lun descriptors. */ tp = &np->target[ccb_h->target_id]; lp = sym_lp(tp, ccb_h->target_lun); /* * Complete the 1st INQUIRY command with error * condition if the device is flagged NOSCAN * at BOOT in the NVRAM. This may speed up * the boot and maintain coherency with BIOS * device numbering. Clearing the flag allows * user to rescan skipped devices later. * We also return error for devices not flagged * for SCAN LUNS in the NVRAM since some mono-lun * devices behave badly when asked for some non * zero LUN. Btw, this is an absolute hack.:-) */ if (!(ccb_h->flags & CAM_CDB_PHYS) && (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ? csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) { if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) || ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) && ccb_h->target_lun != 0)) { tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE); return; } } /* * Get a control block for this IO. */ tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0); cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp); if (!cp) { sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL); return; } /* * Keep track of the IO in our CCB. */ cp->cam_ccb = ccb; /* * Build the IDENTIFY message. */ idmsg = M_IDENTIFY | cp->lun; if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED))) idmsg |= 0x40; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; /* * Build the tag message if present. */ if (cp->tag != NO_TAG) { u_char order = csio->tag_action; switch(order) { case M_ORDERED_TAG: break; case M_HEAD_TAG: break; default: order = M_SIMPLE_TAG; } msgptr[msglen++] = order; /* * For less than 128 tags, actual tags are numbered * 1,3,5,..2*MAXTAGS+1,since we may have to deal * with devices that have problems with #TAG 0 or too * great #TAG numbers. For more tags (up to 256), * we use directly our tag number. */ #if SYM_CONF_MAX_TASK > (512/4) msgptr[msglen++] = cp->tag; #else msgptr[msglen++] = (cp->tag << 1) + 1; #endif } /* * Build a negotiation message if needed. 
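 *
 * Editor's note: negotiation is (re)attempted whenever the current
 * width, period, offset or options differ from the goal values, and
 * only if an LCB exists and no other CCB is already negotiating for
 * this target (tp->nego_cp).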
* (nego_status is filled by sym_prepare_nego()) */ cp->nego_status = 0; if (tp->tinfo.current.width != tp->tinfo.goal.width || tp->tinfo.current.period != tp->tinfo.goal.period || tp->tinfo.current.offset != tp->tinfo.goal.offset || tp->tinfo.current.options != tp->tinfo.goal.options) { if (!tp->nego_cp && lp) msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen); } /* * Fill in our ccb */ /* * Startqueue */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA (np, resel_dsa)); /* * select */ cp->phys.select.sel_id = cp->target; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; cp->phys.select.sel_scntl4 = tp->head.uval; /* * message */ cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg)); cp->phys.smsg.size = cpu_to_scr(msglen); /* * command */ if (sym_setup_cdb(np, csio, cp) < 0) { sym_xpt_done(np, ccb, cp); sym_free_ccb(np, cp); return; } /* * status */ #if 0 /* Provision */ cp->actualquirks = tp->quirks; #endif cp->actualquirks = SYM_QUIRK_AUTOSAVE; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->xerr_status = 0; cp->host_flags = 0; cp->extra_bytes = 0; /* * extreme data pointer. * shall be positive, so -1 is lower than lowest.:) */ cp->ext_sg = -1; cp->ext_ofs = 0; /* * Build the data descriptor block * and start the IO. */ sym_setup_data_and_start(np, csio, cp); } /* * Setup buffers and pointers that address the CDB. * I bet, physical CDBs will never be used on the planet, * since they can be bounced without significant overhead. */ static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) { struct ccb_hdr *ccb_h; u32 cmd_ba; int cmd_len; SYM_LOCK_ASSERT(MA_OWNED); ccb_h = &csio->ccb_h; /* * CDB is 16 bytes max. */ if (csio->cdb_len > sizeof(cp->cdb_buf)) { sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); return -1; } cmd_len = csio->cdb_len; if (ccb_h->flags & CAM_CDB_POINTER) { /* CDB is a pointer */ if (!(ccb_h->flags & CAM_CDB_PHYS)) { /* CDB pointer is virtual */ bcopy(csio->cdb_io.cdb_ptr, cp->cdb_buf, cmd_len); cmd_ba = CCB_BA (cp, cdb_buf[0]); } else { /* CDB pointer is physical */ #if 0 cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff; #else sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); return -1; #endif } } else { /* CDB is in the CAM ccb (buffer) */ bcopy(csio->cdb_io.cdb_bytes, cp->cdb_buf, cmd_len); cmd_ba = CCB_BA (cp, cdb_buf[0]); } cp->phys.cmd.addr = cpu_to_scr(cmd_ba); cp->phys.cmd.size = cpu_to_scr(cmd_len); return 0; } /* * Set up data pointers used by SCRIPTS. */ static void __inline sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir) { u32 lastp, goalp; SYM_LOCK_ASSERT(MA_OWNED); /* * No segments means no data. */ if (!cp->segments) dir = CAM_DIR_NONE; /* * Set the data pointer. */ switch(dir) { case CAM_DIR_OUT: goalp = SCRIPTA_BA (np, data_out2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case CAM_DIR_IN: cp->host_flags |= HF_DATA_IN; goalp = SCRIPTA_BA (np, data_in2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case CAM_DIR_NONE: default: lastp = goalp = SCRIPTB_BA (np, no_data); break; } cp->phys.head.lastp = cpu_to_scr(lastp); cp->phys.head.goalp = cpu_to_scr(goalp); cp->phys.head.savep = cpu_to_scr(lastp); cp->startp = cp->phys.head.savep; } /* * Call back routine for the DMA map service. * If bounce buffers are used (why ?), we may sleep and then * be called there in another context. 
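 *
 * Editor's note: "another context" refers to the busdma deferred
 * load case: when bus_dmamap_load_ccb() cannot map at once it
 * returns EINPROGRESS and this callback fires later, which is why
 * sym_setup_data_and_start() below freezes the SIM queue until then.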
*/ static void sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error) { ccb_p cp; hcb_p np; union ccb *ccb; cp = (ccb_p) arg; ccb = cp->cam_ccb; np = (hcb_p) cp->arg; SYM_LOCK_ASSERT(MA_OWNED); /* * Deal with weird races. */ if (sym_get_cam_status(ccb) != CAM_REQ_INPROG) goto out_abort; /* * Deal with weird errors. */ if (error) { cp->dmamapped = 0; sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED); goto out_abort; } /* * Build the data descriptor for the chip. */ if (nsegs) { int retv; /* 896 rev 1 requires to be careful about boundaries */ if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1) retv = sym_scatter_sg_physical(np, cp, psegs, nsegs); else retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs); if (retv < 0) { sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG); goto out_abort; } } /* * Synchronize the DMA map only if we have * actually mapped the data. */ if (cp->dmamapped) { bus_dmamap_sync(np->data_dmat, cp->dmamap, (cp->dmamapped == SYM_DMA_READ ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); } /* * Set host status to busy state. * May have been set back to HS_WAIT to avoid a race. */ cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; /* * Set data pointers. */ sym_setup_data_pointers(np, cp, (ccb->ccb_h.flags & CAM_DIR_MASK)); /* * Enqueue this IO in our pending queue. */ sym_enqueue_cam_ccb(cp); /* * When `#ifed 1', the code below makes the driver * panic on the first attempt to write to a SCSI device. * It is the first test we want to do after a driver * change that does not seem obviously safe. :) */ #if 0 switch (cp->cdb_buf[0]) { case 0x0A: case 0x2A: case 0xAA: panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n"); MDELAY(10000); break; default: break; } #endif /* * Activate this job. */ sym_put_start_queue(np, cp); return; out_abort: sym_xpt_done(np, ccb, cp); sym_free_ccb(np, cp); } /* * How complex it gets to deal with the data in CAM. * The Bus Dma stuff makes things still more complex. */ static void sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) { struct ccb_hdr *ccb_h; int dir, retv; SYM_LOCK_ASSERT(MA_OWNED); ccb_h = &csio->ccb_h; /* * Now deal with the data. */ cp->data_len = csio->dxfer_len; cp->arg = np; /* * No direction means no data. */ dir = (ccb_h->flags & CAM_DIR_MASK); if (dir == CAM_DIR_NONE) { sym_execute_ccb(cp, NULL, 0, 0); return; } cp->dmamapped = (dir == CAM_DIR_IN) ? SYM_DMA_READ : SYM_DMA_WRITE; retv = bus_dmamap_load_ccb(np->data_dmat, cp->dmamap, (union ccb *)csio, sym_execute_ccb, cp, 0); if (retv == EINPROGRESS) { cp->host_status = HS_WAIT; xpt_freeze_simq(np->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } /* * Move the scatter list to our data block. */ static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) { struct sym_tblmove *data; bus_dma_segment_t *psegs2; SYM_LOCK_ASSERT(MA_OWNED); if (nsegs > SYM_CONF_MAX_SG) return -1; data = &cp->phys.data[SYM_CONF_MAX_SG-1]; psegs2 = &psegs[nsegs-1]; cp->segments = nsegs; while (1) { data->addr = cpu_to_scr(psegs2->ds_addr); data->size = cpu_to_scr(psegs2->ds_len); if (DEBUG_FLAGS & DEBUG_SCATTER) { printf ("%s scatter: paddr=%lx len=%ld\n", sym_name(np), (long) psegs2->ds_addr, (long) psegs2->ds_len); } if (psegs2 != psegs) { --data; --psegs2; continue; } break; } return 0; } /* * Scatter a SG list with physical addresses into bus addressable chunks. 
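 *
 * Editor's note, with a worked example (assuming, for illustration,
 * a 4 KiB SYM_CONF_DMA_BOUNDARY): chunks are carved from the tail of
 * each segment so the table fills backwards, and each chunk stops at
 * a boundary multiple computed by rounddown2(pe - 1, boundary).
 * A segment [0x0f00, 0x2100) thus yields the chunks [0x2000, 0x2100),
 * [0x1000, 0x2000) and finally [0x0f00, 0x1000).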
*/ static int sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) { u_long ps, pe, pn; u_long k; int s, t; SYM_LOCK_ASSERT(MA_OWNED); s = SYM_CONF_MAX_SG - 1; t = nsegs - 1; ps = psegs[t].ds_addr; pe = ps + psegs[t].ds_len; while (s >= 0) { pn = rounddown2(pe - 1, SYM_CONF_DMA_BOUNDARY); if (pn <= ps) pn = ps; k = pe - pn; if (DEBUG_FLAGS & DEBUG_SCATTER) { printf ("%s scatter: paddr=%lx len=%ld\n", sym_name(np), pn, k); } cp->phys.data[s].addr = cpu_to_scr(pn); cp->phys.data[s].size = cpu_to_scr(k); --s; if (pn == ps) { if (--t < 0) break; ps = psegs[t].ds_addr; pe = ps + psegs[t].ds_len; } else pe = pn; } cp->segments = SYM_CONF_MAX_SG - 1 - s; return t >= 0 ? -1 : 0; } /* * SIM action for non performance critical stuff. */ static void sym_action2(struct cam_sim *sim, union ccb *ccb) { union ccb *abort_ccb; struct ccb_hdr *ccb_h; struct ccb_pathinq *cpi; struct ccb_trans_settings *cts; struct sym_trans *tip; hcb_p np; tcb_p tp; lcb_p lp; u_char dflags; /* * Retrieve our controller data structure. */ np = (hcb_p) cam_sim_softc(sim); SYM_LOCK_ASSERT(MA_OWNED); ccb_h = &ccb->ccb_h; switch (ccb_h->func_code) { case XPT_SET_TRAN_SETTINGS: cts = &ccb->cts; tp = &np->target[ccb_h->target_id]; /* * Update SPI transport settings in TARGET control block. * Update SCSI device settings in LUN control block. */ lp = sym_lp(tp, ccb_h->target_lun); if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { sym_update_trans(np, &tp->tinfo.goal, cts); if (lp) sym_update_dflags(np, &lp->current_flags, cts); } if (cts->type == CTS_TYPE_USER_SETTINGS) { sym_update_trans(np, &tp->tinfo.user, cts); if (lp) sym_update_dflags(np, &lp->user_flags, cts); } sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; tp = &np->target[ccb_h->target_id]; lp = sym_lp(tp, ccb_h->target_lun); #define cts__scsi (&cts->proto_specific.scsi) #define cts__spi (&cts->xport_specific.spi) if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tip = &tp->tinfo.current; dflags = lp ? lp->current_flags : 0; } else { tip = &tp->tinfo.user; dflags = lp ? lp->user_flags : tp->usrflags; } cts->protocol = PROTO_SCSI; cts->transport = XPORT_SPI; cts->protocol_version = tip->scsi_version; cts->transport_version = tip->spi_version; cts__spi->sync_period = tip->period; cts__spi->sync_offset = tip->offset; cts__spi->bus_width = tip->width; cts__spi->ppr_options = tip->options; cts__spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_PPR_OPTIONS; cts__spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (dflags & SYM_DISC_ENABLED) cts__spi->flags |= CTS_SPI_FLAGS_DISC_ENB; cts__spi->valid |= CTS_SPI_VALID_DISC; cts__scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; if (dflags & SYM_TAGS_ENABLED) cts__scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; cts__scsi->valid |= CTS_SCSI_VALID_TQ; #undef cts__spi #undef cts__scsi sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_PATH_INQ: cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE; if ((np->features & FE_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_UNMAPPED; if (np->usrflags & SYM_SCAN_TARGETS_HILO) cpi->hba_misc |= PIM_SCANHILO; if (np->usrflags & SYM_AVOID_BUS_RESET) cpi->hba_misc |= PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = (np->features & FE_WIDE) ? 
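/* wide chips address 16 SCSI IDs (0-15), narrow chips 8 (0-7) */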
15 : 7; /* Semantic problem:)LUN number max = max number of LUNs - 1 */ cpi->max_lun = SYM_CONF_MAX_LUN-1; if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN) cpi->max_lun = SYM_SETUP_MAX_LUN-1; cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = np->myaddr; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST; if (np->features & FE_ULTRA3) { cpi->transport_version = 3; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST; } cpi->maxio = SYM_CONF_MAX_SG * PAGE_SIZE; sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_ABORT: abort_ccb = ccb->cab.abort_ccb; switch(abort_ccb->ccb_h.func_code) { case XPT_SCSI_IO: if (sym_abort_scsiio(np, abort_ccb, 0) == 0) { sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; } default: sym_xpt_done2(np, ccb, CAM_UA_ABORT); break; } break; case XPT_RESET_DEV: sym_reset_dev(np, ccb); break; case XPT_RESET_BUS: sym_reset_scsi_bus(np, 0); if (sym_verbose) { xpt_print_path(np->path); printf("SCSI BUS reset delivered.\n"); } sym_init (np, 1); sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_ACCEPT_TARGET_IO: case XPT_CONT_TARGET_IO: case XPT_EN_LUN: case XPT_NOTIFY_ACK: case XPT_IMMED_NOTIFY: case XPT_TERM_IO: default: sym_xpt_done2(np, ccb, CAM_REQ_INVALID); break; } } /* * Asynchronous notification handler. */ static void sym_async(void *cb_arg, u32 code, struct cam_path *path, void *args __unused) { hcb_p np; struct cam_sim *sim; u_int tn; tcb_p tp; sim = (struct cam_sim *) cb_arg; np = (hcb_p) cam_sim_softc(sim); SYM_LOCK_ASSERT(MA_OWNED); switch (code) { case AC_LOST_DEVICE: tn = xpt_path_target_id(path); if (tn >= SYM_CONF_MAX_TARGET) break; tp = &np->target[tn]; tp->to_reset = 0; tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; tp->tinfo.current.period = tp->tinfo.goal.period = 0; tp->tinfo.current.offset = tp->tinfo.goal.offset = 0; tp->tinfo.current.width = tp->tinfo.goal.width = BUS_8_BIT; tp->tinfo.current.options = tp->tinfo.goal.options = 0; break; default: break; } } /* * Update transfer settings of a target. */ static void sym_update_trans(hcb_p np, struct sym_trans *tip, struct ccb_trans_settings *cts) { SYM_LOCK_ASSERT(MA_OWNED); /* * Update the infos. */ #define cts__spi (&cts->xport_specific.spi) if ((cts__spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tip->width = cts__spi->bus_width; if ((cts__spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tip->offset = cts__spi->sync_offset; if ((cts__spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tip->period = cts__spi->sync_period; if ((cts__spi->valid & CTS_SPI_VALID_PPR_OPTIONS) != 0) tip->options = (cts__spi->ppr_options & PPR_OPT_DT); if (cts->protocol_version != PROTO_VERSION_UNSPECIFIED && cts->protocol_version != PROTO_VERSION_UNKNOWN) tip->scsi_version = cts->protocol_version; if (cts->transport_version != XPORT_VERSION_UNSPECIFIED && cts->transport_version != XPORT_VERSION_UNKNOWN) tip->spi_version = cts->transport_version; #undef cts__spi /* * Scale against driver configuration limits. 
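 *
 * Editor's note: a (period, offset) pair is only meaningful as a
 * pair; if either is zero the transfer is asynchronous and both are
 * forced to zero. The period factor is clamped upwards to the
 * supported minimum (a smaller factor would mean a faster clock than
 * the hardware allows) and the offset downwards to the supported
 * maximum - first against driver limits here, then against the
 * actual controller limits further below.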
*/ if (tip->width > SYM_SETUP_MAX_WIDE) tip->width = SYM_SETUP_MAX_WIDE; if (tip->period && tip->offset) { if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset = SYM_SETUP_MAX_OFFS; if (tip->period < SYM_SETUP_MIN_SYNC) tip->period = SYM_SETUP_MIN_SYNC; } else { tip->offset = 0; tip->period = 0; } /* * Scale against actual controller BUS width. */ if (tip->width > np->maxwide) tip->width = np->maxwide; /* * Only accept DT if controller supports and SYNC/WIDE asked. */ if (!((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) || !(tip->width == BUS_16_BIT && tip->offset)) { tip->options &= ~PPR_OPT_DT; } /* * Scale period factor and offset against controller limits. */ if (tip->offset && tip->period) { if (tip->options & PPR_OPT_DT) { if (tip->period < np->minsync_dt) tip->period = np->minsync_dt; if (tip->period > np->maxsync_dt) tip->period = np->maxsync_dt; if (tip->offset > np->maxoffs_dt) tip->offset = np->maxoffs_dt; } else { if (tip->period < np->minsync) tip->period = np->minsync; if (tip->period > np->maxsync) tip->period = np->maxsync; if (tip->offset > np->maxoffs) tip->offset = np->maxoffs; } } } /* * Update flags for a device (logical unit). */ static void sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts) { SYM_LOCK_ASSERT(MA_OWNED); #define cts__scsi (&cts->proto_specific.scsi) #define cts__spi (&cts->xport_specific.spi) if ((cts__spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((cts__spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *flags |= SYM_DISC_ENABLED; else *flags &= ~SYM_DISC_ENABLED; } if ((cts__scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((cts__scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *flags |= SYM_TAGS_ENABLED; else *flags &= ~SYM_TAGS_ENABLED; } #undef cts__spi #undef cts__scsi } /*============= DRIVER INITIALISATION ==================*/ static device_method_t sym_pci_methods[] = { DEVMETHOD(device_probe, sym_pci_probe), DEVMETHOD(device_attach, sym_pci_attach), DEVMETHOD_END }; static driver_t sym_pci_driver = { "sym", sym_pci_methods, 1 /* no softc */ }; static devclass_t sym_devclass; DRIVER_MODULE(sym, pci, sym_pci_driver, sym_devclass, NULL, NULL); MODULE_DEPEND(sym, cam, 1, 1, 1); MODULE_DEPEND(sym, pci, 1, 1, 1); static const struct sym_pci_chip sym_pci_dev_table[] = { {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 64, FE_ERL} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, FE_BOF} , #else {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} , #endif {PCI_ID_SYM53C815, 0xff, "815", 4, 8, 4, 64, FE_BOF|FE_ERL} , {PCI_ID_SYM53C825, 0x0f, "825", 6, 8, 4, 64, FE_WIDE|FE_BOF|FE_ERL|FE_DIFF} , {PCI_ID_SYM53C825, 0xff, "825a", 6, 8, 4, 2, FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} , {PCI_ID_SYM53C860, 0xff, "860", 4, 8, 5, 1, FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} , {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS| FE_RAM|FE_LCKFRQ} , 
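/*
 * Editor's note: in each entry above and below the numeric columns
 * are, in order, burst_max, offset_max, nr_divisor and lp_probe_bit
 * (positional initializers of struct sym_pci_chip), and
 * sym_find_pci_chip() treats revision_id as a ceiling, so the first
 * entry whose ceiling covers the chip's PCI revision wins.
 */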
#else {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_LCKFRQ} , #endif {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_ID_LSI53C1010, 0x00, "1010-33", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10} , {PCI_ID_LSI53C1010, 0xff, "1010-33", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10|FE_U3EN} , {PCI_ID_LSI53C1010_2, 0xff, "1010-66", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC| FE_C10|FE_U3EN} , {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_IO256|FE_LEDC} }; /* * Look up the chip table. * * Return a pointer to the chip entry if found, * zero otherwise. */ static const struct sym_pci_chip * sym_find_pci_chip(device_t dev) { const struct sym_pci_chip *chip; int i; u_short device_id; u_char revision; if (pci_get_vendor(dev) != PCI_VENDOR_NCR) return NULL; device_id = pci_get_device(dev); revision = pci_get_revid(dev); for (i = 0; i < nitems(sym_pci_dev_table); i++) { chip = &sym_pci_dev_table[i]; if (device_id != chip->device_id) continue; if (revision > chip->revision_id) continue; return chip; } return NULL; } /* * Tell upper layer if the chip is supported. */ static int sym_pci_probe(device_t dev) { const struct sym_pci_chip *chip; chip = sym_find_pci_chip(dev); if (chip && sym_find_firmware(chip)) { device_set_desc(dev, chip->name); return (chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)? BUS_PROBE_LOW_PRIORITY : BUS_PROBE_DEFAULT; } return ENXIO; } /* * Attach a sym53c8xx device. */ static int sym_pci_attach(device_t dev) { const struct sym_pci_chip *chip; u_short command; u_char cachelnsz; struct sym_hcb *np = NULL; struct sym_nvram nvram; const struct sym_fw *fw = NULL; int i; bus_dma_tag_t bus_dmat; bus_dmat = bus_get_dma_tag(dev); /* * Only probed devices should be attached. * We just enjoy being paranoid. :) */ chip = sym_find_pci_chip(dev); if (chip == NULL || (fw = sym_find_firmware(chip)) == NULL) return (ENXIO); /* * Allocate immediately the host control block, * since we are only expecting to succeed. :) * We keep track in the HCB of all the resources that * are to be released on error. */ np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB"); if (np) np->bus_dmat = bus_dmat; else return (ENXIO); device_set_softc(dev, np); SYM_LOCK_INIT(); /* * Copy some useful infos to the HCB. 
*/ np->hcb_ba = vtobus(np); np->verbose = bootverbose; np->device = dev; np->device_id = pci_get_device(dev); np->revision_id = pci_get_revid(dev); np->features = chip->features; np->clock_divn = chip->nr_divisor; np->maxoffs = chip->offset_max; np->maxburst = chip->burst_max; np->scripta_sz = fw->a_size; np->scriptb_sz = fw->b_size; np->fw_setup = fw->setup; np->fw_patch = fw->patch; np->fw_name = fw->name; #ifdef __amd64__ np->target = sym_calloc_dma(SYM_CONF_MAX_TARGET * sizeof(*(np->target)), "TARGET"); if (!np->target) goto attach_failed; #endif /* * Initialize the CCB free and busy queues. */ sym_que_init(&np->free_ccbq); sym_que_init(&np->busy_ccbq); sym_que_init(&np->comp_ccbq); sym_que_init(&np->cam_ccbq); /* * Allocate a tag for the DMA of user data. */ if (bus_dma_tag_create(np->bus_dmat, 1, SYM_CONF_DMA_BOUNDARY, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, SYM_CONF_MAX_SG, SYM_CONF_DMA_BOUNDARY, 0, busdma_lock_mutex, &np->mtx, &np->data_dmat)) { device_printf(dev, "failed to create DMA tag.\n"); goto attach_failed; } /* * Read and apply some fix-ups to the PCI COMMAND * register. We want the chip to be enabled for: * - BUS mastering * - PCI parity checking (reporting would also be fine) * - Write And Invalidate. */ command = pci_read_config(dev, PCIR_COMMAND, 2); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN; pci_write_config(dev, PCIR_COMMAND, command, 2); /* * Let the device know about the cache line size, * if it doesn't yet. */ cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); if (!cachelnsz) { cachelnsz = 8; pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1); } /* * Alloc/get/map/retrieve everything that deals with MMIO. */ i = SYM_PCI_MMIO; np->mmio_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (!np->mmio_res) { device_printf(dev, "failed to allocate MMIO resources\n"); goto attach_failed; } np->mmio_ba = rman_get_start(np->mmio_res); /* * Allocate the IRQ. */ i = 0; np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_ACTIVE | RF_SHAREABLE); if (!np->irq_res) { device_printf(dev, "failed to allocate IRQ resource\n"); goto attach_failed; } #ifdef SYM_CONF_IOMAPPED /* * User want us to use normal IO with PCI. * Alloc/get/map/retrieve everything that deals with IO. */ i = SYM_PCI_IO; np->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &i, RF_ACTIVE); if (!np->io_res) { device_printf(dev, "failed to allocate IO resources\n"); goto attach_failed; } #endif /* SYM_CONF_IOMAPPED */ /* * If the chip has RAM. * Alloc/get/map/retrieve the corresponding resources. */ if (np->features & (FE_RAM|FE_RAM8K)) { int regs_id = SYM_PCI_RAM; if (np->features & FE_64BIT) regs_id = SYM_PCI_RAM64; np->ram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, ®s_id, RF_ACTIVE); if (!np->ram_res) { device_printf(dev,"failed to allocate RAM resources\n"); goto attach_failed; } np->ram_id = regs_id; np->ram_ba = rman_get_start(np->ram_res); } /* * Save setting of some IO registers, so we will * be able to probe specific implementations. */ sym_save_initial_setting (np); /* * Reset the chip now, since it has been reported * that SCSI clock calibration may not work properly * if the chip is currently active. */ sym_chip_reset (np); /* * Try to read the user set-up. */ (void) sym_read_nvram(np, &nvram); /* * Prepare controller and devices settings, according * to chip features, user set-up and driver set-up. */ (void) sym_prepare_setting(np, &nvram); /* * Check the PCI clock frequency. 
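 *
 * Editor's note: the 37000 KHz threshold below allows roughly 10%
 * tolerance above a nominal 33 MHz PCI bus; a larger reading usually
 * means the measurement itself went wrong rather than a genuinely
 * overclocked bus.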
* Must be performed after prepare_setting since it destroys * STEST1 that is used to probe for the clock doubler. */ i = sym_getpciclock(np); if (i > 37000) device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i); /* * Allocate the start queue. */ np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); if (!np->squeue) goto attach_failed; np->squeue_ba = vtobus(np->squeue); /* * Allocate the done queue. */ np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); if (!np->dqueue) goto attach_failed; np->dqueue_ba = vtobus(np->dqueue); /* * Allocate the target bus address array. */ np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL"); if (!np->targtbl) goto attach_failed; np->targtbl_ba = vtobus(np->targtbl); /* * Allocate SCRIPTS areas. */ np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); if (!np->scripta0 || !np->scriptb0) goto attach_failed; /* * Allocate the CCBs. We need at least ONE. */ for (i = 0; sym_alloc_ccb(np) != NULL; i++) ; if (i < 1) goto attach_failed; /* * Calculate BUS addresses where we are going * to load the SCRIPTS. */ np->scripta_ba = vtobus(np->scripta0); np->scriptb_ba = vtobus(np->scriptb0); np->scriptb0_ba = np->scriptb_ba; if (np->ram_ba) { np->scripta_ba = np->ram_ba; if (np->features & FE_RAM8K) { np->ram_ws = 8192; np->scriptb_ba = np->scripta_ba + 4096; #ifdef __LP64__ np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); #endif } else np->ram_ws = 4096; } /* * Copy scripts to controller instance. */ bcopy(fw->a_base, np->scripta0, np->scripta_sz); bcopy(fw->b_base, np->scriptb0, np->scriptb_sz); /* * Setup variable parts in scripts and compute * scripts bus addresses used from the C code. */ np->fw_setup(np, fw); /* * Bind SCRIPTS with physical addresses usable by the * SCRIPTS processor (as seen from the BUS = BUS addresses). */ sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); #ifdef SYM_CONF_IARB_SUPPORT /* * If user wants IARB to be set when we win arbitration * and have other jobs, compute the max number of consecutive * settings of IARB hints before we leave devices a chance to * arbitrate for reselection. */ #ifdef SYM_SETUP_IARB_MAX np->iarb_max = SYM_SETUP_IARB_MAX; #else np->iarb_max = 4; #endif #endif /* * Prepare the idle and invalid task actions. */ np->idletask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->idletask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->idletask_ba = vtobus(&np->idletask); np->notask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->notask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->notask_ba = vtobus(&np->notask); np->bad_itl.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->bad_itl_ba = vtobus(&np->bad_itl); np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA (np,bad_i_t_l_q)); np->bad_itlq_ba = vtobus(&np->bad_itlq); /* * Allocate and prepare the lun JUMP table that is used * for a target prior the probing of devices (bad lun table). * A private table will be allocated for the target on the * first INQUIRY response received. 
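 *
 * Editor's note: note the double indirection below: each of the 64
 * entries of badluntbl holds the bus address of badlun_sa, which in
 * turn holds the SCRIPTS address of resel_bad_lun, so a reselection
 * for a LUN we know nothing about traps into the bad-lun handler.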
*/ np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); if (!np->badluntbl) goto attach_failed; np->badlun_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun)); for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); /* * Prepare the bus address array that contains the bus * address of each target control block. * For now, assume all logical units are wrong. :) */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); np->target[i].head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); np->target[i].head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); } /* * Now check the cache handling of the pci chipset. */ if (sym_snooptest (np)) { device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n"); goto attach_failed; } /* * Now deal with CAM. * Hopefully, we will succeed with that one.:) */ if (!sym_cam_attach(np)) goto attach_failed; /* * Sigh! we are done. */ return 0; /* * We have failed. * We will try to free all the resources we have * allocated, but if we are a boot device, this * will not help that much.;) */ attach_failed: if (np) sym_pci_free(np); return ENXIO; } /* * Free everything that has been allocated for this device. */ static void sym_pci_free(hcb_p np) { SYM_QUEHEAD *qp; ccb_p cp; tcb_p tp; lcb_p lp; int target, lun; /* * First free CAM resources. */ sym_cam_free(np); /* * Now everything should be quiet for us to * free other resources. */ if (np->ram_res) bus_release_resource(np->device, SYS_RES_MEMORY, np->ram_id, np->ram_res); if (np->mmio_res) bus_release_resource(np->device, SYS_RES_MEMORY, SYM_PCI_MMIO, np->mmio_res); if (np->io_res) bus_release_resource(np->device, SYS_RES_IOPORT, SYM_PCI_IO, np->io_res); if (np->irq_res) bus_release_resource(np->device, SYS_RES_IRQ, 0, np->irq_res); if (np->scriptb0) sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0"); if (np->scripta0) sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0"); if (np->squeue) sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); if (np->dqueue) sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); bus_dmamap_destroy(np->data_dmat, cp->dmamap); sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF"); sym_mfree_dma(cp, sizeof(*cp), "CCB"); } if (np->badluntbl) sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL"); for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { tp = &np->target[target]; for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) { lp = sym_lp(tp, lun); if (!lp) continue; if (lp->itlq_tbl) sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); if (lp->cb_tags) sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK, "CB_TAGS"); sym_mfree_dma(lp, sizeof(*lp), "LCB"); } #if SYM_CONF_MAX_LUN > 1 if (tp->lunmp) sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p), "LUNMP"); #endif } #ifdef __amd64__ if (np->target) sym_mfree_dma(np->target, SYM_CONF_MAX_TARGET * sizeof(*(np->target)), "TARGET"); #endif if (np->targtbl) sym_mfree_dma(np->targtbl, 256, "TARGTBL"); if (np->data_dmat) bus_dma_tag_destroy(np->data_dmat); if (SYM_LOCK_INITIALIZED() != 0) SYM_LOCK_DESTROY(); device_set_softc(np->device, NULL); sym_mfree_dma(np, sizeof(*np), "HCB"); } /* * Allocate CAM resources and register a bus to CAM. */ static int sym_cam_attach(hcb_p np) { struct cam_devq *devq = NULL; struct cam_sim *sim = NULL; struct cam_path *path = NULL; int err; /* * Establish our interrupt handler.
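 *
 * Editor's note: sym_intr is registered as a regular ithread handler
 * (the filter argument of bus_setup_intr() below is NULL), and
 * INTR_MPSAFE tells the kernel it relies on its own lock, np->mtx,
 * rather than Giant.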
*/ err = bus_setup_intr(np->device, np->irq_res, INTR_ENTROPY | INTR_MPSAFE | INTR_TYPE_CAM, NULL, sym_intr, np, &np->intr); if (err) { device_printf(np->device, "bus_setup_intr() failed: %d\n", err); goto fail; } /* * Create the device queue for our sym SIM. */ devq = cam_simq_alloc(SYM_CONF_MAX_START); if (!devq) goto fail; /* * Construct our SIM entry. */ sim = cam_sim_alloc(sym_action, sym_poll, "sym", np, device_get_unit(np->device), &np->mtx, 1, SYM_SETUP_MAX_TAG, devq); if (!sim) goto fail; SYM_LOCK(); if (xpt_bus_register(sim, np->device, 0) != CAM_SUCCESS) goto fail; np->sim = sim; sim = NULL; if (xpt_create_path(&path, NULL, cam_sim_path(np->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { goto fail; } np->path = path; /* * Establish our async notification handler. */ if (xpt_register_async(AC_LOST_DEVICE, sym_async, sim, path) != CAM_REQ_CMP) goto fail; /* * Start the chip now, without resetting the BUS, since * it seems that this must stay under control of CAM. * With LVD/SE capable chips and BUS in SE mode, we may * get a spurious SMBC interrupt. */ sym_init (np, 0); SYM_UNLOCK(); return 1; fail: if (sim) cam_sim_free(sim, FALSE); if (devq) cam_simq_free(devq); SYM_UNLOCK(); sym_cam_free(np); return 0; } /* * Free everything that deals with CAM. */ static void sym_cam_free(hcb_p np) { SYM_LOCK_ASSERT(MA_NOTOWNED); if (np->intr) { bus_teardown_intr(np->device, np->irq_res, np->intr); np->intr = NULL; } SYM_LOCK(); if (np->sim) { xpt_bus_deregister(cam_sim_path(np->sim)); cam_sim_free(np->sim, /*free_devq*/ TRUE); np->sim = NULL; } if (np->path) { xpt_free_path(np->path); np->path = NULL; } SYM_UNLOCK(); } /*============ OPTIONAL NVRAM SUPPORT =================*/ /* * Get host setup from NVRAM. */ static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram) { #ifdef SYM_CONF_NVRAM_SUPPORT /* * Get parity checking, host ID, verbose mode * and miscellaneous host flags from NVRAM. */ switch(nvram->type) { case SYM_SYMBIOS_NVRAM: if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE)) np->rv_scntl0 &= ~0x0a; np->myaddr = nvram->data.Symbios.host_id & 0x0f; if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS) np->verbose += 1; if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO) np->usrflags |= SYM_SCAN_TARGETS_HILO; if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET) np->usrflags |= SYM_AVOID_BUS_RESET; break; case SYM_TEKRAM_NVRAM: np->myaddr = nvram->data.Tekram.host_id & 0x0f; break; default: break; } #endif } /* * Get target setup from NVRAM. */ #ifdef SYM_CONF_NVRAM_SUPPORT static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram); static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram); #endif static void sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp) { #ifdef SYM_CONF_NVRAM_SUPPORT switch(nvp->type) { case SYM_SYMBIOS_NVRAM: sym_Symbios_setup_target (np, target, &nvp->data.Symbios); break; case SYM_TEKRAM_NVRAM: sym_Tekram_setup_target (np, target, &nvp->data.Tekram); break; default: break; } #endif } #ifdef SYM_CONF_NVRAM_SUPPORT /* * Get target set-up from Symbios format NVRAM. */ static void sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram) { tcb_p tp = &np->target[target]; Symbios_target *tn = &nvram->target[target]; tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0; tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT; tp->usrtags = (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)?
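/* TCQ allowed by NVRAM -> start with the default tag depth */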
SYM_SETUP_MAX_TAG : 0; if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE)) tp->usrflags &= ~SYM_DISC_ENABLED; if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME)) tp->usrflags |= SYM_SCAN_BOOT_DISABLED; if (!(tn->flags & SYMBIOS_SCAN_LUNS)) tp->usrflags |= SYM_SCAN_LUNS_DISABLED; } /* * Get target set-up from Tekram format NVRAM. */ static void sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram) { tcb_p tp = &np->target[target]; struct Tekram_target *tn = &nvram->target[target]; int i; if (tn->flags & TEKRAM_SYNC_NEGO) { i = tn->sync_index & 0xf; tp->tinfo.user.period = Tekram_sync[i]; } tp->tinfo.user.width = (tn->flags & TEKRAM_WIDE_NEGO) ? BUS_16_BIT : BUS_8_BIT; if (tn->flags & TEKRAM_TAGGED_COMMANDS) { tp->usrtags = 2 << nvram->max_tags_index; } if (tn->flags & TEKRAM_DISCONNECT_ENABLE) tp->usrflags |= SYM_DISC_ENABLED; /* If any device does not support parity, we will not use this option */ if (!(tn->flags & TEKRAM_PARITY_CHECK)) np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */ } #ifdef SYM_CONF_DEBUG_NVRAM /* * Dump Symbios format NVRAM for debugging purpose. */ static void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram) { int i; /* display Symbios nvram host data */ printf("%s: HOST ID=%d%s%s%s%s%s%s\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"", (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"", (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"", (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"", (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :""); /* display Symbios nvram drive data */ for (i = 0 ; i < 15 ; i++) { struct Symbios_target *tn = &nvram->target[i]; printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n", sym_name(np), i, (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "", (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "", (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "", tn->bus_width, tn->sync_period / 4, tn->timeout); } } /* * Dump TEKRAM format NVRAM for debugging purpose. */ static const u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120}; static void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram) { int i, tags, boot_delay; char *rem; /* display Tekram nvram host data */ tags = 2 << nvram->max_tags_index; boot_delay = 0; if (nvram->boot_delay_index < 6) boot_delay = Tekram_boot_delay[nvram->boot_delay_index]; switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) { default: case 0: rem = ""; break; case 1: rem = " REMOVABLE=boot device"; break; case 2: rem = " REMOVABLE=all"; break; } printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"", (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"", (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"", (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"", (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"", (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"", (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"", rem, boot_delay, tags); /* display Tekram nvram drive data */ for (i = 0; i <= 15; i++) { int sync, j; struct Tekram_target *tn = &nvram->target[i]; j = tn->sync_index & 0xf; sync = Tekram_sync[j]; printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n", sym_name(np), i, (tn->flags & TEKRAM_PARITY_CHECK) ? 
" PARITY" : "", (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "", (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & TEKRAM_START_CMD) ? " START" : "", (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "", (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "", sync); } } #endif /* SYM_CONF_DEBUG_NVRAM */ #endif /* SYM_CONF_NVRAM_SUPPORT */ /* * Try reading Symbios or Tekram NVRAM */ #ifdef SYM_CONF_NVRAM_SUPPORT static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram); static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram); #endif static int sym_read_nvram(hcb_p np, struct sym_nvram *nvp) { #ifdef SYM_CONF_NVRAM_SUPPORT /* * Try to read SYMBIOS nvram. * Try to read TEKRAM nvram if Symbios nvram not found. */ if (SYM_SETUP_SYMBIOS_NVRAM && !sym_read_Symbios_nvram (np, &nvp->data.Symbios)) { nvp->type = SYM_SYMBIOS_NVRAM; #ifdef SYM_CONF_DEBUG_NVRAM sym_display_Symbios_nvram(np, &nvp->data.Symbios); #endif } else if (SYM_SETUP_TEKRAM_NVRAM && !sym_read_Tekram_nvram (np, &nvp->data.Tekram)) { nvp->type = SYM_TEKRAM_NVRAM; #ifdef SYM_CONF_DEBUG_NVRAM sym_display_Tekram_nvram(np, &nvp->data.Tekram); #endif } else nvp->type = 0; #else nvp->type = 0; #endif return nvp->type; } #ifdef SYM_CONF_NVRAM_SUPPORT /* * 24C16 EEPROM reading. * * GPOI0 - data in/data out * GPIO1 - clock * Symbios NVRAM wiring now also used by Tekram. */ #define SET_BIT 0 #define CLR_BIT 1 #define SET_CLK 2 #define CLR_CLK 3 /* * Set/clear data/clock bit in GPIO0 */ static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg, int bit_mode) { UDELAY (5); switch (bit_mode){ case SET_BIT: *gpreg |= write_bit; break; case CLR_BIT: *gpreg &= 0xfe; break; case SET_CLK: *gpreg |= 0x02; break; case CLR_CLK: *gpreg &= 0xfd; break; } OUTB (nc_gpreg, *gpreg); UDELAY (5); } /* * Send START condition to NVRAM to wake it up. */ static void S24C16_start(hcb_p np, u_char *gpreg) { S24C16_set_bit(np, 1, gpreg, SET_BIT); S24C16_set_bit(np, 0, gpreg, SET_CLK); S24C16_set_bit(np, 0, gpreg, CLR_BIT); S24C16_set_bit(np, 0, gpreg, CLR_CLK); } /* * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!! 
*/ static void S24C16_stop(hcb_p np, u_char *gpreg) { S24C16_set_bit(np, 0, gpreg, SET_CLK); S24C16_set_bit(np, 1, gpreg, SET_BIT); } /* * Read or write a bit to the NVRAM, * read if GPIO0 input else write if GPIO0 output */ static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit, u_char *gpreg) { S24C16_set_bit(np, write_bit, gpreg, SET_BIT); S24C16_set_bit(np, 0, gpreg, SET_CLK); if (read_bit) *read_bit = INB (nc_gpreg); S24C16_set_bit(np, 0, gpreg, CLR_CLK); S24C16_set_bit(np, 0, gpreg, CLR_BIT); } /* * Output an ACK to the NVRAM after reading, * change GPIO0 to output and when done back to an input */ static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg, u_char *gpcntl) { OUTB (nc_gpcntl, *gpcntl & 0xfe); S24C16_do_bit(np, 0, write_bit, gpreg); OUTB (nc_gpcntl, *gpcntl); } /* * Input an ACK from NVRAM after writing, * change GPIO0 to input and when done back to an output */ static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg, u_char *gpcntl) { OUTB (nc_gpcntl, *gpcntl | 0x01); S24C16_do_bit(np, read_bit, 1, gpreg); OUTB (nc_gpcntl, *gpcntl); } /* * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK, * GPIO0 must already be set as an output */ static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data, u_char *gpreg, u_char *gpcntl) { int x; for (x = 0; x < 8; x++) S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg); S24C16_read_ack(np, ack_data, gpreg, gpcntl); } /* * READ a byte from the NVRAM and then send an ACK to say we have got it, * GPIO0 must already be set as an input */ static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data, u_char *gpreg, u_char *gpcntl) { int x; u_char read_bit; *read_data = 0; for (x = 0; x < 8; x++) { S24C16_do_bit(np, &read_bit, 1, gpreg); *read_data |= ((read_bit & 0x01) << (7 - x)); } S24C16_write_ack(np, ack_data, gpreg, gpcntl); } /* * Read 'len' bytes starting at 'offset'. 
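 *
 * Editor's note: a 24C16 holds 2 KB addressed by 11 bits. The three
 * high address bits travel inside the device-select byte - hence the
 * 0xa0 | ((offset >> 7) & 0x0e) below, which places offset bits 10..8
 * into byte bits 3..1 - the low 8 bits follow in the address byte,
 * and bit 0 of the select byte set (0xa1) requests a read.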
*/ static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; u_char ack_data; int retv = 1; int x; /* save current state of GPCNTL and GPREG */ old_gpreg = INB (nc_gpreg); old_gpcntl = INB (nc_gpcntl); gpcntl = old_gpcntl & 0x1c; /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */ OUTB (nc_gpreg, old_gpreg); OUTB (nc_gpcntl, gpcntl); /* this is to set NVRAM into a known state with GPIO0/1 both low */ gpreg = old_gpreg; S24C16_set_bit(np, 0, &gpreg, CLR_CLK); S24C16_set_bit(np, 0, &gpreg, CLR_BIT); /* now set NVRAM inactive with GPIO0/1 both high */ S24C16_stop(np, &gpreg); /* activate NVRAM */ S24C16_start(np, &gpreg); /* write device code and random address MSB */ S24C16_write_byte(np, &ack_data, 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); if (ack_data & 0x01) goto out; /* write random address LSB */ S24C16_write_byte(np, &ack_data, offset & 0xff, &gpreg, &gpcntl); if (ack_data & 0x01) goto out; /* regenerate START state to set up for reading */ S24C16_start(np, &gpreg); /* rewrite device code and address MSB with read bit set (lsb = 0x01) */ S24C16_write_byte(np, &ack_data, 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); if (ack_data & 0x01) goto out; /* now set up GPIO0 for inputting data */ gpcntl |= 0x01; OUTB (nc_gpcntl, gpcntl); /* input all requested data - only part of total NVRAM */ for (x = 0; x < len; x++) S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl); /* finally put NVRAM back in inactive mode */ gpcntl &= 0xfe; OUTB (nc_gpcntl, gpcntl); S24C16_stop(np, &gpreg); retv = 0; out: /* return GPIO0/1 to original states after having accessed NVRAM */ OUTB (nc_gpcntl, old_gpcntl); OUTB (nc_gpreg, old_gpreg); return retv; } #undef SET_BIT /* 0 */ #undef CLR_BIT /* 1 */ #undef SET_CLK /* 2 */ #undef CLR_CLK /* 3 */ /* * Try reading Symbios NVRAM. * Return 0 if OK. */ static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram) { static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0}; u_char *data = (u_char *) nvram; int len = sizeof(*nvram); u_short csum; int x; /* probe the 24c16 and read the SYMBIOS 24c16 area */ if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len)) return 1; /* check valid NVRAM signature, verify byte count and checksum */ if (nvram->type != 0 || bcmp(nvram->trailer, Symbios_trailer, 6) || nvram->byte_count != len - 12) return 1; /* verify checksum */ for (x = 6, csum = 0; x < len - 6; x++) csum += data[x]; if (csum != nvram->checksum) return 1; return 0; } /* * 93C46 EEPROM reading. * * GPIO0 - data in * GPIO1 - data out * GPIO2 - clock * GPIO4 - chip select * * Used by Tekram. */ /* * Pulse clock bit in GPIO2 */ static void T93C46_Clk(hcb_p np, u_char *gpreg) { OUTB (nc_gpreg, *gpreg | 0x04); UDELAY (2); OUTB (nc_gpreg, *gpreg); } /* * Read bit from NVRAM */ static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg) { UDELAY (2); T93C46_Clk(np, gpreg); *read_bit = INB (nc_gpreg); } /* * Write bit to GPIO1 */ static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg) { if (write_bit & 0x01) *gpreg |= 0x02; else *gpreg &= 0xfd; *gpreg |= 0x10; OUTB (nc_gpreg, *gpreg); UDELAY (2); T93C46_Clk(np, gpreg); } /* * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
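 *
 * Editor's note: unlike the 24C16, the 93C46 speaks Microwire: every
 * READ is a 9-bit command - start bit (1), opcode (10), 6-bit word
 * address, hence the 0x180 | x sent below - answered by one 16-bit
 * data word, and the whole part is read as 64 such words.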
*/ static void T93C46_Stop(hcb_p np, u_char *gpreg) { *gpreg &= 0xef; OUTB (nc_gpreg, *gpreg); UDELAY (2); T93C46_Clk(np, gpreg); } /* * Send read command and address to NVRAM */ static void T93C46_Send_Command(hcb_p np, u_short write_data, u_char *read_bit, u_char *gpreg) { int x; /* send 9 bits, start bit (1), command (2), address (6) */ for (x = 0; x < 9; x++) T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg); *read_bit = INB (nc_gpreg); } /* * READ 2 bytes from the NVRAM */ static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg) { int x; u_char read_bit; *nvram_data = 0; for (x = 0; x < 16; x++) { T93C46_Read_Bit(np, &read_bit, gpreg); if (read_bit & 0x01) *nvram_data |= (0x01 << (15 - x)); else *nvram_data &= ~(0x01 << (15 - x)); } } /* * Read Tekram NvRAM data. */ static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg) { u_char read_bit; int x; for (x = 0; x < len; x++) { /* output read command and address */ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg); if (read_bit & 0x01) return 1; /* Bad */ T93C46_Read_Word(np, &data[x], gpreg); T93C46_Stop(np, gpreg); } return 0; } /* * Try reading 93C46 Tekram NVRAM. */ static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; int retv = 1; /* save current state of GPCNTL and GPREG */ old_gpreg = INB (nc_gpreg); old_gpcntl = INB (nc_gpcntl); /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in, 1/2/4 out */ gpreg = old_gpreg & 0xe9; OUTB (nc_gpreg, gpreg); gpcntl = (old_gpcntl & 0xe9) | 0x09; OUTB (nc_gpcntl, gpcntl); /* input all of NVRAM, 64 words */ retv = T93C46_Read_Data(np, (u_short *) nvram, sizeof(*nvram) / sizeof(short), &gpreg); /* return GPIO0/1/2/4 to original states after having accessed NVRAM */ OUTB (nc_gpcntl, old_gpcntl); OUTB (nc_gpreg, old_gpreg); return retv; } /* * Try reading Tekram NVRAM. * Return 0 if OK. */ static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram) { u_char *data = (u_char *) nvram; int len = sizeof(*nvram); u_short csum; int x; switch (np->device_id) { case PCI_ID_SYM53C885: case PCI_ID_SYM53C895: case PCI_ID_SYM53C896: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); break; case PCI_ID_SYM53C875: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); if (!x) break; default: x = sym_read_T93C46_nvram(np, nvram); break; } if (x) return 1; /* verify checksum */ for (x = 0, csum = 0; x < len - 1; x += 2) csum += data[x] + (data[x+1] << 8); if (csum != 0x1234) return 1; return 0; } #endif /* SYM_CONF_NVRAM_SUPPORT */ diff --git a/sys/dev/usb/controller/ehci.c b/sys/dev/usb/controller/ehci.c index eb4ccb9b5ca2..7f9547ca6abe 100644 --- a/sys/dev/usb/controller/ehci.c +++ b/sys/dev/usb/controller/ehci.c @@ -1,3974 +1,3973 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. * Copyright (c) 2004 The NetBSD Foundation, Inc. All rights reserved. * Copyright (c) 2004 Lennart Augustsson. All rights reserved. * Copyright (c) 2004 Charles M. Hannum. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * USB Enhanced Host Controller Driver, a.k.a. USB 2.0 controller. * * The EHCI 0.96 spec can be found at * http://developer.intel.com/technology/usb/download/ehci-r096.pdf * The EHCI 1.0 spec can be found at * http://developer.intel.com/technology/usb/download/ehci-r10.pdf * and the USB 2.0 spec at * http://www.usb.org/developers/docs/usb_20.zip * */ /* * TODO: * 1) command failures are not recovered correctly */ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR ehcidebug #include #include #include #include #include #include #include #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ #include #include #define EHCI_BUS2SC(bus) \ ((ehci_softc_t *)(((uint8_t *)(bus)) - \ ((uint8_t *)&(((ehci_softc_t *)0)->sc_bus)))) #ifdef USB_DEBUG static int ehcidebug = 0; static int ehcinohighspeed = 0; static int ehciiaadbug = 0; static int ehcilostintrbug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, ehci, CTLFLAG_RW, 0, "USB ehci"); SYSCTL_INT(_hw_usb_ehci, OID_AUTO, debug, CTLFLAG_RWTUN, &ehcidebug, 0, "Debug level"); SYSCTL_INT(_hw_usb_ehci, OID_AUTO, no_hs, CTLFLAG_RWTUN, &ehcinohighspeed, 0, "Disable High Speed USB"); SYSCTL_INT(_hw_usb_ehci, OID_AUTO, iaadbug, CTLFLAG_RWTUN, &ehciiaadbug, 0, "Enable doorbell bug workaround"); SYSCTL_INT(_hw_usb_ehci, OID_AUTO, lostintrbug, CTLFLAG_RWTUN, &ehcilostintrbug, 0, "Enable lost interrupt bug workaround"); static void ehci_dump_regs(ehci_softc_t *sc); static void ehci_dump_sqh(ehci_softc_t *sc, ehci_qh_t *sqh); #endif #define EHCI_INTR_ENDPT 1 static const struct usb_bus_methods ehci_bus_methods; static const struct usb_pipe_methods ehci_device_bulk_methods; static const struct usb_pipe_methods ehci_device_ctrl_methods; static const struct usb_pipe_methods ehci_device_intr_methods; static const struct usb_pipe_methods ehci_device_isoc_fs_methods; static const struct usb_pipe_methods ehci_device_isoc_hs_methods; static void ehci_do_poll(struct usb_bus *); static void ehci_device_done(struct usb_xfer *, usb_error_t); static uint8_t ehci_check_transfer(struct usb_xfer *); static void ehci_timeout(void *); static void ehci_poll_timeout(void *); static void ehci_root_intr(ehci_softc_t *sc); struct ehci_std_temp { ehci_softc_t *sc; struct usb_page_cache *pc; ehci_qtd_t *td; ehci_qtd_t *td_next; uint32_t average; uint32_t qtd_status; uint32_t len; uint16_t max_frame_size; uint8_t 
shortpkt; uint8_t auto_data_toggle; uint8_t setup_alt_next; uint8_t last_frame; }; void ehci_iterate_hw_softc(struct usb_bus *bus, usb_bus_mem_sub_cb_t *cb) { ehci_softc_t *sc = EHCI_BUS2SC(bus); uint32_t i; cb(bus, &sc->sc_hw.pframes_pc, &sc->sc_hw.pframes_pg, sizeof(uint32_t) * EHCI_FRAMELIST_COUNT, EHCI_FRAMELIST_ALIGN); cb(bus, &sc->sc_hw.terminate_pc, &sc->sc_hw.terminate_pg, sizeof(struct ehci_qh_sub), EHCI_QH_ALIGN); cb(bus, &sc->sc_hw.async_start_pc, &sc->sc_hw.async_start_pg, sizeof(ehci_qh_t), EHCI_QH_ALIGN); for (i = 0; i != EHCI_VIRTUAL_FRAMELIST_COUNT; i++) { cb(bus, sc->sc_hw.intr_start_pc + i, sc->sc_hw.intr_start_pg + i, sizeof(ehci_qh_t), EHCI_QH_ALIGN); } for (i = 0; i != EHCI_VIRTUAL_FRAMELIST_COUNT; i++) { cb(bus, sc->sc_hw.isoc_hs_start_pc + i, sc->sc_hw.isoc_hs_start_pg + i, sizeof(ehci_itd_t), EHCI_ITD_ALIGN); } for (i = 0; i != EHCI_VIRTUAL_FRAMELIST_COUNT; i++) { cb(bus, sc->sc_hw.isoc_fs_start_pc + i, sc->sc_hw.isoc_fs_start_pg + i, sizeof(ehci_sitd_t), EHCI_SITD_ALIGN); } } usb_error_t ehci_reset(ehci_softc_t *sc) { uint32_t hcr; int i; EOWRITE4(sc, EHCI_USBCMD, EHCI_CMD_HCRESET); for (i = 0; i < 100; i++) { usb_pause_mtx(NULL, hz / 128); hcr = EOREAD4(sc, EHCI_USBCMD) & EHCI_CMD_HCRESET; if (!hcr) { if (sc->sc_vendor_post_reset != NULL) sc->sc_vendor_post_reset(sc); return (0); } } device_printf(sc->sc_bus.bdev, "reset timeout\n"); return (USB_ERR_IOERROR); } static usb_error_t ehci_hcreset(ehci_softc_t *sc) { uint32_t hcr; int i; EOWRITE4(sc, EHCI_USBCMD, 0); /* Halt controller */ for (i = 0; i < 100; i++) { usb_pause_mtx(NULL, hz / 128); hcr = EOREAD4(sc, EHCI_USBSTS) & EHCI_STS_HCH; if (hcr) break; } if (!hcr) /* * Fall through and try reset anyway even though * Table 2-9 in the EHCI spec says this will result * in undefined behavior. 
*/ device_printf(sc->sc_bus.bdev, "stop timeout\n"); return (ehci_reset(sc)); } static int ehci_init_sub(struct ehci_softc *sc) { struct usb_page_search buf_res; uint32_t cparams; uint32_t hcr; uint8_t i; cparams = EREAD4(sc, EHCI_HCCPARAMS); DPRINTF("cparams=0x%x\n", cparams); if (EHCI_HCC_64BIT(cparams)) { DPRINTF("HCC uses 64-bit structures\n"); /* MUST clear segment register if 64 bit capable */ EOWRITE4(sc, EHCI_CTRLDSSEGMENT, 0); } usbd_get_page(&sc->sc_hw.pframes_pc, 0, &buf_res); EOWRITE4(sc, EHCI_PERIODICLISTBASE, buf_res.physaddr); usbd_get_page(&sc->sc_hw.async_start_pc, 0, &buf_res); EOWRITE4(sc, EHCI_ASYNCLISTADDR, buf_res.physaddr | EHCI_LINK_QH); /* enable interrupts */ EOWRITE4(sc, EHCI_USBINTR, sc->sc_eintrs); /* turn on controller */ EOWRITE4(sc, EHCI_USBCMD, EHCI_CMD_ITC_1 | /* 1 microframes interrupt delay */ (EOREAD4(sc, EHCI_USBCMD) & EHCI_CMD_FLS_M) | EHCI_CMD_ASE | EHCI_CMD_PSE | EHCI_CMD_RS); /* Take over port ownership */ EOWRITE4(sc, EHCI_CONFIGFLAG, EHCI_CONF_CF); for (i = 0; i < 100; i++) { usb_pause_mtx(NULL, hz / 128); hcr = EOREAD4(sc, EHCI_USBSTS) & EHCI_STS_HCH; if (!hcr) { break; } } if (hcr) { device_printf(sc->sc_bus.bdev, "run timeout\n"); return (USB_ERR_IOERROR); } return (USB_ERR_NORMAL_COMPLETION); } usb_error_t ehci_init(ehci_softc_t *sc) { struct usb_page_search buf_res; uint32_t version; uint32_t sparams; uint16_t i; uint16_t x; uint16_t y; uint16_t bit; usb_error_t err = 0; DPRINTF("start\n"); usb_callout_init_mtx(&sc->sc_tmo_pcd, &sc->sc_bus.bus_mtx, 0); usb_callout_init_mtx(&sc->sc_tmo_poll, &sc->sc_bus.bus_mtx, 0); sc->sc_offs = EHCI_CAPLENGTH(EREAD4(sc, EHCI_CAPLEN_HCIVERSION)); #ifdef USB_DEBUG if (ehciiaadbug) sc->sc_flags |= EHCI_SCFLG_IAADBUG; if (ehcilostintrbug) sc->sc_flags |= EHCI_SCFLG_LOSTINTRBUG; if (ehcidebug > 2) { ehci_dump_regs(sc); } #endif version = EHCI_HCIVERSION(EREAD4(sc, EHCI_CAPLEN_HCIVERSION)); device_printf(sc->sc_bus.bdev, "EHCI version %x.%x\n", version >> 8, version & 0xff); sparams = EREAD4(sc, EHCI_HCSPARAMS); DPRINTF("sparams=0x%x\n", sparams); sc->sc_noport = EHCI_HCS_N_PORTS(sparams); sc->sc_bus.usbrev = USB_REV_2_0; if (!(sc->sc_flags & EHCI_SCFLG_DONTRESET)) { /* Reset the controller */ DPRINTF("%s: resetting\n", device_get_nameunit(sc->sc_bus.bdev)); err = ehci_hcreset(sc); if (err) { device_printf(sc->sc_bus.bdev, "reset timeout\n"); return (err); } } /* * use current frame-list-size selection 0: 1024*4 bytes 1: 512*4 * bytes 2: 256*4 bytes 3: unknown */ if (EHCI_CMD_FLS(EOREAD4(sc, EHCI_USBCMD)) == 3) { device_printf(sc->sc_bus.bdev, "invalid frame-list-size\n"); return (USB_ERR_IOERROR); } /* set up the bus struct */ sc->sc_bus.methods = &ehci_bus_methods; sc->sc_eintrs = EHCI_NORMAL_INTRS; if (1) { struct ehci_qh_sub *qh; usbd_get_page(&sc->sc_hw.terminate_pc, 0, &buf_res); qh = buf_res.buffer; sc->sc_terminate_self = htohc32(sc, buf_res.physaddr); /* init terminate TD */ qh->qtd_next = htohc32(sc, EHCI_LINK_TERMINATE); qh->qtd_altnext = htohc32(sc, EHCI_LINK_TERMINATE); qh->qtd_status = htohc32(sc, EHCI_QTD_HALTED); } for (i = 0; i < EHCI_VIRTUAL_FRAMELIST_COUNT; i++) { ehci_qh_t *qh; usbd_get_page(sc->sc_hw.intr_start_pc + i, 0, &buf_res); qh = buf_res.buffer; /* initialize page cache pointer */ qh->page_cache = sc->sc_hw.intr_start_pc + i; /* store a pointer to queue head */ sc->sc_intr_p_last[i] = qh; qh->qh_self = htohc32(sc, buf_res.physaddr) | htohc32(sc, EHCI_LINK_QH); qh->qh_endp = htohc32(sc, EHCI_QH_SET_EPS(EHCI_QH_SPEED_HIGH)); qh->qh_endphub = htohc32(sc, EHCI_QH_SET_MULT(1)); 
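/*
 * Editor's note: the frame-list-size check in ehci_init() above follows
 * the EHCI USBCMD.FLS encoding: code 0 selects 1024 periodic frame list
 * entries, 1 selects 512, 2 selects 256 (4 bytes each) and 3 is
 * reserved, hence the error return.  A small decoding sketch
 * (fls_to_bytes() is a hypothetical helper, not driver code):
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static size_t
 *	fls_to_bytes(uint32_t fls)
 *	{
 *		// 1024 >> 0 = 1024, 1024 >> 1 = 512, 1024 >> 2 = 256
 *		return ((fls > 2) ? 0 : ((size_t)1024 >> fls) * 4);
 *	}
 *
 * fls_to_bytes(0) == 4096, matching the pframes allocation of
 * sizeof(uint32_t) * EHCI_FRAMELIST_COUNT in ehci_iterate_hw_softc().
 */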
qh->qh_curqtd = 0; qh->qh_qtd.qtd_next = htohc32(sc, EHCI_LINK_TERMINATE); qh->qh_qtd.qtd_altnext = htohc32(sc, EHCI_LINK_TERMINATE); qh->qh_qtd.qtd_status = htohc32(sc, EHCI_QTD_HALTED); } /* * the QHs are arranged to give poll intervals that are * powers of 2 times 1ms */ bit = EHCI_VIRTUAL_FRAMELIST_COUNT / 2; while (bit) { x = bit; while (x & bit) { ehci_qh_t *qh_x; ehci_qh_t *qh_y; y = (x ^ bit) | (bit / 2); qh_x = sc->sc_intr_p_last[x]; qh_y = sc->sc_intr_p_last[y]; /* * the next QH has half the poll interval */ qh_x->qh_link = qh_y->qh_self; x++; } bit >>= 1; } if (1) { ehci_qh_t *qh; qh = sc->sc_intr_p_last[0]; /* the last (1ms) QH terminates */ qh->qh_link = htohc32(sc, EHCI_LINK_TERMINATE); } for (i = 0; i < EHCI_VIRTUAL_FRAMELIST_COUNT; i++) { ehci_sitd_t *sitd; ehci_itd_t *itd; usbd_get_page(sc->sc_hw.isoc_fs_start_pc + i, 0, &buf_res); sitd = buf_res.buffer; /* initialize page cache pointer */ sitd->page_cache = sc->sc_hw.isoc_fs_start_pc + i; /* store a pointer to the transfer descriptor */ sc->sc_isoc_fs_p_last[i] = sitd; /* initialize full speed isochronous */ sitd->sitd_self = htohc32(sc, buf_res.physaddr) | htohc32(sc, EHCI_LINK_SITD); sitd->sitd_back = htohc32(sc, EHCI_LINK_TERMINATE); sitd->sitd_next = sc->sc_intr_p_last[i | (EHCI_VIRTUAL_FRAMELIST_COUNT / 2)]->qh_self; usbd_get_page(sc->sc_hw.isoc_hs_start_pc + i, 0, &buf_res); itd = buf_res.buffer; /* initialize page cache pointer */ itd->page_cache = sc->sc_hw.isoc_hs_start_pc + i; /* store a pointer to the transfer descriptor */ sc->sc_isoc_hs_p_last[i] = itd; /* initialize high speed isochronous */ itd->itd_self = htohc32(sc, buf_res.physaddr) | htohc32(sc, EHCI_LINK_ITD); itd->itd_next = sitd->sitd_self; } usbd_get_page(&sc->sc_hw.pframes_pc, 0, &buf_res); if (1) { uint32_t *pframes; pframes = buf_res.buffer; /* * execution order: * pframes -> high speed isochronous -> * full speed isochronous -> interrupt QH's */ for (i = 0; i < EHCI_FRAMELIST_COUNT; i++) { pframes[i] = sc->sc_isoc_hs_p_last [i & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1)]->itd_self; } } usbd_get_page(&sc->sc_hw.async_start_pc, 0, &buf_res); if (1) { ehci_qh_t *qh; qh = buf_res.buffer; /* initialize page cache pointer */ qh->page_cache = &sc->sc_hw.async_start_pc; /* store a pointer to the queue head */ sc->sc_async_p_last = qh; /* init dummy QH that starts the async list */ qh->qh_self = htohc32(sc, buf_res.physaddr) | htohc32(sc, EHCI_LINK_QH); /* fill the QH */ qh->qh_endp = htohc32(sc, EHCI_QH_SET_EPS(EHCI_QH_SPEED_HIGH) | EHCI_QH_HRECL); qh->qh_endphub = htohc32(sc, EHCI_QH_SET_MULT(1)); qh->qh_link = qh->qh_self; qh->qh_curqtd = 0; /* fill the overlay qTD */ qh->qh_qtd.qtd_next = htohc32(sc, EHCI_LINK_TERMINATE); qh->qh_qtd.qtd_altnext = htohc32(sc, EHCI_LINK_TERMINATE); qh->qh_qtd.qtd_status = htohc32(sc, EHCI_QTD_HALTED); } /* flush all cache into memory */ usb_bus_mem_flush_all(&sc->sc_bus, &ehci_iterate_hw_softc); #ifdef USB_DEBUG if (ehcidebug) { ehci_dump_sqh(sc, sc->sc_async_p_last); } #endif /* finial setup */ err = ehci_init_sub(sc); if (!err) { /* catch any lost interrupts */ ehci_do_poll(&sc->sc_bus); } return (err); } /* * shut down the controller when the system is going down */ void ehci_detach(ehci_softc_t *sc) { USB_BUS_LOCK(&sc->sc_bus); usb_callout_stop(&sc->sc_tmo_pcd); usb_callout_stop(&sc->sc_tmo_poll); EOWRITE4(sc, EHCI_USBINTR, 0); USB_BUS_UNLOCK(&sc->sc_bus); if (ehci_hcreset(sc)) { DPRINTF("reset failed!\n"); } /* XXX let stray task complete */ usb_pause_mtx(NULL, hz / 20); usb_callout_drain(&sc->sc_tmo_pcd); 
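/*
 * Editor's note: the "bit" loop above wires the interrupt queue heads
 * into a binary tree where following qh_link halves the polling
 * interval until the 1 ms queue head (slot 0) terminates the chain.
 * A standalone sketch that prints the same topology for 8 slots
 * (SLOTS stands in for EHCI_VIRTUAL_FRAMELIST_COUNT):
 *
 *	#include <stdio.h>
 *
 *	#define SLOTS 8
 *
 *	int main(void) {
 *		unsigned x, y, bit;
 *
 *		for (bit = SLOTS / 2; bit != 0; bit >>= 1)
 *			for (x = bit; (x & bit) != 0; x++) {
 *				y = (x ^ bit) | (bit / 2);
 *				printf("QH[%u] -> QH[%u]\n", x, y);
 *			}
 *		return (0);
 *	}
 *
 * Output: QH[4] and QH[6] feed QH[2]; QH[5] and QH[7] feed QH[3];
 * QH[2] and QH[3] feed QH[1]; QH[1] feeds the terminating QH[0].
 */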
	usb_callout_drain(&sc->sc_tmo_poll);
}

static void
ehci_suspend(ehci_softc_t *sc)
{
	DPRINTF("stopping the HC\n");

	/* reset HC */
	ehci_hcreset(sc);
}

static void
ehci_resume(ehci_softc_t *sc)
{
	/* reset HC */
	ehci_hcreset(sc);

	/* setup HC */
	ehci_init_sub(sc);

	/* catch any lost interrupts */
	ehci_do_poll(&sc->sc_bus);
}

#ifdef USB_DEBUG
static void
ehci_dump_regs(ehci_softc_t *sc)
{
	uint32_t i;

	i = EOREAD4(sc, EHCI_USBCMD);
	printf("cmd=0x%08x\n", i);
	if (i & EHCI_CMD_ITC_1)
		printf(" EHCI_CMD_ITC_1\n");
	if (i & EHCI_CMD_ITC_2)
		printf(" EHCI_CMD_ITC_2\n");
	if (i & EHCI_CMD_ITC_4)
		printf(" EHCI_CMD_ITC_4\n");
	if (i & EHCI_CMD_ITC_8)
		printf(" EHCI_CMD_ITC_8\n");
	if (i & EHCI_CMD_ITC_16)
		printf(" EHCI_CMD_ITC_16\n");
	if (i & EHCI_CMD_ITC_32)
		printf(" EHCI_CMD_ITC_32\n");
	if (i & EHCI_CMD_ITC_64)
		printf(" EHCI_CMD_ITC_64\n");
	if (i & EHCI_CMD_ASPME)
		printf(" EHCI_CMD_ASPME\n");
	if (i & EHCI_CMD_ASPMC)
		printf(" EHCI_CMD_ASPMC\n");
	if (i & EHCI_CMD_LHCR)
		printf(" EHCI_CMD_LHCR\n");
	if (i & EHCI_CMD_IAAD)
		printf(" EHCI_CMD_IAAD\n");
	if (i & EHCI_CMD_ASE)
		printf(" EHCI_CMD_ASE\n");
	if (i & EHCI_CMD_PSE)
		printf(" EHCI_CMD_PSE\n");
	if (i & EHCI_CMD_FLS_M)
		printf(" EHCI_CMD_FLS_M\n");
	if (i & EHCI_CMD_HCRESET)
		printf(" EHCI_CMD_HCRESET\n");
	if (i & EHCI_CMD_RS)
		printf(" EHCI_CMD_RS\n");

	i = EOREAD4(sc, EHCI_USBSTS);
	printf("sts=0x%08x\n", i);
	if (i & EHCI_STS_ASS)
		printf(" EHCI_STS_ASS\n");
	if (i & EHCI_STS_PSS)
		printf(" EHCI_STS_PSS\n");
	if (i & EHCI_STS_REC)
		printf(" EHCI_STS_REC\n");
	if (i & EHCI_STS_HCH)
		printf(" EHCI_STS_HCH\n");
	if (i & EHCI_STS_IAA)
		printf(" EHCI_STS_IAA\n");
	if (i & EHCI_STS_HSE)
		printf(" EHCI_STS_HSE\n");
	if (i & EHCI_STS_FLR)
		printf(" EHCI_STS_FLR\n");
	if (i & EHCI_STS_PCD)
		printf(" EHCI_STS_PCD\n");
	if (i & EHCI_STS_ERRINT)
		printf(" EHCI_STS_ERRINT\n");
	if (i & EHCI_STS_INT)
		printf(" EHCI_STS_INT\n");

	printf("ien=0x%08x\n", EOREAD4(sc, EHCI_USBINTR));
	printf("frindex=0x%08x ctrdsegm=0x%08x periodic=0x%08x async=0x%08x\n",
	    EOREAD4(sc, EHCI_FRINDEX), EOREAD4(sc, EHCI_CTRLDSSEGMENT),
	    EOREAD4(sc, EHCI_PERIODICLISTBASE),
	    EOREAD4(sc, EHCI_ASYNCLISTADDR));
	for (i = 1; i <= sc->sc_noport; i++) {
		printf("port %d status=0x%08x\n", i,
		    EOREAD4(sc, EHCI_PORTSC(i)));
	}
}

static void
ehci_dump_link(ehci_softc_t *sc, uint32_t link, int type)
{
	link = hc32toh(sc, link);
	printf("0x%08x", link);
	if (link & EHCI_LINK_TERMINATE)
		printf("<T>");
	else {
		printf("<");
		if (type) {
			switch (EHCI_LINK_TYPE(link)) {
			case EHCI_LINK_ITD:
				printf("ITD");
				break;
			case EHCI_LINK_QH:
				printf("QH");
				break;
			case EHCI_LINK_SITD:
				printf("SITD");
				break;
			case EHCI_LINK_FSTN:
				printf("FSTN");
				break;
			}
		}
		printf(">");
	}
}

static void
ehci_dump_qtd(ehci_softc_t *sc, ehci_qtd_t *qtd)
{
	uint32_t s;

	printf(" next=");
	ehci_dump_link(sc, qtd->qtd_next, 0);
	printf(" altnext=");
	ehci_dump_link(sc, qtd->qtd_altnext, 0);
	printf("\n");
	s = hc32toh(sc, qtd->qtd_status);
	printf(" status=0x%08x: toggle=%d bytes=0x%x ioc=%d c_page=0x%x\n",
	    s, EHCI_QTD_GET_TOGGLE(s), EHCI_QTD_GET_BYTES(s),
	    EHCI_QTD_GET_IOC(s), EHCI_QTD_GET_C_PAGE(s));
	printf(" cerr=%d pid=%d stat=%s%s%s%s%s%s%s%s\n",
	    EHCI_QTD_GET_CERR(s), EHCI_QTD_GET_PID(s),
	    (s & EHCI_QTD_ACTIVE) ? "ACTIVE" : "NOT_ACTIVE",
	    (s & EHCI_QTD_HALTED) ? "-HALTED" : "",
	    (s & EHCI_QTD_BUFERR) ? "-BUFERR" : "",
	    (s & EHCI_QTD_BABBLE) ? "-BABBLE" : "",
	    (s & EHCI_QTD_XACTERR) ? "-XACTERR" : "",
	    (s & EHCI_QTD_MISSEDMICRO) ? "-MISSED" : "",
	    (s & EHCI_QTD_SPLITXSTATE) ? "-SPLIT" : "",
	    (s & EHCI_QTD_PINGSTATE) ?
"-PING" : ""); for (s = 0; s < 5; s++) { printf(" buffer[%d]=0x%08x\n", s, hc32toh(sc, qtd->qtd_buffer[s])); } for (s = 0; s < 5; s++) { printf(" buffer_hi[%d]=0x%08x\n", s, hc32toh(sc, qtd->qtd_buffer_hi[s])); } } static uint8_t ehci_dump_sqtd(ehci_softc_t *sc, ehci_qtd_t *sqtd) { uint8_t temp; usb_pc_cpu_invalidate(sqtd->page_cache); printf("QTD(%p) at 0x%08x:\n", sqtd, hc32toh(sc, sqtd->qtd_self)); ehci_dump_qtd(sc, sqtd); temp = (sqtd->qtd_next & htohc32(sc, EHCI_LINK_TERMINATE)) ? 1 : 0; return (temp); } static void ehci_dump_sqtds(ehci_softc_t *sc, ehci_qtd_t *sqtd) { uint16_t i; uint8_t stop; stop = 0; for (i = 0; sqtd && (i < 20) && !stop; sqtd = sqtd->obj_next, i++) { stop = ehci_dump_sqtd(sc, sqtd); } if (sqtd) { printf("dump aborted, too many TDs\n"); } } static void ehci_dump_sqh(ehci_softc_t *sc, ehci_qh_t *qh) { uint32_t endp; uint32_t endphub; usb_pc_cpu_invalidate(qh->page_cache); printf("QH(%p) at 0x%08x:\n", qh, hc32toh(sc, qh->qh_self) & ~0x1F); printf(" link="); ehci_dump_link(sc, qh->qh_link, 1); printf("\n"); endp = hc32toh(sc, qh->qh_endp); printf(" endp=0x%08x\n", endp); printf(" addr=0x%02x inact=%d endpt=%d eps=%d dtc=%d hrecl=%d\n", EHCI_QH_GET_ADDR(endp), EHCI_QH_GET_INACT(endp), EHCI_QH_GET_ENDPT(endp), EHCI_QH_GET_EPS(endp), EHCI_QH_GET_DTC(endp), EHCI_QH_GET_HRECL(endp)); printf(" mpl=0x%x ctl=%d nrl=%d\n", EHCI_QH_GET_MPL(endp), EHCI_QH_GET_CTL(endp), EHCI_QH_GET_NRL(endp)); endphub = hc32toh(sc, qh->qh_endphub); printf(" endphub=0x%08x\n", endphub); printf(" smask=0x%02x cmask=0x%02x huba=0x%02x port=%d mult=%d\n", EHCI_QH_GET_SMASK(endphub), EHCI_QH_GET_CMASK(endphub), EHCI_QH_GET_HUBA(endphub), EHCI_QH_GET_PORT(endphub), EHCI_QH_GET_MULT(endphub)); printf(" curqtd="); ehci_dump_link(sc, qh->qh_curqtd, 0); printf("\n"); printf("Overlay qTD:\n"); ehci_dump_qtd(sc, (void *)&qh->qh_qtd); } static void ehci_dump_sitd(ehci_softc_t *sc, ehci_sitd_t *sitd) { usb_pc_cpu_invalidate(sitd->page_cache); printf("SITD(%p) at 0x%08x\n", sitd, hc32toh(sc, sitd->sitd_self) & ~0x1F); printf(" next=0x%08x\n", hc32toh(sc, sitd->sitd_next)); printf(" portaddr=0x%08x dir=%s addr=%d endpt=0x%x port=0x%x huba=0x%x\n", hc32toh(sc, sitd->sitd_portaddr), (sitd->sitd_portaddr & htohc32(sc, EHCI_SITD_SET_DIR_IN)) ? "in" : "out", EHCI_SITD_GET_ADDR(hc32toh(sc, sitd->sitd_portaddr)), EHCI_SITD_GET_ENDPT(hc32toh(sc, sitd->sitd_portaddr)), EHCI_SITD_GET_PORT(hc32toh(sc, sitd->sitd_portaddr)), EHCI_SITD_GET_HUBA(hc32toh(sc, sitd->sitd_portaddr))); printf(" mask=0x%08x\n", hc32toh(sc, sitd->sitd_mask)); printf(" status=0x%08x <%s> len=0x%x\n", hc32toh(sc, sitd->sitd_status), (sitd->sitd_status & htohc32(sc, EHCI_SITD_ACTIVE)) ? "ACTIVE" : "", EHCI_SITD_GET_LEN(hc32toh(sc, sitd->sitd_status))); printf(" back=0x%08x, bp=0x%08x,0x%08x,0x%08x,0x%08x\n", hc32toh(sc, sitd->sitd_back), hc32toh(sc, sitd->sitd_bp[0]), hc32toh(sc, sitd->sitd_bp[1]), hc32toh(sc, sitd->sitd_bp_hi[0]), hc32toh(sc, sitd->sitd_bp_hi[1])); } static void ehci_dump_itd(ehci_softc_t *sc, ehci_itd_t *itd) { usb_pc_cpu_invalidate(itd->page_cache); printf("ITD(%p) at 0x%08x\n", itd, hc32toh(sc, itd->itd_self) & ~0x1F); printf(" next=0x%08x\n", hc32toh(sc, itd->itd_next)); printf(" status[0]=0x%08x; <%s>\n", hc32toh(sc, itd->itd_status[0]), (itd->itd_status[0] & htohc32(sc, EHCI_ITD_ACTIVE)) ? "ACTIVE" : ""); printf(" status[1]=0x%08x; <%s>\n", hc32toh(sc, itd->itd_status[1]), (itd->itd_status[1] & htohc32(sc, EHCI_ITD_ACTIVE)) ? 
"ACTIVE" : ""); printf(" status[2]=0x%08x; <%s>\n", hc32toh(sc, itd->itd_status[2]), (itd->itd_status[2] & htohc32(sc, EHCI_ITD_ACTIVE)) ? "ACTIVE" : ""); printf(" status[3]=0x%08x; <%s>\n", hc32toh(sc, itd->itd_status[3]), (itd->itd_status[3] & htohc32(sc, EHCI_ITD_ACTIVE)) ? "ACTIVE" : ""); printf(" status[4]=0x%08x; <%s>\n", hc32toh(sc, itd->itd_status[4]), (itd->itd_status[4] & htohc32(sc, EHCI_ITD_ACTIVE)) ? "ACTIVE" : ""); printf(" status[5]=0x%08x; <%s>\n", hc32toh(sc, itd->itd_status[5]), (itd->itd_status[5] & htohc32(sc, EHCI_ITD_ACTIVE)) ? "ACTIVE" : ""); printf(" status[6]=0x%08x; <%s>\n", hc32toh(sc, itd->itd_status[6]), (itd->itd_status[6] & htohc32(sc, EHCI_ITD_ACTIVE)) ? "ACTIVE" : ""); printf(" status[7]=0x%08x; <%s>\n", hc32toh(sc, itd->itd_status[7]), (itd->itd_status[7] & htohc32(sc, EHCI_ITD_ACTIVE)) ? "ACTIVE" : ""); printf(" bp[0]=0x%08x\n", hc32toh(sc, itd->itd_bp[0])); printf(" addr=0x%02x; endpt=0x%01x\n", EHCI_ITD_GET_ADDR(hc32toh(sc, itd->itd_bp[0])), EHCI_ITD_GET_ENDPT(hc32toh(sc, itd->itd_bp[0]))); printf(" bp[1]=0x%08x\n", hc32toh(sc, itd->itd_bp[1])); printf(" dir=%s; mpl=0x%02x\n", (hc32toh(sc, itd->itd_bp[1]) & EHCI_ITD_SET_DIR_IN) ? "in" : "out", EHCI_ITD_GET_MPL(hc32toh(sc, itd->itd_bp[1]))); printf(" bp[2..6]=0x%08x,0x%08x,0x%08x,0x%08x,0x%08x\n", hc32toh(sc, itd->itd_bp[2]), hc32toh(sc, itd->itd_bp[3]), hc32toh(sc, itd->itd_bp[4]), hc32toh(sc, itd->itd_bp[5]), hc32toh(sc, itd->itd_bp[6])); printf(" bp_hi=0x%08x,0x%08x,0x%08x,0x%08x,\n" " 0x%08x,0x%08x,0x%08x\n", hc32toh(sc, itd->itd_bp_hi[0]), hc32toh(sc, itd->itd_bp_hi[1]), hc32toh(sc, itd->itd_bp_hi[2]), hc32toh(sc, itd->itd_bp_hi[3]), hc32toh(sc, itd->itd_bp_hi[4]), hc32toh(sc, itd->itd_bp_hi[5]), hc32toh(sc, itd->itd_bp_hi[6])); } static void ehci_dump_isoc(ehci_softc_t *sc) { ehci_itd_t *itd; ehci_sitd_t *sitd; uint16_t max = 1000; uint16_t pos; pos = (EOREAD4(sc, EHCI_FRINDEX) / 8) & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1); printf("%s: isochronous dump from frame 0x%03x:\n", __FUNCTION__, pos); itd = sc->sc_isoc_hs_p_last[pos]; sitd = sc->sc_isoc_fs_p_last[pos]; while (itd && max && max--) { ehci_dump_itd(sc, itd); itd = itd->prev; } while (sitd && max && max--) { ehci_dump_sitd(sc, sitd); sitd = sitd->prev; } } #endif static void ehci_transfer_intr_enqueue(struct usb_xfer *xfer) { /* check for early completion */ if (ehci_check_transfer(xfer)) { return; } /* put transfer on interrupt queue */ usbd_transfer_enqueue(&xfer->xroot->bus->intr_q, xfer); /* start timeout, if any */ if (xfer->timeout != 0) { usbd_transfer_timeout_ms(xfer, &ehci_timeout, xfer->timeout); } } #define EHCI_APPEND_FS_TD(std,last) (last) = _ehci_append_fs_td(std,last) static ehci_sitd_t * _ehci_append_fs_td(ehci_sitd_t *std, ehci_sitd_t *last) { DPRINTFN(11, "%p to %p\n", std, last); /* (sc->sc_bus.mtx) must be locked */ std->next = last->next; std->sitd_next = last->sitd_next; std->prev = last; usb_pc_cpu_flush(std->page_cache); /* * the last->next->prev is never followed: std->next->prev = std; */ last->next = std; last->sitd_next = std->sitd_self; usb_pc_cpu_flush(last->page_cache); return (std); } #define EHCI_APPEND_HS_TD(std,last) (last) = _ehci_append_hs_td(std,last) static ehci_itd_t * _ehci_append_hs_td(ehci_itd_t *std, ehci_itd_t *last) { DPRINTFN(11, "%p to %p\n", std, last); /* (sc->sc_bus.mtx) must be locked */ std->next = last->next; std->itd_next = last->itd_next; std->prev = last; usb_pc_cpu_flush(std->page_cache); /* * the last->next->prev is never followed: std->next->prev = std; */ last->next = std; 
last->itd_next = std->itd_self; usb_pc_cpu_flush(last->page_cache); return (std); } #define EHCI_APPEND_QH(sqh,last) (last) = _ehci_append_qh(sqh,last) static ehci_qh_t * _ehci_append_qh(ehci_qh_t *sqh, ehci_qh_t *last) { DPRINTFN(11, "%p to %p\n", sqh, last); if (sqh->prev != NULL) { /* should not happen */ DPRINTFN(0, "QH already linked!\n"); return (last); } /* (sc->sc_bus.mtx) must be locked */ sqh->next = last->next; sqh->qh_link = last->qh_link; sqh->prev = last; usb_pc_cpu_flush(sqh->page_cache); /* * the last->next->prev is never followed: sqh->next->prev = sqh; */ last->next = sqh; last->qh_link = sqh->qh_self; usb_pc_cpu_flush(last->page_cache); return (sqh); } #define EHCI_REMOVE_FS_TD(std,last) (last) = _ehci_remove_fs_td(std,last) static ehci_sitd_t * _ehci_remove_fs_td(ehci_sitd_t *std, ehci_sitd_t *last) { DPRINTFN(11, "%p from %p\n", std, last); /* (sc->sc_bus.mtx) must be locked */ std->prev->next = std->next; std->prev->sitd_next = std->sitd_next; usb_pc_cpu_flush(std->prev->page_cache); if (std->next) { std->next->prev = std->prev; usb_pc_cpu_flush(std->next->page_cache); } return ((last == std) ? std->prev : last); } #define EHCI_REMOVE_HS_TD(std,last) (last) = _ehci_remove_hs_td(std,last) static ehci_itd_t * _ehci_remove_hs_td(ehci_itd_t *std, ehci_itd_t *last) { DPRINTFN(11, "%p from %p\n", std, last); /* (sc->sc_bus.mtx) must be locked */ std->prev->next = std->next; std->prev->itd_next = std->itd_next; usb_pc_cpu_flush(std->prev->page_cache); if (std->next) { std->next->prev = std->prev; usb_pc_cpu_flush(std->next->page_cache); } return ((last == std) ? std->prev : last); } #define EHCI_REMOVE_QH(sqh,last) (last) = _ehci_remove_qh(sqh,last) static ehci_qh_t * _ehci_remove_qh(ehci_qh_t *sqh, ehci_qh_t *last) { DPRINTFN(11, "%p from %p\n", sqh, last); /* (sc->sc_bus.mtx) must be locked */ /* only remove if not removed from a queue */ if (sqh->prev) { sqh->prev->next = sqh->next; sqh->prev->qh_link = sqh->qh_link; usb_pc_cpu_flush(sqh->prev->page_cache); if (sqh->next) { sqh->next->prev = sqh->prev; usb_pc_cpu_flush(sqh->next->page_cache); } last = ((last == sqh) ? 
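/*
 * Editor's note: removal is the mirror image, with one subtlety visible
 * in _ehci_remove_qh() here: only the predecessor's links are
 * rewritten, while the removed element keeps its own forward pointers,
 * so a controller that has already fetched it can still walk back into
 * a valid schedule.  The "(last == x) ? x->prev : last" expression
 * keeps the caller's tail pointer correct when the tail itself is
 * removed.  Sketch, reusing the toy node/flush() from the previous
 * note:
 *
 *	static struct node *
 *	remove_node(struct node *n, struct node *tail)
 *	{
 *		n->prev->next = n->next;
 *		n->prev->hw_next = n->hw_next;	// predecessor skips n
 *		flush(n->prev);
 *		if (n->next != NULL)
 *			n->next->prev = n->prev;
 *		// n->hw_next is deliberately left intact
 *		return ((tail == n) ? n->prev : tail);
 *	}
 */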
sqh->prev : last); sqh->prev = 0; usb_pc_cpu_flush(sqh->page_cache); } return (last); } static void ehci_data_toggle_update(struct usb_xfer *xfer, uint16_t actlen, uint16_t xlen) { uint16_t rem; uint8_t dt; /* count number of full packets */ dt = (actlen / xfer->max_packet_size) & 1; /* compute remainder */ rem = actlen % xfer->max_packet_size; if (rem > 0) dt ^= 1; /* short packet at the end */ else if (actlen != xlen) dt ^= 1; /* zero length packet at the end */ else if (xlen == 0) dt ^= 1; /* zero length transfer */ xfer->endpoint->toggle_next ^= dt; } static usb_error_t ehci_non_isoc_done_sub(struct usb_xfer *xfer) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); ehci_qtd_t *td; ehci_qtd_t *td_alt_next; uint32_t status; uint16_t len; td = xfer->td_transfer_cache; td_alt_next = td->alt_next; if (xfer->aframes != xfer->nframes) { usbd_xfer_set_frame_len(xfer, xfer->aframes, 0); } while (1) { usb_pc_cpu_invalidate(td->page_cache); status = hc32toh(sc, td->qtd_status); len = EHCI_QTD_GET_BYTES(status); /* * Verify the status length and * add the length to "frlengths[]": */ if (len > td->len) { /* should not happen */ DPRINTF("Invalid status length, " "0x%04x/0x%04x bytes\n", len, td->len); status |= EHCI_QTD_HALTED; } else if (xfer->aframes != xfer->nframes) { xfer->frlengths[xfer->aframes] += td->len - len; /* manually update data toggle */ ehci_data_toggle_update(xfer, td->len - len, td->len); } /* Check for last transfer */ if (((void *)td) == xfer->td_transfer_last) { td = NULL; break; } /* Check for transfer error */ if (status & EHCI_QTD_HALTED) { /* the transfer is finished */ td = NULL; break; } /* Check for short transfer */ if (len > 0) { if (xfer->flags_int.short_frames_ok) { /* follow alt next */ td = td->alt_next; } else { /* the transfer is finished */ td = NULL; } break; } td = td->obj_next; if (td->alt_next != td_alt_next) { /* this USB frame is complete */ break; } } /* update transfer cache */ xfer->td_transfer_cache = td; #ifdef USB_DEBUG if (status & EHCI_QTD_STATERRS) { DPRINTFN(11, "error, addr=%d, endpt=0x%02x, frame=0x%02x" "status=%s%s%s%s%s%s%s%s\n", xfer->address, xfer->endpointno, xfer->aframes, (status & EHCI_QTD_ACTIVE) ? "[ACTIVE]" : "[NOT_ACTIVE]", (status & EHCI_QTD_HALTED) ? "[HALTED]" : "", (status & EHCI_QTD_BUFERR) ? "[BUFERR]" : "", (status & EHCI_QTD_BABBLE) ? "[BABBLE]" : "", (status & EHCI_QTD_XACTERR) ? "[XACTERR]" : "", (status & EHCI_QTD_MISSEDMICRO) ? "[MISSED]" : "", (status & EHCI_QTD_SPLITXSTATE) ? "[SPLIT]" : "", (status & EHCI_QTD_PINGSTATE) ? 
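/*
 * Editor's note: ehci_data_toggle_update() above counts how many times
 * the DATA0/DATA1 toggle flipped: every packet on the wire (full,
 * short, or zero-length) flips it once, so only the packet count mod 2
 * matters.  A standalone restatement of the same rules, with worked
 * cases:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static uint8_t
 *	toggle_flips(uint16_t actlen, uint16_t xlen, uint16_t mps)
 *	{
 *		uint8_t dt = (actlen / mps) & 1;  // full packets mod 2
 *
 *		if (actlen % mps)
 *			dt ^= 1;	// short packet at the end
 *		else if (actlen != xlen)
 *			dt ^= 1;	// zero length packet at the end
 *		else if (xlen == 0)
 *			dt ^= 1;	// zero length transfer
 *		return (dt);
 *	}
 *
 *	int main(void) {
 *		assert(toggle_flips(1024, 1024, 512) == 0); // 2 packets
 *		assert(toggle_flips(512, 512, 512) == 1);   // 1 packet
 *		assert(toggle_flips(700, 1024, 512) == 0);  // full + short
 *		assert(toggle_flips(0, 0, 512) == 1);       // lone ZLP
 *		return (0);
 *	}
 */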
"[PING]" : ""); } #endif if (status & EHCI_QTD_HALTED) { if ((xfer->xroot->udev->parent_hs_hub != NULL) || (xfer->xroot->udev->address != 0)) { /* try to separate I/O errors from STALL */ if (EHCI_QTD_GET_CERR(status) == 0) return (USB_ERR_IOERROR); } return (USB_ERR_STALLED); } return (USB_ERR_NORMAL_COMPLETION); } static void ehci_non_isoc_done(struct usb_xfer *xfer) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); ehci_qh_t *qh; uint32_t status; usb_error_t err = 0; DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n", xfer, xfer->endpoint); #ifdef USB_DEBUG if (ehcidebug > 10) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); ehci_dump_sqtds(sc, xfer->td_transfer_first); } #endif /* extract data toggle directly from the QH's overlay area */ qh = xfer->qh_start[xfer->flags_int.curr_dma_set]; usb_pc_cpu_invalidate(qh->page_cache); status = hc32toh(sc, qh->qh_qtd.qtd_status); /* reset scanner */ xfer->td_transfer_cache = xfer->td_transfer_first; if (xfer->flags_int.control_xfr) { if (xfer->flags_int.control_hdr) { err = ehci_non_isoc_done_sub(xfer); } xfer->aframes = 1; if (xfer->td_transfer_cache == NULL) { goto done; } } while (xfer->aframes != xfer->nframes) { err = ehci_non_isoc_done_sub(xfer); xfer->aframes++; if (xfer->td_transfer_cache == NULL) { goto done; } } if (xfer->flags_int.control_xfr && !xfer->flags_int.control_act) { err = ehci_non_isoc_done_sub(xfer); } done: ehci_device_done(xfer, err); } /*------------------------------------------------------------------------* * ehci_check_transfer * * Return values: * 0: USB transfer is not finished * Else: USB transfer is finished *------------------------------------------------------------------------*/ static uint8_t ehci_check_transfer(struct usb_xfer *xfer) { const struct usb_pipe_methods *methods = xfer->endpoint->methods; ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); uint32_t status; DPRINTFN(13, "xfer=%p checking transfer\n", xfer); if (methods == &ehci_device_isoc_fs_methods) { ehci_sitd_t *td; /* isochronous full speed transfer */ td = xfer->td_transfer_last; usb_pc_cpu_invalidate(td->page_cache); status = hc32toh(sc, td->sitd_status); /* also check if first is complete */ td = xfer->td_transfer_first; usb_pc_cpu_invalidate(td->page_cache); status |= hc32toh(sc, td->sitd_status); if (!(status & EHCI_SITD_ACTIVE)) { ehci_device_done(xfer, USB_ERR_NORMAL_COMPLETION); goto transferred; } } else if (methods == &ehci_device_isoc_hs_methods) { ehci_itd_t *td; /* isochronous high speed transfer */ /* check last transfer */ td = xfer->td_transfer_last; usb_pc_cpu_invalidate(td->page_cache); status = td->itd_status[0]; status |= td->itd_status[1]; status |= td->itd_status[2]; status |= td->itd_status[3]; status |= td->itd_status[4]; status |= td->itd_status[5]; status |= td->itd_status[6]; status |= td->itd_status[7]; /* also check first transfer */ td = xfer->td_transfer_first; usb_pc_cpu_invalidate(td->page_cache); status |= td->itd_status[0]; status |= td->itd_status[1]; status |= td->itd_status[2]; status |= td->itd_status[3]; status |= td->itd_status[4]; status |= td->itd_status[5]; status |= td->itd_status[6]; status |= td->itd_status[7]; /* if no transactions are active we continue */ if (!(status & htohc32(sc, EHCI_ITD_ACTIVE))) { ehci_device_done(xfer, USB_ERR_NORMAL_COMPLETION); goto transferred; } } else { ehci_qtd_t *td; ehci_qh_t *qh; /* non-isochronous transfer */ /* * check whether there is an error somewhere in the middle, * or whether there was a short packet (SPD and not ACTIVE) */ td = 
xfer->td_transfer_cache; qh = xfer->qh_start[xfer->flags_int.curr_dma_set]; usb_pc_cpu_invalidate(qh->page_cache); status = hc32toh(sc, qh->qh_qtd.qtd_status); if (status & EHCI_QTD_ACTIVE) { /* transfer is pending */ goto done; } while (1) { usb_pc_cpu_invalidate(td->page_cache); status = hc32toh(sc, td->qtd_status); /* * Check if there is an active TD which * indicates that the transfer isn't done. */ if (status & EHCI_QTD_ACTIVE) { /* update cache */ xfer->td_transfer_cache = td; goto done; } /* * last transfer descriptor makes the transfer done */ if (((void *)td) == xfer->td_transfer_last) { break; } /* * any kind of error makes the transfer done */ if (status & EHCI_QTD_HALTED) { break; } /* * if there is no alternate next transfer, a short * packet also makes the transfer done */ if (EHCI_QTD_GET_BYTES(status)) { if (xfer->flags_int.short_frames_ok) { /* follow alt next */ if (td->alt_next) { td = td->alt_next; continue; } } /* transfer is done */ break; } td = td->obj_next; } ehci_non_isoc_done(xfer); goto transferred; } done: DPRINTFN(13, "xfer=%p is still active\n", xfer); return (0); transferred: return (1); } static void ehci_pcd_enable(ehci_softc_t *sc) { USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); sc->sc_eintrs |= EHCI_STS_PCD; EOWRITE4(sc, EHCI_USBINTR, sc->sc_eintrs); /* acknowledge any PCD interrupt */ EOWRITE4(sc, EHCI_USBSTS, EHCI_STS_PCD); ehci_root_intr(sc); } static void ehci_interrupt_poll(ehci_softc_t *sc) { struct usb_xfer *xfer; repeat: TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { /* * check if transfer is transferred */ if (ehci_check_transfer(xfer)) { /* queue has been modified */ goto repeat; } } } /* * Some EHCI chips from VIA / ATI seem to trigger interrupts before * writing back the qTD status, or miss signalling occasionally under * heavy load. If the host machine is too fast, we can miss * transaction completion - when we scan the active list the * transaction still seems to be active. This generally exhibits * itself as a umass stall that never recovers. * * We work around this behaviour by setting up this callback after any * softintr that completes with transactions still pending, giving us * another chance to check for completion after the writeback has * taken place. */ static void ehci_poll_timeout(void *arg) { ehci_softc_t *sc = arg; DPRINTFN(3, "\n"); ehci_interrupt_poll(sc); } /*------------------------------------------------------------------------* * ehci_interrupt - EHCI interrupt handler * * NOTE: Do not access "sc->sc_bus.bdev" inside the interrupt handler, * hence the interrupt handler will be setup before "sc->sc_bus.bdev" * is present ! *------------------------------------------------------------------------*/ void ehci_interrupt(ehci_softc_t *sc) { uint32_t status; USB_BUS_LOCK(&sc->sc_bus); DPRINTFN(16, "real interrupt\n"); #ifdef USB_DEBUG if (ehcidebug > 15) { ehci_dump_regs(sc); } #endif status = EHCI_STS_INTRS(EOREAD4(sc, EHCI_USBSTS)); if (status == 0) { /* the interrupt was not for us */ goto done; } if (!(status & sc->sc_eintrs)) { goto done; } EOWRITE4(sc, EHCI_USBSTS, status); /* acknowledge */ status &= sc->sc_eintrs; if (status & EHCI_STS_HSE) { printf("%s: unrecoverable error, " "controller halted\n", __FUNCTION__); #ifdef USB_DEBUG ehci_dump_regs(sc); ehci_dump_isoc(sc); #endif } if (status & EHCI_STS_PCD) { /* * Disable PCD interrupt for now, because it will be * on until the port has been reset. 
*/ sc->sc_eintrs &= ~EHCI_STS_PCD; EOWRITE4(sc, EHCI_USBINTR, sc->sc_eintrs); ehci_root_intr(sc); /* do not allow RHSC interrupts > 1 per second */ usb_callout_reset(&sc->sc_tmo_pcd, hz, (void *)&ehci_pcd_enable, sc); } status &= ~(EHCI_STS_INT | EHCI_STS_ERRINT | EHCI_STS_PCD | EHCI_STS_IAA); if (status != 0) { /* block unprocessed interrupts */ sc->sc_eintrs &= ~status; EOWRITE4(sc, EHCI_USBINTR, sc->sc_eintrs); printf("%s: blocking interrupts 0x%x\n", __FUNCTION__, status); } /* poll all the USB transfers */ ehci_interrupt_poll(sc); if (sc->sc_flags & EHCI_SCFLG_LOSTINTRBUG) { usb_callout_reset(&sc->sc_tmo_poll, hz / 128, (void *)&ehci_poll_timeout, sc); } done: USB_BUS_UNLOCK(&sc->sc_bus); } /* * called when a request does not complete */ static void ehci_timeout(void *arg) { struct usb_xfer *xfer = arg; DPRINTF("xfer=%p\n", xfer); USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); /* transfer is transferred */ ehci_device_done(xfer, USB_ERR_TIMEOUT); } static void ehci_do_poll(struct usb_bus *bus) { ehci_softc_t *sc = EHCI_BUS2SC(bus); USB_BUS_LOCK(&sc->sc_bus); ehci_interrupt_poll(sc); USB_BUS_UNLOCK(&sc->sc_bus); } static void ehci_setup_standard_chain_sub(struct ehci_std_temp *temp) { struct usb_page_search buf_res; ehci_qtd_t *td; ehci_qtd_t *td_next; ehci_qtd_t *td_alt_next; uint32_t buf_offset; uint32_t average; uint32_t len_old; uint32_t terminate; uint32_t qtd_altnext; uint8_t shortpkt_old; uint8_t precompute; terminate = temp->sc->sc_terminate_self; qtd_altnext = temp->sc->sc_terminate_self; td_alt_next = NULL; buf_offset = 0; shortpkt_old = temp->shortpkt; len_old = temp->len; precompute = 1; restart: td = temp->td; td_next = temp->td_next; while (1) { if (temp->len == 0) { if (temp->shortpkt) { break; } /* send a Zero Length Packet, ZLP, last */ temp->shortpkt = 1; average = 0; } else { average = temp->average; if (temp->len < average) { if (temp->len % temp->max_frame_size) { temp->shortpkt = 1; } average = temp->len; } } if (td_next == NULL) { panic("%s: out of EHCI transfer descriptors!", __FUNCTION__); } /* get next TD */ td = td_next; td_next = td->obj_next; /* check if we are pre-computing */ if (precompute) { /* update remaining length */ temp->len -= average; continue; } /* fill out current TD */ td->qtd_status = temp->qtd_status | htohc32(temp->sc, EHCI_QTD_IOC | EHCI_QTD_SET_BYTES(average)); if (average == 0) { if (temp->auto_data_toggle == 0) { /* update data toggle, ZLP case */ temp->qtd_status ^= htohc32(temp->sc, EHCI_QTD_TOGGLE_MASK); } td->len = 0; /* properly reset reserved fields */ td->qtd_buffer[0] = 0; td->qtd_buffer[1] = 0; td->qtd_buffer[2] = 0; td->qtd_buffer[3] = 0; td->qtd_buffer[4] = 0; td->qtd_buffer_hi[0] = 0; td->qtd_buffer_hi[1] = 0; td->qtd_buffer_hi[2] = 0; td->qtd_buffer_hi[3] = 0; td->qtd_buffer_hi[4] = 0; } else { uint8_t x; if (temp->auto_data_toggle == 0) { /* update data toggle */ - if (((average + temp->max_frame_size - 1) / - temp->max_frame_size) & 1) { + if (howmany(average, temp->max_frame_size) & 1) { temp->qtd_status ^= htohc32(temp->sc, EHCI_QTD_TOGGLE_MASK); } } td->len = average; /* update remaining length */ temp->len -= average; /* fill out buffer pointers */ usbd_get_page(temp->pc, buf_offset, &buf_res); td->qtd_buffer[0] = htohc32(temp->sc, buf_res.physaddr); td->qtd_buffer_hi[0] = 0; x = 1; while (average > EHCI_PAGE_SIZE) { average -= EHCI_PAGE_SIZE; buf_offset += EHCI_PAGE_SIZE; usbd_get_page(temp->pc, buf_offset, &buf_res); td->qtd_buffer[x] = htohc32(temp->sc, buf_res.physaddr & (~0xFFF)); td->qtd_buffer_hi[x] = 0; 
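/*
 * Editor's note: the hunk above is the only -/+ change in this part of
 * the diff; it swaps an open-coded round-up division for the howmany()
 * macro from <sys/param.h>.  The two spellings are identical for
 * positive operands:
 *
 *	#include <sys/param.h>	// howmany()
 *	#include <assert.h>
 *
 *	int main(void) {
 *		// howmany(x, y) == (x + y - 1) / y, i.e. x / y rounded up
 *		assert(howmany(1024, 512) == 2);
 *		assert(howmany(1025, 512) == 3);
 *		assert(howmany(1, 512) == 1);
 *		// only the parity matters at the call site: an odd number
 *		// of max-size packets flips the DATA0/DATA1 toggle
 *		assert((howmany(1536, 512) & 1) == 1);
 *		return (0);
 *	}
 */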
x++; } /* * NOTE: The "average" variable is never zero after * exiting the loop above ! * * NOTE: We have to subtract one from the offset to * ensure that we are computing the physical address * of a valid page ! */ buf_offset += average; usbd_get_page(temp->pc, buf_offset - 1, &buf_res); td->qtd_buffer[x] = htohc32(temp->sc, buf_res.physaddr & (~0xFFF)); td->qtd_buffer_hi[x] = 0; /* properly reset reserved fields */ while (++x < EHCI_QTD_NBUFFERS) { td->qtd_buffer[x] = 0; td->qtd_buffer_hi[x] = 0; } } if (td_next) { /* link the current TD with the next one */ td->qtd_next = td_next->qtd_self; } td->qtd_altnext = qtd_altnext; td->alt_next = td_alt_next; usb_pc_cpu_flush(td->page_cache); } if (precompute) { precompute = 0; /* setup alt next pointer, if any */ if (temp->last_frame) { td_alt_next = NULL; qtd_altnext = terminate; } else { /* we use this field internally */ td_alt_next = td_next; if (temp->setup_alt_next) { qtd_altnext = td_next->qtd_self; } else { qtd_altnext = terminate; } } /* restore */ temp->shortpkt = shortpkt_old; temp->len = len_old; goto restart; } temp->td = td; temp->td_next = td_next; } static void ehci_setup_standard_chain(struct usb_xfer *xfer, ehci_qh_t **qh_last) { struct ehci_std_temp temp; const struct usb_pipe_methods *methods; ehci_qh_t *qh; ehci_qtd_t *td; uint32_t qh_endp; uint32_t qh_endphub; uint32_t x; DPRINTFN(9, "addr=%d endpt=%d sumlen=%d speed=%d\n", xfer->address, UE_GET_ADDR(xfer->endpointno), xfer->sumlen, usbd_get_speed(xfer->xroot->udev)); temp.average = xfer->max_hc_frame_size; temp.max_frame_size = xfer->max_frame_size; temp.sc = EHCI_BUS2SC(xfer->xroot->bus); /* toggle the DMA set we are using */ xfer->flags_int.curr_dma_set ^= 1; /* get next DMA set */ td = xfer->td_start[xfer->flags_int.curr_dma_set]; xfer->td_transfer_first = td; xfer->td_transfer_cache = td; temp.td = NULL; temp.td_next = td; temp.qtd_status = 0; temp.last_frame = 0; temp.setup_alt_next = xfer->flags_int.short_frames_ok; if (xfer->flags_int.control_xfr) { if (xfer->endpoint->toggle_next) { /* DATA1 is next */ temp.qtd_status |= htohc32(temp.sc, EHCI_QTD_SET_TOGGLE(1)); } temp.auto_data_toggle = 0; } else { temp.auto_data_toggle = 1; } if ((xfer->xroot->udev->parent_hs_hub != NULL) || (xfer->xroot->udev->address != 0)) { /* max 3 retries */ temp.qtd_status |= htohc32(temp.sc, EHCI_QTD_SET_CERR(3)); } /* check if we should prepend a setup message */ if (xfer->flags_int.control_xfr) { if (xfer->flags_int.control_hdr) { xfer->endpoint->toggle_next = 0; temp.qtd_status &= htohc32(temp.sc, EHCI_QTD_SET_CERR(3)); temp.qtd_status |= htohc32(temp.sc, EHCI_QTD_ACTIVE | EHCI_QTD_SET_PID(EHCI_QTD_PID_SETUP) | EHCI_QTD_SET_TOGGLE(0)); temp.len = xfer->frlengths[0]; temp.pc = xfer->frbuffers + 0; temp.shortpkt = temp.len ? 
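/*
 * Editor's note: a qTD carries at most five buffer pointers;
 * qtd_buffer[0] keeps the byte offset within the first 4 KiB page and
 * the rest are page-aligned, which is why the code above masks with
 * ~0xFFF and probes "buf_offset - 1" for the final page (one past the
 * end could fall on an unmapped page).  Sketch with a toy linear
 * mapping (get_phys() and fill_pages() are made-up; len must be
 * nonzero and at most 16 KiB so at most five pointers are written):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t get_phys(uint32_t off) { return (0x100000u + off); }
 *
 *	static void
 *	fill_pages(uint32_t bp[5], uint32_t off, uint32_t len)
 *	{
 *		uint32_t x = 0;
 *
 *		bp[x++] = get_phys(off);	// keeps the low 12 bits
 *		while (len > 4096u) {
 *			len -= 4096u;
 *			off += 4096u;
 *			bp[x++] = get_phys(off) & ~0xFFFu;
 *		}
 *		// probe the last valid byte, not one past it
 *		bp[x] = get_phys(off + len - 1) & ~0xFFFu;
 *	}
 */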
1 : 0; /* check for last frame */ if (xfer->nframes == 1) { /* no STATUS stage yet, SETUP is last */ if (xfer->flags_int.control_act) { temp.last_frame = 1; temp.setup_alt_next = 0; } } ehci_setup_standard_chain_sub(&temp); } x = 1; } else { x = 0; } while (x != xfer->nframes) { /* DATA0 / DATA1 message */ temp.len = xfer->frlengths[x]; temp.pc = xfer->frbuffers + x; x++; if (x == xfer->nframes) { if (xfer->flags_int.control_xfr) { /* no STATUS stage yet, DATA is last */ if (xfer->flags_int.control_act) { temp.last_frame = 1; temp.setup_alt_next = 0; } } else { temp.last_frame = 1; temp.setup_alt_next = 0; } } /* keep previous data toggle and error count */ temp.qtd_status &= htohc32(temp.sc, EHCI_QTD_SET_CERR(3) | EHCI_QTD_SET_TOGGLE(1)); if (temp.len == 0) { /* make sure that we send an USB packet */ temp.shortpkt = 0; } else { /* regular data transfer */ temp.shortpkt = (xfer->flags.force_short_xfer) ? 0 : 1; } /* set endpoint direction */ temp.qtd_status |= (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) ? htohc32(temp.sc, EHCI_QTD_ACTIVE | EHCI_QTD_SET_PID(EHCI_QTD_PID_IN)) : htohc32(temp.sc, EHCI_QTD_ACTIVE | EHCI_QTD_SET_PID(EHCI_QTD_PID_OUT)); ehci_setup_standard_chain_sub(&temp); } /* check if we should append a status stage */ if (xfer->flags_int.control_xfr && !xfer->flags_int.control_act) { /* * Send a DATA1 message and invert the current endpoint * direction. */ temp.qtd_status &= htohc32(temp.sc, EHCI_QTD_SET_CERR(3) | EHCI_QTD_SET_TOGGLE(1)); temp.qtd_status |= (UE_GET_DIR(xfer->endpointno) == UE_DIR_OUT) ? htohc32(temp.sc, EHCI_QTD_ACTIVE | EHCI_QTD_SET_PID(EHCI_QTD_PID_IN) | EHCI_QTD_SET_TOGGLE(1)) : htohc32(temp.sc, EHCI_QTD_ACTIVE | EHCI_QTD_SET_PID(EHCI_QTD_PID_OUT) | EHCI_QTD_SET_TOGGLE(1)); temp.len = 0; temp.pc = NULL; temp.shortpkt = 0; temp.last_frame = 1; temp.setup_alt_next = 0; ehci_setup_standard_chain_sub(&temp); } td = temp.td; /* the last TD terminates the transfer: */ td->qtd_next = htohc32(temp.sc, EHCI_LINK_TERMINATE); td->qtd_altnext = htohc32(temp.sc, EHCI_LINK_TERMINATE); usb_pc_cpu_flush(td->page_cache); /* must have at least one frame! */ xfer->td_transfer_last = td; #ifdef USB_DEBUG if (ehcidebug > 8) { DPRINTF("nexttog=%d; data before transfer:\n", xfer->endpoint->toggle_next); ehci_dump_sqtds(temp.sc, xfer->td_transfer_first); } #endif methods = xfer->endpoint->methods; qh = xfer->qh_start[xfer->flags_int.curr_dma_set]; /* the "qh_link" field is filled when the QH is added */ qh_endp = (EHCI_QH_SET_ADDR(xfer->address) | EHCI_QH_SET_ENDPT(UE_GET_ADDR(xfer->endpointno)) | EHCI_QH_SET_MPL(xfer->max_packet_size)); if (usbd_get_speed(xfer->xroot->udev) == USB_SPEED_HIGH) { qh_endp |= EHCI_QH_SET_EPS(EHCI_QH_SPEED_HIGH); if (methods != &ehci_device_intr_methods) qh_endp |= EHCI_QH_SET_NRL(8); } else { if (usbd_get_speed(xfer->xroot->udev) == USB_SPEED_FULL) { qh_endp |= EHCI_QH_SET_EPS(EHCI_QH_SPEED_FULL); } else { qh_endp |= EHCI_QH_SET_EPS(EHCI_QH_SPEED_LOW); } if (methods == &ehci_device_ctrl_methods) { qh_endp |= EHCI_QH_CTL; } if (methods != &ehci_device_intr_methods) { /* Only try one time per microframe! 
*/ qh_endp |= EHCI_QH_SET_NRL(1); } } if (temp.auto_data_toggle == 0) { /* software computes the data toggle */ qh_endp |= EHCI_QH_DTC; } qh->qh_endp = htohc32(temp.sc, qh_endp); qh_endphub = (EHCI_QH_SET_MULT(xfer->max_packet_count & 3) | EHCI_QH_SET_CMASK(xfer->endpoint->usb_cmask) | EHCI_QH_SET_SMASK(xfer->endpoint->usb_smask) | EHCI_QH_SET_HUBA(xfer->xroot->udev->hs_hub_addr) | EHCI_QH_SET_PORT(xfer->xroot->udev->hs_port_no)); qh->qh_endphub = htohc32(temp.sc, qh_endphub); qh->qh_curqtd = 0; /* fill the overlay qTD */ if (temp.auto_data_toggle && xfer->endpoint->toggle_next) { /* DATA1 is next */ qh->qh_qtd.qtd_status = htohc32(temp.sc, EHCI_QTD_SET_TOGGLE(1)); } else { qh->qh_qtd.qtd_status = 0; } td = xfer->td_transfer_first; qh->qh_qtd.qtd_next = td->qtd_self; qh->qh_qtd.qtd_altnext = htohc32(temp.sc, EHCI_LINK_TERMINATE); /* properly reset reserved fields */ qh->qh_qtd.qtd_buffer[0] = 0; qh->qh_qtd.qtd_buffer[1] = 0; qh->qh_qtd.qtd_buffer[2] = 0; qh->qh_qtd.qtd_buffer[3] = 0; qh->qh_qtd.qtd_buffer[4] = 0; qh->qh_qtd.qtd_buffer_hi[0] = 0; qh->qh_qtd.qtd_buffer_hi[1] = 0; qh->qh_qtd.qtd_buffer_hi[2] = 0; qh->qh_qtd.qtd_buffer_hi[3] = 0; qh->qh_qtd.qtd_buffer_hi[4] = 0; usb_pc_cpu_flush(qh->page_cache); if (xfer->xroot->udev->flags.self_suspended == 0) { EHCI_APPEND_QH(qh, *qh_last); } } static void ehci_root_intr(ehci_softc_t *sc) { uint16_t i; uint16_t m; USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); /* clear any old interrupt data */ memset(sc->sc_hub_idata, 0, sizeof(sc->sc_hub_idata)); /* set bits */ m = (sc->sc_noport + 1); if (m > (8 * sizeof(sc->sc_hub_idata))) { m = (8 * sizeof(sc->sc_hub_idata)); } for (i = 1; i < m; i++) { /* pick out CHANGE bits from the status register */ if (EOREAD4(sc, EHCI_PORTSC(i)) & EHCI_PS_CLEAR) { sc->sc_hub_idata[i / 8] |= 1 << (i % 8); DPRINTF("port %d changed\n", i); } } uhub_root_intr(&sc->sc_bus, sc->sc_hub_idata, sizeof(sc->sc_hub_idata)); } static void ehci_isoc_fs_done(ehci_softc_t *sc, struct usb_xfer *xfer) { uint32_t nframes = xfer->nframes; uint32_t status; uint32_t *plen = xfer->frlengths; uint16_t len = 0; ehci_sitd_t *td = xfer->td_transfer_first; ehci_sitd_t **pp_last = &sc->sc_isoc_fs_p_last[xfer->qh_pos]; DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n", xfer, xfer->endpoint); while (nframes--) { if (td == NULL) { panic("%s:%d: out of TD's\n", __FUNCTION__, __LINE__); } if (pp_last >= &sc->sc_isoc_fs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]) { pp_last = &sc->sc_isoc_fs_p_last[0]; } #ifdef USB_DEBUG if (ehcidebug > 15) { DPRINTF("isoc FS-TD\n"); ehci_dump_sitd(sc, td); } #endif usb_pc_cpu_invalidate(td->page_cache); status = hc32toh(sc, td->sitd_status); len = EHCI_SITD_GET_LEN(status); DPRINTFN(2, "status=0x%08x, rem=%u\n", status, len); if (*plen >= len) { len = *plen - len; } else { len = 0; } *plen = len; /* remove FS-TD from schedule */ EHCI_REMOVE_FS_TD(td, *pp_last); pp_last++; plen++; td = td->obj_next; } xfer->aframes = xfer->nframes; } static void ehci_isoc_hs_done(ehci_softc_t *sc, struct usb_xfer *xfer) { uint32_t nframes = xfer->nframes; uint32_t status; uint32_t *plen = xfer->frlengths; uint16_t len = 0; uint8_t td_no = 0; ehci_itd_t *td = xfer->td_transfer_first; ehci_itd_t **pp_last = &sc->sc_isoc_hs_p_last[xfer->qh_pos]; DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n", xfer, xfer->endpoint); while (nframes) { if (td == NULL) { panic("%s:%d: out of TD's\n", __FUNCTION__, __LINE__); } if (pp_last >= &sc->sc_isoc_hs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]) { pp_last = &sc->sc_isoc_hs_p_last[0]; } #ifdef USB_DEBUG if 
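/*
 * Editor's note: ehci_root_intr() above builds the interrupt-endpoint
 * payload the way the hub class expects: one bit per port, numbering
 * from bit 1 (bit 0 is the hub itself), set whenever any
 * write-1-to-clear change bit (EHCI_PS_CLEAR) is pending in PORTSC.
 * Toy version with the per-port change state passed in as a plain mask
 * (build_change_map() is a made-up name):
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static void
 *	build_change_map(uint8_t *map, size_t size, uint32_t changed,
 *	    uint16_t nports)
 *	{
 *		uint16_t i, lim = nports + 1;
 *
 *		if (lim > 8 * size)
 *			lim = 8 * size;
 *		memset(map, 0, size);
 *		for (i = 1; i < lim; i++)
 *			if (changed & (1u << i))
 *				map[i / 8] |= 1u << (i % 8);
 *	}
 */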
(ehcidebug > 15) { DPRINTF("isoc HS-TD\n"); ehci_dump_itd(sc, td); } #endif usb_pc_cpu_invalidate(td->page_cache); status = hc32toh(sc, td->itd_status[td_no]); len = EHCI_ITD_GET_LEN(status); DPRINTFN(2, "status=0x%08x, len=%u\n", status, len); if (xfer->endpoint->usb_smask & (1 << td_no)) { if (*plen >= len) { /* * The length is valid. NOTE: The * complete length is written back * into the status field, and not the * remainder like with other transfer * descriptor types. */ } else { /* Invalid length - truncate */ len = 0; } *plen = len; plen++; nframes--; } td_no++; if ((td_no == 8) || (nframes == 0)) { /* remove HS-TD from schedule */ EHCI_REMOVE_HS_TD(td, *pp_last); pp_last++; td_no = 0; td = td->obj_next; } } xfer->aframes = xfer->nframes; } /* NOTE: "done" can be run two times in a row, * from close and from interrupt */ static void ehci_device_done(struct usb_xfer *xfer, usb_error_t error) { const struct usb_pipe_methods *methods = xfer->endpoint->methods; ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); DPRINTFN(2, "xfer=%p, endpoint=%p, error=%d\n", xfer, xfer->endpoint, error); if ((methods == &ehci_device_bulk_methods) || (methods == &ehci_device_ctrl_methods)) { #ifdef USB_DEBUG if (ehcidebug > 8) { DPRINTF("nexttog=%d; data after transfer:\n", xfer->endpoint->toggle_next); ehci_dump_sqtds(sc, xfer->td_transfer_first); } #endif EHCI_REMOVE_QH(xfer->qh_start[xfer->flags_int.curr_dma_set], sc->sc_async_p_last); } if (methods == &ehci_device_intr_methods) { EHCI_REMOVE_QH(xfer->qh_start[xfer->flags_int.curr_dma_set], sc->sc_intr_p_last[xfer->qh_pos]); } /* * Only finish isochronous transfers once which will update * "xfer->frlengths". */ if (xfer->td_transfer_first && xfer->td_transfer_last) { if (methods == &ehci_device_isoc_fs_methods) { ehci_isoc_fs_done(sc, xfer); } if (methods == &ehci_device_isoc_hs_methods) { ehci_isoc_hs_done(sc, xfer); } xfer->td_transfer_first = NULL; xfer->td_transfer_last = NULL; } /* dequeue transfer and start next transfer */ usbd_transfer_done(xfer, error); } /*------------------------------------------------------------------------* * ehci bulk support *------------------------------------------------------------------------*/ static void ehci_device_bulk_open(struct usb_xfer *xfer) { return; } static void ehci_device_bulk_close(struct usb_xfer *xfer) { ehci_device_done(xfer, USB_ERR_CANCELLED); } static void ehci_device_bulk_enter(struct usb_xfer *xfer) { return; } static void ehci_doorbell_async(struct ehci_softc *sc) { uint32_t temp; /* * XXX Performance quirk: Some Host Controllers have a too low * interrupt rate. Issue an IAAD to stimulate the Host * Controller after queueing the BULK transfer. * * XXX Force the host controller to refresh any QH caches. */ temp = EOREAD4(sc, EHCI_USBCMD); if (!(temp & EHCI_CMD_IAAD)) EOWRITE4(sc, EHCI_USBCMD, temp | EHCI_CMD_IAAD); } static void ehci_device_bulk_start(struct usb_xfer *xfer) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); /* setup TD's and QH */ ehci_setup_standard_chain(xfer, &sc->sc_async_p_last); /* put transfer on interrupt queue */ ehci_transfer_intr_enqueue(xfer); /* * XXX Certain nVidia chipsets choke when using the IAAD * feature too frequently. 
*/ if (sc->sc_flags & EHCI_SCFLG_IAADBUG) return; ehci_doorbell_async(sc); } static const struct usb_pipe_methods ehci_device_bulk_methods = { .open = ehci_device_bulk_open, .close = ehci_device_bulk_close, .enter = ehci_device_bulk_enter, .start = ehci_device_bulk_start, }; /*------------------------------------------------------------------------* * ehci control support *------------------------------------------------------------------------*/ static void ehci_device_ctrl_open(struct usb_xfer *xfer) { return; } static void ehci_device_ctrl_close(struct usb_xfer *xfer) { ehci_device_done(xfer, USB_ERR_CANCELLED); } static void ehci_device_ctrl_enter(struct usb_xfer *xfer) { return; } static void ehci_device_ctrl_start(struct usb_xfer *xfer) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); /* setup TD's and QH */ ehci_setup_standard_chain(xfer, &sc->sc_async_p_last); /* put transfer on interrupt queue */ ehci_transfer_intr_enqueue(xfer); } static const struct usb_pipe_methods ehci_device_ctrl_methods = { .open = ehci_device_ctrl_open, .close = ehci_device_ctrl_close, .enter = ehci_device_ctrl_enter, .start = ehci_device_ctrl_start, }; /*------------------------------------------------------------------------* * ehci interrupt support *------------------------------------------------------------------------*/ static void ehci_device_intr_open(struct usb_xfer *xfer) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); uint16_t best; uint16_t bit; uint16_t x; usb_hs_bandwidth_alloc(xfer); /* * Find the best QH position corresponding to the given interval: */ best = 0; bit = EHCI_VIRTUAL_FRAMELIST_COUNT / 2; while (bit) { if (xfer->interval >= bit) { x = bit; best = bit; while (x & bit) { if (sc->sc_intr_stat[x] < sc->sc_intr_stat[best]) { best = x; } x++; } break; } bit >>= 1; } sc->sc_intr_stat[best]++; xfer->qh_pos = best; DPRINTFN(3, "best=%d interval=%d\n", best, xfer->interval); } static void ehci_device_intr_close(struct usb_xfer *xfer) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); sc->sc_intr_stat[xfer->qh_pos]--; ehci_device_done(xfer, USB_ERR_CANCELLED); /* bandwidth must be freed after device done */ usb_hs_bandwidth_free(xfer); } static void ehci_device_intr_enter(struct usb_xfer *xfer) { return; } static void ehci_device_intr_start(struct usb_xfer *xfer) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); /* setup TD's and QH */ ehci_setup_standard_chain(xfer, &sc->sc_intr_p_last[xfer->qh_pos]); /* put transfer on interrupt queue */ ehci_transfer_intr_enqueue(xfer); } static const struct usb_pipe_methods ehci_device_intr_methods = { .open = ehci_device_intr_open, .close = ehci_device_intr_close, .enter = ehci_device_intr_enter, .start = ehci_device_intr_start, }; /*------------------------------------------------------------------------* * ehci full speed isochronous support *------------------------------------------------------------------------*/ static void ehci_device_isoc_fs_open(struct usb_xfer *xfer) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); ehci_sitd_t *td; uint32_t sitd_portaddr; uint8_t ds; sitd_portaddr = EHCI_SITD_SET_ADDR(xfer->address) | EHCI_SITD_SET_ENDPT(UE_GET_ADDR(xfer->endpointno)) | EHCI_SITD_SET_HUBA(xfer->xroot->udev->hs_hub_addr) | EHCI_SITD_SET_PORT(xfer->xroot->udev->hs_port_no); if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) sitd_portaddr |= EHCI_SITD_SET_DIR_IN; sitd_portaddr = htohc32(sc, sitd_portaddr); /* initialize all TD's */ for (ds = 0; ds != 2; ds++) { for (td = xfer->td_start[ds]; td; td = td->obj_next) { td->sitd_portaddr = 
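/*
 * Editor's note: ehci_device_intr_open() above load-balances interrupt
 * endpoints: it finds the largest power-of-two polling interval not
 * exceeding the requested one, then among all queue heads at that tree
 * depth picks the least-used slot.  Standalone restatement of the same
 * search (pick_slot() is a made-up name; usage[] mirrors
 * sc_intr_stat[]):
 *
 *	#include <stdint.h>
 *
 *	static uint16_t
 *	pick_slot(const uint16_t *usage, uint16_t nslots, uint16_t ival)
 *	{
 *		uint16_t best = 0, bit, x;
 *
 *		for (bit = nslots / 2; bit != 0; bit >>= 1) {
 *			if (ival >= bit) {
 *				best = bit;
 *				for (x = bit; (x & bit) != 0; x++)
 *					if (usage[x] < usage[best])
 *						best = x;
 *				break;
 *			}
 *		}
 *		return (best);
 *	}
 *
 * With 8 slots and ival = 4 the candidates are slots 4..7; an interval
 * below the smallest power of two falls back to slot 0.
 */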
sitd_portaddr; /* * TODO: make some kind of automatic * SMASK/CMASK selection based on micro-frame * usage * * micro-frame usage (8 microframes per 1ms) */ td->sitd_back = htohc32(sc, EHCI_LINK_TERMINATE); usb_pc_cpu_flush(td->page_cache); } } } static void ehci_device_isoc_fs_close(struct usb_xfer *xfer) { ehci_device_done(xfer, USB_ERR_CANCELLED); } static void ehci_device_isoc_fs_enter(struct usb_xfer *xfer) { struct usb_page_search buf_res; ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); ehci_sitd_t *td; ehci_sitd_t *td_last = NULL; ehci_sitd_t **pp_last; uint32_t *plen; uint32_t buf_offset; uint32_t nframes; uint32_t temp; uint32_t sitd_mask; uint16_t tlen; uint8_t sa; uint8_t sb; #ifdef USB_DEBUG uint8_t once = 1; #endif DPRINTFN(6, "xfer=%p next=%d nframes=%d\n", xfer, xfer->endpoint->isoc_next, xfer->nframes); /* get the current frame index */ nframes = EOREAD4(sc, EHCI_FRINDEX) / 8; /* * check if the frame index is within the window where the frames * will be inserted */ buf_offset = (nframes - xfer->endpoint->isoc_next) & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1); if ((xfer->endpoint->is_synced == 0) || (buf_offset < xfer->nframes)) { /* * If there is data underflow or the pipe queue is empty we * schedule the transfer a few frames ahead of the current * frame position. Else two isochronous transfers might * overlap. */ xfer->endpoint->isoc_next = (nframes + 3) & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1); xfer->endpoint->is_synced = 1; DPRINTFN(3, "start next=%d\n", xfer->endpoint->isoc_next); } /* * compute how many milliseconds the insertion is ahead of the * current frame position: */ buf_offset = (xfer->endpoint->isoc_next - nframes) & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1); /* * pre-compute when the isochronous transfer will be finished: */ xfer->isoc_time_complete = usb_isoc_time_expand(&sc->sc_bus, nframes) + buf_offset + xfer->nframes; /* get the real number of frames */ nframes = xfer->nframes; buf_offset = 0; plen = xfer->frlengths; /* toggle the DMA set we are using */ xfer->flags_int.curr_dma_set ^= 1; /* get next DMA set */ td = xfer->td_start[xfer->flags_int.curr_dma_set]; xfer->td_transfer_first = td; pp_last = &sc->sc_isoc_fs_p_last[xfer->endpoint->isoc_next]; /* store starting position */ xfer->qh_pos = xfer->endpoint->isoc_next; while (nframes--) { if (td == NULL) { panic("%s:%d: out of TD's\n", __FUNCTION__, __LINE__); } if (pp_last >= &sc->sc_isoc_fs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]) pp_last = &sc->sc_isoc_fs_p_last[0]; /* reuse sitd_portaddr and sitd_back from last transfer */ if (*plen > xfer->max_frame_size) { #ifdef USB_DEBUG if (once) { once = 0; printf("%s: frame length(%d) exceeds %d " "bytes (frame truncated)\n", __FUNCTION__, *plen, xfer->max_frame_size); } #endif *plen = xfer->max_frame_size; } /* allocate a slot */ sa = usbd_fs_isoc_schedule_alloc_slot(xfer, xfer->isoc_time_complete - nframes - 1); if (sa == 255) { /* * Schedule is FULL, set length to zero: */ *plen = 0; sa = USB_FS_ISOC_UFRAME_MAX - 1; } if (*plen) { /* * only call "usbd_get_page()" when we have a * non-zero length */ usbd_get_page(xfer->frbuffers, buf_offset, &buf_res); td->sitd_bp[0] = htohc32(sc, buf_res.physaddr); buf_offset += *plen; /* * NOTE: We need to subtract one from the offset so * that we are on a valid page! 
*/ usbd_get_page(xfer->frbuffers, buf_offset - 1, &buf_res); temp = buf_res.physaddr & ~0xFFF; } else { td->sitd_bp[0] = 0; temp = 0; } if (UE_GET_DIR(xfer->endpointno) == UE_DIR_OUT) { tlen = *plen; if (tlen <= 188) { temp |= 1; /* T-count = 1, TP = ALL */ tlen = 1; } else { tlen += 187; tlen /= 188; temp |= tlen; /* T-count = [1..6] */ temp |= 8; /* TP = Begin */ } tlen += sa; if (tlen >= 8) { sb = 0; } else { sb = (1 << tlen); } sa = (1 << sa); sa = (sb - sa) & 0x3F; sb = 0; } else { sb = (-(4 << sa)) & 0xFE; sa = (1 << sa) & 0x3F; } sitd_mask = (EHCI_SITD_SET_SMASK(sa) | EHCI_SITD_SET_CMASK(sb)); td->sitd_bp[1] = htohc32(sc, temp); td->sitd_mask = htohc32(sc, sitd_mask); if (nframes == 0) { td->sitd_status = htohc32(sc, EHCI_SITD_IOC | EHCI_SITD_ACTIVE | EHCI_SITD_SET_LEN(*plen)); } else { td->sitd_status = htohc32(sc, EHCI_SITD_ACTIVE | EHCI_SITD_SET_LEN(*plen)); } usb_pc_cpu_flush(td->page_cache); #ifdef USB_DEBUG if (ehcidebug > 15) { DPRINTF("FS-TD %d\n", nframes); ehci_dump_sitd(sc, td); } #endif /* insert TD into schedule */ EHCI_APPEND_FS_TD(td, *pp_last); pp_last++; plen++; td_last = td; td = td->obj_next; } xfer->td_transfer_last = td_last; /* update isoc_next */ xfer->endpoint->isoc_next = (pp_last - &sc->sc_isoc_fs_p_last[0]) & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1); /* * We don't allow cancelling of the SPLIT transaction USB FULL * speed transfer, because it disturbs the bandwidth * computation algorithm. */ xfer->flags_int.can_cancel_immed = 0; } static void ehci_device_isoc_fs_start(struct usb_xfer *xfer) { /* * We don't allow cancelling of the SPLIT transaction USB FULL * speed transfer, because it disturbs the bandwidth * computation algorithm. */ xfer->flags_int.can_cancel_immed = 0; /* set a default timeout */ if (xfer->timeout == 0) xfer->timeout = 500; /* ms */ /* put transfer on interrupt queue */ ehci_transfer_intr_enqueue(xfer); } static const struct usb_pipe_methods ehci_device_isoc_fs_methods = { .open = ehci_device_isoc_fs_open, .close = ehci_device_isoc_fs_close, .enter = ehci_device_isoc_fs_enter, .start = ehci_device_isoc_fs_start, }; /*------------------------------------------------------------------------* * ehci high speed isochronous support *------------------------------------------------------------------------*/ static void ehci_device_isoc_hs_open(struct usb_xfer *xfer) { ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); ehci_itd_t *td; uint32_t temp; uint8_t ds; usb_hs_bandwidth_alloc(xfer); /* initialize all TD's */ for (ds = 0; ds != 2; ds++) { for (td = xfer->td_start[ds]; td; td = td->obj_next) { /* set TD inactive */ td->itd_status[0] = 0; td->itd_status[1] = 0; td->itd_status[2] = 0; td->itd_status[3] = 0; td->itd_status[4] = 0; td->itd_status[5] = 0; td->itd_status[6] = 0; td->itd_status[7] = 0; /* set endpoint and address */ td->itd_bp[0] = htohc32(sc, EHCI_ITD_SET_ADDR(xfer->address) | EHCI_ITD_SET_ENDPT(UE_GET_ADDR(xfer->endpointno))); temp = EHCI_ITD_SET_MPL(xfer->max_packet_size & 0x7FF); /* set direction */ if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) { temp |= EHCI_ITD_SET_DIR_IN; } /* set maximum packet size */ td->itd_bp[1] = htohc32(sc, temp); /* set transfer multiplier */ td->itd_bp[2] = htohc32(sc, xfer->max_packet_count & 3); usb_pc_cpu_flush(td->page_cache); } } } static void ehci_device_isoc_hs_close(struct usb_xfer *xfer) { ehci_device_done(xfer, USB_ERR_CANCELLED); /* bandwidth must be freed after device done */ usb_hs_bandwidth_free(xfer); } static void ehci_device_isoc_hs_enter(struct usb_xfer *xfer) { struct 
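/*
 * Editor's note: the full-speed isochronous OUT path above sizes the
 * start-split schedule around the 188-byte budget that one microframe
 * of full-speed bus time can carry, which is where the "tlen += 187;
 * tlen /= 188;" round-up comes from.  Worked cases:
 *
 *	len = 150 -> T-count = 1, TP = ALL   (fits in one start-split)
 *	len = 400 -> (400 + 187) / 188 = 3 start-splits, TP = Begin
 *	len = 940 -> (940 + 187) / 188 = 5 start-splits (940 = 5 * 188)
 *
 * Each start-split then occupies one bit in the S-mask, while IN
 * transfers instead schedule complete-splits a few microframes later,
 * which is what the sa/sb shuffling above encodes.
 */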
usb_page_search buf_res; ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus); ehci_itd_t *td; ehci_itd_t *td_last = NULL; ehci_itd_t **pp_last; bus_size_t page_addr; uint32_t *plen; uint32_t status; uint32_t buf_offset; uint32_t nframes; uint32_t itd_offset[8 + 1]; uint8_t x; uint8_t td_no; uint8_t page_no; uint8_t shift = usbd_xfer_get_fps_shift(xfer); #ifdef USB_DEBUG uint8_t once = 1; #endif DPRINTFN(6, "xfer=%p next=%d nframes=%d shift=%d\n", xfer, xfer->endpoint->isoc_next, xfer->nframes, (int)shift); /* get the current frame index */ nframes = EOREAD4(sc, EHCI_FRINDEX) / 8; /* * check if the frame index is within the window where the frames * will be inserted */ buf_offset = (nframes - xfer->endpoint->isoc_next) & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1); if ((xfer->endpoint->is_synced == 0) || (buf_offset < (((xfer->nframes << shift) + 7) / 8))) { /* * If there is data underflow or the pipe queue is empty we * schedule the transfer a few frames ahead of the current * frame position. Else two isochronous transfers might * overlap. */ xfer->endpoint->isoc_next = (nframes + 3) & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1); xfer->endpoint->is_synced = 1; DPRINTFN(3, "start next=%d\n", xfer->endpoint->isoc_next); } /* * compute how many milliseconds the insertion is ahead of the * current frame position: */ buf_offset = (xfer->endpoint->isoc_next - nframes) & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1); /* * pre-compute when the isochronous transfer will be finished: */ xfer->isoc_time_complete = usb_isoc_time_expand(&sc->sc_bus, nframes) + buf_offset + (((xfer->nframes << shift) + 7) / 8); /* get the real number of frames */ nframes = xfer->nframes; buf_offset = 0; td_no = 0; plen = xfer->frlengths; /* toggle the DMA set we are using */ xfer->flags_int.curr_dma_set ^= 1; /* get next DMA set */ td = xfer->td_start[xfer->flags_int.curr_dma_set]; xfer->td_transfer_first = td; pp_last = &sc->sc_isoc_hs_p_last[xfer->endpoint->isoc_next]; /* store starting position */ xfer->qh_pos = xfer->endpoint->isoc_next; while (nframes) { if (td == NULL) { panic("%s:%d: out of TD's\n", __FUNCTION__, __LINE__); } if (pp_last >= &sc->sc_isoc_hs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]) { pp_last = &sc->sc_isoc_hs_p_last[0]; } /* range check */ if (*plen > xfer->max_frame_size) { #ifdef USB_DEBUG if (once) { once = 0; printf("%s: frame length(%d) exceeds %d bytes " "(frame truncated)\n", __FUNCTION__, *plen, xfer->max_frame_size); } #endif *plen = xfer->max_frame_size; } if (xfer->endpoint->usb_smask & (1 << td_no)) { status = (EHCI_ITD_SET_LEN(*plen) | EHCI_ITD_ACTIVE | EHCI_ITD_SET_PG(0)); td->itd_status[td_no] = htohc32(sc, status); itd_offset[td_no] = buf_offset; buf_offset += *plen; plen++; nframes --; } else { td->itd_status[td_no] = 0; /* not active */ itd_offset[td_no] = buf_offset; } td_no++; if ((td_no == 8) || (nframes == 0)) { /* the rest of the transfers are not active, if any */ for (x = td_no; x != 8; x++) { td->itd_status[x] = 0; /* not active */ } /* check if there is any data to be transferred */ if (itd_offset[0] != buf_offset) { page_no = 0; itd_offset[td_no] = buf_offset; /* get first page offset */ usbd_get_page(xfer->frbuffers, itd_offset[0], &buf_res); /* get page address */ page_addr = buf_res.physaddr & ~0xFFF; /* update page address */ td->itd_bp[0] &= htohc32(sc, 0xFFF); td->itd_bp[0] |= htohc32(sc, page_addr); for (x = 0; x != td_no; x++) { /* set page number and page offset */ status = (EHCI_ITD_SET_PG(page_no) | (buf_res.physaddr & 0xFFF)); td->itd_status[x] |= htohc32(sc, status); /* get next page offset 
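* The lookup below returns the physical page holding the next chunk; for
* the final chunk, buf_offset - 1 keeps the lookup inside the buffer.
* Comparing the result with page_addr decides whether another of the
* iTD's seven buffer pointer slots (BP0..BP6) must be consumed, which
* is why page_no may not exceed six.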
*/ if (itd_offset[x + 1] == buf_offset) { /* * We subtract one so that * we don't go off the last * page! */ usbd_get_page(xfer->frbuffers, buf_offset - 1, &buf_res); } else { usbd_get_page(xfer->frbuffers, itd_offset[x + 1], &buf_res); } /* check if we need a new page */ if ((buf_res.physaddr ^ page_addr) & ~0xFFF) { /* new page needed */ page_addr = buf_res.physaddr & ~0xFFF; if (page_no == 6) { panic("%s: too many pages\n", __FUNCTION__); } page_no++; /* update page address */ td->itd_bp[page_no] &= htohc32(sc, 0xFFF); td->itd_bp[page_no] |= htohc32(sc, page_addr); } } } /* set IOC bit if we are complete */ if (nframes == 0) { td->itd_status[td_no - 1] |= htohc32(sc, EHCI_ITD_IOC); } usb_pc_cpu_flush(td->page_cache); #ifdef USB_DEBUG if (ehcidebug > 15) { DPRINTF("HS-TD %d\n", nframes); ehci_dump_itd(sc, td); } #endif /* insert TD into schedule */ EHCI_APPEND_HS_TD(td, *pp_last); pp_last++; td_no = 0; td_last = td; td = td->obj_next; } } xfer->td_transfer_last = td_last; /* update isoc_next */ xfer->endpoint->isoc_next = (pp_last - &sc->sc_isoc_hs_p_last[0]) & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1); } static void ehci_device_isoc_hs_start(struct usb_xfer *xfer) { /* put transfer on interrupt queue */ ehci_transfer_intr_enqueue(xfer); } static const struct usb_pipe_methods ehci_device_isoc_hs_methods = { .open = ehci_device_isoc_hs_open, .close = ehci_device_isoc_hs_close, .enter = ehci_device_isoc_hs_enter, .start = ehci_device_isoc_hs_start, }; /*------------------------------------------------------------------------* * ehci root control support *------------------------------------------------------------------------* * Simulate a hardware hub by handling all the necessary requests. *------------------------------------------------------------------------*/ static const struct usb_device_descriptor ehci_devd = { sizeof(struct usb_device_descriptor), UDESC_DEVICE, /* type */ {0x00, 0x02}, /* USB version */ UDCLASS_HUB, /* class */ UDSUBCLASS_HUB, /* subclass */ UDPROTO_HSHUBSTT, /* protocol */ 64, /* max packet */ {0}, {0}, {0x00, 0x01}, /* device id */ 1, 2, 0, /* string indices */ 1 /* # of configurations */ }; static const struct usb_device_qualifier ehci_odevd = { sizeof(struct usb_device_qualifier), UDESC_DEVICE_QUALIFIER, /* type */ {0x00, 0x02}, /* USB version */ UDCLASS_HUB, /* class */ UDSUBCLASS_HUB, /* subclass */ UDPROTO_FSHUB, /* protocol */ 0, /* max packet */ 0, /* # of configurations */ 0 }; static const struct ehci_config_desc ehci_confd = { .confd = { .bLength = sizeof(struct usb_config_descriptor), .bDescriptorType = UDESC_CONFIG, .wTotalLength[0] = sizeof(ehci_confd), .bNumInterface = 1, .bConfigurationValue = 1, .iConfiguration = 0, .bmAttributes = UC_SELF_POWERED, .bMaxPower = 0 /* max power */ }, .ifcd = { .bLength = sizeof(struct usb_interface_descriptor), .bDescriptorType = UDESC_INTERFACE, .bNumEndpoints = 1, .bInterfaceClass = UICLASS_HUB, .bInterfaceSubClass = UISUBCLASS_HUB, .bInterfaceProtocol = 0, }, .endpd = { .bLength = sizeof(struct usb_endpoint_descriptor), .bDescriptorType = UDESC_ENDPOINT, .bEndpointAddress = UE_DIR_IN | EHCI_INTR_ENDPT, .bmAttributes = UE_INTERRUPT, .wMaxPacketSize[0] = 8, /* max packet (63 ports) */ .bInterval = 255, }, }; static const struct usb_hub_descriptor ehci_hubd = { .bDescLength = 0, /* dynamic length */ .bDescriptorType = UDESC_HUB, }; uint16_t ehci_get_port_speed_portsc(struct ehci_softc *sc, uint16_t index) { uint32_t v; v = EOREAD4(sc, EHCI_PORTSC(index)); v = (v >> EHCI_PORTSC_PSPD_SHIFT) & EHCI_PORTSC_PSPD_MASK; if (v
== EHCI_PORT_SPEED_HIGH) return (UPS_HIGH_SPEED); if (v == EHCI_PORT_SPEED_LOW) return (UPS_LOW_SPEED); return (0); } uint16_t ehci_get_port_speed_hostc(struct ehci_softc *sc, uint16_t index) { uint32_t v; v = EOREAD4(sc, EHCI_HOSTC(index)); v = (v >> EHCI_HOSTC_PSPD_SHIFT) & EHCI_HOSTC_PSPD_MASK; if (v == EHCI_PORT_SPEED_HIGH) return (UPS_HIGH_SPEED); if (v == EHCI_PORT_SPEED_LOW) return (UPS_LOW_SPEED); return (0); } static void ehci_disown(ehci_softc_t *sc, uint16_t index, uint8_t lowspeed) { uint32_t port; uint32_t v; DPRINTF("index=%d lowspeed=%d\n", index, lowspeed); port = EHCI_PORTSC(index); v = EOREAD4(sc, port) & ~EHCI_PS_CLEAR; EOWRITE4(sc, port, v | EHCI_PS_PO); } static usb_error_t ehci_roothub_exec(struct usb_device *udev, struct usb_device_request *req, const void **pptr, uint16_t *plength) { ehci_softc_t *sc = EHCI_BUS2SC(udev->bus); const char *str_ptr; const void *ptr; uint32_t port; uint32_t v; uint16_t len; uint16_t i; uint16_t value; uint16_t index; usb_error_t err; USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); /* buffer reset */ ptr = (const void *)&sc->sc_hub_desc; len = 0; err = 0; value = UGETW(req->wValue); index = UGETW(req->wIndex); DPRINTFN(3, "type=0x%02x request=0x%02x wLen=0x%04x " "wValue=0x%04x wIndex=0x%04x\n", req->bmRequestType, req->bRequest, UGETW(req->wLength), value, index); #define C(x,y) ((x) | ((y) << 8)) switch (C(req->bRequest, req->bmRequestType)) { case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE): case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE): case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT): /* * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops * for the integrated root hub. */ break; case C(UR_GET_CONFIG, UT_READ_DEVICE): len = 1; sc->sc_hub_desc.temp[0] = sc->sc_conf; break; case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): switch (value >> 8) { case UDESC_DEVICE: if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } len = sizeof(ehci_devd); ptr = (const void *)&ehci_devd; break; /* * We can't really operate at another speed, * but the specification says we need this * descriptor: */ case UDESC_DEVICE_QUALIFIER: if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } len = sizeof(ehci_odevd); ptr = (const void *)&ehci_odevd; break; case UDESC_CONFIG: if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } len = sizeof(ehci_confd); ptr = (const void *)&ehci_confd; break; case UDESC_STRING: switch (value & 0xff) { case 0: /* Language table */ str_ptr = "\001"; break; case 1: /* Vendor */ str_ptr = sc->sc_vendor; break; case 2: /* Product */ str_ptr = "EHCI root HUB"; break; default: str_ptr = ""; break; } len = usb_make_str_desc( sc->sc_hub_desc.temp, sizeof(sc->sc_hub_desc.temp), str_ptr); break; default: err = USB_ERR_IOERROR; goto done; } break; case C(UR_GET_INTERFACE, UT_READ_INTERFACE): len = 1; sc->sc_hub_desc.temp[0] = 0; break; case C(UR_GET_STATUS, UT_READ_DEVICE): len = 2; USETW(sc->sc_hub_desc.stat.wStatus, UDS_SELF_POWERED); break; case C(UR_GET_STATUS, UT_READ_INTERFACE): case C(UR_GET_STATUS, UT_READ_ENDPOINT): len = 2; USETW(sc->sc_hub_desc.stat.wStatus, 0); break; case C(UR_SET_ADDRESS, UT_WRITE_DEVICE): if (value >= EHCI_MAX_DEVICES) { err = USB_ERR_IOERROR; goto done; } sc->sc_addr = value; break; case C(UR_SET_CONFIG, UT_WRITE_DEVICE): if ((value != 0) && (value != 1)) { err = USB_ERR_IOERROR; goto done; } sc->sc_conf = value; break; case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE): break; case C(UR_SET_FEATURE, UT_WRITE_DEVICE): case C(UR_SET_FEATURE, UT_WRITE_INTERFACE): case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT): err = 
USB_ERR_IOERROR; goto done; case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE): break; case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT): break; /* Hub requests */ case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): break; case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): DPRINTFN(9, "UR_CLEAR_PORT_FEATURE\n"); if ((index < 1) || (index > sc->sc_noport)) { err = USB_ERR_IOERROR; goto done; } port = EHCI_PORTSC(index); v = EOREAD4(sc, port) & ~EHCI_PS_CLEAR; switch (value) { case UHF_PORT_ENABLE: EOWRITE4(sc, port, v & ~EHCI_PS_PE); break; case UHF_PORT_SUSPEND: if ((v & EHCI_PS_SUSP) && (!(v & EHCI_PS_FPR))) { /* * Waking up a High Speed device: force port * resume, unless a resume is already in * progress. */ EOWRITE4(sc, port, v | EHCI_PS_FPR); } /* wait 20ms for resume sequence to complete */ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 50); EOWRITE4(sc, port, v & ~(EHCI_PS_SUSP | EHCI_PS_FPR | (3 << 10) /* High Speed */ )); /* 4ms settle time */ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 250); break; case UHF_PORT_POWER: EOWRITE4(sc, port, v & ~EHCI_PS_PP); break; case UHF_PORT_TEST: DPRINTFN(3, "clear port test " "%d\n", index); break; case UHF_PORT_INDICATOR: DPRINTFN(3, "clear port ind " "%d\n", index); EOWRITE4(sc, port, v & ~EHCI_PS_PIC); break; case UHF_C_PORT_CONNECTION: EOWRITE4(sc, port, v | EHCI_PS_CSC); break; case UHF_C_PORT_ENABLE: EOWRITE4(sc, port, v | EHCI_PS_PEC); break; case UHF_C_PORT_SUSPEND: EOWRITE4(sc, port, v | EHCI_PS_SUSP); break; case UHF_C_PORT_OVER_CURRENT: EOWRITE4(sc, port, v | EHCI_PS_OCC); break; case UHF_C_PORT_RESET: sc->sc_isreset = 0; break; default: err = USB_ERR_IOERROR; goto done; } break; case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } v = EREAD4(sc, EHCI_HCSPARAMS); sc->sc_hub_desc.hubd = ehci_hubd; sc->sc_hub_desc.hubd.bNbrPorts = sc->sc_noport; if (EHCI_HCS_PPC(v)) i = UHD_PWR_INDIVIDUAL; else i = UHD_PWR_NO_SWITCH; if (EHCI_HCS_P_INDICATOR(v)) i |= UHD_PORT_IND; USETW(sc->sc_hub_desc.hubd.wHubCharacteristics, i); /* XXX can't find out?
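* The EHCI controller provides no way to query the board's actual
* power-on to power-good time, so a conservative default is used:
* bPwrOn2PwrGood is expressed in 2 ms units per the USB hub
* descriptor format, so the value 200 set below advertises 400 ms.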
*/ sc->sc_hub_desc.hubd.bPwrOn2PwrGood = 200; /* XXX don't know if ports are removable or not */ sc->sc_hub_desc.hubd.bDescLength = 8 + ((sc->sc_noport + 7) / 8); len = sc->sc_hub_desc.hubd.bDescLength; break; case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): len = 16; memset(sc->sc_hub_desc.temp, 0, 16); break; case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): DPRINTFN(9, "get port status i=%d\n", index); if ((index < 1) || (index > sc->sc_noport)) { err = USB_ERR_IOERROR; goto done; } v = EOREAD4(sc, EHCI_PORTSC(index)); DPRINTFN(9, "port status=0x%04x\n", v); if (sc->sc_flags & EHCI_SCFLG_TT) { if (sc->sc_vendor_get_port_speed != NULL) { i = sc->sc_vendor_get_port_speed(sc, index); } else { device_printf(sc->sc_bus.bdev, "EHCI_SCFLG_TT quirk is set but " "sc_vendor_get_port_speed() is NULL\n"); i = UPS_HIGH_SPEED; } } else { i = UPS_HIGH_SPEED; } if (v & EHCI_PS_CS) i |= UPS_CURRENT_CONNECT_STATUS; if (v & EHCI_PS_PE) i |= UPS_PORT_ENABLED; if ((v & EHCI_PS_SUSP) && !(v & EHCI_PS_FPR)) i |= UPS_SUSPEND; if (v & EHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR; if (v & EHCI_PS_PR) i |= UPS_RESET; if (v & EHCI_PS_PP) i |= UPS_PORT_POWER; USETW(sc->sc_hub_desc.ps.wPortStatus, i); i = 0; if (v & EHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS; if (v & EHCI_PS_PEC) i |= UPS_C_PORT_ENABLED; if (v & EHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR; if (v & EHCI_PS_FPR) i |= UPS_C_SUSPEND; if (sc->sc_isreset) i |= UPS_C_PORT_RESET; USETW(sc->sc_hub_desc.ps.wPortChange, i); len = sizeof(sc->sc_hub_desc.ps); break; case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): err = USB_ERR_IOERROR; goto done; case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): break; case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): if ((index < 1) || (index > sc->sc_noport)) { err = USB_ERR_IOERROR; goto done; } port = EHCI_PORTSC(index); v = EOREAD4(sc, port) & ~EHCI_PS_CLEAR; switch (value) { case UHF_PORT_ENABLE: EOWRITE4(sc, port, v | EHCI_PS_PE); break; case UHF_PORT_SUSPEND: EOWRITE4(sc, port, v | EHCI_PS_SUSP); break; case UHF_PORT_RESET: DPRINTFN(6, "reset port %d\n", index); #ifdef USB_DEBUG if (ehcinohighspeed) { /* * Connect USB device to companion * controller. */ ehci_disown(sc, index, 1); break; } #endif if (EHCI_PS_IS_LOWSPEED(v) && (sc->sc_flags & EHCI_SCFLG_TT) == 0) { /* Low speed device, give up ownership. */ ehci_disown(sc, index, 1); break; } /* Start reset sequence. */ v &= ~(EHCI_PS_PE | EHCI_PS_PR); EOWRITE4(sc, port, v | EHCI_PS_PR); /* Wait for reset to complete. */ usb_pause_mtx(&sc->sc_bus.bus_mtx, USB_MS_TO_TICKS(usb_port_root_reset_delay)); /* Terminate reset sequence. */ if (!(sc->sc_flags & EHCI_SCFLG_NORESTERM)) EOWRITE4(sc, port, v); /* Wait for HC to complete reset.
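* Once software clears EHCI_PS_PR the controller needs up to
* EHCI_PORT_RESET_COMPLETE milliseconds before the port becomes
* usable again; if the read-back below still shows EHCI_PS_PR set
* after that, the port reset has timed out.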
*/ usb_pause_mtx(&sc->sc_bus.bus_mtx, USB_MS_TO_TICKS(EHCI_PORT_RESET_COMPLETE)); v = EOREAD4(sc, port); DPRINTF("ehci after reset, status=0x%08x\n", v); if (v & EHCI_PS_PR) { device_printf(sc->sc_bus.bdev, "port reset timeout\n"); err = USB_ERR_TIMEOUT; goto done; } if (!(v & EHCI_PS_PE) && (sc->sc_flags & EHCI_SCFLG_TT) == 0) { /* Not a high speed device, give up ownership.*/ ehci_disown(sc, index, 0); break; } sc->sc_isreset = 1; DPRINTF("ehci port %d reset, status = 0x%08x\n", index, v); break; case UHF_PORT_POWER: DPRINTFN(3, "set port power %d\n", index); EOWRITE4(sc, port, v | EHCI_PS_PP); break; case UHF_PORT_TEST: DPRINTFN(3, "set port test %d\n", index); break; case UHF_PORT_INDICATOR: DPRINTFN(3, "set port ind %d\n", index); EOWRITE4(sc, port, v | EHCI_PS_PIC); break; default: err = USB_ERR_IOERROR; goto done; } break; case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER): case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER): case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER): case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER): break; default: err = USB_ERR_IOERROR; goto done; } done: *plength = len; *pptr = ptr; return (err); } static void ehci_xfer_setup(struct usb_setup_params *parm) { struct usb_page_search page_info; struct usb_page_cache *pc; ehci_softc_t *sc; struct usb_xfer *xfer; void *last_obj; uint32_t nqtd; uint32_t nqh; uint32_t nsitd; uint32_t nitd; uint32_t n; sc = EHCI_BUS2SC(parm->udev->bus); xfer = parm->curr_xfer; nqtd = 0; nqh = 0; nsitd = 0; nitd = 0; /* * compute maximum number of some structures */ if (parm->methods == &ehci_device_ctrl_methods) { /* * The proof for the "nqtd" formula is illustrated like * this: * * +------------------------------------+ * | | * | |remainder -> | * | +-----+---+ | * | | xxx | x | frm 0 | * | +-----+---++ | * | | xxx | xx | frm 1 | * | +-----+----+ | * | ... | * +------------------------------------+ * * "xxx" means a completely full USB transfer descriptor * * "x" and "xx" means a short USB packet * * For the remainder of an USB transfer modulo * "max_data_length" we need two USB transfer descriptors. * One to transfer the remaining data and one to finalise * with a zero length packet in case the "force_short_xfer" * flag is set. We only need two USB transfer descriptors in * the case where the transfer length of the first one is a * factor of "max_frame_size". The rest of the needed USB * transfer descriptors is given by the buffer size divided * by the maximum data payload. 
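* * Worked example (illustrative numbers only): a control transfer
* with nframes = 2 reserves 2 * 2 = 4 descriptors for remainders and
* optional zero length packets, plus 1 descriptor for the STATUS
* stage, plus max_data_length / max_hc_frame_size full-size
* descriptors for the body of the data.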
*/ parm->hc_max_packet_size = 0x400; parm->hc_max_packet_count = 1; parm->hc_max_frame_size = EHCI_QTD_PAYLOAD_MAX; xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); nqh = 1; nqtd = ((2 * xfer->nframes) + 1 /* STATUS */ + (xfer->max_data_length / xfer->max_hc_frame_size)); } else if (parm->methods == &ehci_device_bulk_methods) { parm->hc_max_packet_size = 0x400; parm->hc_max_packet_count = 1; parm->hc_max_frame_size = EHCI_QTD_PAYLOAD_MAX; xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); nqh = 1; nqtd = ((2 * xfer->nframes) + (xfer->max_data_length / xfer->max_hc_frame_size)); } else if (parm->methods == &ehci_device_intr_methods) { if (parm->speed == USB_SPEED_HIGH) { parm->hc_max_packet_size = 0x400; parm->hc_max_packet_count = 3; } else if (parm->speed == USB_SPEED_FULL) { parm->hc_max_packet_size = USB_FS_BYTES_PER_HS_UFRAME; parm->hc_max_packet_count = 1; } else { parm->hc_max_packet_size = USB_FS_BYTES_PER_HS_UFRAME / 8; parm->hc_max_packet_count = 1; } parm->hc_max_frame_size = EHCI_QTD_PAYLOAD_MAX; xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); nqh = 1; nqtd = ((2 * xfer->nframes) + (xfer->max_data_length / xfer->max_hc_frame_size)); } else if (parm->methods == &ehci_device_isoc_fs_methods) { parm->hc_max_packet_size = 0x3FF; parm->hc_max_packet_count = 1; parm->hc_max_frame_size = 0x3FF; xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); nsitd = xfer->nframes; } else if (parm->methods == &ehci_device_isoc_hs_methods) { parm->hc_max_packet_size = 0x400; parm->hc_max_packet_count = 3; parm->hc_max_frame_size = 0xC00; xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); nitd = ((xfer->nframes + 7) / 8) << usbd_xfer_get_fps_shift(xfer); } else { parm->hc_max_packet_size = 0x400; parm->hc_max_packet_count = 1; parm->hc_max_frame_size = 0x400; usbd_transfer_setup_sub(parm); } alloc_dma_set: if (parm->err) { return; } /* * Allocate queue heads and transfer descriptors */ last_obj = NULL; if (usbd_transfer_setup_sub_malloc( parm, &pc, sizeof(ehci_itd_t), EHCI_ITD_ALIGN, nitd)) { parm->err = USB_ERR_NOMEM; return; } if (parm->buf) { for (n = 0; n != nitd; n++) { ehci_itd_t *td; usbd_get_page(pc + n, 0, &page_info); td = page_info.buffer; /* init TD */ td->itd_self = htohc32(sc, page_info.physaddr | EHCI_LINK_ITD); td->obj_next = last_obj; td->page_cache = pc + n; last_obj = td; usb_pc_cpu_flush(pc + n); } } if (usbd_transfer_setup_sub_malloc( parm, &pc, sizeof(ehci_sitd_t), EHCI_SITD_ALIGN, nsitd)) { parm->err = USB_ERR_NOMEM; return; } if (parm->buf) { for (n = 0; n != nsitd; n++) { ehci_sitd_t *td; usbd_get_page(pc + n, 0, &page_info); td = page_info.buffer; /* init TD */ td->sitd_self = htohc32(sc, page_info.physaddr | EHCI_LINK_SITD); td->obj_next = last_obj; td->page_cache = pc + n; last_obj = td; usb_pc_cpu_flush(pc + n); } } if (usbd_transfer_setup_sub_malloc( parm, &pc, sizeof(ehci_qtd_t), EHCI_QTD_ALIGN, nqtd)) { parm->err = USB_ERR_NOMEM; return; } if (parm->buf) { for (n = 0; n != nqtd; n++) { ehci_qtd_t *qtd; usbd_get_page(pc + n, 0, &page_info); qtd = page_info.buffer; /* init TD */ qtd->qtd_self = htohc32(sc, page_info.physaddr); qtd->obj_next = last_obj; qtd->page_cache = pc + n; last_obj = qtd; usb_pc_cpu_flush(pc + n); } } xfer->td_start[xfer->flags_int.curr_dma_set] = last_obj; last_obj = NULL; if (usbd_transfer_setup_sub_malloc( parm, &pc, sizeof(ehci_qh_t), EHCI_QH_ALIGN, nqh)) { parm->err = USB_ERR_NOMEM; return; } if (parm->buf) { for (n = 0; n != nqh; n++) { ehci_qh_t *qh; usbd_get_page(pc + n, 0, 
&page_info); qh = page_info.buffer; /* init QH */ qh->qh_self = htohc32(sc, page_info.physaddr | EHCI_LINK_QH); qh->obj_next = last_obj; qh->page_cache = pc + n; last_obj = qh; usb_pc_cpu_flush(pc + n); } } xfer->qh_start[xfer->flags_int.curr_dma_set] = last_obj; if (!xfer->flags_int.curr_dma_set) { xfer->flags_int.curr_dma_set = 1; goto alloc_dma_set; } } static void ehci_xfer_unsetup(struct usb_xfer *xfer) { return; } static void ehci_ep_init(struct usb_device *udev, struct usb_endpoint_descriptor *edesc, struct usb_endpoint *ep) { ehci_softc_t *sc = EHCI_BUS2SC(udev->bus); DPRINTFN(2, "endpoint=%p, addr=%d, endpt=%d, mode=%d (%d)\n", ep, udev->address, edesc->bEndpointAddress, udev->flags.usb_mode, sc->sc_addr); if (udev->device_index != sc->sc_addr) { if ((udev->speed != USB_SPEED_HIGH) && ((udev->hs_hub_addr == 0) || (udev->hs_port_no == 0) || (udev->parent_hs_hub == NULL) || (udev->parent_hs_hub->hub == NULL))) { /* We need a transaction translator */ goto done; } switch (edesc->bmAttributes & UE_XFERTYPE) { case UE_CONTROL: ep->methods = &ehci_device_ctrl_methods; break; case UE_INTERRUPT: ep->methods = &ehci_device_intr_methods; break; case UE_ISOCHRONOUS: if (udev->speed == USB_SPEED_HIGH) { ep->methods = &ehci_device_isoc_hs_methods; } else if (udev->speed == USB_SPEED_FULL) { ep->methods = &ehci_device_isoc_fs_methods; } break; case UE_BULK: ep->methods = &ehci_device_bulk_methods; break; default: /* do nothing */ break; } } done: return; } static void ehci_get_dma_delay(struct usb_device *udev, uint32_t *pus) { /* * Wait until the hardware has finished any possible use of * the transfer descriptor(s) and QH */ *pus = (1125); /* microseconds */ } static void ehci_device_resume(struct usb_device *udev) { ehci_softc_t *sc = EHCI_BUS2SC(udev->bus); struct usb_xfer *xfer; const struct usb_pipe_methods *methods; DPRINTF("\n"); USB_BUS_LOCK(udev->bus); TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { if (xfer->xroot->udev == udev) { methods = xfer->endpoint->methods; if ((methods == &ehci_device_bulk_methods) || (methods == &ehci_device_ctrl_methods)) { EHCI_APPEND_QH(xfer->qh_start[xfer->flags_int.curr_dma_set], sc->sc_async_p_last); } if (methods == &ehci_device_intr_methods) { EHCI_APPEND_QH(xfer->qh_start[xfer->flags_int.curr_dma_set], sc->sc_intr_p_last[xfer->qh_pos]); } } } USB_BUS_UNLOCK(udev->bus); return; } static void ehci_device_suspend(struct usb_device *udev) { ehci_softc_t *sc = EHCI_BUS2SC(udev->bus); struct usb_xfer *xfer; const struct usb_pipe_methods *methods; DPRINTF("\n"); USB_BUS_LOCK(udev->bus); TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { if (xfer->xroot->udev == udev) { methods = xfer->endpoint->methods; if ((methods == &ehci_device_bulk_methods) || (methods == &ehci_device_ctrl_methods)) { EHCI_REMOVE_QH(xfer->qh_start[xfer->flags_int.curr_dma_set], sc->sc_async_p_last); } if (methods == &ehci_device_intr_methods) { EHCI_REMOVE_QH(xfer->qh_start[xfer->flags_int.curr_dma_set], sc->sc_intr_p_last[xfer->qh_pos]); } } } USB_BUS_UNLOCK(udev->bus); } static void ehci_set_hw_power_sleep(struct usb_bus *bus, uint32_t state) { struct ehci_softc *sc = EHCI_BUS2SC(bus); switch (state) { case USB_HW_POWER_SUSPEND: case USB_HW_POWER_SHUTDOWN: ehci_suspend(sc); break; case USB_HW_POWER_RESUME: ehci_resume(sc); break; default: break; } } static void ehci_set_hw_power(struct usb_bus *bus) { ehci_softc_t *sc = EHCI_BUS2SC(bus); uint32_t temp; uint32_t flags; DPRINTF("\n"); USB_BUS_LOCK(bus); flags = bus->hw_power_state; temp = EOREAD4(sc, EHCI_USBCMD); 
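/* * Demand-gate the transfer schedules: EHCI_CMD_ASE (asynchronous, * control and bulk) and EHCI_CMD_PSE (periodic, interrupt and * isochronous) are cleared here and re-enabled below only when the * corresponding transfer types are active, so an idle schedule does * not consume memory bandwidth. */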
temp &= ~(EHCI_CMD_ASE | EHCI_CMD_PSE); if (flags & (USB_HW_POWER_CONTROL | USB_HW_POWER_BULK)) { DPRINTF("Async is active\n"); temp |= EHCI_CMD_ASE; } if (flags & (USB_HW_POWER_INTERRUPT | USB_HW_POWER_ISOC)) { DPRINTF("Periodic is active\n"); temp |= EHCI_CMD_PSE; } EOWRITE4(sc, EHCI_USBCMD, temp); USB_BUS_UNLOCK(bus); return; } static void ehci_start_dma_delay_second(struct usb_xfer *xfer) { struct ehci_softc *sc = EHCI_BUS2SC(xfer->xroot->bus); DPRINTF("\n"); /* trigger doorbell */ ehci_doorbell_async(sc); /* give the doorbell 4ms */ usbd_transfer_timeout_ms(xfer, (void (*)(void *))&usb_dma_delay_done_cb, 4); } /* * Ring the doorbell twice before freeing any DMA descriptors. Some host * controllers apparently cache the QH descriptors and need a message * that the cache needs to be discarded. */ static void ehci_start_dma_delay(struct usb_xfer *xfer) { struct ehci_softc *sc = EHCI_BUS2SC(xfer->xroot->bus); DPRINTF("\n"); /* trigger doorbell */ ehci_doorbell_async(sc); /* give the doorbell 4ms */ usbd_transfer_timeout_ms(xfer, (void (*)(void *))&ehci_start_dma_delay_second, 4); } static const struct usb_bus_methods ehci_bus_methods = { .endpoint_init = ehci_ep_init, .xfer_setup = ehci_xfer_setup, .xfer_unsetup = ehci_xfer_unsetup, .get_dma_delay = ehci_get_dma_delay, .device_resume = ehci_device_resume, .device_suspend = ehci_device_suspend, .set_hw_power = ehci_set_hw_power, .set_hw_power_sleep = ehci_set_hw_power_sleep, .roothub_exec = ehci_roothub_exec, .xfer_poll = ehci_do_poll, .start_dma_delay = ehci_start_dma_delay, }; diff --git a/sys/dev/usb/controller/ohci.c b/sys/dev/usb/controller/ohci.c index 206c75314d0c..93785c085bed 100644 --- a/sys/dev/usb/controller/ohci.c +++ b/sys/dev/usb/controller/ohci.c @@ -1,2734 +1,2734 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved. * Copyright (c) 1998 Lennart Augustsson. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * USB Open Host Controller driver. 
* * OHCI spec: http://www.compaq.com/productinfo/development/openhci.html * USB spec: http://www.usb.org/developers/docs/usbspec.zip */ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR ohcidebug #include #include #include #include #include #include #include #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ #include #include #define OHCI_BUS2SC(bus) \ ((ohci_softc_t *)(((uint8_t *)(bus)) - \ ((uint8_t *)&(((ohci_softc_t *)0)->sc_bus)))) #ifdef USB_DEBUG static int ohcidebug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, ohci, CTLFLAG_RW, 0, "USB ohci"); SYSCTL_INT(_hw_usb_ohci, OID_AUTO, debug, CTLFLAG_RWTUN, &ohcidebug, 0, "ohci debug level"); static void ohci_dumpregs(ohci_softc_t *); static void ohci_dump_tds(ohci_td_t *); static uint8_t ohci_dump_td(ohci_td_t *); static void ohci_dump_ed(ohci_ed_t *); static uint8_t ohci_dump_itd(ohci_itd_t *); static void ohci_dump_itds(ohci_itd_t *); #endif #define OBARR(sc) bus_space_barrier((sc)->sc_io_tag, (sc)->sc_io_hdl, 0, (sc)->sc_io_size, \ BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE) #define OWRITE1(sc, r, x) \ do { OBARR(sc); bus_space_write_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (r), (x)); } while (0) #define OWRITE2(sc, r, x) \ do { OBARR(sc); bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (r), (x)); } while (0) #define OWRITE4(sc, r, x) \ do { OBARR(sc); bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (r), (x)); } while (0) #define OREAD1(sc, r) (OBARR(sc), bus_space_read_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (r))) #define OREAD2(sc, r) (OBARR(sc), bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (r))) #define OREAD4(sc, r) (OBARR(sc), bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (r))) #define OHCI_INTR_ENDPT 1 static const struct usb_bus_methods ohci_bus_methods; static const struct usb_pipe_methods ohci_device_bulk_methods; static const struct usb_pipe_methods ohci_device_ctrl_methods; static const struct usb_pipe_methods ohci_device_intr_methods; static const struct usb_pipe_methods ohci_device_isoc_methods; static void ohci_do_poll(struct usb_bus *bus); static void ohci_device_done(struct usb_xfer *xfer, usb_error_t error); static void ohci_timeout(void *arg); static uint8_t ohci_check_transfer(struct usb_xfer *xfer); static void ohci_root_intr(ohci_softc_t *sc); struct ohci_std_temp { struct usb_page_cache *pc; ohci_td_t *td; ohci_td_t *td_next; uint32_t average; uint32_t td_flags; uint32_t len; uint16_t max_frame_size; uint8_t shortpkt; uint8_t setup_alt_next; uint8_t last_frame; }; static struct ohci_hcca * ohci_get_hcca(ohci_softc_t *sc) { usb_pc_cpu_invalidate(&sc->sc_hw.hcca_pc); return (sc->sc_hcca_p); } void ohci_iterate_hw_softc(struct usb_bus *bus, usb_bus_mem_sub_cb_t *cb) { struct ohci_softc *sc = OHCI_BUS2SC(bus); uint32_t i; cb(bus, &sc->sc_hw.hcca_pc, &sc->sc_hw.hcca_pg, sizeof(ohci_hcca_t), OHCI_HCCA_ALIGN); cb(bus, &sc->sc_hw.ctrl_start_pc, &sc->sc_hw.ctrl_start_pg, sizeof(ohci_ed_t), OHCI_ED_ALIGN); cb(bus, &sc->sc_hw.bulk_start_pc, &sc->sc_hw.bulk_start_pg, sizeof(ohci_ed_t), OHCI_ED_ALIGN); cb(bus, &sc->sc_hw.isoc_start_pc, &sc->sc_hw.isoc_start_pg, sizeof(ohci_ed_t), OHCI_ED_ALIGN); for (i = 0; i != OHCI_NO_EDS; i++) { cb(bus, sc->sc_hw.intr_start_pc + i, sc->sc_hw.intr_start_pg + i, sizeof(ohci_ed_t), OHCI_ED_ALIGN); } } static usb_error_t 
ohci_controller_init(ohci_softc_t *sc, int do_suspend) { struct usb_page_search buf_res; uint32_t i; uint32_t ctl; uint32_t ival; uint32_t hcr; uint32_t fm; uint32_t per; uint32_t desca; /* Determine in what context we are running. */ ctl = OREAD4(sc, OHCI_CONTROL); if (ctl & OHCI_IR) { /* SMM active, request change */ DPRINTF("SMM active, request owner change\n"); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_OCR); for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) { usb_pause_mtx(NULL, hz / 1000); ctl = OREAD4(sc, OHCI_CONTROL); } if (ctl & OHCI_IR) { device_printf(sc->sc_bus.bdev, "SMM does not respond, resetting\n"); OWRITE4(sc, OHCI_CONTROL, OHCI_HCFS_RESET); goto reset; } } else { DPRINTF("cold started\n"); reset: /* controller was cold started */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(USB_BUS_RESET_DELAY)); } /* * This reset should not be necessary according to the OHCI spec, but * without it some controllers do not start. */ DPRINTF("%s: resetting\n", device_get_nameunit(sc->sc_bus.bdev)); OWRITE4(sc, OHCI_CONTROL, OHCI_HCFS_RESET); usb_pause_mtx(NULL, USB_MS_TO_TICKS(USB_BUS_RESET_DELAY)); /* we now own the host controller and the bus has been reset */ ival = OHCI_GET_IVAL(OREAD4(sc, OHCI_FM_INTERVAL)); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_HCR); /* Reset HC */ /* nominal time for a reset is 10 us */ for (i = 0; i < 10; i++) { DELAY(10); hcr = OREAD4(sc, OHCI_COMMAND_STATUS) & OHCI_HCR; if (!hcr) { break; } } if (hcr) { device_printf(sc->sc_bus.bdev, "reset timeout\n"); return (USB_ERR_IOERROR); } #ifdef USB_DEBUG if (ohcidebug > 15) { ohci_dumpregs(sc); } #endif if (do_suspend) { OWRITE4(sc, OHCI_CONTROL, OHCI_HCFS_SUSPEND); return (USB_ERR_NORMAL_COMPLETION); } /* The controller is now in SUSPEND state, we have 2ms to finish. */ /* set up HC registers */ usbd_get_page(&sc->sc_hw.hcca_pc, 0, &buf_res); OWRITE4(sc, OHCI_HCCA, buf_res.physaddr); usbd_get_page(&sc->sc_hw.ctrl_start_pc, 0, &buf_res); OWRITE4(sc, OHCI_CONTROL_HEAD_ED, buf_res.physaddr); usbd_get_page(&sc->sc_hw.bulk_start_pc, 0, &buf_res); OWRITE4(sc, OHCI_BULK_HEAD_ED, buf_res.physaddr); /* disable all interrupts and then switch on all desired interrupts */ OWRITE4(sc, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS); OWRITE4(sc, OHCI_INTERRUPT_ENABLE, sc->sc_eintrs | OHCI_MIE); /* switch on desired functional features */ ctl = OREAD4(sc, OHCI_CONTROL); ctl &= ~(OHCI_CBSR_MASK | OHCI_LES | OHCI_HCFS_MASK | OHCI_IR); ctl |= OHCI_PLE | OHCI_IE | OHCI_CLE | OHCI_BLE | OHCI_RATIO_1_4 | OHCI_HCFS_OPERATIONAL; /* And finally start it! */ OWRITE4(sc, OHCI_CONTROL, ctl); /* * The controller is now OPERATIONAL. Set some final * registers that should have been set earlier, but that the * controller ignores when in the SUSPEND state. */ fm = (OREAD4(sc, OHCI_FM_INTERVAL) & OHCI_FIT) ^ OHCI_FIT; fm |= OHCI_FSMPS(ival) | ival; OWRITE4(sc, OHCI_FM_INTERVAL, fm); per = OHCI_PERIODIC(ival); /* 90% periodic */ OWRITE4(sc, OHCI_PERIODIC_START, per); /* Fiddle the No OverCurrent Protection bit to avoid chip bug. */ desca = OREAD4(sc, OHCI_RH_DESCRIPTOR_A); OWRITE4(sc, OHCI_RH_DESCRIPTOR_A, desca | OHCI_NOCP); OWRITE4(sc, OHCI_RH_STATUS, OHCI_LPSC); /* Enable port power */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(OHCI_ENABLE_POWER_DELAY)); OWRITE4(sc, OHCI_RH_DESCRIPTOR_A, desca); /* * The AMD756 requires a delay before re-reading the register, * otherwise it will occasionally report 0 ports.
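* The loop below works around this by re-reading OHCI_RH_DESCRIPTOR_A
* up to ten times, pausing OHCI_READ_DESC_DELAY milliseconds between
* reads, until a non-zero number of downstream ports is reported.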
*/ sc->sc_noport = 0; for (i = 0; (i < 10) && (sc->sc_noport == 0); i++) { usb_pause_mtx(NULL, USB_MS_TO_TICKS(OHCI_READ_DESC_DELAY)); sc->sc_noport = OHCI_GET_NDP(OREAD4(sc, OHCI_RH_DESCRIPTOR_A)); } #ifdef USB_DEBUG if (ohcidebug > 5) { ohci_dumpregs(sc); } #endif return (USB_ERR_NORMAL_COMPLETION); } static struct ohci_ed * ohci_init_ed(struct usb_page_cache *pc) { struct usb_page_search buf_res; struct ohci_ed *ed; usbd_get_page(pc, 0, &buf_res); ed = buf_res.buffer; ed->ed_self = htole32(buf_res.physaddr); ed->ed_flags = htole32(OHCI_ED_SKIP); ed->page_cache = pc; return (ed); } usb_error_t ohci_init(ohci_softc_t *sc) { struct usb_page_search buf_res; uint16_t i; uint16_t bit; uint16_t x; uint16_t y; DPRINTF("start\n"); sc->sc_eintrs = OHCI_NORMAL_INTRS; /* * Setup all ED's */ sc->sc_ctrl_p_last = ohci_init_ed(&sc->sc_hw.ctrl_start_pc); sc->sc_bulk_p_last = ohci_init_ed(&sc->sc_hw.bulk_start_pc); sc->sc_isoc_p_last = ohci_init_ed(&sc->sc_hw.isoc_start_pc); for (i = 0; i != OHCI_NO_EDS; i++) { sc->sc_intr_p_last[i] = ohci_init_ed(sc->sc_hw.intr_start_pc + i); } /* * the QHs are arranged to give poll intervals that are * powers of 2 times 1ms */ bit = OHCI_NO_EDS / 2; while (bit) { x = bit; while (x & bit) { ohci_ed_t *ed_x; ohci_ed_t *ed_y; y = (x ^ bit) | (bit / 2); /* * the next QH has half the poll interval */ ed_x = sc->sc_intr_p_last[x]; ed_y = sc->sc_intr_p_last[y]; ed_x->next = NULL; ed_x->ed_next = ed_y->ed_self; x++; } bit >>= 1; } if (1) { ohci_ed_t *ed_int; ohci_ed_t *ed_isc; ed_int = sc->sc_intr_p_last[0]; ed_isc = sc->sc_isoc_p_last; /* the last (1ms) QH */ ed_int->next = ed_isc; ed_int->ed_next = ed_isc->ed_self; } usbd_get_page(&sc->sc_hw.hcca_pc, 0, &buf_res); sc->sc_hcca_p = buf_res.buffer; /* * Fill HCCA interrupt table. The bit reversal is to get * the tree set up properly to spread the interrupts. 
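* Each of the OHCI_NO_INTRS slots is selected by the low bits of the
* current frame number; slot i enters the tree at ED (i | OHCI_NO_EDS / 2)
* and chains through EDs whose poll intervals halve at every level, down
* to the shared 1 ms ED, so EDs of a given interval are serviced on
* evenly spaced frames.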
*/ for (i = 0; i != OHCI_NO_INTRS; i++) { sc->sc_hcca_p->hcca_interrupt_table[i] = sc->sc_intr_p_last[i | (OHCI_NO_EDS / 2)]->ed_self; } /* flush all cache into memory */ usb_bus_mem_flush_all(&sc->sc_bus, &ohci_iterate_hw_softc); /* set up the bus struct */ sc->sc_bus.methods = &ohci_bus_methods; usb_callout_init_mtx(&sc->sc_tmo_rhsc, &sc->sc_bus.bus_mtx, 0); #ifdef USB_DEBUG if (ohcidebug > 15) { for (i = 0; i != OHCI_NO_EDS; i++) { printf("ed#%d ", i); ohci_dump_ed(sc->sc_intr_p_last[i]); } printf("iso "); ohci_dump_ed(sc->sc_isoc_p_last); } #endif sc->sc_bus.usbrev = USB_REV_1_0; if (ohci_controller_init(sc, 0) != 0) return (USB_ERR_INVAL); /* catch any lost interrupts */ ohci_do_poll(&sc->sc_bus); return (USB_ERR_NORMAL_COMPLETION); } /* * shut down the controller when the system is going down */ void ohci_detach(struct ohci_softc *sc) { USB_BUS_LOCK(&sc->sc_bus); usb_callout_stop(&sc->sc_tmo_rhsc); OWRITE4(sc, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS); OWRITE4(sc, OHCI_CONTROL, OHCI_HCFS_RESET); USB_BUS_UNLOCK(&sc->sc_bus); /* XXX let stray task complete */ usb_pause_mtx(NULL, hz / 20); usb_callout_drain(&sc->sc_tmo_rhsc); } static void ohci_suspend(ohci_softc_t *sc) { DPRINTF("\n"); #ifdef USB_DEBUG if (ohcidebug > 2) ohci_dumpregs(sc); #endif /* reset HC and leave it suspended */ ohci_controller_init(sc, 1); } static void ohci_resume(ohci_softc_t *sc) { DPRINTF("\n"); #ifdef USB_DEBUG if (ohcidebug > 2) ohci_dumpregs(sc); #endif /* some broken BIOSes never initialize the Controller chip */ ohci_controller_init(sc, 0); /* catch any lost interrupts */ ohci_do_poll(&sc->sc_bus); } #ifdef USB_DEBUG static void ohci_dumpregs(ohci_softc_t *sc) { struct ohci_hcca *hcca; DPRINTF("ohci_dumpregs: rev=0x%08x control=0x%08x command=0x%08x\n", OREAD4(sc, OHCI_REVISION), OREAD4(sc, OHCI_CONTROL), OREAD4(sc, OHCI_COMMAND_STATUS)); DPRINTF(" intrstat=0x%08x intre=0x%08x intrd=0x%08x\n", OREAD4(sc, OHCI_INTERRUPT_STATUS), OREAD4(sc, OHCI_INTERRUPT_ENABLE), OREAD4(sc, OHCI_INTERRUPT_DISABLE)); DPRINTF(" hcca=0x%08x percur=0x%08x ctrlhd=0x%08x\n", OREAD4(sc, OHCI_HCCA), OREAD4(sc, OHCI_PERIOD_CURRENT_ED), OREAD4(sc, OHCI_CONTROL_HEAD_ED)); DPRINTF(" ctrlcur=0x%08x bulkhd=0x%08x bulkcur=0x%08x\n", OREAD4(sc, OHCI_CONTROL_CURRENT_ED), OREAD4(sc, OHCI_BULK_HEAD_ED), OREAD4(sc, OHCI_BULK_CURRENT_ED)); DPRINTF(" done=0x%08x fmival=0x%08x fmrem=0x%08x\n", OREAD4(sc, OHCI_DONE_HEAD), OREAD4(sc, OHCI_FM_INTERVAL), OREAD4(sc, OHCI_FM_REMAINING)); DPRINTF(" fmnum=0x%08x perst=0x%08x lsthrs=0x%08x\n", OREAD4(sc, OHCI_FM_NUMBER), OREAD4(sc, OHCI_PERIODIC_START), OREAD4(sc, OHCI_LS_THRESHOLD)); DPRINTF(" desca=0x%08x descb=0x%08x stat=0x%08x\n", OREAD4(sc, OHCI_RH_DESCRIPTOR_A), OREAD4(sc, OHCI_RH_DESCRIPTOR_B), OREAD4(sc, OHCI_RH_STATUS)); DPRINTF(" port1=0x%08x port2=0x%08x\n", OREAD4(sc, OHCI_RH_PORT_STATUS(1)), OREAD4(sc, OHCI_RH_PORT_STATUS(2))); hcca = ohci_get_hcca(sc); DPRINTF(" HCCA: frame_number=0x%04x done_head=0x%08x\n", le32toh(hcca->hcca_frame_number), le32toh(hcca->hcca_done_head)); } static void ohci_dump_tds(ohci_td_t *std) { for (; std; std = std->obj_next) { if (ohci_dump_td(std)) { break; } } } static uint8_t ohci_dump_td(ohci_td_t *std) { uint32_t td_flags; uint8_t temp; usb_pc_cpu_invalidate(std->page_cache); td_flags = le32toh(std->td_flags); temp = (std->td_next == 0); printf("TD(%p) at 0x%08x: %s%s%s%s%s delay=%d ec=%d " "cc=%d\ncbp=0x%08x next=0x%08x be=0x%08x\n", std, le32toh(std->td_self), (td_flags & OHCI_TD_R) ? "-R" : "", (td_flags & OHCI_TD_OUT) ? 
"-OUT" : "", (td_flags & OHCI_TD_IN) ? "-IN" : "", ((td_flags & OHCI_TD_TOGGLE_MASK) == OHCI_TD_TOGGLE_1) ? "-TOG1" : "", ((td_flags & OHCI_TD_TOGGLE_MASK) == OHCI_TD_TOGGLE_0) ? "-TOG0" : "", OHCI_TD_GET_DI(td_flags), OHCI_TD_GET_EC(td_flags), OHCI_TD_GET_CC(td_flags), le32toh(std->td_cbp), le32toh(std->td_next), le32toh(std->td_be)); return (temp); } static uint8_t ohci_dump_itd(ohci_itd_t *sitd) { uint32_t itd_flags; uint16_t i; uint8_t temp; usb_pc_cpu_invalidate(sitd->page_cache); itd_flags = le32toh(sitd->itd_flags); temp = (sitd->itd_next == 0); printf("ITD(%p) at 0x%08x: sf=%d di=%d fc=%d cc=%d\n" "bp0=0x%08x next=0x%08x be=0x%08x\n", sitd, le32toh(sitd->itd_self), OHCI_ITD_GET_SF(itd_flags), OHCI_ITD_GET_DI(itd_flags), OHCI_ITD_GET_FC(itd_flags), OHCI_ITD_GET_CC(itd_flags), le32toh(sitd->itd_bp0), le32toh(sitd->itd_next), le32toh(sitd->itd_be)); for (i = 0; i < OHCI_ITD_NOFFSET; i++) { printf("offs[%d]=0x%04x ", i, (uint32_t)le16toh(sitd->itd_offset[i])); } printf("\n"); return (temp); } static void ohci_dump_itds(ohci_itd_t *sitd) { for (; sitd; sitd = sitd->obj_next) { if (ohci_dump_itd(sitd)) { break; } } } static void ohci_dump_ed(ohci_ed_t *sed) { uint32_t ed_flags; uint32_t ed_headp; usb_pc_cpu_invalidate(sed->page_cache); ed_flags = le32toh(sed->ed_flags); ed_headp = le32toh(sed->ed_headp); printf("ED(%p) at 0x%08x: addr=%d endpt=%d maxp=%d flags=%s%s%s%s%s\n" "tailp=0x%08x headflags=%s%s headp=0x%08x nexted=0x%08x\n", sed, le32toh(sed->ed_self), OHCI_ED_GET_FA(ed_flags), OHCI_ED_GET_EN(ed_flags), OHCI_ED_GET_MAXP(ed_flags), (ed_flags & OHCI_ED_DIR_OUT) ? "-OUT" : "", (ed_flags & OHCI_ED_DIR_IN) ? "-IN" : "", (ed_flags & OHCI_ED_SPEED) ? "-LOWSPEED" : "", (ed_flags & OHCI_ED_SKIP) ? "-SKIP" : "", (ed_flags & OHCI_ED_FORMAT_ISO) ? "-ISO" : "", le32toh(sed->ed_tailp), (ed_headp & OHCI_HALTED) ? "-HALTED" : "", (ed_headp & OHCI_TOGGLECARRY) ? "-CARRY" : "", le32toh(sed->ed_headp), le32toh(sed->ed_next)); } #endif static void ohci_transfer_intr_enqueue(struct usb_xfer *xfer) { /* check for early completion */ if (ohci_check_transfer(xfer)) { return; } /* put transfer on interrupt queue */ usbd_transfer_enqueue(&xfer->xroot->bus->intr_q, xfer); /* start timeout, if any */ if (xfer->timeout != 0) { usbd_transfer_timeout_ms(xfer, &ohci_timeout, xfer->timeout); } } #define OHCI_APPEND_QH(sed,last) (last) = _ohci_append_qh(sed,last) static ohci_ed_t * _ohci_append_qh(ohci_ed_t *sed, ohci_ed_t *last) { DPRINTFN(11, "%p to %p\n", sed, last); if (sed->prev != NULL) { /* should not happen */ DPRINTFN(0, "ED already linked!\n"); return (last); } /* (sc->sc_bus.bus_mtx) must be locked */ sed->next = last->next; sed->ed_next = last->ed_next; sed->ed_tailp = 0; sed->prev = last; usb_pc_cpu_flush(sed->page_cache); /* * the last->next->prev is never followed: sed->next->prev = sed; */ last->next = sed; last->ed_next = sed->ed_self; usb_pc_cpu_flush(last->page_cache); return (sed); } #define OHCI_REMOVE_QH(sed,last) (last) = _ohci_remove_qh(sed,last) static ohci_ed_t * _ohci_remove_qh(ohci_ed_t *sed, ohci_ed_t *last) { DPRINTFN(11, "%p from %p\n", sed, last); /* (sc->sc_bus.bus_mtx) must be locked */ /* only remove if not removed from a queue */ if (sed->prev) { sed->prev->next = sed->next; sed->prev->ed_next = sed->ed_next; usb_pc_cpu_flush(sed->prev->page_cache); if (sed->next) { sed->next->prev = sed->prev; usb_pc_cpu_flush(sed->next->page_cache); } last = ((last == sed) ? 
sed->prev : last); sed->prev = 0; usb_pc_cpu_flush(sed->page_cache); } return (last); } static void ohci_isoc_done(struct usb_xfer *xfer) { uint8_t nframes; uint32_t *plen = xfer->frlengths; volatile uint16_t *olen; uint16_t len = 0; ohci_itd_t *td = xfer->td_transfer_first; while (1) { if (td == NULL) { panic("%s:%d: out of TD's\n", __FUNCTION__, __LINE__); } #ifdef USB_DEBUG if (ohcidebug > 5) { DPRINTF("isoc TD\n"); ohci_dump_itd(td); } #endif usb_pc_cpu_invalidate(td->page_cache); nframes = td->frames; olen = &td->itd_offset[0]; if (nframes > 8) { nframes = 8; } while (nframes--) { len = le16toh(*olen); if ((len >> 12) == OHCI_CC_NOT_ACCESSED) { len = 0; } else { len &= ((1 << 12) - 1); } if (len > *plen) { len = 0;/* invalid length */ } *plen = len; plen++; olen++; } if (((void *)td) == xfer->td_transfer_last) { break; } td = td->obj_next; } xfer->aframes = xfer->nframes; ohci_device_done(xfer, USB_ERR_NORMAL_COMPLETION); } #ifdef USB_DEBUG static const char *const ohci_cc_strs[] = { "NO_ERROR", "CRC", "BIT_STUFFING", "DATA_TOGGLE_MISMATCH", "STALL", "DEVICE_NOT_RESPONDING", "PID_CHECK_FAILURE", "UNEXPECTED_PID", "DATA_OVERRUN", "DATA_UNDERRUN", "BUFFER_OVERRUN", "BUFFER_UNDERRUN", "reserved", "reserved", "NOT_ACCESSED", "NOT_ACCESSED" }; #endif static usb_error_t ohci_non_isoc_done_sub(struct usb_xfer *xfer) { ohci_td_t *td; ohci_td_t *td_alt_next; uint32_t temp; uint32_t phy_start; uint32_t phy_end; uint32_t td_flags; uint16_t cc; td = xfer->td_transfer_cache; td_alt_next = td->alt_next; td_flags = 0; if (xfer->aframes != xfer->nframes) { usbd_xfer_set_frame_len(xfer, xfer->aframes, 0); } while (1) { usb_pc_cpu_invalidate(td->page_cache); phy_start = le32toh(td->td_cbp); td_flags = le32toh(td->td_flags); cc = OHCI_TD_GET_CC(td_flags); if (phy_start) { /* * short transfer - compute the number of remaining * bytes in the hardware buffer: */ phy_end = le32toh(td->td_be); temp = (OHCI_PAGE(phy_start ^ phy_end) ? (OHCI_PAGE_SIZE + 1) : 0x0001); temp += OHCI_PAGE_OFFSET(phy_end); temp -= OHCI_PAGE_OFFSET(phy_start); if (temp > td->len) { /* guard against corruption */ cc = OHCI_CC_STALL; } else if (xfer->aframes != xfer->nframes) { /* * Sum up total transfer length * in "frlengths[]": */ xfer->frlengths[xfer->aframes] += td->len - temp; } } else { if (xfer->aframes != xfer->nframes) { /* transfer was complete */ xfer->frlengths[xfer->aframes] += td->len; } } /* Check for last transfer */ if (((void *)td) == xfer->td_transfer_last) { td = NULL; break; } /* Check transfer status */ if (cc) { /* the transfer is finished */ td = NULL; break; } /* Check for short transfer */ if (phy_start) { if (xfer->flags_int.short_frames_ok) { /* follow alt next */ td = td->alt_next; } else { /* the transfer is finished */ td = NULL; } break; } td = td->obj_next; if (td->alt_next != td_alt_next) { /* this USB frame is complete */ break; } } /* update transfer cache */ xfer->td_transfer_cache = td; DPRINTFN(16, "error cc=%d (%s)\n", cc, ohci_cc_strs[cc]); return ((cc == 0) ? USB_ERR_NORMAL_COMPLETION : (cc == OHCI_CC_STALL) ? 
USB_ERR_STALLED : USB_ERR_IOERROR); } static void ohci_non_isoc_done(struct usb_xfer *xfer) { usb_error_t err = 0; DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n", xfer, xfer->endpoint); #ifdef USB_DEBUG if (ohcidebug > 10) { ohci_dump_tds(xfer->td_transfer_first); } #endif /* reset scanner */ xfer->td_transfer_cache = xfer->td_transfer_first; if (xfer->flags_int.control_xfr) { if (xfer->flags_int.control_hdr) { err = ohci_non_isoc_done_sub(xfer); } xfer->aframes = 1; if (xfer->td_transfer_cache == NULL) { goto done; } } while (xfer->aframes != xfer->nframes) { err = ohci_non_isoc_done_sub(xfer); xfer->aframes++; if (xfer->td_transfer_cache == NULL) { goto done; } } if (xfer->flags_int.control_xfr && !xfer->flags_int.control_act) { err = ohci_non_isoc_done_sub(xfer); } done: ohci_device_done(xfer, err); } /*------------------------------------------------------------------------* * ohci_check_transfer_sub *------------------------------------------------------------------------*/ static void ohci_check_transfer_sub(struct usb_xfer *xfer) { ohci_td_t *td; ohci_ed_t *ed; uint32_t phy_start; uint32_t td_flags; uint32_t td_next; uint16_t cc; td = xfer->td_transfer_cache; while (1) { usb_pc_cpu_invalidate(td->page_cache); phy_start = le32toh(td->td_cbp); td_flags = le32toh(td->td_flags); td_next = le32toh(td->td_next); /* Check for last transfer */ if (((void *)td) == xfer->td_transfer_last) { /* the transfer is finished */ td = NULL; break; } /* Check transfer status */ cc = OHCI_TD_GET_CC(td_flags); if (cc) { /* the transfer is finished */ td = NULL; break; } /* * Check if we reached the last packet * or if there is a short packet: */ if (((td_next & (~0xF)) == OHCI_TD_NEXT_END) || phy_start) { /* follow alt next */ td = td->alt_next; break; } td = td->obj_next; } /* update transfer cache */ xfer->td_transfer_cache = td; if (td) { ed = xfer->qh_start[xfer->flags_int.curr_dma_set]; ed->ed_headp = td->td_self; usb_pc_cpu_flush(ed->page_cache); DPRINTFN(13, "xfer=%p following alt next\n", xfer); /* * Make sure that the OHCI re-scans the schedule by * writing the BLF and CLF bits: */ if (xfer->xroot->udev->flags.self_suspended) { /* nothing to do */ } else if (xfer->endpoint->methods == &ohci_device_bulk_methods) { ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_BLF); } else if (xfer->endpoint->methods == &ohci_device_ctrl_methods) { ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_CLF); } } } /*------------------------------------------------------------------------* * ohci_check_transfer * * Return values: * 0: USB transfer is not finished * Else: USB transfer is finished *------------------------------------------------------------------------*/ static uint8_t ohci_check_transfer(struct usb_xfer *xfer) { ohci_ed_t *ed; uint32_t ed_headp; uint32_t ed_tailp; DPRINTFN(13, "xfer=%p checking transfer\n", xfer); ed = xfer->qh_start[xfer->flags_int.curr_dma_set]; usb_pc_cpu_invalidate(ed->page_cache); ed_headp = le32toh(ed->ed_headp); ed_tailp = le32toh(ed->ed_tailp); if ((ed_headp & OHCI_HALTED) || (((ed_headp ^ ed_tailp) & (~0xF)) == 0)) { if (xfer->endpoint->methods == &ohci_device_isoc_methods) { /* isochronous transfer */ ohci_isoc_done(xfer); } else { if (xfer->flags_int.short_frames_ok) { ohci_check_transfer_sub(xfer); if (xfer->td_transfer_cache) { /* not finished yet */ return (0); } } /* store data-toggle */ if (ed_headp & OHCI_TOGGLECARRY) { xfer->endpoint->toggle_next = 1; } else { 
xfer->endpoint->toggle_next = 0; } /* non-isochronous transfer */ ohci_non_isoc_done(xfer); } return (1); } DPRINTFN(13, "xfer=%p is still active\n", xfer); return (0); } static void ohci_rhsc_enable(ohci_softc_t *sc) { DPRINTFN(5, "\n"); USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); sc->sc_eintrs |= OHCI_RHSC; OWRITE4(sc, OHCI_INTERRUPT_ENABLE, OHCI_RHSC); /* acknowledge any RHSC interrupt */ OWRITE4(sc, OHCI_INTERRUPT_STATUS, OHCI_RHSC); ohci_root_intr(sc); } static void ohci_interrupt_poll(ohci_softc_t *sc) { struct usb_xfer *xfer; repeat: TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { /* * check if transfer is transferred */ if (ohci_check_transfer(xfer)) { /* queue has been modified */ goto repeat; } } } /*------------------------------------------------------------------------* * ohci_interrupt - OHCI interrupt handler * * NOTE: Do not access "sc->sc_bus.bdev" inside the interrupt handler, * hence the interrupt handler will be setup before "sc->sc_bus.bdev" * is present ! *------------------------------------------------------------------------*/ void ohci_interrupt(ohci_softc_t *sc) { struct ohci_hcca *hcca; uint32_t status; uint32_t done; USB_BUS_LOCK(&sc->sc_bus); hcca = ohci_get_hcca(sc); DPRINTFN(16, "real interrupt\n"); #ifdef USB_DEBUG if (ohcidebug > 15) { ohci_dumpregs(sc); } #endif done = le32toh(hcca->hcca_done_head); /* * The LSb of done is used to inform the HC Driver that an interrupt * condition exists for both the Done list and for another event * recorded in HcInterruptStatus. On an interrupt from the HC, the * HC Driver checks the HccaDoneHead Value. If this value is 0, then * the interrupt was caused by other than the HccaDoneHead update * and the HcInterruptStatus register needs to be accessed to * determine that exact interrupt cause. If HccaDoneHead is nonzero, * then a Done list update interrupt is indicated and if the LSb of * done is nonzero, then an additional interrupt event is indicated * and HcInterruptStatus should be checked to determine its cause. */ if (done != 0) { status = 0; if (done & ~OHCI_DONE_INTRS) { status |= OHCI_WDH; } if (done & OHCI_DONE_INTRS) { status |= OREAD4(sc, OHCI_INTERRUPT_STATUS); } hcca->hcca_done_head = 0; usb_pc_cpu_flush(&sc->sc_hw.hcca_pc); } else { status = OREAD4(sc, OHCI_INTERRUPT_STATUS) & ~OHCI_WDH; } status &= ~OHCI_MIE; if (status == 0) { /* * nothing to be done (PCI shared * interrupt) */ goto done; } OWRITE4(sc, OHCI_INTERRUPT_STATUS, status); /* Acknowledge */ status &= sc->sc_eintrs; if (status == 0) { goto done; } if (status & (OHCI_SO | OHCI_RD | OHCI_UE | OHCI_RHSC)) { #if 0 if (status & OHCI_SO) { /* XXX do what */ } #endif if (status & OHCI_RD) { printf("%s: resume detect\n", __FUNCTION__); /* XXX process resume detect */ } if (status & OHCI_UE) { printf("%s: unrecoverable error, " "controller halted\n", __FUNCTION__); OWRITE4(sc, OHCI_CONTROL, OHCI_HCFS_RESET); /* XXX what else */ } if (status & OHCI_RHSC) { /* * Disable RHSC interrupt for now, because it will be * on until the port has been reset. */ sc->sc_eintrs &= ~OHCI_RHSC; OWRITE4(sc, OHCI_INTERRUPT_DISABLE, OHCI_RHSC); ohci_root_intr(sc); /* do not allow RHSC interrupts > 1 per second */ usb_callout_reset(&sc->sc_tmo_rhsc, hz, (void *)&ohci_rhsc_enable, sc); } } status &= ~(OHCI_RHSC | OHCI_WDH | OHCI_SO); if (status != 0) { /* Block unprocessed interrupts. 
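* Any enabled status bit that was not serviced above is masked out of
* sc_eintrs and disabled in hardware, so an interrupt source the driver
* cannot handle does not keep re-asserting a possibly shared interrupt
* line.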
XXX */ OWRITE4(sc, OHCI_INTERRUPT_DISABLE, status); sc->sc_eintrs &= ~status; printf("%s: blocking intrs 0x%x\n", __FUNCTION__, status); } /* poll all the USB transfers */ ohci_interrupt_poll(sc); done: USB_BUS_UNLOCK(&sc->sc_bus); } /* * called when a request does not complete */ static void ohci_timeout(void *arg) { struct usb_xfer *xfer = arg; DPRINTF("xfer=%p\n", xfer); USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); /* transfer is transferred */ ohci_device_done(xfer, USB_ERR_TIMEOUT); } static void ohci_do_poll(struct usb_bus *bus) { struct ohci_softc *sc = OHCI_BUS2SC(bus); USB_BUS_LOCK(&sc->sc_bus); ohci_interrupt_poll(sc); USB_BUS_UNLOCK(&sc->sc_bus); } static void ohci_setup_standard_chain_sub(struct ohci_std_temp *temp) { struct usb_page_search buf_res; ohci_td_t *td; ohci_td_t *td_next; ohci_td_t *td_alt_next; uint32_t buf_offset; uint32_t average; uint32_t len_old; uint8_t shortpkt_old; uint8_t precompute; td_alt_next = NULL; buf_offset = 0; shortpkt_old = temp->shortpkt; len_old = temp->len; precompute = 1; /* software is used to detect short incoming transfers */ if ((temp->td_flags & htole32(OHCI_TD_DP_MASK)) == htole32(OHCI_TD_IN)) { temp->td_flags |= htole32(OHCI_TD_R); } else { temp->td_flags &= ~htole32(OHCI_TD_R); } restart: td = temp->td; td_next = temp->td_next; while (1) { if (temp->len == 0) { if (temp->shortpkt) { break; } /* send a Zero Length Packet, ZLP, last */ temp->shortpkt = 1; average = 0; } else { average = temp->average; if (temp->len < average) { if (temp->len % temp->max_frame_size) { temp->shortpkt = 1; } average = temp->len; } } if (td_next == NULL) { panic("%s: out of OHCI transfer descriptors!", __FUNCTION__); } /* get next TD */ td = td_next; td_next = td->obj_next; /* check if we are pre-computing */ if (precompute) { /* update remaining length */ temp->len -= average; continue; } /* fill out current TD */ td->td_flags = temp->td_flags; /* the next TD uses TOGGLE_CARRY */ temp->td_flags &= ~htole32(OHCI_TD_TOGGLE_MASK); if (average == 0) { /* * The buffer start and end phys addresses should be * 0x0 for a zero length packet. */ td->td_cbp = 0; td->td_be = 0; td->len = 0; } else { usbd_get_page(temp->pc, buf_offset, &buf_res); td->td_cbp = htole32(buf_res.physaddr); buf_offset += (average - 1); usbd_get_page(temp->pc, buf_offset, &buf_res); td->td_be = htole32(buf_res.physaddr); buf_offset++; td->len = average; /* update remaining length */ temp->len -= average; } if ((td_next == td_alt_next) && temp->setup_alt_next) { /* we need to receive these frames one by one ! 
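* Each such TD therefore gets an immediate interrupt delay, DI = 1,
* and a terminating hardware next pointer below, so the controller
* stops after every frame and the driver can decide whether to follow
* the software alt_next pointer.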
*/ td->td_flags &= htole32(~OHCI_TD_INTR_MASK); td->td_flags |= htole32(OHCI_TD_SET_DI(1)); td->td_next = htole32(OHCI_TD_NEXT_END); } else { if (td_next) { /* link the current TD with the next one */ td->td_next = td_next->td_self; } } td->alt_next = td_alt_next; usb_pc_cpu_flush(td->page_cache); } if (precompute) { precompute = 0; /* setup alt next pointer, if any */ if (temp->last_frame) { /* no alternate next */ td_alt_next = NULL; } else { /* we use this field internally */ td_alt_next = td_next; } /* restore */ temp->shortpkt = shortpkt_old; temp->len = len_old; goto restart; } temp->td = td; temp->td_next = td_next; } static void ohci_setup_standard_chain(struct usb_xfer *xfer, ohci_ed_t **ed_last) { struct ohci_std_temp temp; const struct usb_pipe_methods *methods; ohci_ed_t *ed; ohci_td_t *td; uint32_t ed_flags; uint32_t x; DPRINTFN(9, "addr=%d endpt=%d sumlen=%d speed=%d\n", xfer->address, UE_GET_ADDR(xfer->endpointno), xfer->sumlen, usbd_get_speed(xfer->xroot->udev)); temp.average = xfer->max_hc_frame_size; temp.max_frame_size = xfer->max_frame_size; /* toggle the DMA set we are using */ xfer->flags_int.curr_dma_set ^= 1; /* get next DMA set */ td = xfer->td_start[xfer->flags_int.curr_dma_set]; xfer->td_transfer_first = td; xfer->td_transfer_cache = td; temp.td = NULL; temp.td_next = td; temp.last_frame = 0; temp.setup_alt_next = xfer->flags_int.short_frames_ok; methods = xfer->endpoint->methods; /* check if we should prepend a setup message */ if (xfer->flags_int.control_xfr) { if (xfer->flags_int.control_hdr) { temp.td_flags = htole32(OHCI_TD_SETUP | OHCI_TD_NOCC | OHCI_TD_TOGGLE_0 | OHCI_TD_NOINTR); temp.len = xfer->frlengths[0]; temp.pc = xfer->frbuffers + 0; temp.shortpkt = temp.len ? 1 : 0; /* check for last frame */ if (xfer->nframes == 1) { /* no STATUS stage yet, SETUP is last */ if (xfer->flags_int.control_act) { temp.last_frame = 1; temp.setup_alt_next = 0; } } ohci_setup_standard_chain_sub(&temp); /* * XXX assume that the setup message is * contained within one USB packet: */ xfer->endpoint->toggle_next = 1; } x = 1; } else { x = 0; } temp.td_flags = htole32(OHCI_TD_NOCC | OHCI_TD_NOINTR); /* set data toggle */ if (xfer->endpoint->toggle_next) { temp.td_flags |= htole32(OHCI_TD_TOGGLE_1); } else { temp.td_flags |= htole32(OHCI_TD_TOGGLE_0); } /* set endpoint direction */ if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) { temp.td_flags |= htole32(OHCI_TD_IN); } else { temp.td_flags |= htole32(OHCI_TD_OUT); } while (x != xfer->nframes) { /* DATA0 / DATA1 message */ temp.len = xfer->frlengths[x]; temp.pc = xfer->frbuffers + x; x++; if (x == xfer->nframes) { if (xfer->flags_int.control_xfr) { /* no STATUS stage yet, DATA is last */ if (xfer->flags_int.control_act) { temp.last_frame = 1; temp.setup_alt_next = 0; } } else { temp.last_frame = 1; temp.setup_alt_next = 0; } } if (temp.len == 0) { /* make sure that we send an USB packet */ temp.shortpkt = 0; } else { /* regular data transfer */ temp.shortpkt = (xfer->flags.force_short_xfer) ? 0 : 1; } ohci_setup_standard_chain_sub(&temp); } /* check if we should append a status stage */ if (xfer->flags_int.control_xfr && !xfer->flags_int.control_act) { /* * Send a DATA1 message and invert the current endpoint * direction. 
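* Per the USB 2.0 control transfer protocol the STATUS stage is a
* zero length packet carrying a DATA1 toggle in the direction opposite
* to the preceding stage: an IN data stage is acknowledged with an OUT
* status and vice versa, which is what the direction flip below does.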
*/ /* set endpoint direction and data toggle */ if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) { temp.td_flags = htole32(OHCI_TD_OUT | OHCI_TD_NOCC | OHCI_TD_TOGGLE_1 | OHCI_TD_SET_DI(1)); } else { temp.td_flags = htole32(OHCI_TD_IN | OHCI_TD_NOCC | OHCI_TD_TOGGLE_1 | OHCI_TD_SET_DI(1)); } temp.len = 0; temp.pc = NULL; temp.shortpkt = 0; temp.last_frame = 1; temp.setup_alt_next = 0; ohci_setup_standard_chain_sub(&temp); } td = temp.td; /* Ensure that last TD is terminating: */ td->td_next = htole32(OHCI_TD_NEXT_END); td->td_flags &= ~htole32(OHCI_TD_INTR_MASK); td->td_flags |= htole32(OHCI_TD_SET_DI(1)); usb_pc_cpu_flush(td->page_cache); /* must have at least one frame! */ xfer->td_transfer_last = td; #ifdef USB_DEBUG if (ohcidebug > 8) { DPRINTF("nexttog=%d; data before transfer:\n", xfer->endpoint->toggle_next); ohci_dump_tds(xfer->td_transfer_first); } #endif ed = xfer->qh_start[xfer->flags_int.curr_dma_set]; ed_flags = (OHCI_ED_SET_FA(xfer->address) | OHCI_ED_SET_EN(UE_GET_ADDR(xfer->endpointno)) | OHCI_ED_SET_MAXP(xfer->max_frame_size)); ed_flags |= (OHCI_ED_FORMAT_GEN | OHCI_ED_DIR_TD); if (xfer->xroot->udev->speed == USB_SPEED_LOW) { ed_flags |= OHCI_ED_SPEED; } ed->ed_flags = htole32(ed_flags); td = xfer->td_transfer_first; ed->ed_headp = td->td_self; if (xfer->xroot->udev->flags.self_suspended == 0) { /* the append function will flush the endpoint descriptor */ OHCI_APPEND_QH(ed, *ed_last); if (methods == &ohci_device_bulk_methods) { ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_BLF); } if (methods == &ohci_device_ctrl_methods) { ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_CLF); } } else { usb_pc_cpu_flush(ed->page_cache); } } static void ohci_root_intr(ohci_softc_t *sc) { uint32_t hstatus; uint16_t i; uint16_t m; USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); /* clear any old interrupt data */ memset(sc->sc_hub_idata, 0, sizeof(sc->sc_hub_idata)); hstatus = OREAD4(sc, OHCI_RH_STATUS); DPRINTF("sc=%p hstatus=0x%08x\n", sc, hstatus); /* set bits */ m = (sc->sc_noport + 1); if (m > (8 * sizeof(sc->sc_hub_idata))) { m = (8 * sizeof(sc->sc_hub_idata)); } for (i = 1; i < m; i++) { /* pick out CHANGE bits from the status register */ if (OREAD4(sc, OHCI_RH_PORT_STATUS(i)) >> 16) { sc->sc_hub_idata[i / 8] |= 1 << (i % 8); DPRINTF("port %d changed\n", i); } } uhub_root_intr(&sc->sc_bus, sc->sc_hub_idata, sizeof(sc->sc_hub_idata)); } /* NOTE: "done" can be run two times in a row, * from close and from interrupt */ static void ohci_device_done(struct usb_xfer *xfer, usb_error_t error) { const struct usb_pipe_methods *methods = xfer->endpoint->methods; ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); ohci_ed_t *ed; USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); DPRINTFN(2, "xfer=%p, endpoint=%p, error=%d\n", xfer, xfer->endpoint, error); ed = xfer->qh_start[xfer->flags_int.curr_dma_set]; if (ed) { usb_pc_cpu_invalidate(ed->page_cache); } if (methods == &ohci_device_bulk_methods) { OHCI_REMOVE_QH(ed, sc->sc_bulk_p_last); } if (methods == &ohci_device_ctrl_methods) { OHCI_REMOVE_QH(ed, sc->sc_ctrl_p_last); } if (methods == &ohci_device_intr_methods) { OHCI_REMOVE_QH(ed, sc->sc_intr_p_last[xfer->qh_pos]); } if (methods == &ohci_device_isoc_methods) { OHCI_REMOVE_QH(ed, sc->sc_isoc_p_last); } xfer->td_transfer_first = NULL; xfer->td_transfer_last = NULL; /* dequeue transfer and start next transfer */ usbd_transfer_done(xfer, error); } /*------------------------------------------------------------------------* * ohci 
bulk support *------------------------------------------------------------------------*/ static void ohci_device_bulk_open(struct usb_xfer *xfer) { return; } static void ohci_device_bulk_close(struct usb_xfer *xfer) { ohci_device_done(xfer, USB_ERR_CANCELLED); } static void ohci_device_bulk_enter(struct usb_xfer *xfer) { return; } static void ohci_device_bulk_start(struct usb_xfer *xfer) { ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); /* setup TD's and QH */ ohci_setup_standard_chain(xfer, &sc->sc_bulk_p_last); /* put transfer on interrupt queue */ ohci_transfer_intr_enqueue(xfer); } static const struct usb_pipe_methods ohci_device_bulk_methods = { .open = ohci_device_bulk_open, .close = ohci_device_bulk_close, .enter = ohci_device_bulk_enter, .start = ohci_device_bulk_start, }; /*------------------------------------------------------------------------* * ohci control support *------------------------------------------------------------------------*/ static void ohci_device_ctrl_open(struct usb_xfer *xfer) { return; } static void ohci_device_ctrl_close(struct usb_xfer *xfer) { ohci_device_done(xfer, USB_ERR_CANCELLED); } static void ohci_device_ctrl_enter(struct usb_xfer *xfer) { return; } static void ohci_device_ctrl_start(struct usb_xfer *xfer) { ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); /* setup TD's and QH */ ohci_setup_standard_chain(xfer, &sc->sc_ctrl_p_last); /* put transfer on interrupt queue */ ohci_transfer_intr_enqueue(xfer); } static const struct usb_pipe_methods ohci_device_ctrl_methods = { .open = ohci_device_ctrl_open, .close = ohci_device_ctrl_close, .enter = ohci_device_ctrl_enter, .start = ohci_device_ctrl_start, }; /*------------------------------------------------------------------------* * ohci interrupt support *------------------------------------------------------------------------*/ static void ohci_device_intr_open(struct usb_xfer *xfer) { ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); uint16_t best; uint16_t bit; uint16_t x; best = 0; bit = OHCI_NO_EDS / 2; while (bit) { if (xfer->interval >= bit) { x = bit; best = bit; while (x & bit) { if (sc->sc_intr_stat[x] < sc->sc_intr_stat[best]) { best = x; } x++; } break; } bit >>= 1; } sc->sc_intr_stat[best]++; xfer->qh_pos = best; DPRINTFN(3, "best=%d interval=%d\n", best, xfer->interval); } static void ohci_device_intr_close(struct usb_xfer *xfer) { ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); sc->sc_intr_stat[xfer->qh_pos]--; ohci_device_done(xfer, USB_ERR_CANCELLED); } static void ohci_device_intr_enter(struct usb_xfer *xfer) { return; } static void ohci_device_intr_start(struct usb_xfer *xfer) { ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); /* setup TD's and QH */ ohci_setup_standard_chain(xfer, &sc->sc_intr_p_last[xfer->qh_pos]); /* put transfer on interrupt queue */ ohci_transfer_intr_enqueue(xfer); } static const struct usb_pipe_methods ohci_device_intr_methods = { .open = ohci_device_intr_open, .close = ohci_device_intr_close, .enter = ohci_device_intr_enter, .start = ohci_device_intr_start, }; /*------------------------------------------------------------------------* * ohci isochronous support *------------------------------------------------------------------------*/ static void ohci_device_isoc_open(struct usb_xfer *xfer) { return; } static void ohci_device_isoc_close(struct usb_xfer *xfer) { /**/ ohci_device_done(xfer, USB_ERR_CANCELLED); } static void ohci_device_isoc_enter(struct usb_xfer *xfer) { struct usb_page_search buf_res; ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus); 
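/* Build the ITD schedule: first synchronize the starting frame number with the controller's frame counter from the HCCA, then pack the frame lengths into ITDs, each covering at most OHCI_ITD_NOFFSET frames and at most 4K bytes of buffer. */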
struct ohci_hcca *hcca; uint32_t buf_offset; uint32_t nframes; uint32_t ed_flags; uint32_t *plen; uint16_t itd_offset[OHCI_ITD_NOFFSET]; uint16_t length; uint8_t ncur; ohci_itd_t *td; ohci_itd_t *td_last = NULL; ohci_ed_t *ed; hcca = ohci_get_hcca(sc); nframes = le32toh(hcca->hcca_frame_number); DPRINTFN(6, "xfer=%p isoc_next=%u nframes=%u hcca_fn=%u\n", xfer, xfer->endpoint->isoc_next, xfer->nframes, nframes); if ((xfer->endpoint->is_synced == 0) || (((nframes - xfer->endpoint->isoc_next) & 0xFFFF) < xfer->nframes) || (((xfer->endpoint->isoc_next - nframes) & 0xFFFF) >= 128)) { /* * If there is data underflow or the pipe queue is empty we * schedule the transfer a few frames ahead of the current * frame position. Else two isochronous transfers might * overlap. */ xfer->endpoint->isoc_next = (nframes + 3) & 0xFFFF; xfer->endpoint->is_synced = 1; DPRINTFN(3, "start next=%d\n", xfer->endpoint->isoc_next); } /* * compute how many milliseconds the insertion is ahead of the * current frame position: */ buf_offset = ((xfer->endpoint->isoc_next - nframes) & 0xFFFF); /* * pre-compute when the isochronous transfer will be finished: */ xfer->isoc_time_complete = (usb_isoc_time_expand(&sc->sc_bus, nframes) + buf_offset + xfer->nframes); /* get the real number of frames */ nframes = xfer->nframes; buf_offset = 0; plen = xfer->frlengths; /* toggle the DMA set we are using */ xfer->flags_int.curr_dma_set ^= 1; /* get next DMA set */ td = xfer->td_start[xfer->flags_int.curr_dma_set]; xfer->td_transfer_first = td; ncur = 0; length = 0; while (nframes--) { if (td == NULL) { panic("%s:%d: out of TD's\n", __FUNCTION__, __LINE__); } itd_offset[ncur] = length; buf_offset += *plen; length += *plen; plen++; ncur++; if ( /* check if the ITD is full */ (ncur == OHCI_ITD_NOFFSET) || /* check if we have put more than 4K into the ITD */ (length & 0xF000) || /* check if it is the last frame */ (nframes == 0)) { /* fill current ITD */ td->itd_flags = htole32( OHCI_ITD_NOCC | OHCI_ITD_SET_SF(xfer->endpoint->isoc_next) | OHCI_ITD_NOINTR | OHCI_ITD_SET_FC(ncur)); td->frames = ncur; xfer->endpoint->isoc_next += ncur; if (length == 0) { /* all zero */ td->itd_bp0 = 0; td->itd_be = ~0; while (ncur--) { td->itd_offset[ncur] = htole16(OHCI_ITD_MK_OFFS(0)); } } else { usbd_get_page(xfer->frbuffers, buf_offset - length, &buf_res); length = OHCI_PAGE_MASK(buf_res.physaddr); buf_res.physaddr = OHCI_PAGE(buf_res.physaddr); td->itd_bp0 = htole32(buf_res.physaddr); usbd_get_page(xfer->frbuffers, buf_offset - 1, &buf_res); td->itd_be = htole32(buf_res.physaddr); while (ncur--) { itd_offset[ncur] += length; itd_offset[ncur] = OHCI_ITD_MK_OFFS(itd_offset[ncur]); td->itd_offset[ncur] = htole16(itd_offset[ncur]); } } ncur = 0; length = 0; td_last = td; td = td->obj_next; if (td) { /* link the last TD with the next one */ td_last->itd_next = td->itd_self; } usb_pc_cpu_flush(td_last->page_cache); } } /* update the last TD */ td_last->itd_flags &= ~htole32(OHCI_ITD_NOINTR); td_last->itd_flags |= htole32(OHCI_ITD_SET_DI(0)); td_last->itd_next = 0; usb_pc_cpu_flush(td_last->page_cache); xfer->td_transfer_last = td_last; #ifdef USB_DEBUG if (ohcidebug > 8) { DPRINTF("data before transfer:\n"); ohci_dump_itds(xfer->td_transfer_first); } #endif ed = xfer->qh_start[xfer->flags_int.curr_dma_set]; if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) ed_flags = (OHCI_ED_DIR_IN | OHCI_ED_FORMAT_ISO); else ed_flags = (OHCI_ED_DIR_OUT | OHCI_ED_FORMAT_ISO); ed_flags |= (OHCI_ED_SET_FA(xfer->address) | OHCI_ED_SET_EN(UE_GET_ADDR(xfer->endpointno)) | 
OHCI_ED_SET_MAXP(xfer->max_frame_size)); if (xfer->xroot->udev->speed == USB_SPEED_LOW) { ed_flags |= OHCI_ED_SPEED; } ed->ed_flags = htole32(ed_flags); td = xfer->td_transfer_first; ed->ed_headp = td->itd_self; /* isochronous transfers are not affected by suspend / resume */ /* the append function will flush the endpoint descriptor */ OHCI_APPEND_QH(ed, sc->sc_isoc_p_last); } static void ohci_device_isoc_start(struct usb_xfer *xfer) { /* put transfer on interrupt queue */ ohci_transfer_intr_enqueue(xfer); } static const struct usb_pipe_methods ohci_device_isoc_methods = { .open = ohci_device_isoc_open, .close = ohci_device_isoc_close, .enter = ohci_device_isoc_enter, .start = ohci_device_isoc_start, }; /*------------------------------------------------------------------------* * ohci root control support *------------------------------------------------------------------------* * Simulate a hardware hub by handling all the necessary requests. *------------------------------------------------------------------------*/ static const struct usb_device_descriptor ohci_devd = { sizeof(struct usb_device_descriptor), UDESC_DEVICE, /* type */ {0x00, 0x01}, /* USB version */ UDCLASS_HUB, /* class */ UDSUBCLASS_HUB, /* subclass */ UDPROTO_FSHUB, /* protocol */ 64, /* max packet */ {0}, {0}, {0x00, 0x01}, /* device id */ 1, 2, 0, /* string indices */ 1 /* # of configurations */ }; static const struct ohci_config_desc ohci_confd = { .confd = { .bLength = sizeof(struct usb_config_descriptor), .bDescriptorType = UDESC_CONFIG, .wTotalLength[0] = sizeof(ohci_confd), .bNumInterface = 1, .bConfigurationValue = 1, .iConfiguration = 0, .bmAttributes = UC_SELF_POWERED, .bMaxPower = 0, /* max power */ }, .ifcd = { .bLength = sizeof(struct usb_interface_descriptor), .bDescriptorType = UDESC_INTERFACE, .bNumEndpoints = 1, .bInterfaceClass = UICLASS_HUB, .bInterfaceSubClass = UISUBCLASS_HUB, .bInterfaceProtocol = 0, }, .endpd = { .bLength = sizeof(struct usb_endpoint_descriptor), .bDescriptorType = UDESC_ENDPOINT, .bEndpointAddress = UE_DIR_IN | OHCI_INTR_ENDPT, .bmAttributes = UE_INTERRUPT, .wMaxPacketSize[0] = 32, /* max packet (255 ports) */ .bInterval = 255, }, }; static const struct usb_hub_descriptor ohci_hubd = { .bDescLength = 0, /* dynamic length */ .bDescriptorType = UDESC_HUB, }; static usb_error_t ohci_roothub_exec(struct usb_device *udev, struct usb_device_request *req, const void **pptr, uint16_t *plength) { ohci_softc_t *sc = OHCI_BUS2SC(udev->bus); const void *ptr; const char *str_ptr; uint32_t port; uint32_t v; uint16_t len; uint16_t value; uint16_t index; uint8_t l; usb_error_t err; USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); /* buffer reset */ ptr = (const void *)&sc->sc_hub_desc.temp; len = 0; err = 0; value = UGETW(req->wValue); index = UGETW(req->wIndex); DPRINTFN(3, "type=0x%02x request=0x%02x wLen=0x%04x " "wValue=0x%04x wIndex=0x%04x\n", req->bmRequestType, req->bRequest, UGETW(req->wLength), value, index); #define C(x,y) ((x) | ((y) << 8)) switch (C(req->bRequest, req->bmRequestType)) { case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE): case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE): case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT): /* * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops * for the integrated root hub.
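* The root hub is emulated entirely in software, so there is no real
* endpoint to halt and no remote wakeup state to clear; these
* requests are simply acknowledged as successful.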
*/ break; case C(UR_GET_CONFIG, UT_READ_DEVICE): len = 1; sc->sc_hub_desc.temp[0] = sc->sc_conf; break; case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): switch (value >> 8) { case UDESC_DEVICE: if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } len = sizeof(ohci_devd); ptr = (const void *)&ohci_devd; break; case UDESC_CONFIG: if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } len = sizeof(ohci_confd); ptr = (const void *)&ohci_confd; break; case UDESC_STRING: switch (value & 0xff) { case 0: /* Language table */ str_ptr = "\001"; break; case 1: /* Vendor */ str_ptr = sc->sc_vendor; break; case 2: /* Product */ str_ptr = "OHCI root HUB"; break; default: str_ptr = ""; break; } len = usb_make_str_desc( sc->sc_hub_desc.temp, sizeof(sc->sc_hub_desc.temp), str_ptr); break; default: err = USB_ERR_IOERROR; goto done; } break; case C(UR_GET_INTERFACE, UT_READ_INTERFACE): len = 1; sc->sc_hub_desc.temp[0] = 0; break; case C(UR_GET_STATUS, UT_READ_DEVICE): len = 2; USETW(sc->sc_hub_desc.stat.wStatus, UDS_SELF_POWERED); break; case C(UR_GET_STATUS, UT_READ_INTERFACE): case C(UR_GET_STATUS, UT_READ_ENDPOINT): len = 2; USETW(sc->sc_hub_desc.stat.wStatus, 0); break; case C(UR_SET_ADDRESS, UT_WRITE_DEVICE): if (value >= OHCI_MAX_DEVICES) { err = USB_ERR_IOERROR; goto done; } sc->sc_addr = value; break; case C(UR_SET_CONFIG, UT_WRITE_DEVICE): if ((value != 0) && (value != 1)) { err = USB_ERR_IOERROR; goto done; } sc->sc_conf = value; break; case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE): break; case C(UR_SET_FEATURE, UT_WRITE_DEVICE): case C(UR_SET_FEATURE, UT_WRITE_INTERFACE): case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT): err = USB_ERR_IOERROR; goto done; case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE): break; case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT): break; /* Hub requests */ case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): break; case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): DPRINTFN(9, "UR_CLEAR_PORT_FEATURE " "port=%d feature=%d\n", index, value); if ((index < 1) || (index > sc->sc_noport)) { err = USB_ERR_IOERROR; goto done; } port = OHCI_RH_PORT_STATUS(index); switch (value) { case UHF_PORT_ENABLE: OWRITE4(sc, port, UPS_CURRENT_CONNECT_STATUS); break; case UHF_PORT_SUSPEND: OWRITE4(sc, port, UPS_OVERCURRENT_INDICATOR); break; case UHF_PORT_POWER: /* Yes, writing to the LOW_SPEED bit clears power. */ OWRITE4(sc, port, UPS_LOW_SPEED); break; case UHF_C_PORT_CONNECTION: OWRITE4(sc, port, UPS_C_CONNECT_STATUS << 16); break; case UHF_C_PORT_ENABLE: OWRITE4(sc, port, UPS_C_PORT_ENABLED << 16); break; case UHF_C_PORT_SUSPEND: OWRITE4(sc, port, UPS_C_SUSPEND << 16); break; case UHF_C_PORT_OVER_CURRENT: OWRITE4(sc, port, UPS_C_OVERCURRENT_INDICATOR << 16); break; case UHF_C_PORT_RESET: OWRITE4(sc, port, UPS_C_PORT_RESET << 16); break; default: err = USB_ERR_IOERROR; goto done; } switch (value) { case UHF_C_PORT_CONNECTION: case UHF_C_PORT_ENABLE: case UHF_C_PORT_SUSPEND: case UHF_C_PORT_OVER_CURRENT: case UHF_C_PORT_RESET: /* enable RHSC interrupt if condition is cleared. */ if ((OREAD4(sc, port) >> 16) == 0) ohci_rhsc_enable(sc); break; default: break; } break; case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } v = OREAD4(sc, OHCI_RH_DESCRIPTOR_A); sc->sc_hub_desc.hubd = ohci_hubd; sc->sc_hub_desc.hubd.bNbrPorts = sc->sc_noport; USETW(sc->sc_hub_desc.hubd.wHubCharacteristics, (v & OHCI_NPS ? UHD_PWR_NO_SWITCH : v & OHCI_PSM ? 
UHD_PWR_GANGED : UHD_PWR_INDIVIDUAL) /* XXX overcurrent */ ); sc->sc_hub_desc.hubd.bPwrOn2PwrGood = OHCI_GET_POTPGT(v); v = OREAD4(sc, OHCI_RH_DESCRIPTOR_B); for (l = 0; l < sc->sc_noport; l++) { if (v & 1) { sc->sc_hub_desc.hubd.DeviceRemovable[l / 8] |= (1 << (l % 8)); } v >>= 1; } sc->sc_hub_desc.hubd.bDescLength = 8 + ((sc->sc_noport + 7) / 8); len = sc->sc_hub_desc.hubd.bDescLength; break; case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): len = 16; memset(sc->sc_hub_desc.temp, 0, 16); break; case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): DPRINTFN(9, "get port status i=%d\n", index); if ((index < 1) || (index > sc->sc_noport)) { err = USB_ERR_IOERROR; goto done; } v = OREAD4(sc, OHCI_RH_PORT_STATUS(index)); DPRINTFN(9, "port status=0x%04x\n", v); v &= ~UPS_PORT_MODE_DEVICE; /* force host mode */ USETW(sc->sc_hub_desc.ps.wPortStatus, v); USETW(sc->sc_hub_desc.ps.wPortChange, v >> 16); len = sizeof(sc->sc_hub_desc.ps); break; case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): err = USB_ERR_IOERROR; goto done; case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): break; case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): if ((index < 1) || (index > sc->sc_noport)) { err = USB_ERR_IOERROR; goto done; } port = OHCI_RH_PORT_STATUS(index); switch (value) { case UHF_PORT_ENABLE: OWRITE4(sc, port, UPS_PORT_ENABLED); break; case UHF_PORT_SUSPEND: OWRITE4(sc, port, UPS_SUSPEND); break; case UHF_PORT_RESET: DPRINTFN(6, "reset port %d\n", index); OWRITE4(sc, port, UPS_RESET); for (v = 0;; v++) { if (v < 12) { usb_pause_mtx(&sc->sc_bus.bus_mtx, USB_MS_TO_TICKS(usb_port_root_reset_delay)); if ((OREAD4(sc, port) & UPS_RESET) == 0) { break; } } else { err = USB_ERR_TIMEOUT; goto done; } } DPRINTFN(9, "ohci port %d reset, status = 0x%04x\n", index, OREAD4(sc, port)); break; case UHF_PORT_POWER: DPRINTFN(3, "set port power %d\n", index); OWRITE4(sc, port, UPS_PORT_POWER); break; default: err = USB_ERR_IOERROR; goto done; } break; default: err = USB_ERR_IOERROR; goto done; } done: *plength = len; *pptr = ptr; return (err); } static void ohci_xfer_setup(struct usb_setup_params *parm) { struct usb_page_search page_info; struct usb_page_cache *pc; ohci_softc_t *sc; struct usb_xfer *xfer; void *last_obj; uint32_t ntd; uint32_t nitd; uint32_t nqh; uint32_t n; sc = OHCI_BUS2SC(parm->udev->bus); xfer = parm->curr_xfer; parm->hc_max_packet_size = 0x500; parm->hc_max_packet_count = 1; parm->hc_max_frame_size = OHCI_PAGE_SIZE; /* * calculate ntd and nqh */ if (parm->methods == &ohci_device_ctrl_methods) { xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); nitd = 0; ntd = ((2 * xfer->nframes) + 1 /* STATUS */ + (xfer->max_data_length / xfer->max_hc_frame_size)); nqh = 1; } else if (parm->methods == &ohci_device_bulk_methods) { xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); nitd = 0; ntd = ((2 * xfer->nframes) + (xfer->max_data_length / xfer->max_hc_frame_size)); nqh = 1; } else if (parm->methods == &ohci_device_intr_methods) { xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); nitd = 0; ntd = ((2 * xfer->nframes) + (xfer->max_data_length / xfer->max_hc_frame_size)); nqh = 1; } else if (parm->methods == &ohci_device_isoc_methods) { xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); nitd = ((xfer->max_data_length / OHCI_PAGE_SIZE) + - ((xfer->nframes + OHCI_ITD_NOFFSET - 1) / OHCI_ITD_NOFFSET) + + howmany(xfer->nframes, OHCI_ITD_NOFFSET) + 1 /* EXTRA */ ); ntd = 0; nqh = 1; } else { usbd_transfer_setup_sub(parm); nitd = 0; ntd = 0; nqh = 0; } alloc_dma_set: if (parm->err) { return; } 
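/* The allocations below run twice, once per DMA set: every transfer keeps two sets of descriptors so that the next submission can be set up while the hardware may still be referencing the previous set. See the curr_dma_set toggle at the end of this function and in ohci_setup_standard_chain(). */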
last_obj = NULL; if (usbd_transfer_setup_sub_malloc( parm, &pc, sizeof(ohci_td_t), OHCI_TD_ALIGN, ntd)) { parm->err = USB_ERR_NOMEM; return; } if (parm->buf) { for (n = 0; n != ntd; n++) { ohci_td_t *td; usbd_get_page(pc + n, 0, &page_info); td = page_info.buffer; /* init TD */ td->td_self = htole32(page_info.physaddr); td->obj_next = last_obj; td->page_cache = pc + n; last_obj = td; usb_pc_cpu_flush(pc + n); } } if (usbd_transfer_setup_sub_malloc( parm, &pc, sizeof(ohci_itd_t), OHCI_ITD_ALIGN, nitd)) { parm->err = USB_ERR_NOMEM; return; } if (parm->buf) { for (n = 0; n != nitd; n++) { ohci_itd_t *itd; usbd_get_page(pc + n, 0, &page_info); itd = page_info.buffer; /* init TD */ itd->itd_self = htole32(page_info.physaddr); itd->obj_next = last_obj; itd->page_cache = pc + n; last_obj = itd; usb_pc_cpu_flush(pc + n); } } xfer->td_start[xfer->flags_int.curr_dma_set] = last_obj; last_obj = NULL; if (usbd_transfer_setup_sub_malloc( parm, &pc, sizeof(ohci_ed_t), OHCI_ED_ALIGN, nqh)) { parm->err = USB_ERR_NOMEM; return; } if (parm->buf) { for (n = 0; n != nqh; n++) { ohci_ed_t *ed; usbd_get_page(pc + n, 0, &page_info); ed = page_info.buffer; /* init QH */ ed->ed_self = htole32(page_info.physaddr); ed->obj_next = last_obj; ed->page_cache = pc + n; last_obj = ed; usb_pc_cpu_flush(pc + n); } } xfer->qh_start[xfer->flags_int.curr_dma_set] = last_obj; if (!xfer->flags_int.curr_dma_set) { xfer->flags_int.curr_dma_set = 1; goto alloc_dma_set; } } static void ohci_ep_init(struct usb_device *udev, struct usb_endpoint_descriptor *edesc, struct usb_endpoint *ep) { ohci_softc_t *sc = OHCI_BUS2SC(udev->bus); DPRINTFN(2, "endpoint=%p, addr=%d, endpt=%d, mode=%d (%d)\n", ep, udev->address, edesc->bEndpointAddress, udev->flags.usb_mode, sc->sc_addr); if (udev->device_index != sc->sc_addr) { switch (edesc->bmAttributes & UE_XFERTYPE) { case UE_CONTROL: ep->methods = &ohci_device_ctrl_methods; break; case UE_INTERRUPT: ep->methods = &ohci_device_intr_methods; break; case UE_ISOCHRONOUS: if (udev->speed == USB_SPEED_FULL) { ep->methods = &ohci_device_isoc_methods; } break; case UE_BULK: ep->methods = &ohci_device_bulk_methods; break; default: /* do nothing */ break; } } } static void ohci_xfer_unsetup(struct usb_xfer *xfer) { return; } static void ohci_get_dma_delay(struct usb_device *udev, uint32_t *pus) { /* * Wait until hardware has finished any possible use of the * transfer descriptor(s) and QH */ *pus = (1125); /* microseconds */ } static void ohci_device_resume(struct usb_device *udev) { struct ohci_softc *sc = OHCI_BUS2SC(udev->bus); struct usb_xfer *xfer; const struct usb_pipe_methods *methods; ohci_ed_t *ed; DPRINTF("\n"); USB_BUS_LOCK(udev->bus); TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { if (xfer->xroot->udev == udev) { methods = xfer->endpoint->methods; ed = xfer->qh_start[xfer->flags_int.curr_dma_set]; if (methods == &ohci_device_bulk_methods) { OHCI_APPEND_QH(ed, sc->sc_bulk_p_last); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_BLF); } if (methods == &ohci_device_ctrl_methods) { OHCI_APPEND_QH(ed, sc->sc_ctrl_p_last); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_CLF); } if (methods == &ohci_device_intr_methods) { OHCI_APPEND_QH(ed, sc->sc_intr_p_last[xfer->qh_pos]); } } } USB_BUS_UNLOCK(udev->bus); return; } static void ohci_device_suspend(struct usb_device *udev) { struct ohci_softc *sc = OHCI_BUS2SC(udev->bus); struct usb_xfer *xfer; const struct usb_pipe_methods *methods; ohci_ed_t *ed; DPRINTF("\n"); USB_BUS_LOCK(udev->bus); TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { if 
(xfer->xroot->udev == udev) { methods = xfer->endpoint->methods; ed = xfer->qh_start[xfer->flags_int.curr_dma_set]; if (methods == &ohci_device_bulk_methods) { OHCI_REMOVE_QH(ed, sc->sc_bulk_p_last); } if (methods == &ohci_device_ctrl_methods) { OHCI_REMOVE_QH(ed, sc->sc_ctrl_p_last); } if (methods == &ohci_device_intr_methods) { OHCI_REMOVE_QH(ed, sc->sc_intr_p_last[xfer->qh_pos]); } } } USB_BUS_UNLOCK(udev->bus); return; } static void ohci_set_hw_power_sleep(struct usb_bus *bus, uint32_t state) { struct ohci_softc *sc = OHCI_BUS2SC(bus); switch (state) { case USB_HW_POWER_SUSPEND: case USB_HW_POWER_SHUTDOWN: ohci_suspend(sc); break; case USB_HW_POWER_RESUME: ohci_resume(sc); break; default: break; } } static void ohci_set_hw_power(struct usb_bus *bus) { struct ohci_softc *sc = OHCI_BUS2SC(bus); uint32_t temp; uint32_t flags; DPRINTF("\n"); USB_BUS_LOCK(bus); flags = bus->hw_power_state; temp = OREAD4(sc, OHCI_CONTROL); temp &= ~(OHCI_PLE | OHCI_IE | OHCI_CLE | OHCI_BLE); if (flags & USB_HW_POWER_CONTROL) temp |= OHCI_CLE; if (flags & USB_HW_POWER_BULK) temp |= OHCI_BLE; if (flags & USB_HW_POWER_INTERRUPT) temp |= OHCI_PLE; if (flags & USB_HW_POWER_ISOC) temp |= OHCI_IE | OHCI_PLE; OWRITE4(sc, OHCI_CONTROL, temp); USB_BUS_UNLOCK(bus); return; } static const struct usb_bus_methods ohci_bus_methods = { .endpoint_init = ohci_ep_init, .xfer_setup = ohci_xfer_setup, .xfer_unsetup = ohci_xfer_unsetup, .get_dma_delay = ohci_get_dma_delay, .device_resume = ohci_device_resume, .device_suspend = ohci_device_suspend, .set_hw_power = ohci_set_hw_power, .set_hw_power_sleep = ohci_set_hw_power_sleep, .roothub_exec = ohci_roothub_exec, .xfer_poll = ohci_do_poll, }; diff --git a/sys/dev/usb/controller/xhci.c b/sys/dev/usb/controller/xhci.c index 0739e2865e39..6c58e6d03afa 100644 --- a/sys/dev/usb/controller/xhci.c +++ b/sys/dev/usb/controller/xhci.c @@ -1,4337 +1,4336 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2010 Hans Petter Selasky. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * USB eXtensible Host Controller Interface, a.k.a. USB 3.0 controller. 
* * The XHCI 1.0 spec can be found at * http://www.intel.com/technology/usb/download/xHCI_Specification_for_USB.pdf * and the USB 3.0 spec at * http://www.usb.org/developers/docs/usb_30_spec_060910.zip */ /* * A few words about the design implementation: This driver emulates * the concept of TDs found in the EHCI specification. This way the * USB controller drivers look similar to each other, which makes the * code easier to understand. */ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR xhcidebug #include #include #include #include #include #include #include #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ #include #include #define XHCI_BUS2SC(bus) \ ((struct xhci_softc *)(((uint8_t *)(bus)) - \ ((uint8_t *)&(((struct xhci_softc *)0)->sc_bus)))) static SYSCTL_NODE(_hw_usb, OID_AUTO, xhci, CTLFLAG_RW, 0, "USB XHCI"); static int xhcistreams; SYSCTL_INT(_hw_usb_xhci, OID_AUTO, streams, CTLFLAG_RWTUN, &xhcistreams, 0, "Set to enable streams mode support"); #ifdef USB_DEBUG static int xhcidebug; static int xhciroute; static int xhcipolling; static int xhcidma32; SYSCTL_INT(_hw_usb_xhci, OID_AUTO, debug, CTLFLAG_RWTUN, &xhcidebug, 0, "Debug level"); SYSCTL_INT(_hw_usb_xhci, OID_AUTO, xhci_port_route, CTLFLAG_RWTUN, &xhciroute, 0, "Routing bitmap for switching EHCI ports to the XHCI controller"); SYSCTL_INT(_hw_usb_xhci, OID_AUTO, use_polling, CTLFLAG_RWTUN, &xhcipolling, 0, "Set to enable software interrupt polling for the XHCI controller"); SYSCTL_INT(_hw_usb_xhci, OID_AUTO, dma32, CTLFLAG_RWTUN, &xhcidma32, 0, "Set to only use 32-bit DMA for the XHCI controller"); #else #define xhciroute 0 #define xhcidma32 0 #endif #define XHCI_INTR_ENDPT 1 struct xhci_std_temp { struct xhci_softc *sc; struct usb_page_cache *pc; struct xhci_td *td; struct xhci_td *td_next; uint32_t len; uint32_t offset; uint32_t max_packet_size; uint32_t average; uint16_t isoc_delta; uint16_t isoc_frame; uint8_t shortpkt; uint8_t multishort; uint8_t last_frame; uint8_t trb_type; uint8_t direction; uint8_t tbc; uint8_t tlbpc; uint8_t step_td; uint8_t do_isoc_sync; }; static void xhci_do_poll(struct usb_bus *); static void xhci_device_done(struct usb_xfer *, usb_error_t); static void xhci_root_intr(struct xhci_softc *); static void xhci_free_device_ext(struct usb_device *); static struct xhci_endpoint_ext *xhci_get_endpoint_ext(struct usb_device *, struct usb_endpoint_descriptor *); static usb_proc_callback_t xhci_configure_msg; static usb_error_t xhci_configure_device(struct usb_device *); static usb_error_t xhci_configure_endpoint(struct usb_device *, struct usb_endpoint_descriptor *, struct xhci_endpoint_ext *, uint16_t, uint8_t, uint8_t, uint8_t, uint16_t, uint16_t, uint8_t); static usb_error_t xhci_configure_mask(struct usb_device *, uint32_t, uint8_t); static usb_error_t xhci_cmd_evaluate_ctx(struct xhci_softc *, uint64_t, uint8_t); static void xhci_endpoint_doorbell(struct usb_xfer *); static void xhci_ctx_set_le32(struct xhci_softc *sc, volatile uint32_t *ptr, uint32_t val); static uint32_t xhci_ctx_get_le32(struct xhci_softc *sc, volatile uint32_t *ptr); static void xhci_ctx_set_le64(struct xhci_softc *sc, volatile uint64_t *ptr, uint64_t val); #ifdef USB_DEBUG static uint64_t xhci_ctx_get_le64(struct xhci_softc *sc, volatile uint64_t *ptr); #endif static
const struct usb_bus_methods xhci_bus_methods; #ifdef USB_DEBUG static void xhci_dump_trb(struct xhci_trb *trb) { DPRINTFN(5, "trb = %p\n", trb); DPRINTFN(5, "qwTrb0 = 0x%016llx\n", (long long)le64toh(trb->qwTrb0)); DPRINTFN(5, "dwTrb2 = 0x%08x\n", le32toh(trb->dwTrb2)); DPRINTFN(5, "dwTrb3 = 0x%08x\n", le32toh(trb->dwTrb3)); } static void xhci_dump_endpoint(struct xhci_softc *sc, struct xhci_endp_ctx *pep) { DPRINTFN(5, "pep = %p\n", pep); DPRINTFN(5, "dwEpCtx0=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx0)); DPRINTFN(5, "dwEpCtx1=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx1)); DPRINTFN(5, "qwEpCtx2=0x%016llx\n", (long long)xhci_ctx_get_le64(sc, &pep->qwEpCtx2)); DPRINTFN(5, "dwEpCtx4=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx4)); DPRINTFN(5, "dwEpCtx5=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx5)); DPRINTFN(5, "dwEpCtx6=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx6)); DPRINTFN(5, "dwEpCtx7=0x%08x\n", xhci_ctx_get_le32(sc, &pep->dwEpCtx7)); } static void xhci_dump_device(struct xhci_softc *sc, struct xhci_slot_ctx *psl) { DPRINTFN(5, "psl = %p\n", psl); DPRINTFN(5, "dwSctx0=0x%08x\n", xhci_ctx_get_le32(sc, &psl->dwSctx0)); DPRINTFN(5, "dwSctx1=0x%08x\n", xhci_ctx_get_le32(sc, &psl->dwSctx1)); DPRINTFN(5, "dwSctx2=0x%08x\n", xhci_ctx_get_le32(sc, &psl->dwSctx2)); DPRINTFN(5, "dwSctx3=0x%08x\n", xhci_ctx_get_le32(sc, &psl->dwSctx3)); } #endif uint8_t xhci_use_polling(void) { #ifdef USB_DEBUG return (xhcipolling != 0); #else return (0); #endif } static void xhci_iterate_hw_softc(struct usb_bus *bus, usb_bus_mem_sub_cb_t *cb) { struct xhci_softc *sc = XHCI_BUS2SC(bus); uint8_t i; cb(bus, &sc->sc_hw.root_pc, &sc->sc_hw.root_pg, sizeof(struct xhci_hw_root), XHCI_PAGE_SIZE); cb(bus, &sc->sc_hw.ctx_pc, &sc->sc_hw.ctx_pg, sizeof(struct xhci_dev_ctx_addr), XHCI_PAGE_SIZE); for (i = 0; i != XHCI_MAX_SCRATCHPADS; i++) { cb(bus, &sc->sc_hw.scratch_pc[i], &sc->sc_hw.scratch_pg[i], XHCI_PAGE_SIZE, XHCI_PAGE_SIZE); } } static void xhci_ctx_set_le32(struct xhci_softc *sc, volatile uint32_t *ptr, uint32_t val) { if (sc->sc_ctx_is_64_byte) { uint32_t offset; /* exploit the fact that our structures are XHCI_PAGE_SIZE aligned */ /* all contexts are initially 32-bytes */ offset = ((uintptr_t)ptr) & ((XHCI_PAGE_SIZE - 1) & ~(31U)); ptr = (volatile uint32_t *)(((volatile uint8_t *)ptr) + offset); } *ptr = htole32(val); } static uint32_t xhci_ctx_get_le32(struct xhci_softc *sc, volatile uint32_t *ptr) { if (sc->sc_ctx_is_64_byte) { uint32_t offset; /* exploit the fact that our structures are XHCI_PAGE_SIZE aligned */ /* all contexts are initially 32-bytes */ offset = ((uintptr_t)ptr) & ((XHCI_PAGE_SIZE - 1) & ~(31U)); ptr = (volatile uint32_t *)(((volatile uint8_t *)ptr) + offset); } return (le32toh(*ptr)); } static void xhci_ctx_set_le64(struct xhci_softc *sc, volatile uint64_t *ptr, uint64_t val) { if (sc->sc_ctx_is_64_byte) { uint32_t offset; /* exploit the fact that our structures are XHCI_PAGE_SIZE aligned */ /* all contexts are initially 32-bytes */ offset = ((uintptr_t)ptr) & ((XHCI_PAGE_SIZE - 1) & ~(31U)); ptr = (volatile uint64_t *)(((volatile uint8_t *)ptr) + offset); } *ptr = htole64(val); } #ifdef USB_DEBUG static uint64_t xhci_ctx_get_le64(struct xhci_softc *sc, volatile uint64_t *ptr) { if (sc->sc_ctx_is_64_byte) { uint32_t offset; /* exploit the fact that our structures are XHCI_PAGE_SIZE aligned */ /* all contexts are initially 32-bytes */ offset = ((uintptr_t)ptr) & ((XHCI_PAGE_SIZE - 1) & ~(31U)); ptr = (volatile uint64_t *)(((volatile uint8_t *)ptr) + offset); } return 
(le64toh(*ptr)); } #endif static int xhci_reset_command_queue_locked(struct xhci_softc *sc) { struct usb_page_search buf_res; struct xhci_hw_root *phwr; uint64_t addr; uint32_t temp; DPRINTF("\n"); temp = XREAD4(sc, oper, XHCI_CRCR_LO); if (temp & XHCI_CRCR_LO_CRR) { DPRINTF("Command ring running\n"); temp &= ~(XHCI_CRCR_LO_CS | XHCI_CRCR_LO_CA); /* * Try to abort the last command as per section * 4.6.1.2 "Aborting a Command" of the XHCI * specification: */ /* stop and cancel */ XWRITE4(sc, oper, XHCI_CRCR_LO, temp | XHCI_CRCR_LO_CS); XWRITE4(sc, oper, XHCI_CRCR_HI, 0); XWRITE4(sc, oper, XHCI_CRCR_LO, temp | XHCI_CRCR_LO_CA); XWRITE4(sc, oper, XHCI_CRCR_HI, 0); /* wait 250ms */ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 4); /* check if command ring is still running */ temp = XREAD4(sc, oper, XHCI_CRCR_LO); if (temp & XHCI_CRCR_LO_CRR) { DPRINTF("Command ring still running\n"); return (USB_ERR_IOERROR); } } /* reset command ring */ sc->sc_command_ccs = 1; sc->sc_command_idx = 0; usbd_get_page(&sc->sc_hw.root_pc, 0, &buf_res); /* set up command ring control base address */ addr = buf_res.physaddr; phwr = buf_res.buffer; addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_commands[0]; DPRINTF("CRCR=0x%016llx\n", (unsigned long long)addr); memset(phwr->hwr_commands, 0, sizeof(phwr->hwr_commands)); phwr->hwr_commands[XHCI_MAX_COMMANDS - 1].qwTrb0 = htole64(addr); usb_pc_cpu_flush(&sc->sc_hw.root_pc); XWRITE4(sc, oper, XHCI_CRCR_LO, ((uint32_t)addr) | XHCI_CRCR_LO_RCS); XWRITE4(sc, oper, XHCI_CRCR_HI, (uint32_t)(addr >> 32)); return (0); } usb_error_t xhci_start_controller(struct xhci_softc *sc) { struct usb_page_search buf_res; struct xhci_hw_root *phwr; struct xhci_dev_ctx_addr *pdctxa; uint64_t addr; uint32_t temp; uint16_t i; DPRINTF("\n"); sc->sc_event_ccs = 1; sc->sc_event_idx = 0; sc->sc_command_ccs = 1; sc->sc_command_idx = 0; /* Reset controller */ XWRITE4(sc, oper, XHCI_USBCMD, XHCI_CMD_HCRST); for (i = 0; i != 100; i++) { usb_pause_mtx(NULL, hz / 100); temp = (XREAD4(sc, oper, XHCI_USBCMD) & XHCI_CMD_HCRST) | (XREAD4(sc, oper, XHCI_USBSTS) & XHCI_STS_CNR); if (!temp) break; } if (temp) { device_printf(sc->sc_bus.parent, "Controller " "reset timeout.\n"); return (USB_ERR_IOERROR); } /* set up number of device slots */ DPRINTF("CONFIG=0x%08x -> 0x%08x\n", XREAD4(sc, oper, XHCI_CONFIG), sc->sc_noslot); XWRITE4(sc, oper, XHCI_CONFIG, sc->sc_noslot); temp = XREAD4(sc, oper, XHCI_USBSTS); /* clear interrupts */ XWRITE4(sc, oper, XHCI_USBSTS, temp); /* disable all device notifications */ XWRITE4(sc, oper, XHCI_DNCTRL, 0); /* set up device context base address */ usbd_get_page(&sc->sc_hw.ctx_pc, 0, &buf_res); pdctxa = buf_res.buffer; memset(pdctxa, 0, sizeof(*pdctxa)); addr = buf_res.physaddr; addr += (uintptr_t)&((struct xhci_dev_ctx_addr *)0)->qwSpBufPtr[0]; /* slot 0 points to the table of scratchpad pointers */ pdctxa->qwBaaDevCtxAddr[0] = htole64(addr); for (i = 0; i != sc->sc_noscratch; i++) { struct usb_page_search buf_scp; usbd_get_page(&sc->sc_hw.scratch_pc[i], 0, &buf_scp); pdctxa->qwSpBufPtr[i] = htole64((uint64_t)buf_scp.physaddr); } addr = buf_res.physaddr; XWRITE4(sc, oper, XHCI_DCBAAP_LO, (uint32_t)addr); XWRITE4(sc, oper, XHCI_DCBAAP_HI, (uint32_t)(addr >> 32)); XWRITE4(sc, oper, XHCI_DCBAAP_LO, (uint32_t)addr); XWRITE4(sc, oper, XHCI_DCBAAP_HI, (uint32_t)(addr >> 32)); /* set up event table size */ DPRINTF("ERSTSZ=0x%08x -> 0x%08x\n", XREAD4(sc, runt, XHCI_ERSTSZ(0)), sc->sc_erst_max); XWRITE4(sc, runt, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(sc->sc_erst_max)); /* set up interrupt rate */
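/* Note: per the xHCI specification the IMOD interval field is expressed in 250 ns units, so e.g. a value of 500 limits the controller to roughly 8000 interrupts per second. */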
XWRITE4(sc, runt, XHCI_IMOD(0), sc->sc_imod_default); usbd_get_page(&sc->sc_hw.root_pc, 0, &buf_res); phwr = buf_res.buffer; addr = buf_res.physaddr; addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_events[0]; /* reset hardware root structure */ memset(phwr, 0, sizeof(*phwr)); phwr->hwr_ring_seg[0].qwEvrsTablePtr = htole64(addr); phwr->hwr_ring_seg[0].dwEvrsTableSize = htole32(XHCI_MAX_EVENTS); DPRINTF("ERDP(0)=0x%016llx\n", (unsigned long long)addr); XWRITE4(sc, runt, XHCI_ERDP_LO(0), (uint32_t)addr); XWRITE4(sc, runt, XHCI_ERDP_HI(0), (uint32_t)(addr >> 32)); addr = buf_res.physaddr; DPRINTF("ERSTBA(0)=0x%016llx\n", (unsigned long long)addr); XWRITE4(sc, runt, XHCI_ERSTBA_LO(0), (uint32_t)addr); XWRITE4(sc, runt, XHCI_ERSTBA_HI(0), (uint32_t)(addr >> 32)); /* set up interrupter registers */ temp = XREAD4(sc, runt, XHCI_IMAN(0)); temp |= XHCI_IMAN_INTR_ENA; XWRITE4(sc, runt, XHCI_IMAN(0), temp); /* set up command ring control base address */ addr = buf_res.physaddr; addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_commands[0]; DPRINTF("CRCR=0x%016llx\n", (unsigned long long)addr); XWRITE4(sc, oper, XHCI_CRCR_LO, ((uint32_t)addr) | XHCI_CRCR_LO_RCS); XWRITE4(sc, oper, XHCI_CRCR_HI, (uint32_t)(addr >> 32)); phwr->hwr_commands[XHCI_MAX_COMMANDS - 1].qwTrb0 = htole64(addr); usb_bus_mem_flush_all(&sc->sc_bus, &xhci_iterate_hw_softc); /* Go! */ XWRITE4(sc, oper, XHCI_USBCMD, XHCI_CMD_RS | XHCI_CMD_INTE | XHCI_CMD_HSEE); for (i = 0; i != 100; i++) { usb_pause_mtx(NULL, hz / 100); temp = XREAD4(sc, oper, XHCI_USBSTS) & XHCI_STS_HCH; if (!temp) break; } if (temp) { XWRITE4(sc, oper, XHCI_USBCMD, 0); device_printf(sc->sc_bus.parent, "Run timeout.\n"); return (USB_ERR_IOERROR); } /* catch any lost interrupts */ xhci_do_poll(&sc->sc_bus); if (sc->sc_port_route != NULL) { /* Route all ports to the XHCI by default */ sc->sc_port_route(sc->sc_bus.parent, ~xhciroute, xhciroute); } return (0); } usb_error_t xhci_halt_controller(struct xhci_softc *sc) { uint32_t temp; uint16_t i; DPRINTF("\n"); sc->sc_capa_off = 0; sc->sc_oper_off = XREAD1(sc, capa, XHCI_CAPLENGTH); sc->sc_runt_off = XREAD4(sc, capa, XHCI_RTSOFF) & ~0xF; sc->sc_door_off = XREAD4(sc, capa, XHCI_DBOFF) & ~0x3; /* Halt controller */ XWRITE4(sc, oper, XHCI_USBCMD, 0); for (i = 0; i != 100; i++) { usb_pause_mtx(NULL, hz / 100); temp = XREAD4(sc, oper, XHCI_USBSTS) & XHCI_STS_HCH; if (temp) break; } if (!temp) { device_printf(sc->sc_bus.parent, "Controller halt timeout.\n"); return (USB_ERR_IOERROR); } return (0); } usb_error_t xhci_init(struct xhci_softc *sc, device_t self, uint8_t dma32) { uint32_t temp; DPRINTF("\n"); /* initialize some bus fields */ sc->sc_bus.parent = self; /* set the bus revision */ sc->sc_bus.usbrev = USB_REV_3_0; /* set up the bus struct */ sc->sc_bus.methods = &xhci_bus_methods; /* set up devices array */ sc->sc_bus.devices = sc->sc_devices; sc->sc_bus.devices_max = XHCI_MAX_DEVICES; /* set default cycle state in case of early interrupts */ sc->sc_event_ccs = 1; sc->sc_command_ccs = 1; /* set up bus space offsets */ sc->sc_capa_off = 0; sc->sc_oper_off = XREAD1(sc, capa, XHCI_CAPLENGTH); sc->sc_runt_off = XREAD4(sc, capa, XHCI_RTSOFF) & ~0x1F; sc->sc_door_off = XREAD4(sc, capa, XHCI_DBOFF) & ~0x3; DPRINTF("CAPLENGTH=0x%x\n", sc->sc_oper_off); DPRINTF("RUNTIMEOFFSET=0x%x\n", sc->sc_runt_off); DPRINTF("DOOROFFSET=0x%x\n", sc->sc_door_off); DPRINTF("xHCI version = 0x%04x\n", XREAD2(sc, capa, XHCI_HCIVERSION)); if (!(XREAD4(sc, oper, XHCI_PAGESIZE) & XHCI_PAGESIZE_4K)) { device_printf(sc->sc_bus.parent, "Controller 
does " "not support 4K page size.\n"); return (ENXIO); } temp = XREAD4(sc, capa, XHCI_HCSPARAMS0); DPRINTF("HCS0 = 0x%08x\n", temp); /* set up context size */ if (XHCI_HCS0_CSZ(temp)) { sc->sc_ctx_is_64_byte = 1; } else { sc->sc_ctx_is_64_byte = 0; } /* get DMA bits */ sc->sc_bus.dma_bits = (XHCI_HCS0_AC64(temp) && xhcidma32 == 0 && dma32 == 0) ? 64 : 32; device_printf(self, "%d bytes context size, %d-bit DMA\n", sc->sc_ctx_is_64_byte ? 64 : 32, (int)sc->sc_bus.dma_bits); temp = XREAD4(sc, capa, XHCI_HCSPARAMS1); /* get number of ports */ sc->sc_noport = XHCI_HCS1_N_PORTS(temp); if (sc->sc_noport == 0) { device_printf(sc->sc_bus.parent, "Invalid number " "of ports: %u\n", sc->sc_noport); return (ENXIO); } sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(temp); DPRINTF("Max slots: %u\n", sc->sc_noslot); if (sc->sc_noslot > XHCI_MAX_DEVICES) sc->sc_noslot = XHCI_MAX_DEVICES; temp = XREAD4(sc, capa, XHCI_HCSPARAMS2); DPRINTF("HCS2=0x%08x\n", temp); /* get number of scratchpads */ sc->sc_noscratch = XHCI_HCS2_SPB_MAX(temp); if (sc->sc_noscratch > XHCI_MAX_SCRATCHPADS) { device_printf(sc->sc_bus.parent, "XHCI requests " "too many scratchpads\n"); return (ENOMEM); } DPRINTF("Max scratch: %u\n", sc->sc_noscratch); /* get event table size */ sc->sc_erst_max = 1U << XHCI_HCS2_ERST_MAX(temp); if (sc->sc_erst_max > XHCI_MAX_RSEG) sc->sc_erst_max = XHCI_MAX_RSEG; temp = XREAD4(sc, capa, XHCI_HCSPARAMS3); /* get maximum exit latency */ sc->sc_exit_lat_max = XHCI_HCS3_U1_DEL(temp) + XHCI_HCS3_U2_DEL(temp) + 250 /* us */; /* Check if we should use the default IMOD value. */ if (sc->sc_imod_default == 0) sc->sc_imod_default = XHCI_IMOD_DEFAULT; /* get all DMA memory */ if (usb_bus_mem_alloc_all(&sc->sc_bus, USB_GET_DMA_TAG(self), &xhci_iterate_hw_softc)) { return (ENOMEM); } /* set up command queue lock and condition variable */ cv_init(&sc->sc_cmd_cv, "CMDQ"); sx_init(&sc->sc_cmd_sx, "CMDQ lock"); sc->sc_config_msg[0].hdr.pm_callback = &xhci_configure_msg; sc->sc_config_msg[0].bus = &sc->sc_bus; sc->sc_config_msg[1].hdr.pm_callback = &xhci_configure_msg; sc->sc_config_msg[1].bus = &sc->sc_bus; return (0); } void xhci_uninit(struct xhci_softc *sc) { /* * NOTE: At this point the control transfer process is gone * and "xhci_configure_msg" is no longer called. Consequently * waiting for the configuration messages to complete is not * needed.
*/ usb_bus_mem_free_all(&sc->sc_bus, &xhci_iterate_hw_softc); cv_destroy(&sc->sc_cmd_cv); sx_destroy(&sc->sc_cmd_sx); } static void xhci_set_hw_power_sleep(struct usb_bus *bus, uint32_t state) { struct xhci_softc *sc = XHCI_BUS2SC(bus); switch (state) { case USB_HW_POWER_SUSPEND: DPRINTF("Stopping the XHCI\n"); xhci_halt_controller(sc); break; case USB_HW_POWER_SHUTDOWN: DPRINTF("Stopping the XHCI\n"); xhci_halt_controller(sc); break; case USB_HW_POWER_RESUME: DPRINTF("Starting the XHCI\n"); xhci_start_controller(sc); break; default: break; } } static usb_error_t xhci_generic_done_sub(struct usb_xfer *xfer) { struct xhci_td *td; struct xhci_td *td_alt_next; uint32_t len; uint8_t status; td = xfer->td_transfer_cache; td_alt_next = td->alt_next; if (xfer->aframes != xfer->nframes) usbd_xfer_set_frame_len(xfer, xfer->aframes, 0); while (1) { usb_pc_cpu_invalidate(td->page_cache); status = td->status; len = td->remainder; DPRINTFN(4, "xfer=%p[%u/%u] rem=%u/%u status=%u\n", xfer, (unsigned int)xfer->aframes, (unsigned int)xfer->nframes, (unsigned int)len, (unsigned int)td->len, (unsigned int)status); /* * Verify the status length and * add the length to "frlengths[]": */ if (len > td->len) { /* should not happen */ DPRINTF("Invalid status length, " "0x%04x/0x%04x bytes\n", len, td->len); status = XHCI_TRB_ERROR_LENGTH; } else if (xfer->aframes != xfer->nframes) { xfer->frlengths[xfer->aframes] += td->len - len; } /* Check for last transfer */ if (((void *)td) == xfer->td_transfer_last) { td = NULL; break; } /* Check for transfer error */ if (status != XHCI_TRB_ERROR_SHORT_PKT && status != XHCI_TRB_ERROR_SUCCESS) { /* the transfer is finished */ td = NULL; break; } /* Check for short transfer */ if (len > 0) { if (xfer->flags_int.short_frames_ok || xfer->flags_int.isochronous_xfr || xfer->flags_int.control_xfr) { /* follow alt next */ td = td->alt_next; } else { /* the transfer is finished */ td = NULL; } break; } td = td->obj_next; if (td->alt_next != td_alt_next) { /* this USB frame is complete */ break; } } /* update transfer cache */ xfer->td_transfer_cache = td; return ((status == XHCI_TRB_ERROR_STALL) ? USB_ERR_STALLED : (status != XHCI_TRB_ERROR_SHORT_PKT && status != XHCI_TRB_ERROR_SUCCESS) ? 
USB_ERR_IOERROR : USB_ERR_NORMAL_COMPLETION); } static void xhci_generic_done(struct usb_xfer *xfer) { usb_error_t err = 0; DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n", xfer, xfer->endpoint); /* reset scanner */ xfer->td_transfer_cache = xfer->td_transfer_first; if (xfer->flags_int.control_xfr) { if (xfer->flags_int.control_hdr) err = xhci_generic_done_sub(xfer); xfer->aframes = 1; if (xfer->td_transfer_cache == NULL) goto done; } while (xfer->aframes != xfer->nframes) { err = xhci_generic_done_sub(xfer); xfer->aframes++; if (xfer->td_transfer_cache == NULL) goto done; } if (xfer->flags_int.control_xfr && !xfer->flags_int.control_act) err = xhci_generic_done_sub(xfer); done: /* transfer is complete */ xhci_device_done(xfer, err); } static void xhci_activate_transfer(struct usb_xfer *xfer) { struct xhci_td *td; td = xfer->td_transfer_cache; usb_pc_cpu_invalidate(td->page_cache); if (!(td->td_trb[0].dwTrb3 & htole32(XHCI_TRB_3_CYCLE_BIT))) { /* activate the transfer */ td->td_trb[0].dwTrb3 |= htole32(XHCI_TRB_3_CYCLE_BIT); usb_pc_cpu_flush(td->page_cache); xhci_endpoint_doorbell(xfer); } } static void xhci_skip_transfer(struct usb_xfer *xfer) { struct xhci_td *td; struct xhci_td *td_last; td = xfer->td_transfer_cache; td_last = xfer->td_transfer_last; td = td->alt_next; usb_pc_cpu_invalidate(td->page_cache); if (!(td->td_trb[0].dwTrb3 & htole32(XHCI_TRB_3_CYCLE_BIT))) { usb_pc_cpu_invalidate(td_last->page_cache); /* copy LINK TRB to current waiting location */ td->td_trb[0].qwTrb0 = td_last->td_trb[td_last->ntrb].qwTrb0; td->td_trb[0].dwTrb2 = td_last->td_trb[td_last->ntrb].dwTrb2; usb_pc_cpu_flush(td->page_cache); td->td_trb[0].dwTrb3 = td_last->td_trb[td_last->ntrb].dwTrb3; usb_pc_cpu_flush(td->page_cache); xhci_endpoint_doorbell(xfer); } } /*------------------------------------------------------------------------* * xhci_check_transfer *------------------------------------------------------------------------*/ static void xhci_check_transfer(struct xhci_softc *sc, struct xhci_trb *trb) { struct xhci_endpoint_ext *pepext; int64_t offset; uint64_t td_event; uint32_t temp; uint32_t remainder; uint16_t stream_id; uint16_t i; uint8_t status; uint8_t halted; uint8_t epno; uint8_t index; /* decode TRB */ td_event = le64toh(trb->qwTrb0); temp = le32toh(trb->dwTrb2); remainder = XHCI_TRB_2_REM_GET(temp); status = XHCI_TRB_2_ERROR_GET(temp); stream_id = XHCI_TRB_2_STREAM_GET(temp); temp = le32toh(trb->dwTrb3); epno = XHCI_TRB_3_EP_GET(temp); index = XHCI_TRB_3_SLOT_GET(temp); /* check if error means halted */ halted = (status != XHCI_TRB_ERROR_SHORT_PKT && status != XHCI_TRB_ERROR_SUCCESS); DPRINTF("slot=%u epno=%u stream=%u remainder=%u status=%u\n", index, epno, stream_id, remainder, status); if (index > sc->sc_noslot) { DPRINTF("Invalid slot.\n"); return; } if ((epno == 0) || (epno >= XHCI_MAX_ENDPOINTS)) { DPRINTF("Invalid endpoint.\n"); return; } pepext = &sc->sc_hw.devs[index].endp[epno]; if (pepext->trb_ep_mode != USB_EP_MODE_STREAMS) { stream_id = 0; DPRINTF("stream_id=0\n"); } else if (stream_id >= XHCI_MAX_STREAMS) { DPRINTF("Invalid stream ID.\n"); return; } /* try to find the USB transfer that generated the event */ for (i = 0; i != (XHCI_MAX_TRANSFERS - 1); i++) { struct usb_xfer *xfer; struct xhci_td *td; xfer = pepext->xfer[i + (XHCI_MAX_TRANSFERS * stream_id)]; if (xfer == NULL) continue; td = xfer->td_transfer_cache; DPRINTFN(5, "Checking if 0x%016llx == (0x%016llx .. 
0x%016llx)\n", (long long)td_event, (long long)td->td_self, (long long)td->td_self + sizeof(td->td_trb)); /* * NOTE: Some XHCI implementations might not trigger * an event on the last LINK TRB so we need to * consider both the last and second last event * address as conditions for a successful transfer. * * NOTE: We assume that the XHCI will only trigger one * event per chain of TRBs. */ offset = td_event - td->td_self; if (offset >= 0 && offset < (int64_t)sizeof(td->td_trb)) { usb_pc_cpu_invalidate(td->page_cache); /* compute rest of remainder, if any */ for (i = (offset / 16) + 1; i < td->ntrb; i++) { temp = le32toh(td->td_trb[i].dwTrb2); remainder += XHCI_TRB_2_BYTES_GET(temp); } DPRINTFN(5, "New remainder: %u\n", remainder); /* clear isochronous transfer errors */ if (xfer->flags_int.isochronous_xfr) { if (halted) { halted = 0; status = XHCI_TRB_ERROR_SUCCESS; remainder = td->len; } } /* "td->remainder" is verified later */ td->remainder = remainder; td->status = status; usb_pc_cpu_flush(td->page_cache); /* * 1) Last transfer descriptor makes the * transfer done */ if (((void *)td) == xfer->td_transfer_last) { DPRINTF("TD is last\n"); xhci_generic_done(xfer); break; } /* * 2) Any kind of error makes the transfer * done */ if (halted) { DPRINTF("TD has I/O error\n"); xhci_generic_done(xfer); break; } /* * 3) If there is no alternate next transfer, * a short packet also makes the transfer done */ if (td->remainder > 0) { if (td->alt_next == NULL) { DPRINTF( "short TD has no alternate next\n"); xhci_generic_done(xfer); break; } DPRINTF("TD has short pkt\n"); if (xfer->flags_int.short_frames_ok || xfer->flags_int.isochronous_xfr || xfer->flags_int.control_xfr) { /* follow the alt next */ xfer->td_transfer_cache = td->alt_next; xhci_activate_transfer(xfer); break; } xhci_skip_transfer(xfer); xhci_generic_done(xfer); break; } /* * 4) Transfer complete - go to next TD */ DPRINTF("Following next TD\n"); xfer->td_transfer_cache = td->obj_next; xhci_activate_transfer(xfer); break; /* there should only be one match */ } } } static int xhci_check_command(struct xhci_softc *sc, struct xhci_trb *trb) { if (sc->sc_cmd_addr == trb->qwTrb0) { DPRINTF("Received command event\n"); sc->sc_cmd_result[0] = trb->dwTrb2; sc->sc_cmd_result[1] = trb->dwTrb3; cv_signal(&sc->sc_cmd_cv); return (1); /* command match */ } return (0); } static int xhci_interrupt_poll(struct xhci_softc *sc) { struct usb_page_search buf_res; struct xhci_hw_root *phwr; uint64_t addr; uint32_t temp; int retval = 0; uint16_t i; uint8_t event; uint8_t j; uint8_t k; uint8_t t; usbd_get_page(&sc->sc_hw.root_pc, 0, &buf_res); phwr = buf_res.buffer; /* Receive any events */ usb_pc_cpu_invalidate(&sc->sc_hw.root_pc); i = sc->sc_event_idx; j = sc->sc_event_ccs; t = 2; while (1) { temp = le32toh(phwr->hwr_events[i].dwTrb3); k = (temp & XHCI_TRB_3_CYCLE_BIT) ? 
1 : 0; if (j != k) break; event = XHCI_TRB_3_TYPE_GET(temp); DPRINTFN(10, "event[%u] = %u (0x%016llx 0x%08lx 0x%08lx)\n", i, event, (long long)le64toh(phwr->hwr_events[i].qwTrb0), (long)le32toh(phwr->hwr_events[i].dwTrb2), (long)le32toh(phwr->hwr_events[i].dwTrb3)); switch (event) { case XHCI_TRB_EVENT_TRANSFER: xhci_check_transfer(sc, &phwr->hwr_events[i]); break; case XHCI_TRB_EVENT_CMD_COMPLETE: retval |= xhci_check_command(sc, &phwr->hwr_events[i]); break; default: DPRINTF("Unhandled event = %u\n", event); break; } i++; if (i == XHCI_MAX_EVENTS) { i = 0; j ^= 1; /* check for timeout */ if (!--t) break; } } sc->sc_event_idx = i; sc->sc_event_ccs = j; /* * NOTE: The Event Ring Dequeue Pointer Register is 64-bit * latched. That means to activate the register we need to * write both the low and high double word of the 64-bit * register. */ addr = buf_res.physaddr; addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_events[i]; /* try to clear busy bit */ addr |= XHCI_ERDP_LO_BUSY; XWRITE4(sc, runt, XHCI_ERDP_LO(0), (uint32_t)addr); XWRITE4(sc, runt, XHCI_ERDP_HI(0), (uint32_t)(addr >> 32)); return (retval); } static usb_error_t xhci_do_command(struct xhci_softc *sc, struct xhci_trb *trb, uint16_t timeout_ms) { struct usb_page_search buf_res; struct xhci_hw_root *phwr; uint64_t addr; uint32_t temp; uint8_t i; uint8_t j; uint8_t timeout = 0; int err; XHCI_CMD_ASSERT_LOCKED(sc); /* get hardware root structure */ usbd_get_page(&sc->sc_hw.root_pc, 0, &buf_res); phwr = buf_res.buffer; /* Queue command */ USB_BUS_LOCK(&sc->sc_bus); retry: i = sc->sc_command_idx; j = sc->sc_command_ccs; DPRINTFN(10, "command[%u] = %u (0x%016llx, 0x%08lx, 0x%08lx)\n", i, XHCI_TRB_3_TYPE_GET(le32toh(trb->dwTrb3)), (long long)le64toh(trb->qwTrb0), (long)le32toh(trb->dwTrb2), (long)le32toh(trb->dwTrb3)); phwr->hwr_commands[i].qwTrb0 = trb->qwTrb0; phwr->hwr_commands[i].dwTrb2 = trb->dwTrb2; usb_pc_cpu_flush(&sc->sc_hw.root_pc); temp = trb->dwTrb3; if (j) temp |= htole32(XHCI_TRB_3_CYCLE_BIT); else temp &= ~htole32(XHCI_TRB_3_CYCLE_BIT); temp &= ~htole32(XHCI_TRB_3_TC_BIT); phwr->hwr_commands[i].dwTrb3 = temp; usb_pc_cpu_flush(&sc->sc_hw.root_pc); addr = buf_res.physaddr; addr += (uintptr_t)&((struct xhci_hw_root *)0)->hwr_commands[i]; sc->sc_cmd_addr = htole64(addr); i++; if (i == (XHCI_MAX_COMMANDS - 1)) { if (j) { temp = htole32(XHCI_TRB_3_TC_BIT | XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | XHCI_TRB_3_CYCLE_BIT); } else { temp = htole32(XHCI_TRB_3_TC_BIT | XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK)); } phwr->hwr_commands[i].dwTrb3 = temp; usb_pc_cpu_flush(&sc->sc_hw.root_pc); i = 0; j ^= 1; } sc->sc_command_idx = i; sc->sc_command_ccs = j; XWRITE4(sc, door, XHCI_DOORBELL(0), 0); err = cv_timedwait(&sc->sc_cmd_cv, &sc->sc_bus.bus_mtx, USB_MS_TO_TICKS(timeout_ms)); /* * In some error cases event interrupts are not generated. * Poll one time to see if the command has completed. */ if (err != 0 && xhci_interrupt_poll(sc) != 0) { DPRINTF("Command was completed when polling\n"); err = 0; } if (err != 0) { DPRINTF("Command timeout!\n"); /* * After some weeks of continuous operation, it has * been observed that the ASMedia Technology, ASM1042 * SuperSpeed USB Host Controller can suddenly stop * accepting commands via the command queue. Try to * first reset the command queue. If that fails do a * host controller reset. 
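* Only one command queue reset is attempted per command (tracked by
* the "timeout" flag below); if the retried command times out as
* well, fall back to an asynchronous reset of the whole controller.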
*/ if (timeout == 0 && xhci_reset_command_queue_locked(sc) == 0) { temp = le32toh(trb->dwTrb3); /* * Avoid infinite XHCI reset loops if the set * address command fails to respond due to a * non-enumerating device: */ if (XHCI_TRB_3_TYPE_GET(temp) == XHCI_TRB_TYPE_ADDRESS_DEVICE && (temp & XHCI_TRB_3_BSR_BIT) == 0) { DPRINTF("Set address timeout\n"); } else { timeout = 1; goto retry; } } else { DPRINTF("Controller reset!\n"); usb_bus_reset_async_locked(&sc->sc_bus); } err = USB_ERR_TIMEOUT; trb->dwTrb2 = 0; trb->dwTrb3 = 0; } else { temp = le32toh(sc->sc_cmd_result[0]); if (XHCI_TRB_2_ERROR_GET(temp) != XHCI_TRB_ERROR_SUCCESS) err = USB_ERR_IOERROR; trb->dwTrb2 = sc->sc_cmd_result[0]; trb->dwTrb3 = sc->sc_cmd_result[1]; } USB_BUS_UNLOCK(&sc->sc_bus); return (err); } #if 0 static usb_error_t xhci_cmd_nop(struct xhci_softc *sc) { struct xhci_trb trb; uint32_t temp; DPRINTF("\n"); trb.qwTrb0 = 0; trb.dwTrb2 = 0; temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NOOP); trb.dwTrb3 = htole32(temp); return (xhci_do_command(sc, &trb, 100 /* ms */)); } #endif static usb_error_t xhci_cmd_enable_slot(struct xhci_softc *sc, uint8_t *pslot) { struct xhci_trb trb; uint32_t temp; usb_error_t err; DPRINTF("\n"); trb.qwTrb0 = 0; trb.dwTrb2 = 0; trb.dwTrb3 = htole32(XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT)); err = xhci_do_command(sc, &trb, 100 /* ms */); if (err) goto done; temp = le32toh(trb.dwTrb3); *pslot = XHCI_TRB_3_SLOT_GET(temp); done: return (err); } static usb_error_t xhci_cmd_disable_slot(struct xhci_softc *sc, uint8_t slot_id) { struct xhci_trb trb; uint32_t temp; DPRINTF("\n"); trb.qwTrb0 = 0; trb.dwTrb2 = 0; temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT) | XHCI_TRB_3_SLOT_SET(slot_id); trb.dwTrb3 = htole32(temp); return (xhci_do_command(sc, &trb, 100 /* ms */)); } static usb_error_t xhci_cmd_set_address(struct xhci_softc *sc, uint64_t input_ctx, uint8_t bsr, uint8_t slot_id) { struct xhci_trb trb; uint32_t temp; DPRINTF("\n"); trb.qwTrb0 = htole64(input_ctx); trb.dwTrb2 = 0; temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) | XHCI_TRB_3_SLOT_SET(slot_id); if (bsr) temp |= XHCI_TRB_3_BSR_BIT; trb.dwTrb3 = htole32(temp); return (xhci_do_command(sc, &trb, 500 /* ms */)); } static usb_error_t xhci_set_address(struct usb_device *udev, struct mtx *mtx, uint16_t address) { struct usb_page_search buf_inp; struct usb_page_search buf_dev; struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); struct xhci_hw_dev *hdev; struct xhci_dev_ctx *pdev; struct xhci_endpoint_ext *pepext; uint32_t temp; uint16_t mps; usb_error_t err; uint8_t index; /* the root HUB case is not handled here */ if (udev->parent_hub == NULL) return (USB_ERR_INVAL); index = udev->controller_slot_id; hdev = &sc->sc_hw.devs[index]; if (mtx != NULL) mtx_unlock(mtx); XHCI_CMD_LOCK(sc); switch (hdev->state) { case XHCI_ST_DEFAULT: case XHCI_ST_ENABLED: hdev->state = XHCI_ST_ENABLED; /* set configure mask to slot and EP0 */ xhci_configure_mask(udev, 3, 0); /* configure input slot context structure */ err = xhci_configure_device(udev); if (err != 0) { DPRINTF("Could not configure device\n"); break; } /* configure input endpoint context structure */ switch (udev->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: mps = 8; break; case USB_SPEED_HIGH: mps = 64; break; default: mps = 512; break; } pepext = xhci_get_endpoint_ext(udev, &udev->ctrl_ep_desc); /* ensure the control endpoint is setup again */ USB_BUS_LOCK(udev->bus); pepext->trb_halted = 1; pepext->trb_running = 0; USB_BUS_UNLOCK(udev->bus); err = xhci_configure_endpoint(udev, 
&udev->ctrl_ep_desc, pepext, 0, 1, 1, 0, mps, mps, USB_EP_MODE_DEFAULT); if (err != 0) { DPRINTF("Could not configure default endpoint\n"); break; } /* execute set address command */ usbd_get_page(&hdev->input_pc, 0, &buf_inp); err = xhci_cmd_set_address(sc, buf_inp.physaddr, (address == 0), index); if (err != 0) { temp = le32toh(sc->sc_cmd_result[0]); if (address == 0 && sc->sc_port_route != NULL && XHCI_TRB_2_ERROR_GET(temp) == XHCI_TRB_ERROR_PARAMETER) { /* LynxPoint XHCI - ports are not switchable */ /* Un-route all ports from the XHCI */ sc->sc_port_route(sc->sc_bus.parent, 0, ~0); } DPRINTF("Could not set address " "for slot %u.\n", index); if (address != 0) break; } /* update device address to new value */ usbd_get_page(&hdev->device_pc, 0, &buf_dev); pdev = buf_dev.buffer; usb_pc_cpu_invalidate(&hdev->device_pc); temp = xhci_ctx_get_le32(sc, &pdev->ctx_slot.dwSctx3); udev->address = XHCI_SCTX_3_DEV_ADDR_GET(temp); /* update device state to new value */ if (address != 0) hdev->state = XHCI_ST_ADDRESSED; else hdev->state = XHCI_ST_DEFAULT; break; default: DPRINTF("Wrong state for set address.\n"); err = USB_ERR_IOERROR; break; } XHCI_CMD_UNLOCK(sc); if (mtx != NULL) mtx_lock(mtx); return (err); } static usb_error_t xhci_cmd_configure_ep(struct xhci_softc *sc, uint64_t input_ctx, uint8_t deconfigure, uint8_t slot_id) { struct xhci_trb trb; uint32_t temp; DPRINTF("\n"); trb.qwTrb0 = htole64(input_ctx); trb.dwTrb2 = 0; temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP) | XHCI_TRB_3_SLOT_SET(slot_id); if (deconfigure) temp |= XHCI_TRB_3_DCEP_BIT; trb.dwTrb3 = htole32(temp); return (xhci_do_command(sc, &trb, 100 /* ms */)); } static usb_error_t xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint64_t input_ctx, uint8_t slot_id) { struct xhci_trb trb; uint32_t temp; DPRINTF("\n"); trb.qwTrb0 = htole64(input_ctx); trb.dwTrb2 = 0; temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX) | XHCI_TRB_3_SLOT_SET(slot_id); trb.dwTrb3 = htole32(temp); return (xhci_do_command(sc, &trb, 100 /* ms */)); } static usb_error_t xhci_cmd_reset_ep(struct xhci_softc *sc, uint8_t preserve, uint8_t ep_id, uint8_t slot_id) { struct xhci_trb trb; uint32_t temp; DPRINTF("\n"); trb.qwTrb0 = 0; trb.dwTrb2 = 0; temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP) | XHCI_TRB_3_SLOT_SET(slot_id) | XHCI_TRB_3_EP_SET(ep_id); if (preserve) temp |= XHCI_TRB_3_PRSV_BIT; trb.dwTrb3 = htole32(temp); return (xhci_do_command(sc, &trb, 100 /* ms */)); } static usb_error_t xhci_cmd_set_tr_dequeue_ptr(struct xhci_softc *sc, uint64_t dequeue_ptr, uint16_t stream_id, uint8_t ep_id, uint8_t slot_id) { struct xhci_trb trb; uint32_t temp; DPRINTF("\n"); trb.qwTrb0 = htole64(dequeue_ptr); temp = XHCI_TRB_2_STREAM_SET(stream_id); trb.dwTrb2 = htole32(temp); temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE) | XHCI_TRB_3_SLOT_SET(slot_id) | XHCI_TRB_3_EP_SET(ep_id); trb.dwTrb3 = htole32(temp); return (xhci_do_command(sc, &trb, 100 /* ms */)); } static usb_error_t xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t suspend, uint8_t ep_id, uint8_t slot_id) { struct xhci_trb trb; uint32_t temp; DPRINTF("\n"); trb.qwTrb0 = 0; trb.dwTrb2 = 0; temp = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) | XHCI_TRB_3_SLOT_SET(slot_id) | XHCI_TRB_3_EP_SET(ep_id); if (suspend) temp |= XHCI_TRB_3_SUSP_EP_BIT; trb.dwTrb3 = htole32(temp); return (xhci_do_command(sc, &trb, 100 /* ms */)); } static usb_error_t xhci_cmd_reset_dev(struct xhci_softc *sc, uint8_t slot_id) { struct xhci_trb trb; uint32_t temp; DPRINTF("\n"); trb.qwTrb0 = 0; trb.dwTrb2 = 0; temp = 
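/*
 * Sketch of how the 64-bit argument to xhci_cmd_set_tr_dequeue_ptr()
 * above is typically formed by its caller (compare
 * xhci_configure_reset_endpoint() later in this file): the ring
 * position is combined with the consumer cycle state (DCS) in bit 0.
 */
#if 0
	uint64_t ptr;

	ptr = pepext->physaddr +
	    (stream_id * sizeof(struct xhci_trb) * XHCI_MAX_TRANSFERS);
	ptr |= XHCI_EPCTX_2_DCS_SET(1);	/* DCS travels in bit 0 */
	err = xhci_cmd_set_tr_dequeue_ptr(sc, ptr, stream_id, epno, index);
#endif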
XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_DEVICE) | XHCI_TRB_3_SLOT_SET(slot_id); trb.dwTrb3 = htole32(temp); return (xhci_do_command(sc, &trb, 100 /* ms */)); } /*------------------------------------------------------------------------* * xhci_interrupt - XHCI interrupt handler *------------------------------------------------------------------------*/ void xhci_interrupt(struct xhci_softc *sc) { uint32_t status; uint32_t temp; USB_BUS_LOCK(&sc->sc_bus); status = XREAD4(sc, oper, XHCI_USBSTS); /* acknowledge interrupts, if any */ if (status != 0) { XWRITE4(sc, oper, XHCI_USBSTS, status); DPRINTFN(16, "real interrupt (status=0x%08x)\n", status); } temp = XREAD4(sc, runt, XHCI_IMAN(0)); /* force clearing of pending interrupts */ if (temp & XHCI_IMAN_INTR_PEND) XWRITE4(sc, runt, XHCI_IMAN(0), temp); /* check for event(s) */ xhci_interrupt_poll(sc); if (status & (XHCI_STS_PCD | XHCI_STS_HCH | XHCI_STS_HSE | XHCI_STS_HCE)) { if (status & XHCI_STS_PCD) { xhci_root_intr(sc); } if (status & XHCI_STS_HCH) { printf("%s: host controller halted\n", __FUNCTION__); } if (status & XHCI_STS_HSE) { printf("%s: host system error\n", __FUNCTION__); } if (status & XHCI_STS_HCE) { printf("%s: host controller error\n", __FUNCTION__); } } USB_BUS_UNLOCK(&sc->sc_bus); } /*------------------------------------------------------------------------* * xhci_timeout - XHCI timeout handler *------------------------------------------------------------------------*/ static void xhci_timeout(void *arg) { struct usb_xfer *xfer = arg; DPRINTF("xfer=%p\n", xfer); USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); /* transfer is transferred */ xhci_device_done(xfer, USB_ERR_TIMEOUT); } static void xhci_do_poll(struct usb_bus *bus) { struct xhci_softc *sc = XHCI_BUS2SC(bus); USB_BUS_LOCK(&sc->sc_bus); xhci_interrupt_poll(sc); USB_BUS_UNLOCK(&sc->sc_bus); } static void xhci_setup_generic_chain_sub(struct xhci_std_temp *temp) { struct usb_page_search buf_res; struct xhci_td *td; struct xhci_td *td_next; struct xhci_td *td_alt_next; struct xhci_td *td_first; uint32_t buf_offset; uint32_t average; uint32_t len_old; uint32_t npkt_off; uint32_t dword; uint8_t shortpkt_old; uint8_t precompute; uint8_t x; td_alt_next = NULL; buf_offset = 0; shortpkt_old = temp->shortpkt; len_old = temp->len; npkt_off = 0; precompute = 1; restart: td = temp->td; td_next = td_first = temp->td_next; while (1) { if (temp->len == 0) { if (temp->shortpkt) break; /* send a Zero Length Packet, ZLP, last */ temp->shortpkt = 1; average = 0; } else { average = temp->average; if (temp->len < average) { if (temp->len % temp->max_packet_size) { temp->shortpkt = 1; } average = temp->len; } } if (td_next == NULL) panic("%s: out of XHCI transfer descriptors!", __FUNCTION__); /* get next TD */ td = td_next; td_next = td->obj_next; /* check if we are pre-computing */ if (precompute) { /* update remaining length */ temp->len -= average; continue; } /* fill out current TD */ td->len = average; td->remainder = 0; td->status = 0; /* update remaining length */ temp->len -= average; /* reset TRB index */ x = 0; if (temp->trb_type == XHCI_TRB_TYPE_SETUP_STAGE) { /* immediate data */ if (average > 8) average = 8; td->td_trb[0].qwTrb0 = 0; usbd_copy_out(temp->pc, temp->offset + buf_offset, (uint8_t *)(uintptr_t)&td->td_trb[0].qwTrb0, average); dword = XHCI_TRB_2_BYTES_SET(8) | XHCI_TRB_2_TDSZ_SET(0) | XHCI_TRB_2_IRQ_SET(0); td->td_trb[0].dwTrb2 = htole32(dword); dword = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) | XHCI_TRB_3_IDT_BIT | XHCI_TRB_3_CYCLE_BIT; /* check wLength */ if 
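/*
 * The SETUP stage built above uses immediate data (XHCI_TRB_3_IDT_BIT):
 * the 8-byte device request is copied straight into qwTrb0 instead of
 * being referenced by address.  The wLength test that follows inspects
 * those immediate bytes:
 *
 *	wLength == 0		no TRT bits (no data stage)
 *	wLength != 0, IN	XHCI_TRB_3_TRT_IN
 *	wLength != 0, OUT	XHCI_TRB_3_TRT_OUT
 */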
(td->td_trb[0].qwTrb0 & htole64(XHCI_TRB_0_WLENGTH_MASK)) { if (td->td_trb[0].qwTrb0 & htole64(XHCI_TRB_0_DIR_IN_MASK)) dword |= XHCI_TRB_3_TRT_IN; else dword |= XHCI_TRB_3_TRT_OUT; } td->td_trb[0].dwTrb3 = htole32(dword); #ifdef USB_DEBUG xhci_dump_trb(&td->td_trb[x]); #endif x++; } else do { uint32_t npkt; /* fill out buffer pointers */ if (average == 0) { memset(&buf_res, 0, sizeof(buf_res)); } else { usbd_get_page(temp->pc, temp->offset + buf_offset, &buf_res); /* get length to end of page */ if (buf_res.length > average) buf_res.length = average; /* check for maximum length */ if (buf_res.length > XHCI_TD_PAGE_SIZE) buf_res.length = XHCI_TD_PAGE_SIZE; npkt_off += buf_res.length; } /* set up npkt */ - npkt = (len_old - npkt_off + temp->max_packet_size - 1) / - temp->max_packet_size; + npkt = howmany(len_old - npkt_off, + temp->max_packet_size); if (npkt == 0) npkt = 1; else if (npkt > 31) npkt = 31; /* fill out TRB's */ td->td_trb[x].qwTrb0 = htole64((uint64_t)buf_res.physaddr); dword = XHCI_TRB_2_BYTES_SET(buf_res.length) | XHCI_TRB_2_TDSZ_SET(npkt) | XHCI_TRB_2_IRQ_SET(0); td->td_trb[x].dwTrb2 = htole32(dword); switch (temp->trb_type) { case XHCI_TRB_TYPE_ISOCH: dword = XHCI_TRB_3_CHAIN_BIT | XHCI_TRB_3_CYCLE_BIT | XHCI_TRB_3_TBC_SET(temp->tbc) | XHCI_TRB_3_TLBPC_SET(temp->tlbpc); if (td != td_first) { dword |= XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL); } else if (temp->do_isoc_sync != 0) { temp->do_isoc_sync = 0; /* wait until "isoc_frame" */ dword |= XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) | XHCI_TRB_3_FRID_SET(temp->isoc_frame / 8); } else { /* start data transfer at next interval */ dword |= XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) | XHCI_TRB_3_ISO_SIA_BIT; } if (temp->direction == UE_DIR_IN) dword |= XHCI_TRB_3_ISP_BIT; break; case XHCI_TRB_TYPE_DATA_STAGE: dword = XHCI_TRB_3_CHAIN_BIT | XHCI_TRB_3_CYCLE_BIT | XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE); if (temp->direction == UE_DIR_IN) dword |= XHCI_TRB_3_DIR_IN | XHCI_TRB_3_ISP_BIT; /* * Section 3.2.9 in the XHCI * specification about control * transfers says that we should use a * normal-TRB if there are more TRBs * extending the data-stage * TRB. Update the "trb_type". */ temp->trb_type = XHCI_TRB_TYPE_NORMAL; break; case XHCI_TRB_TYPE_STATUS_STAGE: dword = XHCI_TRB_3_CHAIN_BIT | XHCI_TRB_3_CYCLE_BIT | XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE); if (temp->direction == UE_DIR_IN) dword |= XHCI_TRB_3_DIR_IN; break; default: /* XHCI_TRB_TYPE_NORMAL */ dword = XHCI_TRB_3_CHAIN_BIT | XHCI_TRB_3_CYCLE_BIT | XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL); if (temp->direction == UE_DIR_IN) dword |= XHCI_TRB_3_ISP_BIT; break; } td->td_trb[x].dwTrb3 = htole32(dword); average -= buf_res.length; buf_offset += buf_res.length; #ifdef USB_DEBUG xhci_dump_trb(&td->td_trb[x]); #endif x++; } while (average != 0); td->td_trb[x-1].dwTrb3 |= htole32(XHCI_TRB_3_IOC_BIT); /* store number of data TRB's */ td->ntrb = x; DPRINTF("NTRB=%u\n", x); /* fill out link TRB */ if (td_next != NULL) { /* link the current TD with the next one */ td->td_trb[x].qwTrb0 = htole64((uint64_t)td_next->td_self); DPRINTF("LINK=0x%08llx\n", (long long)td_next->td_self); } else { /* this field will get updated later */ DPRINTF("NOLINK\n"); } dword = XHCI_TRB_2_IRQ_SET(0); td->td_trb[x].dwTrb2 = htole32(dword); dword = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | XHCI_TRB_3_CYCLE_BIT | XHCI_TRB_3_IOC_BIT | /* * CHAIN-BIT: Ensure that a multi-TRB IN-endpoint * frame only receives a single short packet event * by setting the CHAIN bit in the LINK field. 
In * addition some XHCI controllers have problems * sending a ZLP unless the CHAIN-BIT is set in * the LINK TRB. */ XHCI_TRB_3_CHAIN_BIT; td->td_trb[x].dwTrb3 = htole32(dword); td->alt_next = td_alt_next; #ifdef USB_DEBUG xhci_dump_trb(&td->td_trb[x]); #endif usb_pc_cpu_flush(td->page_cache); } if (precompute) { precompute = 0; /* set up alt next pointer, if any */ if (temp->last_frame) { td_alt_next = NULL; } else { /* we use this field internally */ td_alt_next = td_next; } /* restore */ temp->shortpkt = shortpkt_old; temp->len = len_old; goto restart; } /* * Remove cycle bit from the first TRB if we are * stepping them: */ if (temp->step_td != 0) { td_first->td_trb[0].dwTrb3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT); usb_pc_cpu_flush(td_first->page_cache); } /* clear TD SIZE to zero, hence this is the last TRB */ /* remove chain bit because this is the last data TRB in the chain */ td->td_trb[td->ntrb - 1].dwTrb2 &= ~htole32(XHCI_TRB_2_TDSZ_SET(15)); td->td_trb[td->ntrb - 1].dwTrb3 &= ~htole32(XHCI_TRB_3_CHAIN_BIT); /* remove CHAIN-BIT from last LINK TRB */ td->td_trb[td->ntrb].dwTrb3 &= ~htole32(XHCI_TRB_3_CHAIN_BIT); usb_pc_cpu_flush(td->page_cache); temp->td = td; temp->td_next = td_next; } static void xhci_setup_generic_chain(struct usb_xfer *xfer) { struct xhci_std_temp temp; struct xhci_td *td; uint32_t x; uint32_t y; uint8_t mult; temp.do_isoc_sync = 0; temp.step_td = 0; temp.tbc = 0; temp.tlbpc = 0; temp.average = xfer->max_hc_frame_size; temp.max_packet_size = xfer->max_packet_size; temp.sc = XHCI_BUS2SC(xfer->xroot->bus); temp.pc = NULL; temp.last_frame = 0; temp.offset = 0; temp.multishort = xfer->flags_int.isochronous_xfr || xfer->flags_int.control_xfr || xfer->flags_int.short_frames_ok; /* toggle the DMA set we are using */ xfer->flags_int.curr_dma_set ^= 1; /* get next DMA set */ td = xfer->td_start[xfer->flags_int.curr_dma_set]; temp.td = NULL; temp.td_next = td; xfer->td_transfer_first = td; xfer->td_transfer_cache = td; if (xfer->flags_int.isochronous_xfr) { uint8_t shift; /* compute multiplier for ISOCHRONOUS transfers */ mult = xfer->endpoint->ecomp ? UE_GET_SS_ISO_MULT(xfer->endpoint->ecomp->bmAttributes) : 0; /* check for USB 2.0 multiplier */ if (mult == 0) { mult = (xfer->endpoint->edesc-> wMaxPacketSize[1] >> 3) & 3; } /* range check */ if (mult > 2) mult = 3; else mult++; x = XREAD4(temp.sc, runt, XHCI_MFINDEX); DPRINTF("MFINDEX=0x%08x\n", x); switch (usbd_get_speed(xfer->xroot->udev)) { case USB_SPEED_FULL: shift = 3; temp.isoc_delta = 8; /* 1ms */ x += temp.isoc_delta - 1; x &= ~(temp.isoc_delta - 1); break; default: shift = usbd_xfer_get_fps_shift(xfer); temp.isoc_delta = 1U << shift; x += temp.isoc_delta - 1; x &= ~(temp.isoc_delta - 1); /* simple frame load balancing */ x += xfer->endpoint->usb_uframe; break; } y = XHCI_MFINDEX_GET(x - xfer->endpoint->isoc_next); if ((xfer->endpoint->is_synced == 0) || (y < (xfer->nframes << shift)) || (XHCI_MFINDEX_GET(-y) >= (128 * 8))) { /* * If there is data underflow or the pipe * queue is empty we schedule the transfer a * few frames ahead of the current frame * position. Else two isochronous transfers * might overlap. 
*/ xfer->endpoint->isoc_next = XHCI_MFINDEX_GET(x + (3 * 8)); xfer->endpoint->is_synced = 1; temp.do_isoc_sync = 1; DPRINTFN(3, "start next=%d\n", xfer->endpoint->isoc_next); } /* compute isochronous completion time */ y = XHCI_MFINDEX_GET(xfer->endpoint->isoc_next - (x & ~7)); xfer->isoc_time_complete = usb_isoc_time_expand(&temp.sc->sc_bus, x / 8) + (y / 8) + (((xfer->nframes << shift) + 7) / 8); x = 0; temp.isoc_frame = xfer->endpoint->isoc_next; temp.trb_type = XHCI_TRB_TYPE_ISOCH; xfer->endpoint->isoc_next += xfer->nframes << shift; } else if (xfer->flags_int.control_xfr) { /* check if we should prepend a setup message */ if (xfer->flags_int.control_hdr) { temp.len = xfer->frlengths[0]; temp.pc = xfer->frbuffers + 0; temp.shortpkt = temp.len ? 1 : 0; temp.trb_type = XHCI_TRB_TYPE_SETUP_STAGE; temp.direction = 0; /* check for last frame */ if (xfer->nframes == 1) { /* no STATUS stage yet, SETUP is last */ if (xfer->flags_int.control_act) temp.last_frame = 1; } xhci_setup_generic_chain_sub(&temp); } x = 1; mult = 1; temp.isoc_delta = 0; temp.isoc_frame = 0; temp.trb_type = xfer->flags_int.control_did_data ? XHCI_TRB_TYPE_NORMAL : XHCI_TRB_TYPE_DATA_STAGE; } else { x = 0; mult = 1; temp.isoc_delta = 0; temp.isoc_frame = 0; temp.trb_type = XHCI_TRB_TYPE_NORMAL; } if (x != xfer->nframes) { /* set up page_cache pointer */ temp.pc = xfer->frbuffers + x; /* set endpoint direction */ temp.direction = UE_GET_DIR(xfer->endpointno); } while (x != xfer->nframes) { /* DATA0 / DATA1 message */ temp.len = xfer->frlengths[x]; temp.step_td = ((xfer->endpointno & UE_DIR_IN) && x != 0 && temp.multishort == 0); x++; if (x == xfer->nframes) { if (xfer->flags_int.control_xfr) { /* no STATUS stage yet, DATA is last */ if (xfer->flags_int.control_act) temp.last_frame = 1; } else { temp.last_frame = 1; } } if (temp.len == 0) { /* make sure that we send an USB packet */ temp.shortpkt = 0; temp.tbc = 0; temp.tlbpc = mult - 1; } else if (xfer->flags_int.isochronous_xfr) { uint8_t tdpc; /* * Isochronous transfers don't have short * packet termination: */ temp.shortpkt = 1; /* isochronous transfers have a transfer limit */ if (temp.len > xfer->max_frame_size) temp.len = xfer->max_frame_size; /* compute TD packet count */ - tdpc = (temp.len + xfer->max_packet_size - 1) / - xfer->max_packet_size; + tdpc = howmany(temp.len, xfer->max_packet_size); - temp.tbc = ((tdpc + mult - 1) / mult) - 1; + temp.tbc = howmany(tdpc, mult) - 1; temp.tlbpc = (tdpc % mult); if (temp.tlbpc == 0) temp.tlbpc = mult - 1; else temp.tlbpc--; } else { /* regular data transfer */ temp.shortpkt = xfer->flags.force_short_xfer ? 0 : 1; } xhci_setup_generic_chain_sub(&temp); if (xfer->flags_int.isochronous_xfr) { temp.offset += xfer->frlengths[x - 1]; temp.isoc_frame += temp.isoc_delta; } else { /* get next Page Cache pointer */ temp.pc = xfer->frbuffers + x; } } /* check if we should append a status stage */ if (xfer->flags_int.control_xfr && !xfer->flags_int.control_act) { /* * Send a DATA1 message and invert the current * endpoint direction. */ temp.step_td = (xfer->nframes != 0); temp.direction = UE_GET_DIR(xfer->endpointno) ^ UE_DIR_IN; temp.len = 0; temp.pc = NULL; temp.shortpkt = 0; temp.last_frame = 1; temp.trb_type = XHCI_TRB_TYPE_STATUS_STAGE; xhci_setup_generic_chain_sub(&temp); } td = temp.td; /* must have at least one frame! 
*/ xfer->td_transfer_last = td; DPRINTF("first=%p last=%p\n", xfer->td_transfer_first, td); } static void xhci_set_slot_pointer(struct xhci_softc *sc, uint8_t index, uint64_t dev_addr) { struct usb_page_search buf_res; struct xhci_dev_ctx_addr *pdctxa; usbd_get_page(&sc->sc_hw.ctx_pc, 0, &buf_res); pdctxa = buf_res.buffer; DPRINTF("addr[%u]=0x%016llx\n", index, (long long)dev_addr); pdctxa->qwBaaDevCtxAddr[index] = htole64(dev_addr); usb_pc_cpu_flush(&sc->sc_hw.ctx_pc); } static usb_error_t xhci_configure_mask(struct usb_device *udev, uint32_t mask, uint8_t drop) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); struct usb_page_search buf_inp; struct xhci_input_dev_ctx *pinp; uint32_t temp; uint8_t index; uint8_t x; index = udev->controller_slot_id; usbd_get_page(&sc->sc_hw.devs[index].input_pc, 0, &buf_inp); pinp = buf_inp.buffer; if (drop) { mask &= XHCI_INCTX_NON_CTRL_MASK; xhci_ctx_set_le32(sc, &pinp->ctx_input.dwInCtx0, mask); xhci_ctx_set_le32(sc, &pinp->ctx_input.dwInCtx1, 0); } else { /* * Some hardware requires that we drop the endpoint * context before adding it again: */ xhci_ctx_set_le32(sc, &pinp->ctx_input.dwInCtx0, mask & XHCI_INCTX_NON_CTRL_MASK); /* Add new endpoint context */ xhci_ctx_set_le32(sc, &pinp->ctx_input.dwInCtx1, mask); /* find most significant set bit */ for (x = 31; x != 1; x--) { if (mask & (1 << x)) break; } /* adjust */ x--; /* figure out the maximum number of contexts */ if (x > sc->sc_hw.devs[index].context_num) sc->sc_hw.devs[index].context_num = x; else x = sc->sc_hw.devs[index].context_num; /* update number of contexts */ temp = xhci_ctx_get_le32(sc, &pinp->ctx_slot.dwSctx0); temp &= ~XHCI_SCTX_0_CTX_NUM_SET(31); temp |= XHCI_SCTX_0_CTX_NUM_SET(x + 1); xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx0, temp); } usb_pc_cpu_flush(&sc->sc_hw.devs[index].input_pc); return (0); } static usb_error_t xhci_configure_endpoint(struct usb_device *udev, struct usb_endpoint_descriptor *edesc, struct xhci_endpoint_ext *pepext, uint16_t interval, uint8_t max_packet_count, uint8_t mult, uint8_t fps_shift, uint16_t max_packet_size, uint16_t max_frame_size, uint8_t ep_mode) { struct usb_page_search buf_inp; struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); struct xhci_input_dev_ctx *pinp; uint64_t ring_addr = pepext->physaddr; uint32_t temp; uint8_t index; uint8_t epno; uint8_t type; index = udev->controller_slot_id; usbd_get_page(&sc->sc_hw.devs[index].input_pc, 0, &buf_inp); pinp = buf_inp.buffer; epno = edesc->bEndpointAddress; type = edesc->bmAttributes & UE_XFERTYPE; if (type == UE_CONTROL) epno |= UE_DIR_IN; epno = XHCI_EPNO2EPID(epno); if (epno == 0) return (USB_ERR_NO_PIPE); /* invalid */ if (max_packet_count == 0) return (USB_ERR_BAD_BUFSIZE); max_packet_count--; if (mult == 0) return (USB_ERR_BAD_BUFSIZE); /* store endpoint mode */ pepext->trb_ep_mode = ep_mode; /* store bMaxPacketSize for control endpoints */ pepext->trb_ep_maxp = edesc->wMaxPacketSize[0]; usb_pc_cpu_flush(pepext->page_cache); if (ep_mode == USB_EP_MODE_STREAMS) { temp = XHCI_EPCTX_0_EPSTATE_SET(0) | XHCI_EPCTX_0_MAXP_STREAMS_SET(XHCI_MAX_STREAMS_LOG - 1) | XHCI_EPCTX_0_LSA_SET(1); ring_addr += sizeof(struct xhci_trb) * XHCI_MAX_TRANSFERS * XHCI_MAX_STREAMS; } else { temp = XHCI_EPCTX_0_EPSTATE_SET(0) | XHCI_EPCTX_0_MAXP_STREAMS_SET(0) | XHCI_EPCTX_0_LSA_SET(0); ring_addr |= XHCI_EPCTX_2_DCS_SET(1); } switch (udev->speed) { case USB_SPEED_FULL: case USB_SPEED_LOW: /* 1ms -> 125us */ fps_shift += 3; break; default: break; } switch (type) { case UE_INTERRUPT: if (fps_shift > 3) fps_shift--; temp |= 
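/*
 * The two hunks above replace open-coded round-up division with the
 * howmany() macro from <sys/param.h>.  The generated code is
 * unchanged, because howmany(x, y) expands to (((x) + ((y) - 1)) / (y)).
 * Worked values, assuming CTASSERT() from <sys/systm.h>:
 */
#if 0
	CTASSERT(howmany(3000, 1024) == 3);	/* partial last packet */
	CTASSERT(howmany(2048, 1024) == 2);	/* exact multiple */
	CTASSERT(howmany(1, 1024) == 1);	/* always rounds up */
#endif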
XHCI_EPCTX_0_IVAL_SET(fps_shift); break; case UE_ISOCHRONOUS: temp |= XHCI_EPCTX_0_IVAL_SET(fps_shift); switch (udev->speed) { case USB_SPEED_SUPER: if (mult > 3) mult = 3; temp |= XHCI_EPCTX_0_MULT_SET(mult - 1); max_packet_count /= mult; break; default: break; } break; default: break; } xhci_ctx_set_le32(sc, &pinp->ctx_ep[epno - 1].dwEpCtx0, temp); temp = XHCI_EPCTX_1_HID_SET(0) | XHCI_EPCTX_1_MAXB_SET(max_packet_count) | XHCI_EPCTX_1_MAXP_SIZE_SET(max_packet_size); /* * Always enable the "three strikes and you are gone" feature * except for ISOCHRONOUS endpoints. This is suggested by * section 4.3.3 in the XHCI specification about device slot * initialisation. */ if (type != UE_ISOCHRONOUS) temp |= XHCI_EPCTX_1_CERR_SET(3); switch (type) { case UE_CONTROL: temp |= XHCI_EPCTX_1_EPTYPE_SET(4); break; case UE_ISOCHRONOUS: temp |= XHCI_EPCTX_1_EPTYPE_SET(1); break; case UE_BULK: temp |= XHCI_EPCTX_1_EPTYPE_SET(2); break; default: temp |= XHCI_EPCTX_1_EPTYPE_SET(3); break; } /* check for IN direction */ if (epno & 1) temp |= XHCI_EPCTX_1_EPTYPE_SET(4); xhci_ctx_set_le32(sc, &pinp->ctx_ep[epno - 1].dwEpCtx1, temp); xhci_ctx_set_le64(sc, &pinp->ctx_ep[epno - 1].qwEpCtx2, ring_addr); switch (edesc->bmAttributes & UE_XFERTYPE) { case UE_INTERRUPT: case UE_ISOCHRONOUS: temp = XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(max_frame_size) | XHCI_EPCTX_4_AVG_TRB_LEN_SET(MIN(XHCI_PAGE_SIZE, max_frame_size)); break; case UE_CONTROL: temp = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); break; default: temp = XHCI_EPCTX_4_AVG_TRB_LEN_SET(XHCI_PAGE_SIZE); break; } xhci_ctx_set_le32(sc, &pinp->ctx_ep[epno - 1].dwEpCtx4, temp); #ifdef USB_DEBUG xhci_dump_endpoint(sc, &pinp->ctx_ep[epno - 1]); #endif usb_pc_cpu_flush(&sc->sc_hw.devs[index].input_pc); return (0); /* success */ } static usb_error_t xhci_configure_endpoint_by_xfer(struct usb_xfer *xfer) { struct xhci_endpoint_ext *pepext; struct usb_endpoint_ss_comp_descriptor *ecomp; usb_stream_t x; pepext = xhci_get_endpoint_ext(xfer->xroot->udev, xfer->endpoint->edesc); ecomp = xfer->endpoint->ecomp; for (x = 0; x != XHCI_MAX_STREAMS; x++) { uint64_t temp; /* halt any transfers */ pepext->trb[x * XHCI_MAX_TRANSFERS].dwTrb3 = 0; /* compute start of TRB ring for stream "x" */ temp = pepext->physaddr + (x * XHCI_MAX_TRANSFERS * sizeof(struct xhci_trb)) + XHCI_SCTX_0_SCT_SEC_TR_RING; /* make tree structure */ pepext->trb[(XHCI_MAX_TRANSFERS * XHCI_MAX_STREAMS) + x].qwTrb0 = htole64(temp); /* reserved fields */ pepext->trb[(XHCI_MAX_TRANSFERS * XHCI_MAX_STREAMS) + x].dwTrb2 = 0; pepext->trb[(XHCI_MAX_TRANSFERS * XHCI_MAX_STREAMS) + x].dwTrb3 = 0; } usb_pc_cpu_flush(pepext->page_cache); return (xhci_configure_endpoint(xfer->xroot->udev, xfer->endpoint->edesc, pepext, xfer->interval, xfer->max_packet_count, (ecomp != NULL) ? 
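/*
 * The burst multiplier passed below mirrors the logic in
 * xhci_setup_generic_chain() above: SuperSpeed endpoints carry it in
 * the companion descriptor (UE_GET_SS_ISO_MULT), while high-speed
 * isochronous endpoints encode "additional transactions per
 * microframe" in bits 12:11 of wMaxPacketSize.  Sketch of the
 * USB 2.0 case, ignoring the clamping of reserved values:
 */
#if 0
	uint8_t mult;

	/* 0..2 additional transactions map to a multiplier of 1..3 */
	mult = ((edesc->wMaxPacketSize[1] >> 3) & 3) + 1;
#endif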
UE_GET_SS_ISO_MULT(ecomp->bmAttributes) + 1 : 1, usbd_xfer_get_fps_shift(xfer), xfer->max_packet_size, xfer->max_frame_size, xfer->endpoint->ep_mode)); } static usb_error_t xhci_configure_device(struct usb_device *udev) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); struct usb_page_search buf_inp; struct usb_page_cache *pcinp; struct xhci_input_dev_ctx *pinp; struct usb_device *hubdev; uint32_t temp; uint32_t route; uint32_t rh_port; uint8_t is_hub; uint8_t index; uint8_t depth; index = udev->controller_slot_id; DPRINTF("index=%u\n", index); pcinp = &sc->sc_hw.devs[index].input_pc; usbd_get_page(pcinp, 0, &buf_inp); pinp = buf_inp.buffer; rh_port = 0; route = 0; /* figure out route string and root HUB port number */ for (hubdev = udev; hubdev != NULL; hubdev = hubdev->parent_hub) { if (hubdev->parent_hub == NULL) break; depth = hubdev->parent_hub->depth; /* * NOTE: HS/FS/LS devices and the SS root HUB can have * more than 15 ports */ rh_port = hubdev->port_no; if (depth == 0) break; if (rh_port > 15) rh_port = 15; if (depth < 6) route |= rh_port << (4 * (depth - 1)); } DPRINTF("Route=0x%08x\n", route); temp = XHCI_SCTX_0_ROUTE_SET(route) | XHCI_SCTX_0_CTX_NUM_SET( sc->sc_hw.devs[index].context_num + 1); switch (udev->speed) { case USB_SPEED_LOW: temp |= XHCI_SCTX_0_SPEED_SET(2); if (udev->parent_hs_hub != NULL && udev->parent_hs_hub->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT) { DPRINTF("Device inherits MTT\n"); temp |= XHCI_SCTX_0_MTT_SET(1); } break; case USB_SPEED_HIGH: temp |= XHCI_SCTX_0_SPEED_SET(3); if (sc->sc_hw.devs[index].nports != 0 && udev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT) { DPRINTF("HUB supports MTT\n"); temp |= XHCI_SCTX_0_MTT_SET(1); } break; case USB_SPEED_FULL: temp |= XHCI_SCTX_0_SPEED_SET(1); if (udev->parent_hs_hub != NULL && udev->parent_hs_hub->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT) { DPRINTF("Device inherits MTT\n"); temp |= XHCI_SCTX_0_MTT_SET(1); } break; default: temp |= XHCI_SCTX_0_SPEED_SET(4); break; } is_hub = sc->sc_hw.devs[index].nports != 0 && (udev->speed == USB_SPEED_SUPER || udev->speed == USB_SPEED_HIGH); if (is_hub) temp |= XHCI_SCTX_0_HUB_SET(1); xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx0, temp); temp = XHCI_SCTX_1_RH_PORT_SET(rh_port); if (is_hub) { temp |= XHCI_SCTX_1_NUM_PORTS_SET( sc->sc_hw.devs[index].nports); } switch (udev->speed) { case USB_SPEED_SUPER: switch (sc->sc_hw.devs[index].state) { case XHCI_ST_ADDRESSED: case XHCI_ST_CONFIGURED: /* enable power save */ temp |= XHCI_SCTX_1_MAX_EL_SET(sc->sc_exit_lat_max); break; default: /* disable power save */ break; } break; default: break; } xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx1, temp); temp = XHCI_SCTX_2_IRQ_TARGET_SET(0); if (is_hub) { temp |= XHCI_SCTX_2_TT_THINK_TIME_SET( sc->sc_hw.devs[index].tt); } hubdev = udev->parent_hs_hub; /* check if we should activate the transaction translator */ switch (udev->speed) { case USB_SPEED_FULL: case USB_SPEED_LOW: if (hubdev != NULL) { temp |= XHCI_SCTX_2_TT_HUB_SID_SET( hubdev->controller_slot_id); temp |= XHCI_SCTX_2_TT_PORT_NUM_SET( udev->hs_port_no); } break; default: break; } xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx2, temp); /* * These fields should be initialized to zero, according to * XHCI section 6.2.2 - slot context: */ temp = XHCI_SCTX_3_DEV_ADDR_SET(0) | XHCI_SCTX_3_SLOT_STATE_SET(0); xhci_ctx_set_le32(sc, &pinp->ctx_slot.dwSctx3, temp); #ifdef USB_DEBUG xhci_dump_device(sc, &pinp->ctx_slot); #endif usb_pc_cpu_flush(pcinp); return (0); /* success */ } static usb_error_t xhci_alloc_device_ext(struct usb_device 
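/*
 * Worked example for the route string built in xhci_configure_device()
 * above: a device on port 3 of an external hub that itself sits on
 * root-hub port 2 yields rh_port = 2 and route = 0x3.  One nibble is
 * used per hub tier starting at the root, at most five tiers are
 * encoded (depth < 6), and port numbers above 15 are clamped to 15.
 */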
*udev) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); struct usb_page_search buf_dev; struct usb_page_search buf_ep; struct xhci_trb *trb; struct usb_page_cache *pc; struct usb_page *pg; uint64_t addr; uint8_t index; uint8_t i; index = udev->controller_slot_id; pc = &sc->sc_hw.devs[index].device_pc; pg = &sc->sc_hw.devs[index].device_pg; /* need to initialize the page cache */ pc->tag_parent = sc->sc_bus.dma_parent_tag; if (usb_pc_alloc_mem(pc, pg, sc->sc_ctx_is_64_byte ? (2 * sizeof(struct xhci_dev_ctx)) : sizeof(struct xhci_dev_ctx), XHCI_PAGE_SIZE)) goto error; usbd_get_page(pc, 0, &buf_dev); pc = &sc->sc_hw.devs[index].input_pc; pg = &sc->sc_hw.devs[index].input_pg; /* need to initialize the page cache */ pc->tag_parent = sc->sc_bus.dma_parent_tag; if (usb_pc_alloc_mem(pc, pg, sc->sc_ctx_is_64_byte ? (2 * sizeof(struct xhci_input_dev_ctx)) : sizeof(struct xhci_input_dev_ctx), XHCI_PAGE_SIZE)) { goto error; } /* initialize all endpoint LINK TRBs */ for (i = 0; i != XHCI_MAX_ENDPOINTS; i++) { pc = &sc->sc_hw.devs[index].endpoint_pc[i]; pg = &sc->sc_hw.devs[index].endpoint_pg[i]; /* need to initialize the page cache */ pc->tag_parent = sc->sc_bus.dma_parent_tag; if (usb_pc_alloc_mem(pc, pg, sizeof(struct xhci_dev_endpoint_trbs), XHCI_TRB_ALIGN)) { goto error; } /* lookup endpoint TRB ring */ usbd_get_page(pc, 0, &buf_ep); /* get TRB pointer */ trb = buf_ep.buffer; trb += XHCI_MAX_TRANSFERS - 1; /* get TRB start address */ addr = buf_ep.physaddr; /* create LINK TRB */ trb->qwTrb0 = htole64(addr); trb->dwTrb2 = htole32(XHCI_TRB_2_IRQ_SET(0)); trb->dwTrb3 = htole32(XHCI_TRB_3_CYCLE_BIT | XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK)); usb_pc_cpu_flush(pc); } xhci_set_slot_pointer(sc, index, buf_dev.physaddr); return (0); error: xhci_free_device_ext(udev); return (USB_ERR_NOMEM); } static void xhci_free_device_ext(struct usb_device *udev) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); uint8_t index; uint8_t i; index = udev->controller_slot_id; xhci_set_slot_pointer(sc, index, 0); usb_pc_free_mem(&sc->sc_hw.devs[index].device_pc); usb_pc_free_mem(&sc->sc_hw.devs[index].input_pc); for (i = 0; i != XHCI_MAX_ENDPOINTS; i++) usb_pc_free_mem(&sc->sc_hw.devs[index].endpoint_pc[i]); } static struct xhci_endpoint_ext * xhci_get_endpoint_ext(struct usb_device *udev, struct usb_endpoint_descriptor *edesc) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); struct xhci_endpoint_ext *pepext; struct usb_page_cache *pc; struct usb_page_search buf_ep; uint8_t epno; uint8_t index; epno = edesc->bEndpointAddress; if ((edesc->bmAttributes & UE_XFERTYPE) == UE_CONTROL) epno |= UE_DIR_IN; epno = XHCI_EPNO2EPID(epno); index = udev->controller_slot_id; pc = &sc->sc_hw.devs[index].endpoint_pc[epno]; usbd_get_page(pc, 0, &buf_ep); pepext = &sc->sc_hw.devs[index].endp[epno]; pepext->page_cache = pc; pepext->trb = buf_ep.buffer; pepext->physaddr = buf_ep.physaddr; return (pepext); } static void xhci_endpoint_doorbell(struct usb_xfer *xfer) { struct xhci_softc *sc = XHCI_BUS2SC(xfer->xroot->bus); uint8_t epno; uint8_t index; epno = xfer->endpointno; if (xfer->flags_int.control_xfr) epno |= UE_DIR_IN; epno = XHCI_EPNO2EPID(epno); index = xfer->xroot->udev->controller_slot_id; if (xfer->xroot->udev->flags.self_suspended == 0) { XWRITE4(sc, door, XHCI_DOORBELL(index), epno | XHCI_DB_SID_SET(xfer->stream_id)); } } static void xhci_transfer_remove(struct usb_xfer *xfer, usb_error_t error) { struct xhci_endpoint_ext *pepext; if (xfer->flags_int.bandwidth_reclaimed) { xfer->flags_int.bandwidth_reclaimed = 0; pepext = 
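/*
 * Per-endpoint ring layout created in xhci_alloc_device_ext() above
 * (repeated once per stream):
 *
 *	trb[0 .. XHCI_MAX_TRANSFERS - 2]	transfer TRBs
 *	trb[XHCI_MAX_TRANSFERS - 1]		LINK TRB back to trb[0]
 *
 * The hardcoded trailing LINK TRB makes the ring circular, which is
 * why the producer in xhci_transfer_insert() below wraps its index
 * at XHCI_MAX_TRANSFERS - 1.
 */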
xhci_get_endpoint_ext(xfer->xroot->udev, xfer->endpoint->edesc); pepext->trb_used[xfer->stream_id]--; pepext->xfer[xfer->qh_pos] = NULL; if (error && pepext->trb_running != 0) { pepext->trb_halted = 1; pepext->trb_running = 0; } } } static usb_error_t xhci_transfer_insert(struct usb_xfer *xfer) { struct xhci_td *td_first; struct xhci_td *td_last; struct xhci_trb *trb_link; struct xhci_endpoint_ext *pepext; uint64_t addr; usb_stream_t id; uint8_t i; uint8_t inext; uint8_t trb_limit; DPRINTFN(8, "\n"); id = xfer->stream_id; /* check if already inserted */ if (xfer->flags_int.bandwidth_reclaimed) { DPRINTFN(8, "Already in schedule\n"); return (0); } pepext = xhci_get_endpoint_ext(xfer->xroot->udev, xfer->endpoint->edesc); td_first = xfer->td_transfer_first; td_last = xfer->td_transfer_last; addr = pepext->physaddr; switch (xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE) { case UE_CONTROL: case UE_INTERRUPT: /* single buffered */ trb_limit = 1; break; default: /* multi buffered */ trb_limit = (XHCI_MAX_TRANSFERS - 2); break; } if (pepext->trb_used[id] >= trb_limit) { DPRINTFN(8, "Too many TDs queued.\n"); return (USB_ERR_NOMEM); } /* check if bMaxPacketSize changed */ if (xfer->flags_int.control_xfr != 0 && pepext->trb_ep_maxp != xfer->endpoint->edesc->wMaxPacketSize[0]) { DPRINTFN(8, "Reconfigure control endpoint\n"); /* force driver to reconfigure endpoint */ pepext->trb_halted = 1; pepext->trb_running = 0; } /* check for stopped condition, after putting transfer on interrupt queue */ if (pepext->trb_running == 0) { struct xhci_softc *sc = XHCI_BUS2SC(xfer->xroot->bus); DPRINTFN(8, "Not running\n"); /* start configuration */ (void)usb_proc_msignal(USB_BUS_CONTROL_XFER_PROC(&sc->sc_bus), &sc->sc_config_msg[0], &sc->sc_config_msg[1]); return (0); } pepext->trb_used[id]++; /* get current TRB index */ i = pepext->trb_index[id]; /* get next TRB index */ inext = (i + 1); /* the last entry of the ring is a hardcoded link TRB */ if (inext >= (XHCI_MAX_TRANSFERS - 1)) inext = 0; /* store next TRB index, before stream ID offset is added */ pepext->trb_index[id] = inext; /* offset for stream */ i += id * XHCI_MAX_TRANSFERS; inext += id * XHCI_MAX_TRANSFERS; /* compute terminating return address */ addr += (inext * sizeof(struct xhci_trb)); /* compute link TRB pointer */ trb_link = td_last->td_trb + td_last->ntrb; /* update next pointer of last link TRB */ trb_link->qwTrb0 = htole64(addr); trb_link->dwTrb2 = htole32(XHCI_TRB_2_IRQ_SET(0)); trb_link->dwTrb3 = htole32(XHCI_TRB_3_IOC_BIT | XHCI_TRB_3_CYCLE_BIT | XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK)); #ifdef USB_DEBUG xhci_dump_trb(&td_last->td_trb[td_last->ntrb]); #endif usb_pc_cpu_flush(td_last->page_cache); /* write ahead chain end marker */ pepext->trb[inext].qwTrb0 = 0; pepext->trb[inext].dwTrb2 = 0; pepext->trb[inext].dwTrb3 = 0; /* update next pointer of link TRB */ pepext->trb[i].qwTrb0 = htole64((uint64_t)td_first->td_self); pepext->trb[i].dwTrb2 = htole32(XHCI_TRB_2_IRQ_SET(0)); #ifdef USB_DEBUG xhci_dump_trb(&pepext->trb[i]); #endif usb_pc_cpu_flush(pepext->page_cache); /* toggle cycle bit which activates the transfer chain */ pepext->trb[i].dwTrb3 = htole32(XHCI_TRB_3_CYCLE_BIT | XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK)); usb_pc_cpu_flush(pepext->page_cache); DPRINTF("qh_pos = %u\n", i); pepext->xfer[i] = xfer; xfer->qh_pos = i; xfer->flags_int.bandwidth_reclaimed = 1; xhci_endpoint_doorbell(xfer); return (0); } static void xhci_root_intr(struct xhci_softc *sc) { uint16_t i; USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); /* clear any old 
interrupt data */ memset(sc->sc_hub_idata, 0, sizeof(sc->sc_hub_idata)); for (i = 1; i <= sc->sc_noport; i++) { /* pick out CHANGE bits from the status register */ if (XREAD4(sc, oper, XHCI_PORTSC(i)) & ( XHCI_PS_CSC | XHCI_PS_PEC | XHCI_PS_OCC | XHCI_PS_WRC | XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC)) { sc->sc_hub_idata[i / 8] |= 1 << (i % 8); DPRINTF("port %d changed\n", i); } } uhub_root_intr(&sc->sc_bus, sc->sc_hub_idata, sizeof(sc->sc_hub_idata)); } /*------------------------------------------------------------------------* * xhci_device_done - XHCI done handler * * NOTE: This function can be called two times in a row on * the same USB transfer. From close and from interrupt. *------------------------------------------------------------------------*/ static void xhci_device_done(struct usb_xfer *xfer, usb_error_t error) { DPRINTFN(2, "xfer=%p, endpoint=%p, error=%d\n", xfer, xfer->endpoint, error); /* remove transfer from HW queue */ xhci_transfer_remove(xfer, error); /* dequeue transfer and start next transfer */ usbd_transfer_done(xfer, error); } /*------------------------------------------------------------------------* * XHCI data transfer support (generic type) *------------------------------------------------------------------------*/ static void xhci_device_generic_open(struct usb_xfer *xfer) { if (xfer->flags_int.isochronous_xfr) { switch (xfer->xroot->udev->speed) { case USB_SPEED_FULL: break; default: usb_hs_bandwidth_alloc(xfer); break; } } } static void xhci_device_generic_close(struct usb_xfer *xfer) { DPRINTF("\n"); xhci_device_done(xfer, USB_ERR_CANCELLED); if (xfer->flags_int.isochronous_xfr) { switch (xfer->xroot->udev->speed) { case USB_SPEED_FULL: break; default: usb_hs_bandwidth_free(xfer); break; } } } static void xhci_device_generic_multi_enter(struct usb_endpoint *ep, usb_stream_t stream_id, struct usb_xfer *enter_xfer) { struct usb_xfer *xfer; /* check if there is a current transfer */ xfer = ep->endpoint_q[stream_id].curr; if (xfer == NULL) return; /* * Check if the current transfer is started and then pickup * the next one, if any. Else wait for next start event due to * block on failure feature. */ if (!xfer->flags_int.bandwidth_reclaimed) return; xfer = TAILQ_FIRST(&ep->endpoint_q[stream_id].head); if (xfer == NULL) { /* * In case of enter we have to consider that the * transfer is queued by the USB core after the enter * method is called. 
*/ xfer = enter_xfer; if (xfer == NULL) return; } /* try to multi buffer */ xhci_transfer_insert(xfer); } static void xhci_device_generic_enter(struct usb_xfer *xfer) { DPRINTF("\n"); /* set up TD's and QH */ xhci_setup_generic_chain(xfer); xhci_device_generic_multi_enter(xfer->endpoint, xfer->stream_id, xfer); } static void xhci_device_generic_start(struct usb_xfer *xfer) { DPRINTF("\n"); /* try to insert xfer on HW queue */ xhci_transfer_insert(xfer); /* try to multi buffer */ xhci_device_generic_multi_enter(xfer->endpoint, xfer->stream_id, NULL); /* add transfer last on interrupt queue */ usbd_transfer_enqueue(&xfer->xroot->bus->intr_q, xfer); /* start timeout, if any */ if (xfer->timeout != 0) usbd_transfer_timeout_ms(xfer, &xhci_timeout, xfer->timeout); } static const struct usb_pipe_methods xhci_device_generic_methods = { .open = xhci_device_generic_open, .close = xhci_device_generic_close, .enter = xhci_device_generic_enter, .start = xhci_device_generic_start, }; /*------------------------------------------------------------------------* * xhci root HUB support *------------------------------------------------------------------------* * Simulate a hardware HUB by handling all the necessary requests. *------------------------------------------------------------------------*/ #define HSETW(ptr, val) ptr = { (uint8_t)(val), (uint8_t)((val) >> 8) } static const struct usb_device_descriptor xhci_devd = { .bLength = sizeof(xhci_devd), .bDescriptorType = UDESC_DEVICE, /* type */ HSETW(.bcdUSB, 0x0300), /* USB version */ .bDeviceClass = UDCLASS_HUB, /* class */ .bDeviceSubClass = UDSUBCLASS_HUB, /* subclass */ .bDeviceProtocol = UDPROTO_SSHUB, /* protocol */ .bMaxPacketSize = 9, /* max packet size */ HSETW(.idVendor, 0x0000), /* vendor */ HSETW(.idProduct, 0x0000), /* product */ HSETW(.bcdDevice, 0x0100), /* device version */ .iManufacturer = 1, .iProduct = 2, .iSerialNumber = 0, .bNumConfigurations = 1, /* # of configurations */ }; static const struct xhci_bos_desc xhci_bosd = { .bosd = { .bLength = sizeof(xhci_bosd.bosd), .bDescriptorType = UDESC_BOS, HSETW(.wTotalLength, sizeof(xhci_bosd)), .bNumDeviceCaps = 3, }, .usb2extd = { .bLength = sizeof(xhci_bosd.usb2extd), .bDescriptorType = 1, .bDevCapabilityType = 2, .bmAttributes[0] = 2, }, .usbdcd = { .bLength = sizeof(xhci_bosd.usbdcd), .bDescriptorType = UDESC_DEVICE_CAPABILITY, .bDevCapabilityType = 3, .bmAttributes = 0, /* XXX */ HSETW(.wSpeedsSupported, 0x000C), .bFunctionalitySupport = 8, .bU1DevExitLat = 255, /* dummy - not used */ .wU2DevExitLat = { 0x00, 0x08 }, }, .cidd = { .bLength = sizeof(xhci_bosd.cidd), .bDescriptorType = 1, .bDevCapabilityType = 4, .bReserved = 0, .bContainerID = 0, /* XXX */ }, }; static const struct xhci_config_desc xhci_confd = { .confd = { .bLength = sizeof(xhci_confd.confd), .bDescriptorType = UDESC_CONFIG, .wTotalLength[0] = sizeof(xhci_confd), .bNumInterface = 1, .bConfigurationValue = 1, .iConfiguration = 0, .bmAttributes = UC_SELF_POWERED, .bMaxPower = 0 /* max power */ }, .ifcd = { .bLength = sizeof(xhci_confd.ifcd), .bDescriptorType = UDESC_INTERFACE, .bNumEndpoints = 1, .bInterfaceClass = UICLASS_HUB, .bInterfaceSubClass = UISUBCLASS_HUB, .bInterfaceProtocol = 0, }, .endpd = { .bLength = sizeof(xhci_confd.endpd), .bDescriptorType = UDESC_ENDPOINT, .bEndpointAddress = UE_DIR_IN | XHCI_INTR_ENDPT, .bmAttributes = UE_INTERRUPT, .wMaxPacketSize[0] = 2, /* max 15 ports */ .bInterval = 255, }, .endpcd = { .bLength = sizeof(xhci_confd.endpcd), .bDescriptorType = UDESC_ENDPOINT_SS_COMP, .bMaxBurst = 0, 
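/*
 * The interrupt endpoint descriptor above advertises
 * wMaxPacketSize = 2, which bounds the simulated root hub at 15
 * ports: the status-change bitmap uses bit 0 for the hub itself and
 * one bit per port, so howmany(15 + 1, 8) == 2 bytes are enough
 * (compare the bit indexing in xhci_root_intr() above).
 */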
.bmAttributes = 0, }, }; static const struct usb_hub_ss_descriptor xhci_hubd = { .bLength = sizeof(xhci_hubd), .bDescriptorType = UDESC_SS_HUB, }; static usb_error_t xhci_roothub_exec(struct usb_device *udev, struct usb_device_request *req, const void **pptr, uint16_t *plength) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); const char *str_ptr; const void *ptr; uint32_t port; uint32_t v; uint16_t len; uint16_t i; uint16_t value; uint16_t index; uint8_t j; usb_error_t err; USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED); /* buffer reset */ ptr = (const void *)&sc->sc_hub_desc; len = 0; err = 0; value = UGETW(req->wValue); index = UGETW(req->wIndex); DPRINTFN(3, "type=0x%02x request=0x%02x wLen=0x%04x " "wValue=0x%04x wIndex=0x%04x\n", req->bmRequestType, req->bRequest, UGETW(req->wLength), value, index); #define C(x,y) ((x) | ((y) << 8)) switch (C(req->bRequest, req->bmRequestType)) { case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE): case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE): case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT): /* * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops * for the integrated root hub. */ break; case C(UR_GET_CONFIG, UT_READ_DEVICE): len = 1; sc->sc_hub_desc.temp[0] = sc->sc_conf; break; case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): switch (value >> 8) { case UDESC_DEVICE: if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } len = sizeof(xhci_devd); ptr = (const void *)&xhci_devd; break; case UDESC_BOS: if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } len = sizeof(xhci_bosd); ptr = (const void *)&xhci_bosd; break; case UDESC_CONFIG: if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } len = sizeof(xhci_confd); ptr = (const void *)&xhci_confd; break; case UDESC_STRING: switch (value & 0xff) { case 0: /* Language table */ str_ptr = "\001"; break; case 1: /* Vendor */ str_ptr = sc->sc_vendor; break; case 2: /* Product */ str_ptr = "XHCI root HUB"; break; default: str_ptr = ""; break; } len = usb_make_str_desc( sc->sc_hub_desc.temp, sizeof(sc->sc_hub_desc.temp), str_ptr); break; default: err = USB_ERR_IOERROR; goto done; } break; case C(UR_GET_INTERFACE, UT_READ_INTERFACE): len = 1; sc->sc_hub_desc.temp[0] = 0; break; case C(UR_GET_STATUS, UT_READ_DEVICE): len = 2; USETW(sc->sc_hub_desc.stat.wStatus, UDS_SELF_POWERED); break; case C(UR_GET_STATUS, UT_READ_INTERFACE): case C(UR_GET_STATUS, UT_READ_ENDPOINT): len = 2; USETW(sc->sc_hub_desc.stat.wStatus, 0); break; case C(UR_SET_ADDRESS, UT_WRITE_DEVICE): if (value >= XHCI_MAX_DEVICES) { err = USB_ERR_IOERROR; goto done; } break; case C(UR_SET_CONFIG, UT_WRITE_DEVICE): if (value != 0 && value != 1) { err = USB_ERR_IOERROR; goto done; } sc->sc_conf = value; break; case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE): break; case C(UR_SET_FEATURE, UT_WRITE_DEVICE): case C(UR_SET_FEATURE, UT_WRITE_INTERFACE): case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT): err = USB_ERR_IOERROR; goto done; case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE): break; case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT): break; /* Hub requests */ case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): break; case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): DPRINTFN(9, "UR_CLEAR_PORT_FEATURE\n"); if ((index < 1) || (index > sc->sc_noport)) { err = USB_ERR_IOERROR; goto done; } port = XHCI_PORTSC(index); v = XREAD4(sc, oper, port); i = XHCI_PS_PLS_GET(v); v &= ~XHCI_PS_CLEAR; switch (value) { case UHF_C_BH_PORT_RESET: XWRITE4(sc, oper, port, v | XHCI_PS_WRC); break; case UHF_C_PORT_CONFIG_ERROR: XWRITE4(sc, oper, port, v | XHCI_PS_CEC); break; case UHF_C_PORT_SUSPEND: 
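/*
 * Pattern used by every "clear change" feature in this switch: the
 * XHCI_PS_* change bits are write-1-to-clear, so the value read from
 * PORTSC is first masked with ~XHCI_PS_CLEAR to avoid acknowledging
 * unrelated change bits, and only the requested bit is written back.
 * Sketch for the connect-status change:
 */
#if 0
	v = XREAD4(sc, oper, port) & ~XHCI_PS_CLEAR;
	XWRITE4(sc, oper, port, v | XHCI_PS_CSC);
#endif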
case UHF_C_PORT_LINK_STATE: XWRITE4(sc, oper, port, v | XHCI_PS_PLC); break; case UHF_C_PORT_CONNECTION: XWRITE4(sc, oper, port, v | XHCI_PS_CSC); break; case UHF_C_PORT_ENABLE: XWRITE4(sc, oper, port, v | XHCI_PS_PEC); break; case UHF_C_PORT_OVER_CURRENT: XWRITE4(sc, oper, port, v | XHCI_PS_OCC); break; case UHF_C_PORT_RESET: XWRITE4(sc, oper, port, v | XHCI_PS_PRC); break; case UHF_PORT_ENABLE: XWRITE4(sc, oper, port, v | XHCI_PS_PED); break; case UHF_PORT_POWER: XWRITE4(sc, oper, port, v & ~XHCI_PS_PP); break; case UHF_PORT_INDICATOR: XWRITE4(sc, oper, port, v & ~XHCI_PS_PIC_SET(3)); break; case UHF_PORT_SUSPEND: /* U3 -> U15 */ if (i == 3) { XWRITE4(sc, oper, port, v | XHCI_PS_PLS_SET(0xF) | XHCI_PS_LWS); } /* wait 20ms for resume sequence to complete */ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 50); /* U0 */ XWRITE4(sc, oper, port, v | XHCI_PS_PLS_SET(0) | XHCI_PS_LWS); break; default: err = USB_ERR_IOERROR; goto done; } break; case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): if ((value & 0xff) != 0) { err = USB_ERR_IOERROR; goto done; } v = XREAD4(sc, capa, XHCI_HCSPARAMS0); sc->sc_hub_desc.hubd = xhci_hubd; sc->sc_hub_desc.hubd.bNbrPorts = sc->sc_noport; if (XHCI_HCS0_PPC(v)) i = UHD_PWR_INDIVIDUAL; else i = UHD_PWR_GANGED; if (XHCI_HCS0_PIND(v)) i |= UHD_PORT_IND; i |= UHD_OC_INDIVIDUAL; USETW(sc->sc_hub_desc.hubd.wHubCharacteristics, i); /* see XHCI section 5.4.9: */ sc->sc_hub_desc.hubd.bPwrOn2PwrGood = 10; for (j = 1; j <= sc->sc_noport; j++) { v = XREAD4(sc, oper, XHCI_PORTSC(j)); if (v & XHCI_PS_DR) { sc->sc_hub_desc.hubd. DeviceRemovable[j / 8] |= 1U << (j % 8); } } len = sc->sc_hub_desc.hubd.bLength; break; case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): len = 16; memset(sc->sc_hub_desc.temp, 0, 16); break; case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): DPRINTFN(9, "UR_GET_STATUS i=%d\n", index); if ((index < 1) || (index > sc->sc_noport)) { err = USB_ERR_IOERROR; goto done; } v = XREAD4(sc, oper, XHCI_PORTSC(index)); DPRINTFN(9, "port status=0x%08x\n", v); i = UPS_PORT_LINK_STATE_SET(XHCI_PS_PLS_GET(v)); switch (XHCI_PS_SPEED_GET(v)) { case 3: i |= UPS_HIGH_SPEED; break; case 2: i |= UPS_LOW_SPEED; break; case 1: /* FULL speed */ break; default: i |= UPS_OTHER_SPEED; break; } if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS; if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED; if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR; if (v & XHCI_PS_PR) i |= UPS_RESET; if (v & XHCI_PS_PP) { /* * The USB 3.0 RH is using the * USB 2.0's power bit */ i |= UPS_PORT_POWER; } USETW(sc->sc_hub_desc.ps.wPortStatus, i); i = 0; if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS; if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED; if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR; if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET; if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET; if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE; if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR; USETW(sc->sc_hub_desc.ps.wPortChange, i); len = sizeof(sc->sc_hub_desc.ps); break; case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): err = USB_ERR_IOERROR; goto done; case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): break; case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): i = index >> 8; index &= 0x00FF; if ((index < 1) || (index > sc->sc_noport)) { err = USB_ERR_IOERROR; goto done; } port = XHCI_PORTSC(index); v = XREAD4(sc, oper, port) & ~XHCI_PS_CLEAR; switch (value) { case UHF_PORT_U1_TIMEOUT: if (XHCI_PS_SPEED_GET(v) != 4) { err = USB_ERR_IOERROR; goto done; } port = XHCI_PORTPMSC(index); v = XREAD4(sc, oper, port); v &= 
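/*
 * Protocol speed IDs as used by XHCI_PS_SPEED_GET() in this handler:
 * 1 = full speed, 2 = low speed, 3 = high speed, 4 = SuperSpeed.
 * The U1/U2 timeout features handled here are therefore rejected
 * unless the port reports speed ID 4.
 */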
~XHCI_PM3_U1TO_SET(0xFF); v |= XHCI_PM3_U1TO_SET(i); XWRITE4(sc, oper, port, v); break; case UHF_PORT_U2_TIMEOUT: if (XHCI_PS_SPEED_GET(v) != 4) { err = USB_ERR_IOERROR; goto done; } port = XHCI_PORTPMSC(index); v = XREAD4(sc, oper, port); v &= ~XHCI_PM3_U2TO_SET(0xFF); v |= XHCI_PM3_U2TO_SET(i); XWRITE4(sc, oper, port, v); break; case UHF_BH_PORT_RESET: XWRITE4(sc, oper, port, v | XHCI_PS_WPR); break; case UHF_PORT_LINK_STATE: XWRITE4(sc, oper, port, v | XHCI_PS_PLS_SET(i) | XHCI_PS_LWS); /* 4ms settle time */ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 250); break; case UHF_PORT_ENABLE: DPRINTFN(3, "set port enable %d\n", index); break; case UHF_PORT_SUSPEND: DPRINTFN(6, "suspend port %u (LPM=%u)\n", index, i); j = XHCI_PS_SPEED_GET(v); if ((j < 1) || (j > 3)) { /* non-supported speed */ err = USB_ERR_IOERROR; goto done; } XWRITE4(sc, oper, port, v | XHCI_PS_PLS_SET(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS); break; case UHF_PORT_RESET: DPRINTFN(6, "reset port %d\n", index); XWRITE4(sc, oper, port, v | XHCI_PS_PR); break; case UHF_PORT_POWER: DPRINTFN(3, "set port power %d\n", index); XWRITE4(sc, oper, port, v | XHCI_PS_PP); break; case UHF_PORT_TEST: DPRINTFN(3, "set port test %d\n", index); break; case UHF_PORT_INDICATOR: DPRINTFN(3, "set port indicator %d\n", index); v &= ~XHCI_PS_PIC_SET(3); v |= XHCI_PS_PIC_SET(1); XWRITE4(sc, oper, port, v); break; default: err = USB_ERR_IOERROR; goto done; } break; case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER): case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER): case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER): case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER): break; default: err = USB_ERR_IOERROR; goto done; } done: *plength = len; *pptr = ptr; return (err); } static void xhci_xfer_setup(struct usb_setup_params *parm) { struct usb_page_search page_info; struct usb_page_cache *pc; struct xhci_softc *sc; struct usb_xfer *xfer; void *last_obj; uint32_t ntd; uint32_t n; sc = XHCI_BUS2SC(parm->udev->bus); xfer = parm->curr_xfer; /* * The proof for the "ntd" formula is illustrated like this: * * +------------------------------------+ * | | * | |remainder -> | * | +-----+---+ | * | | xxx | x | frm 0 | * | +-----+---++ | * | | xxx | xx | frm 1 | * | +-----+----+ | * | ... | * +------------------------------------+ * * "xxx" means a completely full USB transfer descriptor * * "x" and "xx" means a short USB packet * * For the remainder of an USB transfer modulo * "max_data_length" we need two USB transfer descriptors. * One to transfer the remaining data and one to finalise with * a zero length packet in case the "force_short_xfer" flag is * set. We only need two USB transfer descriptors in the case * where the transfer length of the first one is a factor of * "max_frame_size". The rest of the needed USB transfer * descriptors is given by the buffer size divided by the * maximum data payload. 
*/ parm->hc_max_packet_size = 0x400; parm->hc_max_packet_count = 16 * 3; parm->hc_max_frame_size = XHCI_TD_PAYLOAD_MAX; xfer->flags_int.bdma_enable = 1; usbd_transfer_setup_sub(parm); if (xfer->flags_int.isochronous_xfr) { ntd = ((1 * xfer->nframes) + (xfer->max_data_length / xfer->max_hc_frame_size)); } else if (xfer->flags_int.control_xfr) { ntd = ((2 * xfer->nframes) + 1 /* STATUS */ + (xfer->max_data_length / xfer->max_hc_frame_size)); } else { ntd = ((2 * xfer->nframes) + (xfer->max_data_length / xfer->max_hc_frame_size)); } alloc_dma_set: if (parm->err) return; /* * Allocate queue heads and transfer descriptors */ last_obj = NULL; if (usbd_transfer_setup_sub_malloc( parm, &pc, sizeof(struct xhci_td), XHCI_TD_ALIGN, ntd)) { parm->err = USB_ERR_NOMEM; return; } if (parm->buf) { for (n = 0; n != ntd; n++) { struct xhci_td *td; usbd_get_page(pc + n, 0, &page_info); td = page_info.buffer; /* init TD */ td->td_self = page_info.physaddr; td->obj_next = last_obj; td->page_cache = pc + n; last_obj = td; usb_pc_cpu_flush(pc + n); } } xfer->td_start[xfer->flags_int.curr_dma_set] = last_obj; if (!xfer->flags_int.curr_dma_set) { xfer->flags_int.curr_dma_set = 1; goto alloc_dma_set; } } static usb_error_t xhci_configure_reset_endpoint(struct usb_xfer *xfer) { struct xhci_softc *sc = XHCI_BUS2SC(xfer->xroot->bus); struct usb_page_search buf_inp; struct usb_device *udev; struct xhci_endpoint_ext *pepext; struct usb_endpoint_descriptor *edesc; struct usb_page_cache *pcinp; usb_error_t err; usb_stream_t stream_id; uint8_t index; uint8_t epno; pepext = xhci_get_endpoint_ext(xfer->xroot->udev, xfer->endpoint->edesc); udev = xfer->xroot->udev; index = udev->controller_slot_id; pcinp = &sc->sc_hw.devs[index].input_pc; usbd_get_page(pcinp, 0, &buf_inp); edesc = xfer->endpoint->edesc; epno = edesc->bEndpointAddress; stream_id = xfer->stream_id; if ((edesc->bmAttributes & UE_XFERTYPE) == UE_CONTROL) epno |= UE_DIR_IN; epno = XHCI_EPNO2EPID(epno); if (epno == 0) return (USB_ERR_NO_PIPE); /* invalid */ XHCI_CMD_LOCK(sc); /* configure endpoint */ err = xhci_configure_endpoint_by_xfer(xfer); if (err != 0) { XHCI_CMD_UNLOCK(sc); return (err); } /* * Get the endpoint into the stopped state according to the * endpoint context state diagram in the XHCI specification: */ err = xhci_cmd_stop_ep(sc, 0, epno, index); if (err != 0) DPRINTF("Could not stop endpoint %u\n", epno); err = xhci_cmd_reset_ep(sc, 0, epno, index); if (err != 0) DPRINTF("Could not reset endpoint %u\n", epno); err = xhci_cmd_set_tr_dequeue_ptr(sc, (pepext->physaddr + (stream_id * sizeof(struct xhci_trb) * XHCI_MAX_TRANSFERS)) | XHCI_EPCTX_2_DCS_SET(1), stream_id, epno, index); if (err != 0) DPRINTF("Could not set dequeue ptr for endpoint %u\n", epno); /* * Get the endpoint into the running state according to the * endpoint context state diagram in the XHCI specification: */ xhci_configure_mask(udev, (1U << epno) | 1U, 0); err = xhci_cmd_evaluate_ctx(sc, buf_inp.physaddr, index); if (err != 0) DPRINTF("Could not configure endpoint %u\n", epno); err = xhci_cmd_configure_ep(sc, buf_inp.physaddr, 0, index); if (err != 0) DPRINTF("Could not configure endpoint %u\n", epno); XHCI_CMD_UNLOCK(sc); return (0); } static void xhci_xfer_unsetup(struct usb_xfer *xfer) { return; } static void xhci_start_dma_delay(struct usb_xfer *xfer) { struct xhci_softc *sc = XHCI_BUS2SC(xfer->xroot->bus); /* put transfer on interrupt queue (again) */ usbd_transfer_enqueue(&sc->sc_bus.intr_q, xfer); (void)usb_proc_msignal(USB_BUS_CONTROL_XFER_PROC(&sc->sc_bus), 
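/*
 * Summary of the recovery sequence driven by
 * xhci_configure_reset_endpoint() above, per the endpoint context
 * state diagram in the XHCI specification:
 *
 *	Stop Endpoint		leave the Running state
 *	Reset Endpoint		clear a possible Halted state
 *	Set TR Dequeue Pointer	restart the ring at a known TRB (DCS = 1)
 *	Evaluate/Configure	endpoint is Running again
 *
 * Individual command failures are only logged via DPRINTF(); the
 * function still returns success so the transfer machinery can retry.
 */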
&sc->sc_config_msg[0], &sc->sc_config_msg[1]); } static void xhci_configure_msg(struct usb_proc_msg *pm) { struct xhci_softc *sc; struct xhci_endpoint_ext *pepext; struct usb_xfer *xfer; sc = XHCI_BUS2SC(((struct usb_bus_msg *)pm)->bus); restart: TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { pepext = xhci_get_endpoint_ext(xfer->xroot->udev, xfer->endpoint->edesc); if ((pepext->trb_halted != 0) || (pepext->trb_running == 0)) { uint16_t i; /* clear halted and running */ pepext->trb_halted = 0; pepext->trb_running = 0; /* nuke remaining buffered transfers */ for (i = 0; i != (XHCI_MAX_TRANSFERS * XHCI_MAX_STREAMS); i++) { /* * NOTE: We need to use the timeout * error code here else existing * isochronous clients can get * confused: */ if (pepext->xfer[i] != NULL) { xhci_device_done(pepext->xfer[i], USB_ERR_TIMEOUT); } } /* * NOTE: The USB transfer cannot vanish in * this state! */ USB_BUS_UNLOCK(&sc->sc_bus); xhci_configure_reset_endpoint(xfer); USB_BUS_LOCK(&sc->sc_bus); /* check if halted is still cleared */ if (pepext->trb_halted == 0) { pepext->trb_running = 1; memset(pepext->trb_index, 0, sizeof(pepext->trb_index)); } goto restart; } if (xfer->flags_int.did_dma_delay) { /* remove transfer from interrupt queue (again) */ usbd_transfer_dequeue(xfer); /* we are finally done */ usb_dma_delay_done_cb(xfer); /* queue changed - restart */ goto restart; } } TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) { /* try to insert xfer on HW queue */ xhci_transfer_insert(xfer); /* try to multi buffer */ xhci_device_generic_multi_enter(xfer->endpoint, xfer->stream_id, NULL); } } static void xhci_ep_init(struct usb_device *udev, struct usb_endpoint_descriptor *edesc, struct usb_endpoint *ep) { struct xhci_endpoint_ext *pepext; DPRINTFN(2, "endpoint=%p, addr=%d, endpt=%d, mode=%d\n", ep, udev->address, edesc->bEndpointAddress, udev->flags.usb_mode); if (udev->parent_hub == NULL) { /* root HUB has special endpoint handling */ return; } ep->methods = &xhci_device_generic_methods; pepext = xhci_get_endpoint_ext(udev, edesc); USB_BUS_LOCK(udev->bus); pepext->trb_halted = 1; pepext->trb_running = 0; USB_BUS_UNLOCK(udev->bus); } static void xhci_ep_uninit(struct usb_device *udev, struct usb_endpoint *ep) { } static void xhci_ep_clear_stall(struct usb_device *udev, struct usb_endpoint *ep) { struct xhci_endpoint_ext *pepext; DPRINTF("\n"); if (udev->flags.usb_mode != USB_MODE_HOST) { /* not supported */ return; } if (udev->parent_hub == NULL) { /* root HUB has special endpoint handling */ return; } pepext = xhci_get_endpoint_ext(udev, ep->edesc); USB_BUS_LOCK(udev->bus); pepext->trb_halted = 1; pepext->trb_running = 0; USB_BUS_UNLOCK(udev->bus); } static usb_error_t xhci_device_init(struct usb_device *udev) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); usb_error_t err; uint8_t temp; /* no init for root HUB */ if (udev->parent_hub == NULL) return (0); XHCI_CMD_LOCK(sc); /* set invalid default */ udev->controller_slot_id = sc->sc_noslot + 1; /* try to get a new slot ID from the XHCI */ err = xhci_cmd_enable_slot(sc, &temp); if (err) { XHCI_CMD_UNLOCK(sc); return (err); } if (temp > sc->sc_noslot) { XHCI_CMD_UNLOCK(sc); return (USB_ERR_BAD_ADDRESS); } if (sc->sc_hw.devs[temp].state != XHCI_ST_DISABLED) { DPRINTF("slot %u already allocated.\n", temp); XHCI_CMD_UNLOCK(sc); return (USB_ERR_BAD_ADDRESS); } /* store slot ID for later reference */ udev->controller_slot_id = temp; /* reset data structure */ memset(&sc->sc_hw.devs[temp], 0, sizeof(sc->sc_hw.devs[0])); /* set mark slot allocated */ 
sc->sc_hw.devs[temp].state = XHCI_ST_ENABLED; err = xhci_alloc_device_ext(udev); XHCI_CMD_UNLOCK(sc); /* get device into default state */ if (err == 0) err = xhci_set_address(udev, NULL, 0); return (err); } static void xhci_device_uninit(struct usb_device *udev) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); uint8_t index; /* no init for root HUB */ if (udev->parent_hub == NULL) return; XHCI_CMD_LOCK(sc); index = udev->controller_slot_id; if (index <= sc->sc_noslot) { xhci_cmd_disable_slot(sc, index); sc->sc_hw.devs[index].state = XHCI_ST_DISABLED; /* free device extension */ xhci_free_device_ext(udev); } XHCI_CMD_UNLOCK(sc); } static void xhci_get_dma_delay(struct usb_device *udev, uint32_t *pus) { /* * Wait until the hardware has finished any possible use of * the transfer descriptor(s) */ *pus = 2048; /* microseconds */ } static void xhci_device_resume(struct usb_device *udev) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); uint8_t index; uint8_t n; uint8_t p; DPRINTF("\n"); /* check for root HUB */ if (udev->parent_hub == NULL) return; index = udev->controller_slot_id; XHCI_CMD_LOCK(sc); /* blindly resume all endpoints */ USB_BUS_LOCK(udev->bus); for (n = 1; n != XHCI_MAX_ENDPOINTS; n++) { for (p = 0; p != XHCI_MAX_STREAMS; p++) { XWRITE4(sc, door, XHCI_DOORBELL(index), n | XHCI_DB_SID_SET(p)); } } USB_BUS_UNLOCK(udev->bus); XHCI_CMD_UNLOCK(sc); } static void xhci_device_suspend(struct usb_device *udev) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); uint8_t index; uint8_t n; usb_error_t err; DPRINTF("\n"); /* check for root HUB */ if (udev->parent_hub == NULL) return; index = udev->controller_slot_id; XHCI_CMD_LOCK(sc); /* blindly suspend all endpoints */ for (n = 1; n != XHCI_MAX_ENDPOINTS; n++) { err = xhci_cmd_stop_ep(sc, 1, n, index); if (err != 0) { DPRINTF("Failed to suspend endpoint " "%u on slot %u (ignored).\n", n, index); } } XHCI_CMD_UNLOCK(sc); } static void xhci_set_hw_power(struct usb_bus *bus) { DPRINTF("\n"); } static void xhci_device_state_change(struct usb_device *udev) { struct xhci_softc *sc = XHCI_BUS2SC(udev->bus); struct usb_page_search buf_inp; usb_error_t err; uint8_t index; /* check for root HUB */ if (udev->parent_hub == NULL) return; index = udev->controller_slot_id; DPRINTF("\n"); if (usb_get_device_state(udev) == USB_STATE_CONFIGURED) { err = uhub_query_info(udev, &sc->sc_hw.devs[index].nports, &sc->sc_hw.devs[index].tt); if (err != 0) sc->sc_hw.devs[index].nports = 0; } XHCI_CMD_LOCK(sc); switch (usb_get_device_state(udev)) { case USB_STATE_POWERED: if (sc->sc_hw.devs[index].state == XHCI_ST_DEFAULT) break; /* set default state */ sc->sc_hw.devs[index].state = XHCI_ST_DEFAULT; /* reset number of contexts */ sc->sc_hw.devs[index].context_num = 0; err = xhci_cmd_reset_dev(sc, index); if (err != 0) { DPRINTF("Device reset failed " "for slot %u.\n", index); } break; case USB_STATE_ADDRESSED: if (sc->sc_hw.devs[index].state == XHCI_ST_ADDRESSED) break; sc->sc_hw.devs[index].state = XHCI_ST_ADDRESSED; err = xhci_cmd_configure_ep(sc, 0, 1, index); if (err) { DPRINTF("Failed to deconfigure " "slot %u.\n", index); } break; case USB_STATE_CONFIGURED: if (sc->sc_hw.devs[index].state == XHCI_ST_CONFIGURED) break; /* set configured state */ sc->sc_hw.devs[index].state = XHCI_ST_CONFIGURED; /* reset number of contexts */ sc->sc_hw.devs[index].context_num = 0; usbd_get_page(&sc->sc_hw.devs[index].input_pc, 0, &buf_inp); xhci_configure_mask(udev, 3, 0); err = xhci_configure_device(udev); if (err != 0) { DPRINTF("Could not configure device " "at slot %u.\n", 
index); } err = xhci_cmd_evaluate_ctx(sc, buf_inp.physaddr, index); if (err != 0) { DPRINTF("Could not evaluate device " "context at slot %u.\n", index); } break; default: break; } XHCI_CMD_UNLOCK(sc); } static usb_error_t xhci_set_endpoint_mode(struct usb_device *udev, struct usb_endpoint *ep, uint8_t ep_mode) { switch (ep_mode) { case USB_EP_MODE_DEFAULT: return (0); case USB_EP_MODE_STREAMS: if (xhcistreams == 0 || (ep->edesc->bmAttributes & UE_XFERTYPE) != UE_BULK || udev->speed != USB_SPEED_SUPER) return (USB_ERR_INVAL); return (0); default: return (USB_ERR_INVAL); } } static const struct usb_bus_methods xhci_bus_methods = { .endpoint_init = xhci_ep_init, .endpoint_uninit = xhci_ep_uninit, .xfer_setup = xhci_xfer_setup, .xfer_unsetup = xhci_xfer_unsetup, .get_dma_delay = xhci_get_dma_delay, .device_init = xhci_device_init, .device_uninit = xhci_device_uninit, .device_resume = xhci_device_resume, .device_suspend = xhci_device_suspend, .set_hw_power = xhci_set_hw_power, .roothub_exec = xhci_roothub_exec, .xfer_poll = xhci_do_poll, .start_dma_delay = xhci_start_dma_delay, .set_address = xhci_set_address, .clear_stall = xhci_ep_clear_stall, .device_state_change = xhci_device_state_change, .set_hw_power_sleep = xhci_set_hw_power_sleep, .set_endpoint_mode = xhci_set_endpoint_mode, }; diff --git a/sys/dev/usb/usb_transfer.c b/sys/dev/usb/usb_transfer.c index 17b9367df4b7..e3f388d81d03 100644 --- a/sys/dev/usb/usb_transfer.c +++ b/sys/dev/usb/usb_transfer.c @@ -1,3538 +1,3538 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2008 Hans Petter Selasky. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR usb_debug #include #include #include #include #include #include #include #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ struct usb_std_packet_size { struct { uint16_t min; /* inclusive */ uint16_t max; /* inclusive */ } range; uint16_t fixed[4]; }; static usb_callback_t usb_request_callback; static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = { /* This transfer is used for generic control endpoint transfers */ [0] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control endpoint */ .direction = UE_DIR_ANY, .bufsize = USB_EP0_BUFSIZE, /* bytes */ .flags = {.proxy_buffer = 1,}, .callback = &usb_request_callback, .usb_mode = USB_MODE_DUAL, /* both modes */ }, /* This transfer is used for generic clear stall only */ [1] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &usb_do_clear_stall_callback, .timeout = 1000, /* 1 second */ .interval = 50, /* 50ms */ .usb_mode = USB_MODE_HOST, }, }; /* function prototypes */ static void usbd_update_max_frame_size(struct usb_xfer *); static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t); static void usbd_control_transfer_init(struct usb_xfer *); static int usbd_setup_ctrl_transfer(struct usb_xfer *); static void usb_callback_proc(struct usb_proc_msg *); static void usbd_callback_ss_done_defer(struct usb_xfer *); static void usbd_callback_wrapper(struct usb_xfer_queue *); static void usbd_transfer_start_cb(void *); static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *); static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr, uint8_t type, enum usb_dev_speed speed); /*------------------------------------------------------------------------* * usb_request_callback *------------------------------------------------------------------------*/ static void usb_request_callback(struct usb_xfer *xfer, usb_error_t error) { if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) usb_handle_request_callback(xfer, error); else usbd_do_request_callback(xfer, error); } /*------------------------------------------------------------------------* * usbd_update_max_frame_size * * This function updates the maximum frame size, hence high speed USB * can transfer multiple consecutive packets. *------------------------------------------------------------------------*/ static void usbd_update_max_frame_size(struct usb_xfer *xfer) { /* compute maximum frame size */ /* this computation should not overflow 16-bit */ /* max = 15 * 1024 */ xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count; } /*------------------------------------------------------------------------* * usbd_get_dma_delay * * The following function is called when we need to * synchronize with DMA hardware. * * Returns: * 0: no DMA delay required * Else: milliseconds of DMA delay *------------------------------------------------------------------------*/ usb_timeout_t usbd_get_dma_delay(struct usb_device *udev) { const struct usb_bus_methods *mtod; uint32_t temp; mtod = udev->bus->methods; temp = 0; if (mtod->get_dma_delay) { (mtod->get_dma_delay) (udev, &temp); /* * Round up and convert to milliseconds. Note that we use * 1024 milliseconds per second. 
to save a division. */ temp += 0x3FF; temp /= 0x400; } return (temp); } /*------------------------------------------------------------------------* * usbd_transfer_setup_sub_malloc * * This function will allocate one or more DMA'able memory chunks * according to "size", "align" and "count" arguments. "ppc" will * point to a linear array of USB page caches afterwards. * * If the "align" argument is equal to "1" a non-contiguous allocation * can happen. Else if the "align" argument is greater than "1", the * allocation will always be contiguous in memory. * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ #if USB_HAVE_BUSDMA uint8_t usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm, struct usb_page_cache **ppc, usb_size_t size, usb_size_t align, usb_size_t count) { struct usb_page_cache *pc; struct usb_page *pg; void *buf; usb_size_t n_dma_pc; usb_size_t n_dma_pg; usb_size_t n_obj; usb_size_t x; usb_size_t y; usb_size_t r; usb_size_t z; USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n", align)); USB_ASSERT(size > 0, ("Invalid size = 0\n")); if (count == 0) { return (0); /* nothing to allocate */ } /* * Make sure that the size is aligned properly. */ size = -((-size) & (-align)); /* * Try multi-allocation chunks to reduce the number of DMA * allocations, because DMA allocations are slow. */ if (align == 1) { /* special case - non-cached multi page DMA memory */ n_dma_pc = count; n_dma_pg = (2 + (size / USB_PAGE_SIZE)); n_obj = 1; } else if (size >= USB_PAGE_SIZE) { n_dma_pc = count; n_dma_pg = 1; n_obj = 1; } else { /* compute number of objects per page */ #ifdef USB_DMA_SINGLE_ALLOC n_obj = 1; #else n_obj = (USB_PAGE_SIZE / size); #endif /* * Compute number of DMA chunks, rounded up * to nearest one: */ - n_dma_pc = ((count + n_obj - 1) / n_obj); + n_dma_pc = howmany(count, n_obj); n_dma_pg = 1; } /* * DMA memory is allocated once, but mapped twice. That's why * there is one list for auto-free and another list for * non-auto-free which only holds the mapping and not the * allocation.
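* * As a worked example of the chunk sizing above (illustrative numbers only, assuming a USB_PAGE_SIZE of 4096 bytes): size=24 with align=8 stays 24 after rounding up, so n_obj = 4096 / 24 = 170 objects per page, and count=512 buffers need n_dma_pc = howmany(512, 170) = 4 single-page DMA chunks.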
*/ if (parm->buf == NULL) { /* reserve memory (auto-free) */ parm->dma_page_ptr += n_dma_pc * n_dma_pg; parm->dma_page_cache_ptr += n_dma_pc; /* reserve memory (no-auto-free) */ parm->dma_page_ptr += count * n_dma_pg; parm->xfer_page_cache_ptr += count; return (0); } for (x = 0; x != n_dma_pc; x++) { /* need to initialize the page cache */ parm->dma_page_cache_ptr[x].tag_parent = &parm->curr_xfer->xroot->dma_parent_tag; } for (x = 0; x != count; x++) { /* need to initialize the page cache */ parm->xfer_page_cache_ptr[x].tag_parent = &parm->curr_xfer->xroot->dma_parent_tag; } if (ppc != NULL) { if (n_obj != 1) *ppc = parm->xfer_page_cache_ptr; else *ppc = parm->dma_page_cache_ptr; } r = count; /* set remainder count */ z = n_obj * size; /* set allocation size */ pc = parm->xfer_page_cache_ptr; pg = parm->dma_page_ptr; if (n_obj == 1) { /* * Avoid mapping memory twice if only a single object * should be allocated per page cache: */ for (x = 0; x != n_dma_pc; x++) { if (usb_pc_alloc_mem(parm->dma_page_cache_ptr, pg, z, align)) { return (1); /* failure */ } /* Make room for one DMA page cache and "n_dma_pg" pages */ parm->dma_page_cache_ptr++; pg += n_dma_pg; } } else { for (x = 0; x != n_dma_pc; x++) { if (r < n_obj) { /* compute last remainder */ z = r * size; n_obj = r; } if (usb_pc_alloc_mem(parm->dma_page_cache_ptr, pg, z, align)) { return (1); /* failure */ } /* Set beginning of current buffer */ buf = parm->dma_page_cache_ptr->buffer; /* Make room for one DMA page cache and "n_dma_pg" pages */ parm->dma_page_cache_ptr++; pg += n_dma_pg; for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) { /* Load sub-chunk into DMA */ if (usb_pc_dmamap_create(pc, size)) { return (1); /* failure */ } pc->buffer = USB_ADD_BYTES(buf, y * size); pc->page_start = pg; mtx_lock(pc->tag_parent->mtx); if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) { mtx_unlock(pc->tag_parent->mtx); return (1); /* failure */ } mtx_unlock(pc->tag_parent->mtx); } } } parm->xfer_page_cache_ptr = pc; parm->dma_page_ptr = pg; return (0); } #endif /*------------------------------------------------------------------------* * usbd_transfer_setup_sub - transfer setup subroutine * * This function must be called from the "xfer_setup" callback of the * USB Host or Device controller driver when setting up an USB * transfer. This function will setup correct packet sizes, buffer * sizes, flags and more, that are stored in the "usb_xfer" * structure. *------------------------------------------------------------------------*/ void usbd_transfer_setup_sub(struct usb_setup_params *parm) { enum { REQ_SIZE = 8, MIN_PKT = 8, }; struct usb_xfer *xfer = parm->curr_xfer; const struct usb_config *setup = parm->curr_setup; struct usb_endpoint_ss_comp_descriptor *ecomp; struct usb_endpoint_descriptor *edesc; struct usb_std_packet_size std_size; usb_frcount_t n_frlengths; usb_frcount_t n_frbuffers; usb_frcount_t x; uint16_t maxp_old; uint8_t type; uint8_t zmps; /* * Sanity check. The following parameters must be initialized before * calling this function. 
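* (namely "hc_max_packet_size", "hc_max_packet_count" and "hc_max_frame_size", which are checked directly below)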
*/ if ((parm->hc_max_packet_size == 0) || (parm->hc_max_packet_count == 0) || (parm->hc_max_frame_size == 0)) { parm->err = USB_ERR_INVAL; goto done; } edesc = xfer->endpoint->edesc; ecomp = xfer->endpoint->ecomp; type = (edesc->bmAttributes & UE_XFERTYPE); xfer->flags = setup->flags; xfer->nframes = setup->frames; xfer->timeout = setup->timeout; xfer->callback = setup->callback; xfer->interval = setup->interval; xfer->endpointno = edesc->bEndpointAddress; xfer->max_packet_size = UGETW(edesc->wMaxPacketSize); xfer->max_packet_count = 1; /* make a shadow copy: */ xfer->flags_int.usb_mode = parm->udev->flags.usb_mode; parm->bufsize = setup->bufsize; switch (parm->speed) { case USB_SPEED_HIGH: switch (type) { case UE_ISOCHRONOUS: case UE_INTERRUPT: xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3; /* check for invalid max packet count */ if (xfer->max_packet_count > 3) xfer->max_packet_count = 3; break; default: break; } xfer->max_packet_size &= 0x7FF; break; case USB_SPEED_SUPER: xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3; if (ecomp != NULL) xfer->max_packet_count += ecomp->bMaxBurst; if ((xfer->max_packet_count == 0) || (xfer->max_packet_count > 16)) xfer->max_packet_count = 16; switch (type) { case UE_CONTROL: xfer->max_packet_count = 1; break; case UE_ISOCHRONOUS: if (ecomp != NULL) { uint8_t mult; mult = UE_GET_SS_ISO_MULT( ecomp->bmAttributes) + 1; if (mult > 3) mult = 3; xfer->max_packet_count *= mult; } break; default: break; } xfer->max_packet_size &= 0x7FF; break; default: break; } /* range check "max_packet_count" */ if (xfer->max_packet_count > parm->hc_max_packet_count) { xfer->max_packet_count = parm->hc_max_packet_count; } /* store max packet size value before filtering */ maxp_old = xfer->max_packet_size; /* filter "wMaxPacketSize" according to HC capabilities */ if ((xfer->max_packet_size > parm->hc_max_packet_size) || (xfer->max_packet_size == 0)) { xfer->max_packet_size = parm->hc_max_packet_size; } /* filter "wMaxPacketSize" according to standard sizes */ usbd_get_std_packet_size(&std_size, type, parm->speed); if (std_size.range.min || std_size.range.max) { if (xfer->max_packet_size < std_size.range.min) { xfer->max_packet_size = std_size.range.min; } if (xfer->max_packet_size > std_size.range.max) { xfer->max_packet_size = std_size.range.max; } } else { if (xfer->max_packet_size >= std_size.fixed[3]) { xfer->max_packet_size = std_size.fixed[3]; } else if (xfer->max_packet_size >= std_size.fixed[2]) { xfer->max_packet_size = std_size.fixed[2]; } else if (xfer->max_packet_size >= std_size.fixed[1]) { xfer->max_packet_size = std_size.fixed[1]; } else { /* only one possibility left */ xfer->max_packet_size = std_size.fixed[0]; } } /* * Check if the max packet size was outside its allowed range * and clamped to a valid value: */ if (maxp_old != xfer->max_packet_size) xfer->flags_int.maxp_was_clamped = 1; /* compute "max_frame_size" */ usbd_update_max_frame_size(xfer); /* check interrupt interval and transfer pre-delay */ if (type == UE_ISOCHRONOUS) { uint16_t frame_limit; xfer->interval = 0; /* not used, must be zero */ xfer->flags_int.isochronous_xfr = 1; /* set flag */ if (xfer->timeout == 0) { /* * set a default timeout in * case something goes wrong! 
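* (i.e. 1000 / 4 = 250 milliseconds)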
*/ xfer->timeout = 1000 / 4; } switch (parm->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER; xfer->fps_shift = 0; break; default: frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER; xfer->fps_shift = edesc->bInterval; if (xfer->fps_shift > 0) xfer->fps_shift--; if (xfer->fps_shift > 3) xfer->fps_shift = 3; if (xfer->flags.pre_scale_frames != 0) xfer->nframes <<= (3 - xfer->fps_shift); break; } if (xfer->nframes > frame_limit) { /* * this is not going to work * across all hardware */ parm->err = USB_ERR_INVAL; goto done; } if (xfer->nframes == 0) { /* * this is not a valid value */ parm->err = USB_ERR_ZERO_NFRAMES; goto done; } } else { /* * If a value is specified, use that; else check the * endpoint descriptor! */ if (type == UE_INTERRUPT) { uint32_t temp; if (xfer->interval == 0) { xfer->interval = edesc->bInterval; switch (parm->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: break; default: /* 125us -> 1ms */ if (xfer->interval < 4) xfer->interval = 1; else if (xfer->interval > 16) xfer->interval = (1 << (16 - 4)); else xfer->interval = (1 << (xfer->interval - 4)); break; } } if (xfer->interval == 0) { /* * One millisecond is the smallest * interval we support: */ xfer->interval = 1; } xfer->fps_shift = 0; temp = 1; while ((temp != 0) && (temp < xfer->interval)) { xfer->fps_shift++; temp *= 2; } switch (parm->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: break; default: xfer->fps_shift += 3; break; } } } /* * NOTE: we do not allow "max_packet_size" or "max_frame_size" * to be equal to zero when setting up USB transfers, and * this leads to a lot of extra code in the USB kernel. */ if ((xfer->max_frame_size == 0) || (xfer->max_packet_size == 0)) { zmps = 1; if ((parm->bufsize <= MIN_PKT) && (type != UE_CONTROL) && (type != UE_BULK)) { /* workaround */ xfer->max_packet_size = MIN_PKT; xfer->max_packet_count = 1; parm->bufsize = 0; /* automatic setup length */ usbd_update_max_frame_size(xfer); } else { parm->err = USB_ERR_ZERO_MAXP; goto done; } } else { zmps = 0; } /* * check if we should setup a default * length: */ if (parm->bufsize == 0) { parm->bufsize = xfer->max_frame_size; if (type == UE_ISOCHRONOUS) { parm->bufsize *= xfer->nframes; } } /* * check if we are about to setup a proxy * type of buffer: */ if (xfer->flags.proxy_buffer) { /* round bufsize up */ parm->bufsize += (xfer->max_frame_size - 1); if (parm->bufsize < xfer->max_frame_size) { /* length wrapped around */ parm->err = USB_ERR_INVAL; goto done; } /* subtract remainder */ parm->bufsize -= (parm->bufsize % xfer->max_frame_size); /* add length of USB device request structure, if any */ if (type == UE_CONTROL) { parm->bufsize += REQ_SIZE; /* SETUP message */ } } xfer->max_data_length = parm->bufsize; /* Setup "n_frlengths" and "n_frbuffers" */ if (type == UE_ISOCHRONOUS) { n_frlengths = xfer->nframes; n_frbuffers = 1; } else { if (type == UE_CONTROL) { xfer->flags_int.control_xfr = 1; if (xfer->nframes == 0) { if (parm->bufsize <= REQ_SIZE) { /* * there will never be any data * stage */ xfer->nframes = 1; } else { xfer->nframes = 2; } } } else { if (xfer->nframes == 0) { xfer->nframes = 1; } } n_frlengths = xfer->nframes; n_frbuffers = xfer->nframes; } /* * check if we have room for the * USB device request structure: */ if (type == UE_CONTROL) { if (xfer->max_data_length < REQ_SIZE) { /* length wrapped around or too small bufsize */ parm->err = USB_ERR_INVAL; goto done; } xfer->max_data_length -= REQ_SIZE; } /* * Setup "frlengths" and shadow "frlengths" for keeping the *
initial frame lengths when a USB transfer is complete. This * information is useful when computing isochronous offsets. */ xfer->frlengths = parm->xfer_length_ptr; parm->xfer_length_ptr += 2 * n_frlengths; /* setup "frbuffers" */ xfer->frbuffers = parm->xfer_page_cache_ptr; parm->xfer_page_cache_ptr += n_frbuffers; /* initialize max frame count */ xfer->max_frame_count = xfer->nframes; /* * check if we need to setup * a local buffer: */ if (!xfer->flags.ext_buffer) { #if USB_HAVE_BUSDMA struct usb_page_search page_info; struct usb_page_cache *pc; if (usbd_transfer_setup_sub_malloc(parm, &pc, parm->bufsize, 1, 1)) { parm->err = USB_ERR_NOMEM; } else if (parm->buf != NULL) { usbd_get_page(pc, 0, &page_info); xfer->local_buffer = page_info.buffer; usbd_xfer_set_frame_offset(xfer, 0, 0); if ((type == UE_CONTROL) && (n_frbuffers > 1)) { usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); } } #else /* align data */ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); if (parm->buf != NULL) { xfer->local_buffer = USB_ADD_BYTES(parm->buf, parm->size[0]); usbd_xfer_set_frame_offset(xfer, 0, 0); if ((type == UE_CONTROL) && (n_frbuffers > 1)) { usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1); } } parm->size[0] += parm->bufsize; /* align data again */ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); #endif } /* * Compute maximum buffer size */ if (parm->bufsize_max < parm->bufsize) { parm->bufsize_max = parm->bufsize; } #if USB_HAVE_BUSDMA if (xfer->flags_int.bdma_enable) { /* * Setup "dma_page_ptr". * * Proof for formula below: * * Assume there are three USB frames having length "a", "b" and * "c". These USB frames will at maximum need "z" * "usb_page" structures. "z" is given by: * * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) + * ((c / USB_PAGE_SIZE) + 2); * * Constraining "a", "b" and "c" like this: * * (a + b + c) <= parm->bufsize * * We know that: * * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2)); * * Here is the general formula: */ xfer->dma_page_ptr = parm->dma_page_ptr; parm->dma_page_ptr += (2 * n_frbuffers); parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE); } #endif if (zmps) { /* correct maximum data length */ xfer->max_data_length = 0; } /* subtract USB frame remainder from "hc_max_frame_size" */ xfer->max_hc_frame_size = (parm->hc_max_frame_size - (parm->hc_max_frame_size % xfer->max_frame_size)); if (xfer->max_hc_frame_size == 0) { parm->err = USB_ERR_INVAL; goto done; } /* initialize frame buffers */ if (parm->buf) { for (x = 0; x != n_frbuffers; x++) { xfer->frbuffers[x].tag_parent = &xfer->xroot->dma_parent_tag; #if USB_HAVE_BUSDMA if (xfer->flags_int.bdma_enable && (parm->bufsize_max > 0)) { if (usb_pc_dmamap_create( xfer->frbuffers + x, parm->bufsize_max)) { parm->err = USB_ERR_NOMEM; goto done; } } #endif } } done: if (parm->err) { /* * Set some dummy values so that we avoid division by zero: */ xfer->max_hc_frame_size = 1; xfer->max_frame_size = 1; xfer->max_packet_size = 1; xfer->max_data_length = 0; xfer->nframes = 0; xfer->max_frame_count = 0; } } static uint8_t usbd_transfer_setup_has_bulk(const struct usb_config *setup_start, uint16_t n_setup) { while (n_setup--) { uint8_t type = setup_start[n_setup].type; if (type == UE_BULK || type == UE_BULK_INTR || type == UE_TYPE_ANY) return (1); } return (0); } /*------------------------------------------------------------------------* * usbd_transfer_setup - setup an array of USB transfers * * NOTE: You must always call "usbd_transfer_unsetup" after calling * "usbd_transfer_setup" if success was 
returned. * * The idea is that the USB device driver should pre-allocate all its * transfers by one call to this function. * * Return values: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ usb_error_t usbd_transfer_setup(struct usb_device *udev, const uint8_t *ifaces, struct usb_xfer **ppxfer, const struct usb_config *setup_start, uint16_t n_setup, void *priv_sc, struct mtx *xfer_mtx) { const struct usb_config *setup_end = setup_start + n_setup; const struct usb_config *setup; struct usb_setup_params *parm; struct usb_endpoint *ep; struct usb_xfer_root *info; struct usb_xfer *xfer; void *buf = NULL; usb_error_t error = 0; uint16_t n; uint16_t refcount; uint8_t do_unlock; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "usbd_transfer_setup can sleep!"); /* do some checking first */ if (n_setup == 0) { DPRINTFN(6, "setup array has zero length!\n"); return (USB_ERR_INVAL); } if (ifaces == NULL) { DPRINTFN(6, "ifaces array is NULL!\n"); return (USB_ERR_INVAL); } if (xfer_mtx == NULL) { DPRINTFN(6, "using global lock\n"); xfer_mtx = &Giant; } /* more sanity checks */ for (setup = setup_start, n = 0; setup != setup_end; setup++, n++) { if (setup->bufsize == (usb_frlength_t)-1) { error = USB_ERR_BAD_BUFSIZE; DPRINTF("invalid bufsize\n"); } if (setup->callback == NULL) { error = USB_ERR_NO_CALLBACK; DPRINTF("no callback\n"); } ppxfer[n] = NULL; } if (error) return (error); /* Protect scratch area */ do_unlock = usbd_enum_lock(udev); refcount = 0; info = NULL; parm = &udev->scratch.xfer_setup[0].parm; memset(parm, 0, sizeof(*parm)); parm->udev = udev; parm->speed = usbd_get_speed(udev); parm->hc_max_packet_count = 1; if (parm->speed >= USB_SPEED_MAX) { parm->err = USB_ERR_INVAL; goto done; } /* setup all transfers */ while (1) { if (buf) { /* * Initialize the "usb_xfer_root" structure, * which is common for all our USB transfers. */ info = USB_ADD_BYTES(buf, 0); info->memory_base = buf; info->memory_size = parm->size[0]; #if USB_HAVE_BUSDMA info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]); info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]); #endif info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]); info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]); cv_init(&info->cv_drain, "WDRAIN"); info->xfer_mtx = xfer_mtx; #if USB_HAVE_BUSDMA usb_dma_tag_setup(&info->dma_parent_tag, parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag, xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits, parm->dma_tag_max); #endif info->bus = udev->bus; info->udev = udev; TAILQ_INIT(&info->done_q.head); info->done_q.command = &usbd_callback_wrapper; #if USB_HAVE_BUSDMA TAILQ_INIT(&info->dma_q.head); info->dma_q.command = &usb_bdma_work_loop; #endif info->done_m[0].hdr.pm_callback = &usb_callback_proc; info->done_m[0].xroot = info; info->done_m[1].hdr.pm_callback = &usb_callback_proc; info->done_m[1].xroot = info; /* * In device side mode control endpoint * requests need to run from a separate * context, else there is a chance of * deadlock! 
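* * The completion thread is therefore selected as follows, mirroring the chain below: generic control endpoint transfers use the per-bus control transfer process, transfers serialized by Giant use the Giant process, transfer arrays containing a bulk endpoint use the non-giant bulk process, and everything else uses the non-giant isochronous process.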
*/ if (setup_start == usb_control_ep_cfg) info->done_p = USB_BUS_CONTROL_XFER_PROC(udev->bus); else if (xfer_mtx == &Giant) info->done_p = USB_BUS_GIANT_PROC(udev->bus); else if (usbd_transfer_setup_has_bulk(setup_start, n_setup)) info->done_p = USB_BUS_NON_GIANT_BULK_PROC(udev->bus); else info->done_p = USB_BUS_NON_GIANT_ISOC_PROC(udev->bus); } /* reset sizes */ parm->size[0] = 0; parm->buf = buf; parm->size[0] += sizeof(info[0]); for (setup = setup_start, n = 0; setup != setup_end; setup++, n++) { /* skip USB transfers without callbacks: */ if (setup->callback == NULL) { continue; } /* see if there is a matching endpoint */ ep = usbd_get_endpoint(udev, ifaces[setup->if_index], setup); /* * Check that the USB PIPE is valid and that * the endpoint mode is proper. * * Make sure we don't allocate a streams * transfer when such a combination is not * valid. */ if ((ep == NULL) || (ep->methods == NULL) || ((ep->ep_mode != USB_EP_MODE_STREAMS) && (ep->ep_mode != USB_EP_MODE_DEFAULT)) || (setup->stream_id != 0 && (setup->stream_id >= USB_MAX_EP_STREAMS || (ep->ep_mode != USB_EP_MODE_STREAMS)))) { if (setup->flags.no_pipe_ok) continue; if ((setup->usb_mode != USB_MODE_DUAL) && (setup->usb_mode != udev->flags.usb_mode)) continue; parm->err = USB_ERR_NO_PIPE; goto done; } /* align data properly */ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); /* store current setup pointer */ parm->curr_setup = setup; if (buf) { /* * Common initialization of the * "usb_xfer" structure. */ xfer = USB_ADD_BYTES(buf, parm->size[0]); xfer->address = udev->address; xfer->priv_sc = priv_sc; xfer->xroot = info; usb_callout_init_mtx(&xfer->timeout_handle, &udev->bus->bus_mtx, 0); } else { /* * Setup a dummy xfer, hence we are * writing to the "usb_xfer" * structure pointed to by "xfer" * before we have allocated any * memory: */ xfer = &udev->scratch.xfer_setup[0].dummy; memset(xfer, 0, sizeof(*xfer)); refcount++; } /* set transfer endpoint pointer */ xfer->endpoint = ep; /* set transfer stream ID */ xfer->stream_id = setup->stream_id; parm->size[0] += sizeof(xfer[0]); parm->methods = xfer->endpoint->methods; parm->curr_xfer = xfer; /* * Call the Host or Device controller transfer * setup routine: */ (udev->bus->methods->xfer_setup) (parm); /* check for error */ if (parm->err) goto done; if (buf) { /* * Increment the endpoint refcount. This * basically prevents setting a new * configuration and alternate setting * when USB transfers are in use on * the given interface. Search the USB * code for "endpoint->refcount_alloc" if you * want more information. */ USB_BUS_LOCK(info->bus); if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX) parm->err = USB_ERR_INVAL; xfer->endpoint->refcount_alloc++; if (xfer->endpoint->refcount_alloc == 0) panic("usbd_transfer_setup(): Refcount wrapped to zero\n"); USB_BUS_UNLOCK(info->bus); /* * Whenever we set ppxfer[] then we * also need to increment the * "setup_refcount": */ info->setup_refcount++; /* * Transfer is successfully setup and * can be used: */ ppxfer[n] = xfer; } /* check for error */ if (parm->err) goto done; } if (buf != NULL || parm->err != 0) goto done; /* if no transfers, nothing to do */ if (refcount == 0) goto done; /* align data properly */ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); /* store offset temporarily */ parm->size[1] = parm->size[0]; /* * The number of DMA tags required depends on * the number of endpoints. 
The current estimate * for maximum number of DMA tags per endpoint * is three: * 1) for loading memory * 2) for allocating memory * 3) for fixing memory [UHCI] */ parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX); /* * DMA tags for QH, TD, Data and more. */ parm->dma_tag_max += 8; parm->dma_tag_p += parm->dma_tag_max; parm->size[0] += ((uint8_t *)parm->dma_tag_p) - ((uint8_t *)0); /* align data properly */ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); /* store offset temporarily */ parm->size[3] = parm->size[0]; parm->size[0] += ((uint8_t *)parm->dma_page_ptr) - ((uint8_t *)0); /* align data properly */ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); /* store offset temporarily */ parm->size[4] = parm->size[0]; parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) - ((uint8_t *)0); /* store end offset temporarily */ parm->size[5] = parm->size[0]; parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) - ((uint8_t *)0); /* store end offset temporarily */ parm->size[2] = parm->size[0]; /* align data properly */ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); parm->size[6] = parm->size[0]; parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) - ((uint8_t *)0); /* align data properly */ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1)); /* allocate zeroed memory */ buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO); if (buf == NULL) { parm->err = USB_ERR_NOMEM; DPRINTFN(0, "cannot allocate memory block for " "configuration (%d bytes)\n", parm->size[0]); goto done; } parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]); parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]); parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]); parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]); parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]); } done: if (buf) { if (info->setup_refcount == 0) { /* * "usbd_transfer_unsetup_sub" will unlock * the bus mutex before returning ! */ USB_BUS_LOCK(info->bus); /* something went wrong */ usbd_transfer_unsetup_sub(info, 0); } } /* check if any errors happened */ if (parm->err) usbd_transfer_unsetup(ppxfer, n_setup); error = parm->err; if (do_unlock) usbd_enum_unlock(udev); return (error); } /*------------------------------------------------------------------------* * usbd_transfer_unsetup_sub - factored out code *------------------------------------------------------------------------*/ static void usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay) { #if USB_HAVE_BUSDMA struct usb_page_cache *pc; #endif USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); /* wait for any outstanding DMA operations */ if (needs_delay) { usb_timeout_t temp; temp = usbd_get_dma_delay(info->udev); if (temp != 0) { usb_pause_mtx(&info->bus->bus_mtx, USB_MS_TO_TICKS(temp)); } } /* make sure that our done messages are not queued anywhere */ usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]); USB_BUS_UNLOCK(info->bus); #if USB_HAVE_BUSDMA /* free DMA'able memory, if any */ pc = info->dma_page_cache_start; while (pc != info->dma_page_cache_end) { usb_pc_free_mem(pc); pc++; } /* free DMA maps in all "xfer->frbuffers" */ pc = info->xfer_page_cache_start; while (pc != info->xfer_page_cache_end) { usb_pc_dmamap_destroy(pc); pc++; } /* free all DMA tags */ usb_dma_tag_unsetup(&info->dma_parent_tag); #endif cv_destroy(&info->cv_drain); /* * free the "memory_base" last, hence the "info" structure is * contained within the "memory_base"! 
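* * For reference, usbd_transfer_setup() lays out this single allocation as: the "usb_xfer_root" structure, the "usb_xfer" structures, the DMA tags (offset size[1]), the DMA pages (size[3]), the DMA page caches (size[4]), the transfer page caches (size[5]) and the frame lengths (size[6]).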
*/ free(info->memory_base, M_USB); } /*------------------------------------------------------------------------* * usbd_transfer_unsetup - unsetup/free an array of USB transfers * * NOTE: All USB transfers in progress will get called back passing * the error code "USB_ERR_CANCELLED" before this function * returns. *------------------------------------------------------------------------*/ void usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup) { struct usb_xfer *xfer; struct usb_xfer_root *info; uint8_t needs_delay = 0; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "usbd_transfer_unsetup can sleep!"); while (n_setup--) { xfer = pxfer[n_setup]; if (xfer == NULL) continue; info = xfer->xroot; USB_XFER_LOCK(xfer); USB_BUS_LOCK(info->bus); /* * HINT: when you start/stop a transfer, it might be a * good idea to directly use the "pxfer[]" structure: * * usbd_transfer_start(sc->pxfer[0]); * usbd_transfer_stop(sc->pxfer[0]); * * That way, if your code has many parts that will not * stop running under the same lock, in other words * "xfer_mtx", the usbd_transfer_start and * usbd_transfer_stop functions will simply return * when they detect a NULL pointer argument. * * To avoid any races we clear the "pxfer[]" pointer * while holding the private mutex of the driver: */ pxfer[n_setup] = NULL; USB_BUS_UNLOCK(info->bus); USB_XFER_UNLOCK(xfer); usbd_transfer_drain(xfer); #if USB_HAVE_BUSDMA if (xfer->flags_int.bdma_enable) needs_delay = 1; #endif /* * NOTE: default endpoint does not have an * interface, even if endpoint->iface_index == 0 */ USB_BUS_LOCK(info->bus); xfer->endpoint->refcount_alloc--; USB_BUS_UNLOCK(info->bus); usb_callout_drain(&xfer->timeout_handle); USB_BUS_LOCK(info->bus); USB_ASSERT(info->setup_refcount != 0, ("Invalid setup " "reference count\n")); info->setup_refcount--; if (info->setup_refcount == 0) { usbd_transfer_unsetup_sub(info, needs_delay); } else { USB_BUS_UNLOCK(info->bus); } } } /*------------------------------------------------------------------------* * usbd_control_transfer_init - factored out code * * In USB Device Mode we have to wait for the SETUP packet which * contains the "struct usb_device_request" structure, before we can * transfer any data. In USB Host Mode we already have the SETUP * packet at the moment the USB transfer is started. This leads us to * having to setup the USB transfer at two different places in * time. This function just contains factored out control transfer * initialisation code, so that we don't duplicate the code. *------------------------------------------------------------------------*/ static void usbd_control_transfer_init(struct usb_xfer *xfer) { struct usb_device_request req; /* copy out the USB request header */ usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req)); /* setup remainder */ xfer->flags_int.control_rem = UGETW(req.wLength); /* copy direction to endpoint variable */ xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT); xfer->endpointno |= (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT; } /*------------------------------------------------------------------------* * usbd_control_transfer_did_data * * This function returns non-zero if a control endpoint has * transferred the first DATA packet after the SETUP packet. * Else it returns zero.
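* * The stored remainder is initialised to "wLength" and shrinks as soon as the first DATA packet has been transferred, which is what the comparison below detects.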
*------------------------------------------------------------------------*/ static uint8_t usbd_control_transfer_did_data(struct usb_xfer *xfer) { struct usb_device_request req; /* SETUP packet is not yet sent */ if (xfer->flags_int.control_hdr != 0) return (0); /* copy out the USB request header */ usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req)); /* compare remainder to the initial value */ return (xfer->flags_int.control_rem != UGETW(req.wLength)); } /*------------------------------------------------------------------------* * usbd_setup_ctrl_transfer * * This function handles initialisation of control transfers. Control * transfers are special in that they can both transmit * and receive data. * * Return values: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ static int usbd_setup_ctrl_transfer(struct usb_xfer *xfer) { usb_frlength_t len; /* Check for control endpoint stall */ if (xfer->flags.stall_pipe && xfer->flags_int.control_act) { /* the control transfer is no longer active */ xfer->flags_int.control_stall = 1; xfer->flags_int.control_act = 0; } else { /* don't stall control transfer by default */ xfer->flags_int.control_stall = 0; } /* Check for invalid number of frames */ if (xfer->nframes > 2) { /* * If you need to split a control transfer, you * have to do one part at a time. Only with * non-control transfers can you do multiple * parts at a time. */ DPRINTFN(0, "Too many frames: %u\n", (unsigned int)xfer->nframes); goto error; } /* * Check if there is a control * transfer in progress: */ if (xfer->flags_int.control_act) { if (xfer->flags_int.control_hdr) { /* clear send header flag */ xfer->flags_int.control_hdr = 0; /* setup control transfer */ if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { usbd_control_transfer_init(xfer); } } /* get data length */ len = xfer->sumlen; } else { /* the size of the SETUP structure is hardcoded ! */ if (xfer->frlengths[0] != sizeof(struct usb_device_request)) { DPRINTFN(0, "Wrong framelength %u != %zu\n", xfer->frlengths[0], sizeof(struct usb_device_request)); goto error; } /* check USB mode */ if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { /* check number of frames */ if (xfer->nframes != 1) { /* * We need to receive the setup * message first so that we know the * data direction! */ DPRINTF("Misconfigured transfer\n"); goto error; } /* * Set a dummy "control_rem" value. This * variable will be overwritten later by a * call to "usbd_control_transfer_init()" !
*/ xfer->flags_int.control_rem = 0xFFFF; } else { /* setup "endpoint" and "control_rem" */ usbd_control_transfer_init(xfer); } /* set transfer-header flag */ xfer->flags_int.control_hdr = 1; /* get data length */ len = (xfer->sumlen - sizeof(struct usb_device_request)); } /* update did data flag */ xfer->flags_int.control_did_data = usbd_control_transfer_did_data(xfer); /* check if there is a length mismatch */ if (len > xfer->flags_int.control_rem) { DPRINTFN(0, "Length (%d) greater than " "remaining length (%d)\n", len, xfer->flags_int.control_rem); goto error; } /* check if we are doing a short transfer */ if (xfer->flags.force_short_xfer) { xfer->flags_int.control_rem = 0; } else { if ((len != xfer->max_data_length) && (len != xfer->flags_int.control_rem) && (xfer->nframes != 1)) { DPRINTFN(0, "Short control transfer without " "force_short_xfer set\n"); goto error; } xfer->flags_int.control_rem -= len; } /* the status part is executed when "control_act" is 0 */ if ((xfer->flags_int.control_rem > 0) || (xfer->flags.manual_status)) { /* don't execute the STATUS stage yet */ xfer->flags_int.control_act = 1; /* sanity check */ if ((!xfer->flags_int.control_hdr) && (xfer->nframes == 1)) { /* * This is not a valid operation! */ DPRINTFN(0, "Invalid parameter " "combination\n"); goto error; } } else { /* time to execute the STATUS stage */ xfer->flags_int.control_act = 0; } return (0); /* success */ error: return (1); /* failure */ } /*------------------------------------------------------------------------* * usbd_transfer_submit - start USB hardware for the given transfer * * This function should only be called from the USB callback. *------------------------------------------------------------------------*/ void usbd_transfer_submit(struct usb_xfer *xfer) { struct usb_xfer_root *info; struct usb_bus *bus; usb_frcount_t x; info = xfer->xroot; bus = info->bus; DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n", xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ? "read" : "write"); #ifdef USB_DEBUG if (USB_DEBUG_VAR > 0) { USB_BUS_LOCK(bus); usb_dump_endpoint(xfer->endpoint); USB_BUS_UNLOCK(bus); } #endif USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED); /* Only open the USB transfer once! */ if (!xfer->flags_int.open) { xfer->flags_int.open = 1; DPRINTF("open\n"); USB_BUS_LOCK(bus); (xfer->endpoint->methods->open) (xfer); USB_BUS_UNLOCK(bus); } /* set "transferring" flag */ xfer->flags_int.transferring = 1; #if USB_HAVE_POWERD /* increment power reference */ usbd_transfer_power_ref(xfer, 1); #endif /* * Check if the transfer is waiting on a queue, most * frequently the "done_q": */ if (xfer->wait_queue) { USB_BUS_LOCK(bus); usbd_transfer_dequeue(xfer); USB_BUS_UNLOCK(bus); } /* clear "did_dma_delay" flag */ xfer->flags_int.did_dma_delay = 0; /* clear "did_close" flag */ xfer->flags_int.did_close = 0; #if USB_HAVE_BUSDMA /* clear "bdma_setup" flag */ xfer->flags_int.bdma_setup = 0; #endif /* by default we cannot cancel any USB transfer immediately */ xfer->flags_int.can_cancel_immed = 0; /* clear lengths and frame counts by default */ xfer->sumlen = 0; xfer->actlen = 0; xfer->aframes = 0; /* clear any previous errors */ xfer->error = 0; /* Check if the device is still alive */ if (info->udev->state < USB_STATE_POWERED) { USB_BUS_LOCK(bus); /* * Must return cancelled error code else * device drivers can hang. 
*/ usbd_transfer_done(xfer, USB_ERR_CANCELLED); USB_BUS_UNLOCK(bus); return; } /* sanity check */ if (xfer->nframes == 0) { if (xfer->flags.stall_pipe) { /* * Special case - want to stall without transferring * any data: */ DPRINTF("xfer=%p nframes=0: stall " "or clear stall!\n", xfer); USB_BUS_LOCK(bus); xfer->flags_int.can_cancel_immed = 1; /* start the transfer */ usb_command_wrapper(&xfer->endpoint-> endpoint_q[xfer->stream_id], xfer); USB_BUS_UNLOCK(bus); return; } USB_BUS_LOCK(bus); usbd_transfer_done(xfer, USB_ERR_INVAL); USB_BUS_UNLOCK(bus); return; } /* compute some variables */ for (x = 0; x != xfer->nframes; x++) { /* make a copy of the frlengths[] */ xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x]; /* compute total transfer length */ xfer->sumlen += xfer->frlengths[x]; if (xfer->sumlen < xfer->frlengths[x]) { /* length wrapped around */ USB_BUS_LOCK(bus); usbd_transfer_done(xfer, USB_ERR_INVAL); USB_BUS_UNLOCK(bus); return; } } /* clear some internal flags */ xfer->flags_int.short_xfer_ok = 0; xfer->flags_int.short_frames_ok = 0; /* check if this is a control transfer */ if (xfer->flags_int.control_xfr) { if (usbd_setup_ctrl_transfer(xfer)) { USB_BUS_LOCK(bus); usbd_transfer_done(xfer, USB_ERR_STALLED); USB_BUS_UNLOCK(bus); return; } } /* * Setup filtered version of some transfer flags, * in case of data read direction */ if (USB_GET_DATA_ISREAD(xfer)) { if (xfer->flags.short_frames_ok) { xfer->flags_int.short_xfer_ok = 1; xfer->flags_int.short_frames_ok = 1; } else if (xfer->flags.short_xfer_ok) { xfer->flags_int.short_xfer_ok = 1; /* check for control transfer */ if (xfer->flags_int.control_xfr) { /* * 1) Control transfers do not support * reception of multiple short USB * frames in host mode and device side * mode, with the exception of: * * 2) Due to sometimes buggy device * side firmware we need to do a * STATUS stage in case of short * control transfers in USB host mode. * The STATUS stage then becomes the * "alt_next" to the DATA stage. */ xfer->flags_int.short_frames_ok = 1; } } } /* * Check if BUS-DMA support is enabled and try to load virtual * buffers into DMA, if any: */ #if USB_HAVE_BUSDMA if (xfer->flags_int.bdma_enable) { /* insert the USB transfer last in the BUS-DMA queue */ usb_command_wrapper(&xfer->xroot->dma_q, xfer); return; } #endif /* * Enter the USB transfer into the Host Controller or * Device Controller schedule: */ usbd_pipe_enter(xfer); } /*------------------------------------------------------------------------* * usbd_pipe_enter - factored out code *------------------------------------------------------------------------*/ void usbd_pipe_enter(struct usb_xfer *xfer) { struct usb_endpoint *ep; USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); USB_BUS_LOCK(xfer->xroot->bus); ep = xfer->endpoint; DPRINTF("enter\n"); /* the transfer can now be cancelled */ xfer->flags_int.can_cancel_immed = 1; /* enter the transfer */ (ep->methods->enter) (xfer); /* check for transfer error */ if (xfer->error) { /* some error has happened */ usbd_transfer_done(xfer, 0); USB_BUS_UNLOCK(xfer->xroot->bus); return; } /* start the transfer */ usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer); USB_BUS_UNLOCK(xfer->xroot->bus); } /*------------------------------------------------------------------------* * usbd_transfer_start - start an USB transfer * * NOTE: Calling this function more than one time will only * result in a single transfer start, until the USB transfer * completes.
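* * Typical usage, with the driver's private mutex held (see also the HINT in usbd_transfer_unsetup()): usbd_transfer_start(sc->pxfer[0]);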
*------------------------------------------------------------------------*/ void usbd_transfer_start(struct usb_xfer *xfer) { if (xfer == NULL) { /* transfer is gone */ return; } USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); /* mark the USB transfer started */ if (!xfer->flags_int.started) { /* lock the BUS lock to avoid races updating flags_int */ USB_BUS_LOCK(xfer->xroot->bus); xfer->flags_int.started = 1; USB_BUS_UNLOCK(xfer->xroot->bus); } /* check if the USB transfer callback is already transferring */ if (xfer->flags_int.transferring) { return; } USB_BUS_LOCK(xfer->xroot->bus); /* call the USB transfer callback */ usbd_callback_ss_done_defer(xfer); USB_BUS_UNLOCK(xfer->xroot->bus); } /*------------------------------------------------------------------------* * usbd_transfer_stop - stop an USB transfer * * NOTE: Calling this function more than one time will only * result in a single transfer stop. * NOTE: When this function returns it is not safe to free nor * reuse any DMA buffers. See "usbd_transfer_drain()". *------------------------------------------------------------------------*/ void usbd_transfer_stop(struct usb_xfer *xfer) { struct usb_endpoint *ep; if (xfer == NULL) { /* transfer is gone */ return; } USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); /* check if the USB transfer was ever opened */ if (!xfer->flags_int.open) { if (xfer->flags_int.started) { /* nothing to do except clearing the "started" flag */ /* lock the BUS lock to avoid races updating flags_int */ USB_BUS_LOCK(xfer->xroot->bus); xfer->flags_int.started = 0; USB_BUS_UNLOCK(xfer->xroot->bus); } return; } /* try to stop the current USB transfer */ USB_BUS_LOCK(xfer->xroot->bus); /* override any previous error */ xfer->error = USB_ERR_CANCELLED; /* * Clear "open" and "started" while both the private and USB locks * are held so that we don't get a race updating "flags_int" */ xfer->flags_int.open = 0; xfer->flags_int.started = 0; /* * Check if we can cancel the USB transfer immediately. */ if (xfer->flags_int.transferring) { if (xfer->flags_int.can_cancel_immed && (!xfer->flags_int.did_close)) { DPRINTF("close\n"); /* * The following will lead to an USB_ERR_CANCELLED * error code being passed to the USB callback. */ (xfer->endpoint->methods->close) (xfer); /* only close once */ xfer->flags_int.did_close = 1; } else { /* need to wait for the next done callback */ } } else { DPRINTF("close\n"); /* close here and now */ (xfer->endpoint->methods->close) (xfer); /* * Any additional DMA delay is done by * "usbd_transfer_unsetup()". */ /* * Special case. Check if we need to restart a blocked * endpoint. */ ep = xfer->endpoint; /* * If the current USB transfer is completing we need * to start the next one: */ if (ep->endpoint_q[xfer->stream_id].curr == xfer) { usb_command_wrapper( &ep->endpoint_q[xfer->stream_id], NULL); } } USB_BUS_UNLOCK(xfer->xroot->bus); } /*------------------------------------------------------------------------* * usbd_transfer_pending * * This function will check if an USB transfer is pending, which is a * little bit complicated! * Return values: * 0: Not pending * 1: Pending: The USB transfer will receive a callback in the future.
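* * A transfer is reported as pending when it is currently transferring, waiting on some queue, or scheduled for its done callback, as checked below.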
*------------------------------------------------------------------------*/ uint8_t usbd_transfer_pending(struct usb_xfer *xfer) { struct usb_xfer_root *info; struct usb_xfer_queue *pq; if (xfer == NULL) { /* transfer is gone */ return (0); } USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); if (xfer->flags_int.transferring) { /* trivial case */ return (1); } USB_BUS_LOCK(xfer->xroot->bus); if (xfer->wait_queue) { /* we are waiting on a queue somewhere */ USB_BUS_UNLOCK(xfer->xroot->bus); return (1); } info = xfer->xroot; pq = &info->done_q; if (pq->curr == xfer) { /* we are currently scheduled for callback */ USB_BUS_UNLOCK(xfer->xroot->bus); return (1); } /* we are not pending */ USB_BUS_UNLOCK(xfer->xroot->bus); return (0); } /*------------------------------------------------------------------------* * usbd_transfer_drain * * This function will stop the USB transfer and wait for any * additional BUS-DMA and HW-DMA operations to complete. Buffers that * are loaded into DMA can safely be freed or reused after this * function has returned. *------------------------------------------------------------------------*/ void usbd_transfer_drain(struct usb_xfer *xfer) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "usbd_transfer_drain can sleep!"); if (xfer == NULL) { /* transfer is gone */ return; } if (xfer->xroot->xfer_mtx != &Giant) { USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED); } USB_XFER_LOCK(xfer); usbd_transfer_stop(xfer); while (usbd_transfer_pending(xfer) || xfer->flags_int.doing_callback) { /* * The callback is allowed to drop its * transfer mutex. In that case checking only * "usbd_transfer_pending()" is not enough to tell if * the USB transfer is fully drained. We also need to * check the internal "doing_callback" flag. */ xfer->flags_int.draining = 1; /* * Wait until the current outstanding USB * transfer is complete ! */ cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx); } USB_XFER_UNLOCK(xfer); } struct usb_page_cache * usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex) { KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); return (&xfer->frbuffers[frindex]); } void * usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex) { struct usb_page_search page_info; KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info); return (page_info.buffer); } /*------------------------------------------------------------------------* * usbd_xfer_get_fps_shift * * The following function is only useful for isochronous transfers. It * returns how many times the frame execution rate has been shifted * down. * * Return value: * Success: 0..3 * Failure: 0 *------------------------------------------------------------------------*/ uint8_t usbd_xfer_get_fps_shift(struct usb_xfer *xfer) { return (xfer->fps_shift); } usb_frlength_t usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex) { KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); return (xfer->frlengths[frindex]); } /*------------------------------------------------------------------------* * usbd_xfer_set_frame_data * * This function sets the pointer of the buffer that should be * loaded directly into DMA for the given USB frame. Passing "ptr" * equal to NULL while the corresponding "frlength" is greater * than zero gives undefined results!
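* * Example (hypothetical driver fields "sc->rx_buf" and "sc->rx_len"): usbd_xfer_set_frame_data(xfer, 0, sc->rx_buf, sc->rx_len); typically followed by usbd_transfer_submit(xfer) from within the transfer callback.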
*------------------------------------------------------------------------*/ void usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex, void *ptr, usb_frlength_t len) { KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); /* set virtual address to load and length */ xfer->frbuffers[frindex].buffer = ptr; usbd_xfer_set_frame_len(xfer, frindex, len); } void usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex, void **ptr, int *len) { KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); if (ptr != NULL) *ptr = xfer->frbuffers[frindex].buffer; if (len != NULL) *len = xfer->frlengths[frindex]; } /*------------------------------------------------------------------------* * usbd_xfer_old_frame_length * * This function returns the framelength of the given frame at the * time the transfer was submitted. This function can be used to * compute the starting data pointer of the next isochronous frame * when an isochronous transfer has completed. *------------------------------------------------------------------------*/ usb_frlength_t usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex) { KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); return (xfer->frlengths[frindex + xfer->max_frame_count]); } void usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes, int *nframes) { if (actlen != NULL) *actlen = xfer->actlen; if (sumlen != NULL) *sumlen = xfer->sumlen; if (aframes != NULL) *aframes = xfer->aframes; if (nframes != NULL) *nframes = xfer->nframes; } /*------------------------------------------------------------------------* * usbd_xfer_set_frame_offset * * This function sets the frame data buffer offset relative to the beginning * of the USB DMA buffer allocated for this USB transfer. *------------------------------------------------------------------------*/ void usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset, usb_frcount_t frindex) { KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame " "when the USB buffer is external\n")); KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); /* set virtual address to load */ xfer->frbuffers[frindex].buffer = USB_ADD_BYTES(xfer->local_buffer, offset); } void usbd_xfer_set_interval(struct usb_xfer *xfer, int i) { xfer->interval = i; } void usbd_xfer_set_timeout(struct usb_xfer *xfer, int t) { xfer->timeout = t; } void usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n) { xfer->nframes = n; } usb_frcount_t usbd_xfer_max_frames(struct usb_xfer *xfer) { return (xfer->max_frame_count); } usb_frlength_t usbd_xfer_max_len(struct usb_xfer *xfer) { return (xfer->max_data_length); } usb_frlength_t usbd_xfer_max_framelen(struct usb_xfer *xfer) { return (xfer->max_frame_size); } void usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex, usb_frlength_t len) { KASSERT(frindex < xfer->max_frame_count, ("frame index overflow")); xfer->frlengths[frindex] = len; } /*------------------------------------------------------------------------* * usb_callback_proc - factored out code * * This function performs USB callbacks. 
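* It runs from one of the bus completion threads; note how the locking order is restored below by dropping the bus lock, taking "xfer_mtx" and then re-taking the bus lock.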
*------------------------------------------------------------------------*/ static void usb_callback_proc(struct usb_proc_msg *_pm) { struct usb_done_msg *pm = (void *)_pm; struct usb_xfer_root *info = pm->xroot; /* Change locking order */ USB_BUS_UNLOCK(info->bus); /* * We exploit the fact that the mutex is the same for all * callbacks that will be called from this thread: */ mtx_lock(info->xfer_mtx); USB_BUS_LOCK(info->bus); /* Continue where we lost track */ usb_command_wrapper(&info->done_q, info->done_q.curr); mtx_unlock(info->xfer_mtx); } /*------------------------------------------------------------------------* * usbd_callback_ss_done_defer * * This function will defer the start, stop and done callback to the * correct thread. *------------------------------------------------------------------------*/ static void usbd_callback_ss_done_defer(struct usb_xfer *xfer) { struct usb_xfer_root *info = xfer->xroot; struct usb_xfer_queue *pq = &info->done_q; USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); if (pq->curr != xfer) { usbd_transfer_enqueue(pq, xfer); } if (!pq->recurse_1) { /* * We have to postpone the callback due to the fact we * will have a Lock Order Reversal, LOR, if we try to * proceed ! */ (void) usb_proc_msignal(info->done_p, &info->done_m[0], &info->done_m[1]); } else { /* clear second recurse flag */ pq->recurse_2 = 0; } return; } /*------------------------------------------------------------------------* * usbd_callback_wrapper * * This is a wrapper for USB callbacks. This wrapper does some * auto-magic things like figuring out if we can call the callback * directly from the current context or if we need to wakeup the * interrupt process. *------------------------------------------------------------------------*/ static void usbd_callback_wrapper(struct usb_xfer_queue *pq) { struct usb_xfer *xfer = pq->curr; struct usb_xfer_root *info = xfer->xroot; USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED); if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) && SCHEDULER_STOPPED() == 0) { /* * Cases that end up here: * * 5) HW interrupt done callback or other source. * 6) HW completed transfer during callback */ DPRINTFN(3, "case 5 and 6\n"); /* * We have to postpone the callback due to the fact we * will have a Lock Order Reversal, LOR, if we try to * proceed! * * Postponing the callback also ensures that other USB * transfer queues get a chance. 
		 */
		(void) usb_proc_msignal(info->done_p,
		    &info->done_m[0], &info->done_m[1]);
		return;
	}
	/*
	 * Cases that end up here:
	 *
	 * 1) We are starting a transfer
	 * 2) We are prematurely calling back a transfer
	 * 3) We are stopping a transfer
	 * 4) We are doing an ordinary callback
	 */
	DPRINTFN(3, "case 1-4\n");
	/* get next USB transfer in the queue */
	info->done_q.curr = NULL;

	/* set flag in case of drain */
	xfer->flags_int.doing_callback = 1;

	USB_BUS_UNLOCK(info->bus);
	USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);

	/* set correct USB state for callback */
	if (!xfer->flags_int.transferring) {
		xfer->usb_state = USB_ST_SETUP;
		if (!xfer->flags_int.started) {
			/* we got stopped before we even got started */
			USB_BUS_LOCK(info->bus);
			goto done;
		}
	} else {
		if (usbd_callback_wrapper_sub(xfer)) {
			/* the callback has been deferred */
			USB_BUS_LOCK(info->bus);
			goto done;
		}
#if USB_HAVE_POWERD
		/* decrement power reference */
		usbd_transfer_power_ref(xfer, -1);
#endif
		xfer->flags_int.transferring = 0;

		if (xfer->error) {
			xfer->usb_state = USB_ST_ERROR;
		} else {
			/* set transferred state */
			xfer->usb_state = USB_ST_TRANSFERRED;
#if USB_HAVE_BUSDMA
			/* sync DMA memory, if any */
			if (xfer->flags_int.bdma_enable &&
			    (!xfer->flags_int.bdma_no_post_sync)) {
				usb_bdma_post_sync(xfer);
			}
#endif
		}
	}

#if USB_HAVE_PF
	if (xfer->usb_state != USB_ST_SETUP) {
		USB_BUS_LOCK(info->bus);
		usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
		USB_BUS_UNLOCK(info->bus);
	}
#endif
	/* call processing routine */
	(xfer->callback) (xfer, xfer->error);

	/* pickup the USB mutex again */
	USB_BUS_LOCK(info->bus);

	/*
	 * Check if we got started after we got cancelled, but
	 * before we managed to do the callback.
	 */
	if ((!xfer->flags_int.open) &&
	    (xfer->flags_int.started) &&
	    (xfer->usb_state == USB_ST_ERROR)) {
		/* clear flag in case of drain */
		xfer->flags_int.doing_callback = 0;
		/* try to loop, but not recursively */
		usb_command_wrapper(&info->done_q, xfer);
		return;
	}

done:
	/* clear flag in case of drain */
	xfer->flags_int.doing_callback = 0;

	/*
	 * Check if we are draining.
	 */
	if (xfer->flags_int.draining &&
	    (!xfer->flags_int.transferring)) {
		/* "usbd_transfer_drain()" is waiting for end of transfer */
		xfer->flags_int.draining = 0;
		cv_broadcast(&info->cv_drain);
	}

	/* do the next callback, if any */
	usb_command_wrapper(&info->done_q,
	    info->done_q.curr);
}

/*------------------------------------------------------------------------*
 *	usb_dma_delay_done_cb
 *
 * This function is called when the DMA delay has been executed, and
 * will make sure that the callback is called to complete the USB
 * transfer. This code path is usually only used when there is a USB
 * error like USB_ERR_CANCELLED.
 *------------------------------------------------------------------------*/
void
usb_dma_delay_done_cb(struct usb_xfer *xfer)
{
	USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);

	DPRINTFN(3, "Completed %p\n", xfer);

	/* queue callback for execution, again */
	usbd_transfer_done(xfer, 0);
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_dequeue
 *
 *  - This function is used to remove a USB transfer from a USB
 *  transfer queue.
 *
 *  - This function can be called multiple times in a row.
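 *
 * A minimal sketch of why repeated calls are safe: the function
 * clears "xfer->wait_queue", so a second call in a row finds no
 * queue and does nothing (hypothetical caller):
 *
 *	usbd_transfer_dequeue(xfer);	// removed from its wait queue
 *	usbd_transfer_dequeue(xfer);	// harmless no-op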
 *------------------------------------------------------------------------*/
void
usbd_transfer_dequeue(struct usb_xfer *xfer)
{
	struct usb_xfer_queue *pq;

	pq = xfer->wait_queue;
	if (pq) {
		TAILQ_REMOVE(&pq->head, xfer, wait_entry);
		xfer->wait_queue = NULL;
	}
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_enqueue
 *
 *  - This function is used to insert a USB transfer into a USB
 *  transfer queue.
 *
 *  - This function can be called multiple times in a row.
 *------------------------------------------------------------------------*/
void
usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
{
	/*
	 * Insert the USB transfer into the queue, if it is not
	 * already on a USB transfer queue:
	 */
	if (xfer->wait_queue == NULL) {
		xfer->wait_queue = pq;
		TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_done
 *
 *  - This function is used to remove a USB transfer from the busdma,
 *  pipe or interrupt queue.
 *
 *  - This function is used to queue the USB transfer on the done
 *  queue.
 *
 *  - This function is used to stop any USB transfer timeouts.
 *------------------------------------------------------------------------*/
void
usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
{
	struct usb_xfer_root *info = xfer->xroot;

	USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);

	DPRINTF("err=%s\n", usbd_errstr(error));

	/*
	 * If we are not transferring then just return.
	 * This can happen during transfer cancel.
	 */
	if (!xfer->flags_int.transferring) {
		DPRINTF("not transferring\n");
		/* end of control transfer, if any */
		xfer->flags_int.control_act = 0;
		return;
	}
	/* only set transfer error, if not already set */
	if (xfer->error == USB_ERR_NORMAL_COMPLETION)
		xfer->error = error;

	/* stop any callouts */
	usb_callout_stop(&xfer->timeout_handle);

	/*
	 * If we are waiting on a queue, just remove the USB transfer
	 * from the queue, if any. We should have the required locks
	 * locked to do the remove when this function is called.
	 */
	usbd_transfer_dequeue(xfer);

#if USB_HAVE_BUSDMA
	if (mtx_owned(info->xfer_mtx)) {
		struct usb_xfer_queue *pq;

		/*
		 * If the private USB lock is not locked, then we assume
		 * that the BUS-DMA load stage has been passed:
		 */
		pq = &info->dma_q;

		if (pq->curr == xfer) {
			/* start the next BUS-DMA load, if any */
			usb_command_wrapper(pq, NULL);
		}
	}
#endif
	/* keep some statistics */
	if (xfer->error) {
		info->bus->stats_err.uds_requests
		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
	} else {
		info->bus->stats_ok.uds_requests
		    [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
	}

	/* call the USB transfer callback */
	usbd_callback_ss_done_defer(xfer);
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_start_cb
 *
 * This function is called to start the USB transfer when
 * "xfer->interval" is greater than zero, and the endpoint type is
 * BULK or CONTROL.
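 *
 * A hedged configuration sketch; a non-zero "interval" on a BULK or
 * CONTROL endpoint delays every transfer start by that many
 * milliseconds before this callback fires (all field values and the
 * callback name below are examples, not fixed requirements):
 *
 *	static const struct usb_config my_bulk_cfg = {
 *		.type = UE_BULK,
 *		.endpoint = UE_ADDR_ANY,
 *		.direction = UE_DIR_OUT,
 *		.interval = 10,		// 10 ms pre-start delay
 *		.bufsize = 512,
 *		.callback = &my_bulk_callback,	// hypothetical
 *	};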
*------------------------------------------------------------------------*/ static void usbd_transfer_start_cb(void *arg) { struct usb_xfer *xfer = arg; struct usb_endpoint *ep = xfer->endpoint; USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); DPRINTF("start\n"); #if USB_HAVE_PF usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT); #endif /* the transfer can now be cancelled */ xfer->flags_int.can_cancel_immed = 1; /* start USB transfer, if no error */ if (xfer->error == 0) (ep->methods->start) (xfer); /* check for transfer error */ if (xfer->error) { /* some error has happened */ usbd_transfer_done(xfer, 0); } } /*------------------------------------------------------------------------* * usbd_xfer_set_stall * * This function is used to set the stall flag outside the * callback. This function is NULL safe. *------------------------------------------------------------------------*/ void usbd_xfer_set_stall(struct usb_xfer *xfer) { if (xfer == NULL) { /* tearing down */ return; } USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); /* avoid any races by locking the USB mutex */ USB_BUS_LOCK(xfer->xroot->bus); xfer->flags.stall_pipe = 1; USB_BUS_UNLOCK(xfer->xroot->bus); } int usbd_xfer_is_stalled(struct usb_xfer *xfer) { return (xfer->endpoint->is_stalled); } /*------------------------------------------------------------------------* * usbd_transfer_clear_stall * * This function is used to clear the stall flag outside the * callback. This function is NULL safe. *------------------------------------------------------------------------*/ void usbd_transfer_clear_stall(struct usb_xfer *xfer) { if (xfer == NULL) { /* tearing down */ return; } USB_XFER_LOCK_ASSERT(xfer, MA_OWNED); /* avoid any races by locking the USB mutex */ USB_BUS_LOCK(xfer->xroot->bus); xfer->flags.stall_pipe = 0; USB_BUS_UNLOCK(xfer->xroot->bus); } /*------------------------------------------------------------------------* * usbd_pipe_start * * This function is used to add an USB transfer to the pipe transfer list. *------------------------------------------------------------------------*/ void usbd_pipe_start(struct usb_xfer_queue *pq) { struct usb_endpoint *ep; struct usb_xfer *xfer; uint8_t type; xfer = pq->curr; ep = xfer->endpoint; USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); /* * If the endpoint is already stalled we do nothing ! */ if (ep->is_stalled) { return; } /* * Check if we are supposed to stall the endpoint: */ if (xfer->flags.stall_pipe) { struct usb_device *udev; struct usb_xfer_root *info; /* clear stall command */ xfer->flags.stall_pipe = 0; /* get pointer to USB device */ info = xfer->xroot; udev = info->udev; /* * Only stall BULK and INTERRUPT endpoints. */ type = (ep->edesc->bmAttributes & UE_XFERTYPE); if ((type == UE_BULK) || (type == UE_INTERRUPT)) { uint8_t did_stall; did_stall = 1; if (udev->flags.usb_mode == USB_MODE_DEVICE) { (udev->bus->methods->set_stall) ( udev, ep, &did_stall); } else if (udev->ctrl_xfer[1]) { info = udev->ctrl_xfer[1]->xroot; usb_proc_msignal( USB_BUS_CS_PROC(info->bus), &udev->cs_msg[0], &udev->cs_msg[1]); } else { /* should not happen */ DPRINTFN(0, "No stall handler\n"); } /* * Check if we should stall. Some USB hardware * handles set- and clear-stall in hardware. */ if (did_stall) { /* * The transfer will be continued when * the clear-stall control endpoint * message is received. */ ep->is_stalled = 1; return; } } else if (type == UE_ISOCHRONOUS) { /* * Make sure any FIFO overflow or other FIFO * error conditions go away by resetting the * endpoint FIFO through the clear stall * method. 
*/ if (udev->flags.usb_mode == USB_MODE_DEVICE) { (udev->bus->methods->clear_stall) (udev, ep); } } } /* Set or clear stall complete - special case */ if (xfer->nframes == 0) { /* we are complete */ xfer->aframes = 0; usbd_transfer_done(xfer, 0); return; } /* * Handled cases: * * 1) Start the first transfer queued. * * 2) Re-start the current USB transfer. */ /* * Check if there should be any * pre transfer start delay: */ if (xfer->interval > 0) { type = (ep->edesc->bmAttributes & UE_XFERTYPE); if ((type == UE_BULK) || (type == UE_CONTROL)) { usbd_transfer_timeout_ms(xfer, &usbd_transfer_start_cb, xfer->interval); return; } } DPRINTF("start\n"); #if USB_HAVE_PF usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT); #endif /* the transfer can now be cancelled */ xfer->flags_int.can_cancel_immed = 1; /* start USB transfer, if no error */ if (xfer->error == 0) (ep->methods->start) (xfer); /* check for transfer error */ if (xfer->error) { /* some error has happened */ usbd_transfer_done(xfer, 0); } } /*------------------------------------------------------------------------* * usbd_transfer_timeout_ms * * This function is used to setup a timeout on the given USB * transfer. If the timeout has been deferred the callback given by * "cb" will get called after "ms" milliseconds. *------------------------------------------------------------------------*/ void usbd_transfer_timeout_ms(struct usb_xfer *xfer, void (*cb) (void *arg), usb_timeout_t ms) { USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED); /* defer delay */ usb_callout_reset(&xfer->timeout_handle, USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer); } /*------------------------------------------------------------------------* * usbd_callback_wrapper_sub * * - This function will update variables in an USB transfer after * that the USB transfer is complete. * * - This function is used to start the next USB transfer on the * ep transfer queue, if any. * * NOTE: In some special cases the USB transfer will not be removed from * the pipe queue, but remain first. To enforce USB transfer removal call * this function passing the error code "USB_ERR_CANCELLED". * * Return values: * 0: Success. * Else: The callback has been deferred. *------------------------------------------------------------------------*/ static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *xfer) { struct usb_endpoint *ep; struct usb_bus *bus; usb_frcount_t x; bus = xfer->xroot->bus; if ((!xfer->flags_int.open) && (!xfer->flags_int.did_close)) { DPRINTF("close\n"); USB_BUS_LOCK(bus); (xfer->endpoint->methods->close) (xfer); USB_BUS_UNLOCK(bus); /* only close once */ xfer->flags_int.did_close = 1; return (1); /* wait for new callback */ } /* * If we have a non-hardware induced error we * need to do the DMA delay! */ if (xfer->error != 0 && !xfer->flags_int.did_dma_delay && (xfer->error == USB_ERR_CANCELLED || xfer->error == USB_ERR_TIMEOUT || bus->methods->start_dma_delay != NULL)) { usb_timeout_t temp; /* only delay once */ xfer->flags_int.did_dma_delay = 1; /* we can not cancel this delay */ xfer->flags_int.can_cancel_immed = 0; temp = usbd_get_dma_delay(xfer->xroot->udev); DPRINTFN(3, "DMA delay, %u ms, " "on %p\n", temp, xfer); if (temp != 0) { USB_BUS_LOCK(bus); /* * Some hardware solutions have dedicated * events when it is safe to free DMA'ed * memory. For the other hardware platforms we * use a static delay. 
			 */
			if (bus->methods->start_dma_delay != NULL) {
				(bus->methods->start_dma_delay) (xfer);
			} else {
				usbd_transfer_timeout_ms(xfer,
				    (void (*)(void *))&usb_dma_delay_done_cb,
				    temp);
			}
			USB_BUS_UNLOCK(bus);
			return (1);	/* wait for new callback */
		}
	}
	/* check actual number of frames */
	if (xfer->aframes > xfer->nframes) {
		if (xfer->error == 0) {
			panic("%s: actual number of frames, %d, is "
			    "greater than initial number of frames, %d\n",
			    __FUNCTION__, xfer->aframes, xfer->nframes);
		} else {
			/* just set some valid value */
			xfer->aframes = xfer->nframes;
		}
	}
	/* compute actual length */
	xfer->actlen = 0;

	for (x = 0; x != xfer->aframes; x++) {
		xfer->actlen += xfer->frlengths[x];
	}

	/*
	 * Frames that were not transferred get zero actual length in
	 * case the USB device driver does not check the actual number
	 * of frames transferred, "xfer->aframes":
	 */
	for (; x < xfer->nframes; x++) {
		usbd_xfer_set_frame_len(xfer, x, 0);
	}

	/* check actual length */
	if (xfer->actlen > xfer->sumlen) {
		if (xfer->error == 0) {
			panic("%s: actual length, %d, is greater than "
			    "initial length, %d\n",
			    __FUNCTION__, xfer->actlen, xfer->sumlen);
		} else {
			/* just set some valid value */
			xfer->actlen = xfer->sumlen;
		}
	}
	DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
	    xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
	    xfer->aframes, xfer->nframes);

	if (xfer->error) {
		/* end of control transfer, if any */
		xfer->flags_int.control_act = 0;

#if USB_HAVE_TT_SUPPORT
		switch (xfer->error) {
		case USB_ERR_NORMAL_COMPLETION:
		case USB_ERR_SHORT_XFER:
		case USB_ERR_STALLED:
		case USB_ERR_CANCELLED:
			/* nothing to do */
			break;
		default:
			/* try to reset the TT, if any */
			USB_BUS_LOCK(bus);
			uhub_tt_buffer_reset_async_locked(xfer->xroot->udev,
			    xfer->endpoint);
			USB_BUS_UNLOCK(bus);
			break;
		}
#endif
		/* check if we should block the execution queue */
		if ((xfer->error != USB_ERR_CANCELLED) &&
		    (xfer->flags.pipe_bof)) {
			DPRINTFN(2, "xfer=%p: Block On Failure "
			    "on endpoint=%p\n", xfer, xfer->endpoint);
			goto done;
		}
	} else {
		/* check for short transfers */
		if (xfer->actlen < xfer->sumlen) {

			/* end of control transfer, if any */
			xfer->flags_int.control_act = 0;

			if (!xfer->flags_int.short_xfer_ok) {
				xfer->error = USB_ERR_SHORT_XFER;
				if (xfer->flags.pipe_bof) {
					DPRINTFN(2, "xfer=%p: Block On Failure on "
					    "Short Transfer on endpoint %p.\n",
					    xfer, xfer->endpoint);
					goto done;
				}
			}
		} else {
			/*
			 * Check if we are in the middle of a
			 * control transfer:
			 */
			if (xfer->flags_int.control_act) {
				DPRINTFN(5, "xfer=%p: Control transfer "
				    "active on endpoint=%p\n",
				    xfer, xfer->endpoint);
				goto done;
			}
		}
	}

	ep = xfer->endpoint;

	/*
	 * If the current USB transfer is completing we need to start the
	 * next one:
	 */
	USB_BUS_LOCK(bus);
	if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
		usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);

		if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
		    TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
			/* there is another USB transfer waiting */
		} else {
			/* this is the last USB transfer */
			/* clear isochronous sync flag */
			xfer->endpoint->is_synced = 0;
		}
	}
	USB_BUS_UNLOCK(bus);
done:
	return (0);
}

/*------------------------------------------------------------------------*
 *	usb_command_wrapper
 *
 * This function is used to execute commands non-recursively on a USB
 * transfer.
 *------------------------------------------------------------------------*/
void
usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
{
	if (xfer) {
		/*
		 * If the transfer is not already processing,
		 * queue it!
		 */
		if (pq->curr != xfer) {
			usbd_transfer_enqueue(pq, xfer);
			if (pq->curr != NULL) {
				/* something is already processing */
				DPRINTFN(6, "busy %p\n", pq->curr);
				return;
			}
		}
	} else {
		/* Get next element in queue */
		pq->curr = NULL;
	}

	if (!pq->recurse_1) {

		/* clear third recurse flag */
		pq->recurse_3 = 0;

		do {
			/* set two first recurse flags */
			pq->recurse_1 = 1;
			pq->recurse_2 = 1;

			if (pq->curr == NULL) {
				xfer = TAILQ_FIRST(&pq->head);
				if (xfer) {
					TAILQ_REMOVE(&pq->head, xfer,
					    wait_entry);
					xfer->wait_queue = NULL;
					pq->curr = xfer;
				} else {
					break;
				}
			}
			DPRINTFN(6, "cb %p (enter)\n", pq->curr);
			(pq->command) (pq);
			DPRINTFN(6, "cb %p (leave)\n", pq->curr);

			/*
			 * Set third recurse flag to indicate
			 * recursion happened:
			 */
			pq->recurse_3 = 1;

		} while (!pq->recurse_2);

		/* clear first recurse flag */
		pq->recurse_1 = 0;

	} else {
		/* clear second recurse flag */
		pq->recurse_2 = 0;
	}
}

/*------------------------------------------------------------------------*
 *	usbd_ctrl_transfer_setup
 *
 * This function is used to set up the default USB control endpoint
 * transfer.
 *------------------------------------------------------------------------*/
void
usbd_ctrl_transfer_setup(struct usb_device *udev)
{
	struct usb_xfer *xfer;
	uint8_t no_resetup;
	uint8_t iface_index;

	/* check for root HUB */
	if (udev->parent_hub == NULL)
		return;
repeat:

	xfer = udev->ctrl_xfer[0];
	if (xfer) {
		USB_XFER_LOCK(xfer);
		no_resetup =
		    ((xfer->address == udev->address) &&
		    (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
		    udev->ddesc.bMaxPacketSize));
		if (udev->flags.usb_mode == USB_MODE_DEVICE) {
			if (no_resetup) {
				/*
				 * NOTE: checking "xfer->address" and
				 * starting the USB transfer must be
				 * atomic!
				 */
				usbd_transfer_start(xfer);
			}
		}
		USB_XFER_UNLOCK(xfer);
	} else {
		no_resetup = 0;
	}

	if (no_resetup) {
		/*
		 * All parameters are exactly the same as before.
		 * Just return.
		 */
		return;
	}
	/*
	 * Update wMaxPacketSize for the default control endpoint:
	 */
	udev->ctrl_ep_desc.wMaxPacketSize[0] =
	    udev->ddesc.bMaxPacketSize;

	/*
	 * Unsetup any existing USB transfer:
	 */
	usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);

	/*
	 * Reset clear stall error counter.
	 */
	udev->clear_stall_errors = 0;

	/*
	 * Try to setup a new USB transfer for the
	 * default control endpoint:
	 */
	iface_index = 0;
	if (usbd_transfer_setup(udev, &iface_index,
	    udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
	    &udev->device_mtx)) {
		DPRINTFN(0, "could not setup default "
		    "USB transfer\n");
	} else {
		goto repeat;
	}
}

/*------------------------------------------------------------------------*
 *	usbd_clear_stall_locked - factored out code
 *
 * NOTE: the intention of this function is not to reset the hardware
 * data toggle.
 *------------------------------------------------------------------------*/
void
usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
{
	USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);

	/* check that we have a valid case */
	if (udev->flags.usb_mode == USB_MODE_HOST &&
	    udev->parent_hub != NULL &&
	    udev->bus->methods->clear_stall != NULL &&
	    ep->methods != NULL) {
		(udev->bus->methods->clear_stall) (udev, ep);
	}
}

/*------------------------------------------------------------------------*
 *	usbd_clear_data_toggle - factored out code
 *
 * NOTE: the intention of this function is not to reset the hardware
 * data toggle on the USB device side.
 *------------------------------------------------------------------------*/
void
usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
{
	DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);

	USB_BUS_LOCK(udev->bus);
	ep->toggle_next = 0;
	/* some hardware needs a callback to clear the data toggle */
	usbd_clear_stall_locked(udev, ep);
	USB_BUS_UNLOCK(udev->bus);
}

/*------------------------------------------------------------------------*
 *	usbd_clear_stall_callback - factored out clear stall callback
 *
 * Input parameters:
 *  xfer1: Clear Stall Control Transfer
 *  xfer2: Stalled USB Transfer
 *
 * This function is NULL safe.
 *
 * Return values:
 *   0: In progress
 *   Else: Finished
 *
 * Clear stall config example:
 *
 * static const struct usb_config my_clearstall = {
 *	.type = UE_CONTROL,
 *	.endpoint = 0,
 *	.direction = UE_DIR_ANY,
 *	.interval = 50, //50 milliseconds
 *	.bufsize = sizeof(struct usb_device_request),
 *	.timeout = 1000, //1.000 seconds
 *	.callback = &my_clear_stall_callback, // **
 *	.usb_mode = USB_MODE_HOST,
 * };
 *
 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
 * passing the correct parameters.
 *------------------------------------------------------------------------*/
uint8_t
usbd_clear_stall_callback(struct usb_xfer *xfer1, struct usb_xfer *xfer2)
{
	struct usb_device_request req;

	if (xfer2 == NULL) {
		/* looks like we are tearing down */
		DPRINTF("NULL input parameter\n");
		return (0);
	}
	USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
	USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);

	switch (USB_GET_STATE(xfer1)) {
	case USB_ST_SETUP:

		/*
		 * pre-clear the data toggle to DATA0 ("umass.c" and
		 * "ata-usb.c" depend on this)
		 */
		usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);

		/* setup a clear-stall packet */
		req.bmRequestType = UT_WRITE_ENDPOINT;
		req.bRequest = UR_CLEAR_FEATURE;
		USETW(req.wValue, UF_ENDPOINT_HALT);
		req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
		req.wIndex[1] = 0;
		USETW(req.wLength, 0);

		/*
		 * "usbd_transfer_setup_sub()" will ensure that
		 * we have sufficient room in the buffer for
		 * the request structure!
		 */

		/* copy in the transfer */
		usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));

		/* set length */
		xfer1->frlengths[0] = sizeof(req);
		xfer1->nframes = 1;

		usbd_transfer_submit(xfer1);
		return (0);

	case USB_ST_TRANSFERRED:
		break;

	default:			/* Error */
		if (xfer1->error == USB_ERR_CANCELLED) {
			return (0);
		}
		break;
	}
	return (1);			/* Clear Stall Finished */
}

/*------------------------------------------------------------------------*
 *	usbd_transfer_poll
 *
 * The following function gets called from the USB keyboard driver and
 * UMASS when the system has panicked.
 *
 * NOTE: It is currently not possible to resume normal operation on
 * the USB controller which has been polled, due to clearing of the
 * "up_dsleep" and "up_msleep" flags.
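 *
 * A hypothetical caller sketch (the softc layout and transfer count
 * are assumptions, not part of this API); a driver polls its own
 * transfer array once the scheduler is stopped:
 *
 *	usbd_transfer_poll(sc->sc_xfer, MY_N_TRANSFER);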
*------------------------------------------------------------------------*/ void usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max) { struct usb_xfer *xfer; struct usb_xfer_root *xroot; struct usb_device *udev; struct usb_proc_msg *pm; uint16_t n; uint16_t drop_bus; uint16_t drop_xfer; for (n = 0; n != max; n++) { /* Extra checks to avoid panic */ xfer = ppxfer[n]; if (xfer == NULL) continue; /* no USB transfer */ xroot = xfer->xroot; if (xroot == NULL) continue; /* no USB root */ udev = xroot->udev; if (udev == NULL) continue; /* no USB device */ if (udev->bus == NULL) continue; /* no BUS structure */ if (udev->bus->methods == NULL) continue; /* no BUS methods */ if (udev->bus->methods->xfer_poll == NULL) continue; /* no poll method */ /* make sure that the BUS mutex is not locked */ drop_bus = 0; while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) { mtx_unlock(&xroot->udev->bus->bus_mtx); drop_bus++; } /* make sure that the transfer mutex is not locked */ drop_xfer = 0; while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) { mtx_unlock(xroot->xfer_mtx); drop_xfer++; } /* Make sure cv_signal() and cv_broadcast() is not called */ USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0; USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0; USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0; USB_BUS_NON_GIANT_ISOC_PROC(udev->bus)->up_msleep = 0; USB_BUS_NON_GIANT_BULK_PROC(udev->bus)->up_msleep = 0; /* poll USB hardware */ (udev->bus->methods->xfer_poll) (udev->bus); USB_BUS_LOCK(xroot->bus); /* check for clear stall */ if (udev->ctrl_xfer[1] != NULL) { /* poll clear stall start */ pm = &udev->cs_msg[0].hdr; (pm->pm_callback) (pm); /* poll clear stall done thread */ pm = &udev->ctrl_xfer[1]-> xroot->done_m[0].hdr; (pm->pm_callback) (pm); } /* poll done thread */ pm = &xroot->done_m[0].hdr; (pm->pm_callback) (pm); USB_BUS_UNLOCK(xroot->bus); /* restore transfer mutex */ while (drop_xfer--) mtx_lock(xroot->xfer_mtx); /* restore BUS mutex */ while (drop_bus--) mtx_lock(&xroot->udev->bus->bus_mtx); } } static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr, uint8_t type, enum usb_dev_speed speed) { static const uint16_t intr_range_max[USB_SPEED_MAX] = { [USB_SPEED_LOW] = 8, [USB_SPEED_FULL] = 64, [USB_SPEED_HIGH] = 1024, [USB_SPEED_VARIABLE] = 1024, [USB_SPEED_SUPER] = 1024, }; static const uint16_t isoc_range_max[USB_SPEED_MAX] = { [USB_SPEED_LOW] = 0, /* invalid */ [USB_SPEED_FULL] = 1023, [USB_SPEED_HIGH] = 1024, [USB_SPEED_VARIABLE] = 3584, [USB_SPEED_SUPER] = 1024, }; static const uint16_t control_min[USB_SPEED_MAX] = { [USB_SPEED_LOW] = 8, [USB_SPEED_FULL] = 8, [USB_SPEED_HIGH] = 64, [USB_SPEED_VARIABLE] = 512, [USB_SPEED_SUPER] = 512, }; static const uint16_t bulk_min[USB_SPEED_MAX] = { [USB_SPEED_LOW] = 8, [USB_SPEED_FULL] = 8, [USB_SPEED_HIGH] = 512, [USB_SPEED_VARIABLE] = 512, [USB_SPEED_SUPER] = 1024, }; uint16_t temp; memset(ptr, 0, sizeof(*ptr)); switch (type) { case UE_INTERRUPT: ptr->range.max = intr_range_max[speed]; break; case UE_ISOCHRONOUS: ptr->range.max = isoc_range_max[speed]; break; default: if (type == UE_BULK) temp = bulk_min[speed]; else /* UE_CONTROL */ temp = control_min[speed]; /* default is fixed */ ptr->fixed[0] = temp; ptr->fixed[1] = temp; ptr->fixed[2] = temp; ptr->fixed[3] = temp; if (speed == USB_SPEED_FULL) { /* multiple sizes */ ptr->fixed[1] = 16; ptr->fixed[2] = 32; ptr->fixed[3] = 64; } if ((speed == USB_SPEED_VARIABLE) && (type == UE_BULK)) { /* multiple sizes */ ptr->fixed[2] = 1024; ptr->fixed[3] = 1536; } break; } } void 
* usbd_xfer_softc(struct usb_xfer *xfer) { return (xfer->priv_sc); } void * usbd_xfer_get_priv(struct usb_xfer *xfer) { return (xfer->priv_fifo); } void usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr) { xfer->priv_fifo = ptr; } uint8_t usbd_xfer_state(struct usb_xfer *xfer) { return (xfer->usb_state); } void usbd_xfer_set_flag(struct usb_xfer *xfer, int flag) { switch (flag) { case USB_FORCE_SHORT_XFER: xfer->flags.force_short_xfer = 1; break; case USB_SHORT_XFER_OK: xfer->flags.short_xfer_ok = 1; break; case USB_MULTI_SHORT_OK: xfer->flags.short_frames_ok = 1; break; case USB_MANUAL_STATUS: xfer->flags.manual_status = 1; break; } } void usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag) { switch (flag) { case USB_FORCE_SHORT_XFER: xfer->flags.force_short_xfer = 0; break; case USB_SHORT_XFER_OK: xfer->flags.short_xfer_ok = 0; break; case USB_MULTI_SHORT_OK: xfer->flags.short_frames_ok = 0; break; case USB_MANUAL_STATUS: xfer->flags.manual_status = 0; break; } } /* * The following function returns in milliseconds when the isochronous * transfer was completed by the hardware. The returned value wraps * around 65536 milliseconds. */ uint16_t usbd_xfer_get_timestamp(struct usb_xfer *xfer) { return (xfer->isoc_time_complete); } /* * The following function returns non-zero if the max packet size * field was clamped to a valid value. Else it returns zero. */ uint8_t usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer) { return (xfer->flags_int.maxp_was_clamped); } diff --git a/sys/dev/usb/wlan/if_rum.c b/sys/dev/usb/wlan/if_rum.c index 08be7175ef47..323de8d60c43 100644 --- a/sys/dev/usb/wlan/if_rum.c +++ b/sys/dev/usb/wlan/if_rum.c @@ -1,3242 +1,3242 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005-2007 Damien Bergamini * Copyright (c) 2006 Niall O'Higgins * Copyright (c) 2007-2008 Hans Petter Selasky * Copyright (c) 2015 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2501USB/RT2601USB chipset driver * http://www.ralinktech.com.tw/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR rum_debug #include #include #include #include #ifdef USB_DEBUG static int rum_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, rum, CTLFLAG_RW, 0, "USB rum"); SYSCTL_INT(_hw_usb_rum, OID_AUTO, debug, CTLFLAG_RWTUN, &rum_debug, 0, "Debug level"); #endif static const STRUCT_USB_HOST_ID rum_devs[] = { #define RUM_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } RUM_DEV(ABOCOM, HWU54DM), RUM_DEV(ABOCOM, RT2573_2), RUM_DEV(ABOCOM, RT2573_3), RUM_DEV(ABOCOM, RT2573_4), RUM_DEV(ABOCOM, WUG2700), RUM_DEV(AMIT, CGWLUSB2GO), RUM_DEV(ASUS, RT2573_1), RUM_DEV(ASUS, RT2573_2), RUM_DEV(BELKIN, F5D7050A), RUM_DEV(BELKIN, F5D9050V3), RUM_DEV(CISCOLINKSYS, WUSB54GC), RUM_DEV(CISCOLINKSYS, WUSB54GR), RUM_DEV(CONCEPTRONIC2, C54RU2), RUM_DEV(COREGA, CGWLUSB2GL), RUM_DEV(COREGA, CGWLUSB2GPX), RUM_DEV(DICKSMITH, CWD854F), RUM_DEV(DICKSMITH, RT2573), RUM_DEV(EDIMAX, EW7318USG), RUM_DEV(DLINK2, DWLG122C1), RUM_DEV(DLINK2, WUA1340), RUM_DEV(DLINK2, DWA111), RUM_DEV(DLINK2, DWA110), RUM_DEV(GIGABYTE, GNWB01GS), RUM_DEV(GIGABYTE, GNWI05GS), RUM_DEV(GIGASET, RT2573), RUM_DEV(GOODWAY, RT2573), RUM_DEV(GUILLEMOT, HWGUSB254LB), RUM_DEV(GUILLEMOT, HWGUSB254V2AP), RUM_DEV(HUAWEI3COM, WUB320G), RUM_DEV(MELCO, G54HP), RUM_DEV(MELCO, SG54HP), RUM_DEV(MELCO, SG54HG), RUM_DEV(MELCO, WLIUCG), RUM_DEV(MELCO, WLRUCG), RUM_DEV(MELCO, WLRUCGAOSS), RUM_DEV(MSI, RT2573_1), RUM_DEV(MSI, RT2573_2), RUM_DEV(MSI, RT2573_3), RUM_DEV(MSI, RT2573_4), RUM_DEV(NOVATECH, RT2573), RUM_DEV(PLANEX2, GWUS54HP), RUM_DEV(PLANEX2, GWUS54MINI2), RUM_DEV(PLANEX2, GWUSMM), RUM_DEV(QCOM, RT2573), RUM_DEV(QCOM, RT2573_2), RUM_DEV(QCOM, RT2573_3), RUM_DEV(RALINK, RT2573), RUM_DEV(RALINK, RT2573_2), RUM_DEV(RALINK, RT2671), RUM_DEV(SITECOMEU, WL113R2), RUM_DEV(SITECOMEU, WL172), RUM_DEV(SPARKLAN, RT2573), RUM_DEV(SURECOM, RT2573), #undef RUM_DEV }; static device_probe_t rum_match; static device_attach_t rum_attach; static device_detach_t rum_detach; static usb_callback_t rum_bulk_read_callback; static usb_callback_t rum_bulk_write_callback; static usb_error_t rum_do_request(struct rum_softc *sc, struct usb_device_request *req, void *data); static usb_error_t rum_do_mcu_request(struct rum_softc *sc, int); static struct ieee80211vap *rum_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rum_vap_delete(struct ieee80211vap *); static void rum_cmdq_cb(void *, int); static int rum_cmd_sleepable(struct rum_softc *, const void *, size_t, uint8_t, CMD_FUNC_PROTO); static void rum_tx_free(struct rum_tx_data *, int); static void rum_setup_tx_list(struct rum_softc *); static void rum_unsetup_tx_list(struct rum_softc *); static void rum_beacon_miss(struct ieee80211vap *); static void rum_sta_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, const struct ieee80211_rx_stats *, int, int); static int rum_set_power_state(struct rum_softc *, int); static int rum_newstate(struct ieee80211vap *, enum ieee80211_state, int); static 
uint8_t rum_crypto_mode(struct rum_softc *, u_int, int); static void rum_setup_tx_desc(struct rum_softc *, struct rum_tx_desc *, struct ieee80211_key *, uint32_t, uint8_t, uint8_t, int, int, int); static uint32_t rum_tx_crypto_flags(struct rum_softc *, struct ieee80211_node *, const struct ieee80211_key *); static int rum_tx_mgt(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static int rum_tx_raw(struct rum_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *); static int rum_tx_data(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static int rum_transmit(struct ieee80211com *, struct mbuf *); static void rum_start(struct rum_softc *); static void rum_parent(struct ieee80211com *); static void rum_eeprom_read(struct rum_softc *, uint16_t, void *, int); static uint32_t rum_read(struct rum_softc *, uint16_t); static void rum_read_multi(struct rum_softc *, uint16_t, void *, int); static usb_error_t rum_write(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_write_multi(struct rum_softc *, uint16_t, void *, size_t); static usb_error_t rum_setbits(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_clrbits(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_modbits(struct rum_softc *, uint16_t, uint32_t, uint32_t); static int rum_bbp_busy(struct rum_softc *); static void rum_bbp_write(struct rum_softc *, uint8_t, uint8_t); static uint8_t rum_bbp_read(struct rum_softc *, uint8_t); static void rum_rf_write(struct rum_softc *, uint8_t, uint32_t); static void rum_select_antenna(struct rum_softc *); static void rum_enable_mrr(struct rum_softc *); static void rum_set_txpreamble(struct rum_softc *); static void rum_set_basicrates(struct rum_softc *); static void rum_select_band(struct rum_softc *, struct ieee80211_channel *); static void rum_set_chan(struct rum_softc *, struct ieee80211_channel *); static void rum_set_maxretry(struct rum_softc *, struct ieee80211vap *); static int rum_enable_tsf_sync(struct rum_softc *); static void rum_enable_tsf(struct rum_softc *); static void rum_abort_tsf_sync(struct rum_softc *); static void rum_get_tsf(struct rum_softc *, uint64_t *); static void rum_update_slot_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_update_slot(struct ieee80211com *); static int rum_wme_update(struct ieee80211com *); static void rum_set_bssid(struct rum_softc *, const uint8_t *); static void rum_set_macaddr(struct rum_softc *, const uint8_t *); static void rum_update_mcast(struct ieee80211com *); static void rum_update_promisc(struct ieee80211com *); static void rum_setpromisc(struct rum_softc *); static const char *rum_get_rf(int); static void rum_read_eeprom(struct rum_softc *); static int rum_bbp_wakeup(struct rum_softc *); static int rum_bbp_init(struct rum_softc *); static void rum_clr_shkey_regs(struct rum_softc *); static int rum_init(struct rum_softc *); static void rum_stop(struct rum_softc *); static void rum_load_microcode(struct rum_softc *, const uint8_t *, size_t); static int rum_set_sleep_time(struct rum_softc *, uint16_t); static int rum_reset(struct ieee80211vap *, u_long); static int rum_set_beacon(struct rum_softc *, struct ieee80211vap *); static int rum_alloc_beacon(struct rum_softc *, struct ieee80211vap *); static void rum_update_beacon_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_update_beacon(struct ieee80211vap *, int); static int rum_common_key_set(struct rum_softc *, struct ieee80211_key *, uint16_t); static void 
rum_group_key_set_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_group_key_del_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_pair_key_set_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_pair_key_del_cb(struct rum_softc *, union sec_param *, uint8_t); static int rum_key_alloc(struct ieee80211vap *, struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static int rum_key_set(struct ieee80211vap *, const struct ieee80211_key *); static int rum_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static int rum_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rum_scan_start(struct ieee80211com *); static void rum_scan_end(struct ieee80211com *); static void rum_set_channel(struct ieee80211com *); static int rum_get_rssi(struct rum_softc *, uint8_t); static void rum_ratectl_start(struct rum_softc *, struct ieee80211_node *); static void rum_ratectl_timeout(void *); static void rum_ratectl_task(void *, int); static int rum_pause(struct rum_softc *, int); static const struct { uint32_t reg; uint32_t val; } rum_def_mac[] = { { RT2573_TXRX_CSR0, 0x025fb032 }, { RT2573_TXRX_CSR1, 0x9eaa9eaf }, { RT2573_TXRX_CSR2, 0x8a8b8c8d }, { RT2573_TXRX_CSR3, 0x00858687 }, { RT2573_TXRX_CSR7, 0x2e31353b }, { RT2573_TXRX_CSR8, 0x2a2a2a2c }, { RT2573_TXRX_CSR15, 0x0000000f }, { RT2573_MAC_CSR6, 0x00000fff }, { RT2573_MAC_CSR8, 0x016c030a }, { RT2573_MAC_CSR10, 0x00000718 }, { RT2573_MAC_CSR12, 0x00000004 }, { RT2573_MAC_CSR13, 0x00007f00 }, { RT2573_SEC_CSR2, 0x00000000 }, { RT2573_SEC_CSR3, 0x00000000 }, { RT2573_SEC_CSR4, 0x00000000 }, { RT2573_PHY_CSR1, 0x000023b0 }, { RT2573_PHY_CSR5, 0x00040a06 }, { RT2573_PHY_CSR6, 0x00080606 }, { RT2573_PHY_CSR7, 0x00000408 }, { RT2573_AIFSN_CSR, 0x00002273 }, { RT2573_CWMIN_CSR, 0x00002344 }, { RT2573_CWMAX_CSR, 0x000034aa } }; static const struct { uint8_t reg; uint8_t val; } rum_def_bbp[] = { { 3, 0x80 }, { 15, 0x30 }, { 17, 0x20 }, { 21, 0xc8 }, { 22, 0x38 }, { 23, 0x06 }, { 24, 0xfe }, { 25, 0x0a }, { 26, 0x0d }, { 32, 0x0b }, { 34, 0x12 }, { 37, 0x07 }, { 39, 0xf8 }, { 41, 0x60 }, { 53, 0x10 }, { 54, 0x18 }, { 60, 0x10 }, { 61, 0x04 }, { 62, 0x04 }, { 75, 0xfe }, { 86, 0xfe }, { 88, 0xfe }, { 90, 0x0f }, { 99, 0x00 }, { 102, 0x16 }, { 107, 0x04 } }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rum_rf5226[] = { { 1, 0x00b03, 0x001e1, 0x1a014, 0x30282 }, { 2, 0x00b03, 0x001e1, 0x1a014, 0x30287 }, { 3, 0x00b03, 0x001e2, 0x1a014, 0x30282 }, { 4, 0x00b03, 0x001e2, 0x1a014, 0x30287 }, { 5, 0x00b03, 0x001e3, 0x1a014, 0x30282 }, { 6, 0x00b03, 0x001e3, 0x1a014, 0x30287 }, { 7, 0x00b03, 0x001e4, 0x1a014, 0x30282 }, { 8, 0x00b03, 0x001e4, 0x1a014, 0x30287 }, { 9, 0x00b03, 0x001e5, 0x1a014, 0x30282 }, { 10, 0x00b03, 0x001e5, 0x1a014, 0x30287 }, { 11, 0x00b03, 0x001e6, 0x1a014, 0x30282 }, { 12, 0x00b03, 0x001e6, 0x1a014, 0x30287 }, { 13, 0x00b03, 0x001e7, 0x1a014, 0x30282 }, { 14, 0x00b03, 0x001e8, 0x1a014, 0x30284 }, { 34, 0x00b03, 0x20266, 0x36014, 0x30282 }, { 38, 0x00b03, 0x20267, 0x36014, 0x30284 }, { 42, 0x00b03, 0x20268, 0x36014, 0x30286 }, { 46, 0x00b03, 0x20269, 0x36014, 0x30288 }, { 36, 0x00b03, 0x00266, 0x26014, 0x30288 }, { 40, 0x00b03, 0x00268, 0x26014, 0x30280 }, { 44, 0x00b03, 0x00269, 0x26014, 0x30282 }, { 48, 0x00b03, 0x0026a, 0x26014, 0x30284 }, { 52, 0x00b03, 0x0026b, 0x26014, 0x30286 }, { 56, 0x00b03, 0x0026c, 0x26014, 0x30288 }, { 60, 0x00b03, 0x0026e, 0x26014, 0x30280 }, { 64, 0x00b03, 0x0026f, 0x26014, 
0x30282 }, { 100, 0x00b03, 0x0028a, 0x2e014, 0x30280 }, { 104, 0x00b03, 0x0028b, 0x2e014, 0x30282 }, { 108, 0x00b03, 0x0028c, 0x2e014, 0x30284 }, { 112, 0x00b03, 0x0028d, 0x2e014, 0x30286 }, { 116, 0x00b03, 0x0028e, 0x2e014, 0x30288 }, { 120, 0x00b03, 0x002a0, 0x2e014, 0x30280 }, { 124, 0x00b03, 0x002a1, 0x2e014, 0x30282 }, { 128, 0x00b03, 0x002a2, 0x2e014, 0x30284 }, { 132, 0x00b03, 0x002a3, 0x2e014, 0x30286 }, { 136, 0x00b03, 0x002a4, 0x2e014, 0x30288 }, { 140, 0x00b03, 0x002a6, 0x2e014, 0x30280 }, { 149, 0x00b03, 0x002a8, 0x2e014, 0x30287 }, { 153, 0x00b03, 0x002a9, 0x2e014, 0x30289 }, { 157, 0x00b03, 0x002ab, 0x2e014, 0x30281 }, { 161, 0x00b03, 0x002ac, 0x2e014, 0x30283 }, { 165, 0x00b03, 0x002ad, 0x2e014, 0x30285 } }, rum_rf5225[] = { { 1, 0x00b33, 0x011e1, 0x1a014, 0x30282 }, { 2, 0x00b33, 0x011e1, 0x1a014, 0x30287 }, { 3, 0x00b33, 0x011e2, 0x1a014, 0x30282 }, { 4, 0x00b33, 0x011e2, 0x1a014, 0x30287 }, { 5, 0x00b33, 0x011e3, 0x1a014, 0x30282 }, { 6, 0x00b33, 0x011e3, 0x1a014, 0x30287 }, { 7, 0x00b33, 0x011e4, 0x1a014, 0x30282 }, { 8, 0x00b33, 0x011e4, 0x1a014, 0x30287 }, { 9, 0x00b33, 0x011e5, 0x1a014, 0x30282 }, { 10, 0x00b33, 0x011e5, 0x1a014, 0x30287 }, { 11, 0x00b33, 0x011e6, 0x1a014, 0x30282 }, { 12, 0x00b33, 0x011e6, 0x1a014, 0x30287 }, { 13, 0x00b33, 0x011e7, 0x1a014, 0x30282 }, { 14, 0x00b33, 0x011e8, 0x1a014, 0x30284 }, { 34, 0x00b33, 0x01266, 0x26014, 0x30282 }, { 38, 0x00b33, 0x01267, 0x26014, 0x30284 }, { 42, 0x00b33, 0x01268, 0x26014, 0x30286 }, { 46, 0x00b33, 0x01269, 0x26014, 0x30288 }, { 36, 0x00b33, 0x01266, 0x26014, 0x30288 }, { 40, 0x00b33, 0x01268, 0x26014, 0x30280 }, { 44, 0x00b33, 0x01269, 0x26014, 0x30282 }, { 48, 0x00b33, 0x0126a, 0x26014, 0x30284 }, { 52, 0x00b33, 0x0126b, 0x26014, 0x30286 }, { 56, 0x00b33, 0x0126c, 0x26014, 0x30288 }, { 60, 0x00b33, 0x0126e, 0x26014, 0x30280 }, { 64, 0x00b33, 0x0126f, 0x26014, 0x30282 }, { 100, 0x00b33, 0x0128a, 0x2e014, 0x30280 }, { 104, 0x00b33, 0x0128b, 0x2e014, 0x30282 }, { 108, 0x00b33, 0x0128c, 0x2e014, 0x30284 }, { 112, 0x00b33, 0x0128d, 0x2e014, 0x30286 }, { 116, 0x00b33, 0x0128e, 0x2e014, 0x30288 }, { 120, 0x00b33, 0x012a0, 0x2e014, 0x30280 }, { 124, 0x00b33, 0x012a1, 0x2e014, 0x30282 }, { 128, 0x00b33, 0x012a2, 0x2e014, 0x30284 }, { 132, 0x00b33, 0x012a3, 0x2e014, 0x30286 }, { 136, 0x00b33, 0x012a4, 0x2e014, 0x30288 }, { 140, 0x00b33, 0x012a6, 0x2e014, 0x30280 }, { 149, 0x00b33, 0x012a8, 0x2e014, 0x30287 }, { 153, 0x00b33, 0x012a9, 0x2e014, 0x30289 }, { 157, 0x00b33, 0x012ab, 0x2e014, 0x30281 }, { 161, 0x00b33, 0x012ac, 0x2e014, 0x30283 }, { 165, 0x00b33, 0x012ad, 0x2e014, 0x30285 } }; static const struct usb_config rum_config[RUM_N_TRANSFER] = { [RUM_BULK_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = (MCLBYTES + RT2573_TX_DESC_SIZE + 8), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = rum_bulk_write_callback, .timeout = 5000, /* ms */ }, [RUM_BULK_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = (MCLBYTES + RT2573_RX_DESC_SIZE), .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = rum_bulk_read_callback, }, }; static int rum_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != RT2573_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(rum_devs, sizeof(rum_devs), uaa)); } static int rum_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); 
struct rum_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)]; uint8_t iface_index; int error, ntries; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; RUM_LOCK_INIT(sc); RUM_CMDQ_LOCK_INIT(sc); mbufq_init(&sc->sc_snd, ifqmaxlen); iface_index = RT2573_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, rum_config, RUM_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } RUM_LOCK(sc); /* retrieve RT2573 rev. no */ for (ntries = 0; ntries < 100; ntries++) { if ((tmp = rum_read(sc, RT2573_MAC_CSR0)) != 0) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for chip to settle\n"); RUM_UNLOCK(sc); goto detach; } /* retrieve MAC address and various other things from EEPROM */ rum_read_eeprom(sc); device_printf(sc->sc_dev, "MAC/BBP RT2573 (rev 0x%05x), RF %s\n", tmp, rum_get_rf(sc->rf_rev)); rum_load_microcode(sc, rt2573_ucode, sizeof(rt2573_ucode)); RUM_UNLOCK(sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(self); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_HOSTAP /* HostAp mode supported */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* bg scanning supported */ | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_WME /* 802.11e */ | IEEE80211_C_PMGT /* Station-side power mgmt */ | IEEE80211_C_SWSLEEP /* net80211 managed power mgmt */ ; ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_TKIPMIC | IEEE80211_CRYPTO_TKIP; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_5226) setbit(bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, bands); ieee80211_ifattach(ic); ic->ic_update_promisc = rum_update_promisc; ic->ic_raw_xmit = rum_raw_xmit; ic->ic_scan_start = rum_scan_start; ic->ic_scan_end = rum_scan_end; ic->ic_set_channel = rum_set_channel; ic->ic_transmit = rum_transmit; ic->ic_parent = rum_parent; ic->ic_vap_create = rum_vap_create; ic->ic_vap_delete = rum_vap_delete; ic->ic_updateslot = rum_update_slot; ic->ic_wme.wme_update = rum_wme_update; ic->ic_update_mcast = rum_update_mcast; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2573_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2573_RX_RADIOTAP_PRESENT); TASK_INIT(&sc->cmdq_task, 0, rum_cmdq_cb, sc); if (bootverbose) ieee80211_announce(ic); return (0); detach: rum_detach(self); return (ENXIO); /* failure */ } static int rum_detach(device_t self) { struct rum_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; /* Prevent further ioctls */ RUM_LOCK(sc); sc->sc_detached = 1; RUM_UNLOCK(sc); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, RUM_N_TRANSFER); /* free TX list, if any */ RUM_LOCK(sc); rum_unsetup_tx_list(sc); RUM_UNLOCK(sc); if (ic->ic_softc == sc) { ieee80211_draintask(ic, &sc->cmdq_task); ieee80211_ifdetach(ic); } 
mbufq_drain(&sc->sc_snd); RUM_CMDQ_LOCK_DESTROY(sc); RUM_LOCK_DESTROY(sc); return (0); } static usb_error_t rum_do_request(struct rum_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); if (rum_pause(sc, hz / 100)) break; } return (err); } static usb_error_t rum_do_mcu_request(struct rum_softc *sc, int request) { struct usb_device_request req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_MCU_CNTL; USETW(req.wValue, request); USETW(req.wIndex, 0); USETW(req.wLength, 0); return (rum_do_request(sc, &req, NULL)); } static struct ieee80211vap * rum_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rum_softc *sc = ic->ic_softc; struct rum_vap *rvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; rvp = malloc(sizeof(struct rum_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) { /* out of memory */ free(rvp, M_80211_VAP); return (NULL); } /* override state transition machine */ rvp->newstate = vap->iv_newstate; vap->iv_newstate = rum_newstate; vap->iv_key_alloc = rum_key_alloc; vap->iv_key_set = rum_key_set; vap->iv_key_delete = rum_key_delete; vap->iv_update_beacon = rum_update_beacon; vap->iv_reset = rum_reset; vap->iv_max_aid = RT2573_ADDR_MAX; if (opmode == IEEE80211_M_STA) { /* * Move device to the sleep state when * beacon is received and there is no data for us. * * Used only for IEEE80211_S_SLEEP state. */ rvp->recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = rum_sta_recv_mgmt; /* Ignored while sleeping. 
*/ rvp->bmiss = vap->iv_bmiss; vap->iv_bmiss = rum_beacon_miss; } usb_callout_init_mtx(&rvp->ratectl_ch, &sc->sc_mtx, 0); TASK_INIT(&rvp->ratectl_task, 0, rum_ratectl_task, rvp); ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void rum_vap_delete(struct ieee80211vap *vap) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211com *ic = vap->iv_ic; m_freem(rvp->bcn_mbuf); usb_callout_drain(&rvp->ratectl_ch); ieee80211_draintask(ic, &rvp->ratectl_task); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } static void rum_cmdq_cb(void *arg, int pending) { struct rum_softc *sc = arg; struct rum_cmdq *rc; RUM_CMDQ_LOCK(sc); while (sc->cmdq[sc->cmdq_first].func != NULL) { rc = &sc->cmdq[sc->cmdq_first]; RUM_CMDQ_UNLOCK(sc); RUM_LOCK(sc); rc->func(sc, &rc->data, rc->rvp_id); RUM_UNLOCK(sc); RUM_CMDQ_LOCK(sc); memset(rc, 0, sizeof (*rc)); sc->cmdq_first = (sc->cmdq_first + 1) % RUM_CMDQ_SIZE; } RUM_CMDQ_UNLOCK(sc); } static int rum_cmd_sleepable(struct rum_softc *sc, const void *ptr, size_t len, uint8_t rvp_id, CMD_FUNC_PROTO) { struct ieee80211com *ic = &sc->sc_ic; KASSERT(len <= sizeof(union sec_param), ("buffer overflow")); RUM_CMDQ_LOCK(sc); if (sc->cmdq[sc->cmdq_last].func != NULL) { device_printf(sc->sc_dev, "%s: cmdq overflow\n", __func__); RUM_CMDQ_UNLOCK(sc); return EAGAIN; } if (ptr != NULL) memcpy(&sc->cmdq[sc->cmdq_last].data, ptr, len); sc->cmdq[sc->cmdq_last].rvp_id = rvp_id; sc->cmdq[sc->cmdq_last].func = func; sc->cmdq_last = (sc->cmdq_last + 1) % RUM_CMDQ_SIZE; RUM_CMDQ_UNLOCK(sc); ieee80211_runtask(ic, &sc->cmdq_task); return 0; } static void rum_tx_free(struct rum_tx_data *data, int txerr) { struct rum_softc *sc = data->sc; if (data->m != NULL) { ieee80211_tx_complete(data->ni, data->m, txerr); data->m = NULL; data->ni = NULL; } STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } static void rum_setup_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i; sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; data->sc = sc; STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } static void rum_unsetup_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i; /* make sure any subsequent use of the queues will fail */ sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); /* free up all node references and mbufs */ for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static void rum_beacon_miss(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; struct rum_vap *rvp = RUM_VAP(vap); int sleep; RUM_LOCK(sc); if (sc->sc_sleeping && sc->sc_sleep_end < ticks) { DPRINTFN(12, "dropping 'sleeping' bit, " "device must be awake now\n"); sc->sc_sleeping = 0; } sleep = sc->sc_sleeping; RUM_UNLOCK(sc); if (!sleep) rvp->bmiss(vap); #ifdef USB_DEBUG else DPRINTFN(13, "bmiss event is ignored whilst sleeping\n"); #endif } static void rum_sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct rum_softc *sc = vap->iv_ic->ic_softc; struct rum_vap 
*rvp = RUM_VAP(vap); if (vap->iv_state == IEEE80211_S_SLEEP && subtype == IEEE80211_FC0_SUBTYPE_BEACON) { RUM_LOCK(sc); DPRINTFN(12, "beacon, mybss %d (flags %02X)\n", !!(sc->last_rx_flags & RT2573_RX_MYBSS), sc->last_rx_flags); if ((sc->last_rx_flags & (RT2573_RX_MYBSS | RT2573_RX_BC)) == (RT2573_RX_MYBSS | RT2573_RX_BC)) { /* * Put it to sleep here; in case if there is a data * for us, iv_recv_mgmt() will wakeup the device via * SLEEP -> RUN state transition. */ rum_set_power_state(sc, 1); } RUM_UNLOCK(sc); } rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf); } static int rum_set_power_state(struct rum_softc *sc, int sleep) { usb_error_t uerror; RUM_LOCK_ASSERT(sc); DPRINTFN(12, "moving to %s state (sleep time %u)\n", sleep ? "sleep" : "awake", sc->sc_sleep_time); uerror = rum_do_mcu_request(sc, sleep ? RT2573_MCU_SLEEP : RT2573_MCU_WAKEUP); if (uerror != USB_ERR_NORMAL_COMPLETION) { device_printf(sc->sc_dev, "%s: could not change power state: %s\n", __func__, usbd_errstr(uerror)); return (EIO); } sc->sc_sleeping = !!sleep; sc->sc_sleep_end = sleep ? ticks + sc->sc_sleep_time : 0; return (0); } static int rum_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; const struct ieee80211_txparam *tp; enum ieee80211_state ostate; struct ieee80211_node *ni; usb_error_t uerror; int ret = 0; ostate = vap->iv_state; DPRINTF("%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); RUM_LOCK(sc); usb_callout_stop(&rvp->ratectl_ch); if (ostate == IEEE80211_S_SLEEP && vap->iv_opmode == IEEE80211_M_STA) { rum_clrbits(sc, RT2573_TXRX_CSR4, RT2573_ACKCTS_PWRMGT); rum_clrbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); /* * Ignore any errors; * any subsequent TX will wakeup it anyway */ (void) rum_set_power_state(sc, 0); } switch (nstate) { case IEEE80211_S_INIT: if (ostate == IEEE80211_S_RUN) rum_abort_tsf_sync(sc); break; case IEEE80211_S_RUN: if (ostate == IEEE80211_S_SLEEP) break; /* already handled */ ni = ieee80211_ref_node(vap->iv_bss); if (vap->iv_opmode != IEEE80211_M_MONITOR) { if (ic->ic_bsschan == IEEE80211_CHAN_ANYC || ni->ni_chan == IEEE80211_CHAN_ANYC) { ret = EINVAL; goto run_fail; } rum_update_slot_cb(sc, NULL, 0); rum_enable_mrr(sc); rum_set_txpreamble(sc); rum_set_basicrates(sc); rum_set_maxretry(sc, vap); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); rum_set_bssid(sc, sc->sc_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { if ((ret = rum_alloc_beacon(sc, vap)) != 0) goto run_fail; } if (vap->iv_opmode != IEEE80211_M_MONITOR && vap->iv_opmode != IEEE80211_M_AHDEMO) { if ((ret = rum_enable_tsf_sync(sc)) != 0) goto run_fail; } else rum_enable_tsf(sc); /* enable automatic rate adaptation */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) rum_ratectl_start(sc, ni); run_fail: ieee80211_free_node(ni); break; case IEEE80211_S_SLEEP: /* Implemented for STA mode only. 
*/ if (vap->iv_opmode != IEEE80211_M_STA) break; uerror = rum_setbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); if (uerror != USB_ERR_NORMAL_COMPLETION) { ret = EIO; break; } uerror = rum_setbits(sc, RT2573_TXRX_CSR4, RT2573_ACKCTS_PWRMGT); if (uerror != USB_ERR_NORMAL_COMPLETION) { ret = EIO; break; } ret = rum_set_power_state(sc, 1); if (ret != 0) { device_printf(sc->sc_dev, "%s: could not move to the SLEEP state: %s\n", __func__, usbd_errstr(uerror)); } break; default: break; } RUM_UNLOCK(sc); IEEE80211_LOCK(ic); return (ret == 0 ? rvp->newstate(vap, nstate, arg) : ret); } static void rum_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct rum_softc *sc = usbd_xfer_softc(xfer); struct ieee80211vap *vap; struct rum_tx_data *data; struct mbuf *m; struct usb_page_cache *pc; unsigned int len; int actlen, sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete, %d bytes\n", actlen); /* free resources */ data = usbd_xfer_get_priv(xfer); rum_tx_free(data, 0); usbd_xfer_set_priv(xfer, NULL); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->tx_q); if (data) { STAILQ_REMOVE_HEAD(&sc->tx_q, next); m = data->m; if (m->m_pkthdr.len > (int)(MCLBYTES + RT2573_TX_DESC_SIZE)) { DPRINTFN(0, "data overflow, %u bytes\n", m->m_pkthdr.len); m->m_pkthdr.len = (MCLBYTES + RT2573_TX_DESC_SIZE); } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &data->desc, RT2573_TX_DESC_SIZE); usbd_m_copy_in(pc, RT2573_TX_DESC_SIZE, m, 0, m->m_pkthdr.len); vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct rum_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = data->rate; rum_get_tsf(sc, &tap->wt_tsf); tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m); } /* align end on a 4-bytes boundary */ len = (RT2573_TX_DESC_SIZE + m->m_pkthdr.len + 3) & ~3; if ((len % 64) == 0) len += 4; DPRINTFN(11, "sending frame len=%u xferlen=%u\n", m->m_pkthdr.len, len); usbd_xfer_set_frame_len(xfer, 0, len); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); } rum_start(sc); break; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); counter_u64_add(sc->sc_ic.ic_oerrors, 1); data = usbd_xfer_get_priv(xfer); if (data != NULL) { rum_tx_free(data, error); usbd_xfer_set_priv(xfer, NULL); } if (error != USB_ERR_CANCELLED) { if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); /* * Try to clear stall first, also if other * errors occur, hence clearing stall * introduces a 50 ms delay: */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static void rum_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct rum_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct mbuf *m = NULL; struct usb_page_cache *pc; uint32_t flags; uint8_t rssi = 0; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(15, "rx done, actlen=%d\n", len); if (len < (int)(RT2573_RX_DESC_SIZE + IEEE80211_MIN_LEN)) { DPRINTF("%s: xfer too short %d\n", device_get_nameunit(sc->sc_dev), len); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } len -= RT2573_RX_DESC_SIZE; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, &sc->sc_rx_desc, RT2573_RX_DESC_SIZE); rssi = rum_get_rssi(sc, sc->sc_rx_desc.rssi); flags = le32toh(sc->sc_rx_desc.flags); sc->last_rx_flags 
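/*
 * Illustrative sketch (not driver code): the transfer-length fixup done in
 * rum_bulk_write_callback() above.  The descriptor-plus-frame length is
 * rounded up to a 4-byte boundary, and if the result is an exact multiple
 * of 64 (assumed here to be the bulk endpoint's max-packet size) 4 extra
 * bytes are added so the transfer ends in a short packet, which the device
 * uses to detect the end of the frame.
 */
static unsigned int
tx_xfer_len(unsigned int desc_size, unsigned int pkt_len)
{
	unsigned int len;

	len = (desc_size + pkt_len + 3) & ~3u;	/* round up to 4 bytes */
	if ((len % 64) == 0)			/* avoid max-packet multiple */
		len += 4;
	return (len);
}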
= flags; if (flags & RT2573_RX_CRC_ERROR) { /* * This should not happen since we did not * request to receive those frames when we * filled RUM_TXRX_CSR2: */ DPRINTFN(5, "PHY or CRC error\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } if ((flags & RT2573_RX_DEC_MASK) != RT2573_RX_DEC_OK) { switch (flags & RT2573_RX_DEC_MASK) { case RT2573_RX_IV_ERROR: DPRINTFN(5, "IV/EIV error\n"); break; case RT2573_RX_MIC_ERROR: DPRINTFN(5, "MIC error\n"); break; case RT2573_RX_KEY_ERROR: DPRINTFN(5, "Key error\n"); break; } counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF("could not allocate mbuf\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } usbd_copy_out(pc, RT2573_RX_DESC_SIZE, mtod(m, uint8_t *), len); wh = mtod(m, struct ieee80211_frame_min *); if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && (flags & RT2573_RX_CIP_MASK) != RT2573_RX_CIP_MODE(RT2573_MODE_NOSEC)) { wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; m->m_flags |= M_WEP; } /* finalize mbuf */ m->m_pkthdr.len = m->m_len = (flags >> 16) & 0xfff; if (ieee80211_radiotap_active(ic)) { struct rum_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate, (flags & RT2573_RX_OFDM) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); rum_get_tsf(sc, &tap->wr_tsf); tap->wr_antsignal = RT2573_NOISE_FLOOR + rssi; tap->wr_antnoise = RT2573_NOISE_FLOOR; tap->wr_antenna = sc->rx_ant; } /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * At the end of a USB callback it is always safe to unlock * the private mutex of a device! That is why we do the * "ieee80211_input" here, and not some lines up! */ RUM_UNLOCK(sc); if (m) { if (m->m_len >= sizeof(struct ieee80211_frame_min)) ni = ieee80211_find_rxnode(ic, wh); else ni = NULL; if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, RT2573_NOISE_FLOOR); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, RT2573_NOISE_FLOOR); } RUM_LOCK(sc); rum_start(sc); return; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static uint8_t rum_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } /* * Map net80211 cipher to RT2573 security mode. */ static uint8_t rum_crypto_mode(struct rum_softc *sc, u_int cipher, int keylen) { switch (cipher) { case IEEE80211_CIPHER_WEP: return (keylen < 8 ? 
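/*
 * Illustrative sketch (not driver code): how the 32-bit Rx descriptor flags
 * word is picked apart above.  Only the fields the code actually uses are
 * modeled: a CRC-error bit, a decryption-status field and the frame length
 * in bits 16..27.  The mask values below are stand-ins, not the real
 * RT2573_RX_* constants.
 */
#include <stdint.h>

#define RX_F_CRC_ERROR	0x00000100u	/* stand-in mask */
#define RX_F_DEC_MASK	0x00000e00u	/* stand-in mask */
#define RX_F_DEC_OK	0x00000000u

static inline unsigned int
rx_frame_len(uint32_t flags)
{
	return ((flags >> 16) & 0xfff);	/* 12-bit length field */
}

static inline int
rx_frame_ok(uint32_t flags)
{
	if (flags & RX_F_CRC_ERROR)
		return (0);
	if ((flags & RX_F_DEC_MASK) != RX_F_DEC_OK)
		return (0);
	return (1);
}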
RT2573_MODE_WEP40 : RT2573_MODE_WEP104); case IEEE80211_CIPHER_TKIP: return RT2573_MODE_TKIP; case IEEE80211_CIPHER_AES_CCM: return RT2573_MODE_AES_CCMP; default: device_printf(sc->sc_dev, "unknown cipher %d\n", cipher); return 0; } } static void rum_setup_tx_desc(struct rum_softc *sc, struct rum_tx_desc *desc, struct ieee80211_key *k, uint32_t flags, uint8_t xflags, uint8_t qid, int hdrlen, int len, int rate) { struct ieee80211com *ic = &sc->sc_ic; struct wmeParams *wmep = &sc->wme_params[qid]; uint16_t plcp_length; int remainder; flags |= RT2573_TX_VALID; flags |= len << 16; if (k != NULL && !(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { const struct ieee80211_cipher *cip = k->wk_cipher; len += cip->ic_header + cip->ic_trailer + cip->ic_miclen; desc->eiv = 0; /* for WEP */ cip->ic_setiv(k, (uint8_t *)&desc->iv); } /* setup PLCP fields */ desc->plcp_signal = rum_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { flags |= RT2573_TX_OFDM; plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { if (rate == 0) rate = 2; /* avoid division by zero */ - plcp_length = (16 * len + rate - 1) / rate; + plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2573_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } desc->flags = htole32(flags); desc->hdrlen = hdrlen; desc->xflags = xflags; desc->wme = htole16(RT2573_QID(qid) | RT2573_AIFSN(wmep->wmep_aifsn) | RT2573_LOGCWMIN(wmep->wmep_logcwmin) | RT2573_LOGCWMAX(wmep->wmep_logcwmax)); } static int rum_sendprot(struct rum_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct rum_tx_data *data; struct mbuf *mprot; int protrate, pktlen, flags, isshort; uint16_t dur; RUM_LOCK_ASSERT(sc); KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = 0; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RT2573_TX_NEED_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return (ENOBUFS); } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = mprot; data->ni = ieee80211_ref_node(ni); data->rate = protrate; rum_setup_tx_desc(sc, &data->desc, NULL, flags, 0, 0, 0, mprot->m_pkthdr.len, protrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static uint32_t rum_tx_crypto_flags(struct rum_softc *sc, struct ieee80211_node *ni, const struct ieee80211_key *k) { struct ieee80211vap *vap = ni->ni_vap; u_int cipher; uint32_t flags = 0; uint8_t mode, pos; if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { cipher = 
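/*
 * Illustrative note on the change above: howmany(x, y) is the sys/param.h
 * round-up division macro, so howmany(16 * len, rate) computes exactly the
 * old (16 * len + rate - 1) / rate expression.  For CCK the PLCP LENGTH
 * field carries the frame duration in microseconds: 8 * len bits at
 * rate / 2 Mbps is 16 * len / rate us, rounded up.  At 11 Mbps (rate == 22
 * in 500 kbps units), a remainder of 1..6 requires the 802.11b length-
 * extension bit, matching the RT2573_PLCP_LENGEXT logic above.  A
 * self-contained recomputation:
 */
#include <sys/param.h>	/* howmany() */
#include <sys/types.h>
#include <stdint.h>

static uint16_t
cck_plcp_length(int len, int rate, int *lengext)
{
	uint16_t us = howmany(16 * len, rate);	/* duration, rounded up */

	/* at 11 Mbps, remainders of 1..6 need the extension bit */
	*lengext = (rate == 22 && (16 * len) % 22 != 0 &&
	    (16 * len) % 22 < 7);
	return (us);
}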
k->wk_cipher->ic_cipher; pos = k->wk_keyix; mode = rum_crypto_mode(sc, cipher, k->wk_keylen); if (mode == 0) return 0; flags |= RT2573_TX_CIP_MODE(mode); /* Do not trust GROUP flag */ if (!(k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) flags |= RT2573_TX_KEY_PAIR; else pos += 0 * RT2573_SKEY_MAX; /* vap id */ flags |= RT2573_TX_KEY_ID(pos); if (cipher == IEEE80211_CIPHER_TKIP) flags |= RT2573_TX_TKIPMIC; } return flags; } static int rum_tx_mgt(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct rum_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k = NULL; uint32_t flags = 0; uint16_t dur; uint8_t ac, type, xflags = 0; int hdrlen; RUM_LOCK_ASSERT(sc); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; hdrlen = ieee80211_anyhdrsize(wh); ac = M_WME_GETAC(m0); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_get_txkey(ni, m0); if (k == NULL) return (ENOENT); if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) && !k->wk_cipher->ic_encap(k, m0)) return (ENOBUFS); wh = mtod(m0, struct ieee80211_frame *); } tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2573_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, tp->mgmtrate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); /* tell hardware to add timestamp for probe responses */ if (type == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RT2573_TX_TIMESTAMP; } if (type != IEEE80211_FC0_TYPE_CTL && !IEEE80211_QOS_HAS_SEQ(wh)) xflags |= RT2573_TX_HWSEQ; if (k != NULL) flags |= rum_tx_crypto_flags(sc, ni, k); data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; rum_setup_tx_desc(sc, &data->desc, k, flags, xflags, ac, hdrlen, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending mgt frame len=%d rate=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return (0); } static int rum_tx_raw(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; struct rum_tx_data *data; uint32_t flags; uint8_t ac, type, xflags = 0; int rate, error; RUM_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ac = params->ibp_pri & 3; rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) return (EINVAL); flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2573_TX_NEED_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rum_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
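/*
 * Illustrative sketch (not driver code): the pointer-range test used above
 * to classify a key without trusting its GROUP flag.  net80211 keeps the
 * global (group/WEP) keys in the vap's iv_nw_keys[] array, so a key is
 * global iff its address lies inside that array; anything else is a
 * pairwise key from the node table.  Structures are reduced to the minimum
 * needed for the example, and NW_NKEYS stands in for IEEE80211_WEP_NKID.
 */
#define NW_NKEYS	4	/* stand-in for IEEE80211_WEP_NKID */

struct example_key { int dummy; };
struct example_vap { struct example_key nw_keys[NW_NKEYS]; };

static int
key_is_global(const struct example_vap *vap, const struct example_key *k)
{
	return (k >= &vap->nw_keys[0] && k < &vap->nw_keys[NW_NKEYS]);
}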
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error || sc->tx_nfree == 0) return (ENOBUFS); flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } if (type != IEEE80211_FC0_TYPE_CTL && !IEEE80211_QOS_HAS_SEQ(wh)) xflags |= RT2573_TX_HWSEQ; data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; /* XXX need to setup descriptor ourself */ rum_setup_tx_desc(sc, &data->desc, NULL, flags, xflags, ac, 0, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending raw frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static int rum_tx_data(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct rum_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k = NULL; uint32_t flags = 0; uint16_t dur; uint8_t ac, type, qos, xflags = 0; int error, hdrlen, rate; RUM_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; hdrlen = ieee80211_anyhdrsize(wh); if (IEEE80211_QOS_HAS_SEQ(wh)) qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; else qos = 0; ac = M_WME_GETAC(m0); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else rate = ni->ni_txrate; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_get_txkey(ni, m0); if (k == NULL) { m_freem(m0); return (ENOENT); } if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) && !k->wk_cipher->ic_encap(k, m0)) { m_freem(m0); return (ENOBUFS); } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (type != IEEE80211_FC0_TYPE_CTL && !IEEE80211_QOS_HAS_SEQ(wh)) xflags |= RT2573_TX_HWSEQ; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rum_sendprot(sc, m0, ni, prot, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } } if (k != NULL) flags |= rum_tx_crypto_flags(sc, ni, k); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* Unicast frame, check if an ACK is expected. 
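/*
 * Illustrative sketch (not driver code): the protection decision made in
 * rum_tx_data() above, as a stand-alone predicate.  RTS/CTS is selected
 * when the frame plus FCS crosses the RTS threshold; otherwise the
 * configured ERP protection mode applies when protection is enabled and
 * the frame goes out at an OFDM rate.  The enum and parameter names below
 * are invented stand-ins for the net80211 equivalents.
 */
enum prot_mode { PROT_NONE, PROT_CTSONLY, PROT_RTSCTS };

static enum prot_mode
tx_prot_mode(int pktlen_with_fcs, int rtsthreshold, int useprot,
    int rate_is_ofdm, enum prot_mode configured)
{
	if (pktlen_with_fcs > rtsthreshold)
		return (PROT_RTSCTS);
	if (useprot && rate_is_ofdm)
		return (configured);
	return (PROT_NONE);
}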
*/ if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != IEEE80211_QOS_ACKPOLICY_NOACK) flags |= RT2573_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); } rum_setup_tx_desc(sc, &data->desc, k, flags, xflags, ac, hdrlen, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending frame len=%d rate=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static int rum_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rum_softc *sc = ic->ic_softc; int error; RUM_LOCK(sc); if (!sc->sc_running) { RUM_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RUM_UNLOCK(sc); return (error); } rum_start(sc); RUM_UNLOCK(sc); return (0); } static void rum_start(struct rum_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; RUM_LOCK_ASSERT(sc); if (!sc->sc_running) return; while (sc->tx_nfree >= RUM_TX_MINFREE && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rum_tx_data(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } } } static void rum_parent(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); RUM_LOCK(sc); if (sc->sc_detached) { RUM_UNLOCK(sc); return; } RUM_UNLOCK(sc); if (ic->ic_nrunning > 0) { if (rum_init(sc) == 0) ieee80211_start_all(ic); else ieee80211_stop(vap); } else rum_stop(sc); } static void rum_eeprom_read(struct rum_softc *sc, uint16_t addr, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_EEPROM; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); error = rum_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } } static uint32_t rum_read(struct rum_softc *sc, uint16_t reg) { uint32_t val; rum_read_multi(sc, reg, &val, sizeof val); return le32toh(val); } static void rum_read_multi(struct rum_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = rum_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not multi read MAC register: %s\n", usbd_errstr(error)); } } static usb_error_t rum_write(struct rum_softc *sc, uint16_t reg, uint32_t val) { uint32_t tmp = htole32(val); return (rum_write_multi(sc, reg, &tmp, sizeof tmp)); } static usb_error_t rum_write_multi(struct rum_softc *sc, uint16_t reg, void *buf, size_t len) { struct usb_device_request req; usb_error_t error; size_t offset; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_WRITE_MULTI_MAC; USETW(req.wValue, 0); /* write at most 64 bytes at a time */ for (offset = 0; offset < len; offset += 64) { USETW(req.wIndex, reg + offset); USETW(req.wLength, MIN(len - offset, 64)); error = rum_do_request(sc, &req, (char *)buf + offset); if (error != 0) { device_printf(sc->sc_dev, "could not multi write MAC register: %s\n", usbd_errstr(error)); return (error); } } return (USB_ERR_NORMAL_COMPLETION); } static usb_error_t rum_setbits(struct rum_softc *sc, uint16_t reg, uint32_t mask) { return (rum_write(sc, reg, rum_read(sc, reg) | mask)); } static 
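/*
 * Illustrative sketch (not driver code): the 64-byte chunking loop from
 * rum_write_multi() above, with the USB vendor control request replaced by
 * a caller-supplied callback so the example stays self-contained.  The
 * register offset advances with the buffer offset, and the loop aborts on
 * the first transfer failure, as the real code does.
 */
#include <stddef.h>
#include <stdint.h>

#define CHUNK	64	/* write at most 64 bytes at a time */

static int
write_chunked(uint16_t reg, const char *buf, size_t len,
    int (*xfer)(uint16_t reg, const char *buf, size_t len))
{
	size_t offset;
	int error;

	for (offset = 0; offset < len; offset += CHUNK) {
		error = xfer(reg + offset, buf + offset,
		    len - offset < CHUNK ? len - offset : CHUNK);
		if (error != 0)
			return (error);	/* abort on first failure */
	}
	return (0);
}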
usb_error_t rum_clrbits(struct rum_softc *sc, uint16_t reg, uint32_t mask) { return (rum_write(sc, reg, rum_read(sc, reg) & ~mask)); } static usb_error_t rum_modbits(struct rum_softc *sc, uint16_t reg, uint32_t set, uint32_t unset) { return (rum_write(sc, reg, (rum_read(sc, reg) & ~unset) | set)); } static int rum_bbp_busy(struct rum_softc *sc) { int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR3) & RT2573_BBP_BUSY)) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) return (ETIMEDOUT); return (0); } static void rum_bbp_write(struct rum_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; DPRINTFN(2, "reg=0x%08x\n", reg); if (rum_bbp_busy(sc) != 0) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2573_BBP_BUSY | (reg & 0x7f) << 8 | val; rum_write(sc, RT2573_PHY_CSR3, tmp); } static uint8_t rum_bbp_read(struct rum_softc *sc, uint8_t reg) { uint32_t val; int ntries; DPRINTFN(2, "reg=0x%08x\n", reg); if (rum_bbp_busy(sc) != 0) { device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } val = RT2573_BBP_BUSY | RT2573_BBP_READ | reg << 8; rum_write(sc, RT2573_PHY_CSR3, val); for (ntries = 0; ntries < 100; ntries++) { val = rum_read(sc, RT2573_PHY_CSR3); if (!(val & RT2573_BBP_BUSY)) return val & 0xff; if (rum_pause(sc, hz / 100)) break; } device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } static void rum_rf_write(struct rum_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR4) & RT2573_RF_BUSY)) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2573_RF_BUSY | RT2573_RF_20BIT | (val & 0xfffff) << 2 | (reg & 3); rum_write(sc, RT2573_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0xfffff); } static void rum_select_antenna(struct rum_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rum_bbp_read(sc, 4); bbp77 = rum_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = rum_read(sc, RT2573_TXRX_CSR0); rum_write(sc, RT2573_TXRX_CSR0, tmp | RT2573_DISABLE_RX); rum_bbp_write(sc, 4, bbp4); rum_bbp_write(sc, 77, bbp77); rum_write(sc, RT2573_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. 
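/*
 * Illustrative sketch (not driver code): the bounded polling pattern that
 * rum_bbp_busy(), rum_rf_write() and rum_bbp_wakeup() above all repeat.
 * read_reg() and pause_10ms() stand in for rum_read() and
 * rum_pause(sc, hz / 100); the helper gives up with ETIMEDOUT after 100
 * attempts, i.e. roughly one second of polling.
 */
#include <errno.h>
#include <stdint.h>

static int
poll_until_clear(uint32_t (*read_reg)(void), uint32_t busy_bit,
    void (*pause_10ms)(void))
{
	int ntries;

	for (ntries = 0; ntries < 100; ntries++) {
		if ((read_reg() & busy_bit) == 0)
			return (0);	/* bit cleared: device ready */
		pause_10ms();
	}
	return (ETIMEDOUT);
}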
*/ static void rum_enable_mrr(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) { rum_setbits(sc, RT2573_TXRX_CSR4, RT2573_MRR_ENABLED | RT2573_MRR_CCK_FALLBACK); } else { rum_modbits(sc, RT2573_TXRX_CSR4, RT2573_MRR_ENABLED, RT2573_MRR_CCK_FALLBACK); } } static void rum_set_txpreamble(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) rum_setbits(sc, RT2573_TXRX_CSR4, RT2573_SHORT_PREAMBLE); else rum_clrbits(sc, RT2573_TXRX_CSR4, RT2573_SHORT_PREAMBLE); } static void rum_set_basicrates(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* update basic rate set */ if (ic->ic_curmode == IEEE80211_MODE_11B) { /* 11b basic rates: 1, 2Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x3); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) { /* 11a basic rates: 6, 12, 24Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x150); } else { /* 11b/g basic rates: 1, 2, 5.5, 11Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0xf); } } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. */ static void rum_select_band(struct rum_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } sc->bbp17 = bbp17; rum_bbp_write(sc, 17, bbp17); rum_bbp_write(sc, 96, bbp96); rum_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rum_bbp_write(sc, 75, 0x80); rum_bbp_write(sc, 86, 0x80); rum_bbp_write(sc, 88, 0x80); } rum_bbp_write(sc, 35, bbp35); rum_bbp_write(sc, 97, bbp97); rum_bbp_write(sc, 98, bbp98); if (IEEE80211_IS_CHAN_2GHZ(c)) { rum_modbits(sc, RT2573_PHY_CSR0, RT2573_PA_PE_2GHZ, RT2573_PA_PE_5GHZ); } else { rum_modbits(sc, RT2573_PHY_CSR0, RT2573_PA_PE_5GHZ, RT2573_PA_PE_2GHZ); } } static void rum_set_chan(struct rum_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2573_BBPR94_DEFAULT; int8_t power; int i, chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) ? rum_rf5225 : rum_rf5226; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. 
*/ if (c->ic_flags != ic->ic_curchan->ic_flags) { rum_select_band(sc, c); rum_select_antenna(sc); } ic->ic_curchan = c; rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7 | 1); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_pause(sc, hz / 100); /* enable smart mode for MIMO-capable RFs */ bbp3 = rum_bbp_read(sc, 3); bbp3 &= ~RT2573_SMART_MODE; if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) bbp3 |= RT2573_SMART_MODE; rum_bbp_write(sc, 3, bbp3); if (bbp94 != RT2573_BBPR94_DEFAULT) rum_bbp_write(sc, 94, bbp94); /* give the chip some extra time to do the switchover */ rum_pause(sc, hz / 100); } static void rum_set_maxretry(struct rum_softc *sc, struct ieee80211vap *vap) { const struct ieee80211_txparam *tp; struct ieee80211_node *ni = vap->iv_bss; struct rum_vap *rvp = RUM_VAP(vap); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; rvp->maxretry = tp->maxretry < 0xf ? tp->maxretry : 0xf; rum_modbits(sc, RT2573_TXRX_CSR4, RT2573_SHORT_RETRY(rvp->maxretry) | RT2573_LONG_RETRY(rvp->maxretry), RT2573_SHORT_RETRY_MASK | RT2573_LONG_RETRY_MASK); } /* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. */ static int rum_enable_tsf_sync(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; uint16_t bintval; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation. */ if (rum_write(sc, RT2573_TXRX_CSR10, 1 << 12 | 8) != 0) return EIO; } tmp = rum_read(sc, RT2573_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ bintval = vap->iv_bss->ni_intval; tmp |= bintval * 16; tmp |= RT2573_TSF_TIMER_EN | RT2573_TBTT_TIMER_EN; switch (vap->iv_opmode) { case IEEE80211_M_STA: /* * Local TSF is always updated with remote TSF on beacon * reception. */ tmp |= RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_STA); break; case IEEE80211_M_IBSS: /* * Local TSF is updated with remote TSF on beacon reception * only if the remote TSF is greater than local TSF. */ tmp |= RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_IBSS); tmp |= RT2573_BCN_TX_EN; break; case IEEE80211_M_HOSTAP: /* SYNC with nobody */ tmp |= RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_HOSTAP); tmp |= RT2573_BCN_TX_EN; break; default: device_printf(sc->sc_dev, "Enabling TSF failed. 
undefined opmode %d\n", vap->iv_opmode); return EINVAL; } if (rum_write(sc, RT2573_TXRX_CSR9, tmp) != 0) return EIO; /* refresh current sleep time */ return (rum_set_sleep_time(sc, bintval)); } static void rum_enable_tsf(struct rum_softc *sc) { rum_modbits(sc, RT2573_TXRX_CSR9, RT2573_TSF_TIMER_EN | RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_DIS), 0x00ffffff); } static void rum_abort_tsf_sync(struct rum_softc *sc) { rum_clrbits(sc, RT2573_TXRX_CSR9, 0x00ffffff); } static void rum_get_tsf(struct rum_softc *sc, uint64_t *buf) { rum_read_multi(sc, RT2573_TXRX_CSR12, buf, sizeof (*buf)); } static void rum_update_slot_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211com *ic = &sc->sc_ic; uint8_t slottime; slottime = IEEE80211_GET_SLOTTIME(ic); rum_modbits(sc, RT2573_MAC_CSR9, slottime, 0xff); DPRINTF("setting slot time to %uus\n", slottime); } static void rum_update_slot(struct ieee80211com *ic) { rum_cmd_sleepable(ic->ic_softc, NULL, 0, 0, rum_update_slot_cb); } static int rum_wme_update(struct ieee80211com *ic) { const struct wmeParams *chanp = ic->ic_wme.wme_chanParams.cap_wmeParams; struct rum_softc *sc = ic->ic_softc; int error = 0; RUM_LOCK(sc); error = rum_write(sc, RT2573_AIFSN_CSR, chanp[WME_AC_VO].wmep_aifsn << 12 | chanp[WME_AC_VI].wmep_aifsn << 8 | chanp[WME_AC_BK].wmep_aifsn << 4 | chanp[WME_AC_BE].wmep_aifsn); if (error) goto print_err; error = rum_write(sc, RT2573_CWMIN_CSR, chanp[WME_AC_VO].wmep_logcwmin << 12 | chanp[WME_AC_VI].wmep_logcwmin << 8 | chanp[WME_AC_BK].wmep_logcwmin << 4 | chanp[WME_AC_BE].wmep_logcwmin); if (error) goto print_err; error = rum_write(sc, RT2573_CWMAX_CSR, chanp[WME_AC_VO].wmep_logcwmax << 12 | chanp[WME_AC_VI].wmep_logcwmax << 8 | chanp[WME_AC_BK].wmep_logcwmax << 4 | chanp[WME_AC_BE].wmep_logcwmax); if (error) goto print_err; error = rum_write(sc, RT2573_TXOP01_CSR, chanp[WME_AC_BK].wmep_txopLimit << 16 | chanp[WME_AC_BE].wmep_txopLimit); if (error) goto print_err; error = rum_write(sc, RT2573_TXOP23_CSR, chanp[WME_AC_VO].wmep_txopLimit << 16 | chanp[WME_AC_VI].wmep_txopLimit); if (error) goto print_err; memcpy(sc->wme_params, chanp, sizeof(*chanp) * WME_NUM_AC); print_err: RUM_UNLOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: WME update failed, error %d\n", __func__, error); } return (error); } static void rum_set_bssid(struct rum_softc *sc, const uint8_t *bssid) { rum_write(sc, RT2573_MAC_CSR4, bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24); rum_write(sc, RT2573_MAC_CSR5, bssid[4] | bssid[5] << 8 | RT2573_NUM_BSSID_MSK(1)); } static void rum_set_macaddr(struct rum_softc *sc, const uint8_t *addr) { rum_write(sc, RT2573_MAC_CSR2, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); rum_write(sc, RT2573_MAC_CSR3, addr[4] | addr[5] << 8 | 0xff << 16); } static void rum_setpromisc(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_promisc == 0) rum_setbits(sc, RT2573_TXRX_CSR0, RT2573_DROP_NOT_TO_ME); else rum_clrbits(sc, RT2573_TXRX_CSR0, RT2573_DROP_NOT_TO_ME); DPRINTF("%s promiscuous mode\n", ic->ic_promisc > 0 ? "entering" : "leaving"); } static void rum_update_promisc(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; RUM_LOCK(sc); if (sc->sc_running) rum_setpromisc(sc); RUM_UNLOCK(sc); } static void rum_update_mcast(struct ieee80211com *ic) { /* Ignore. 
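/*
 * Illustrative sketch (not driver code): the field packing used by
 * rum_wme_update() above.  Each of the four WME access categories
 * contributes one 4-bit value to a 16-bit register, laid out as VO in bits
 * 15:12, VI in 11:8, BK in 7:4 and BE in 3:0.  The input array is indexed
 * by net80211's WME_AC_* ordering (BE=0, BK=1, VI=2, VO=3).
 */
#include <stdint.h>

static uint16_t
wme_pack4(const uint8_t v[4])	/* v indexed by AC: BE, BK, VI, VO */
{
	return ((uint16_t)((v[3] & 0xf) << 12 |	/* WME_AC_VO */
	    (v[2] & 0xf) << 8 |			/* WME_AC_VI */
	    (v[1] & 0xf) << 4 |			/* WME_AC_BK */
	    (v[0] & 0xf)));			/* WME_AC_BE */
}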
*/ } static const char * rum_get_rf(int rev) { switch (rev) { case RT2573_RF_2527: return "RT2527 (MIMO XR)"; case RT2573_RF_2528: return "RT2528"; case RT2573_RF_5225: return "RT5225 (MIMO XR)"; case RT2573_RF_5226: return "RT5226"; default: return "unknown"; } } static void rum_read_eeprom(struct rum_softc *sc) { uint16_t val; #ifdef RUM_DEBUG int i; #endif /* read MAC address */ rum_eeprom_read(sc, RT2573_EEPROM_ADDRESS, sc->sc_ic.ic_macaddr, 6); rum_eeprom_read(sc, RT2573_EEPROM_ANTENNA, &val, 2); val = le16toh(val); sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF("RF revision=%d\n", sc->rf_rev); rum_eeprom_read(sc, RT2573_EEPROM_CONFIG2, &val, 2); val = le16toh(val); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF("External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna); rum_eeprom_read(sc, RT2573_EEPROM_RSSI_2GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; rum_eeprom_read(sc, RT2573_EEPROM_RSSI_5GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF("RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr); rum_eeprom_read(sc, RT2573_EEPROM_FREQ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF("RF freq=%d\n", sc->rffreq); /* read Tx power for all a/b/g channels */ rum_eeprom_read(sc, RT2573_EEPROM_TXPOWER, sc->txpow, 14); /* XXX default Tx power for 802.11a channels */ memset(sc->txpow + 14, 24, sizeof (sc->txpow) - 14); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) DPRINTF("Channel=%d Tx power=%d\n", i + 1, sc->txpow[i]); #endif /* read default values for BBP registers */ rum_eeprom_read(sc, RT2573_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; DPRINTF("BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } #endif } static int rum_bbp_wakeup(struct rum_softc *sc) { unsigned int ntries; for (ntries = 0; ntries < 100; ntries++) { if (rum_read(sc, RT2573_MAC_CSR12) & 8) break; rum_write(sc, RT2573_MAC_CSR12, 4); /* force wakeup */ if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wakeup\n"); return (ETIMEDOUT); } return (0); } static int rum_bbp_init(struct rum_softc *sc) { int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { const uint8_t val = rum_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(rum_def_bbp); i++) rum_bbp_write(sc, rum_def_bbp[i].reg, rum_def_bbp[i].val); /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; rum_bbp_write(sc, sc->bbp_prom[i].reg, 
sc->bbp_prom[i].val); } return 0; } static void rum_clr_shkey_regs(struct rum_softc *sc) { rum_write(sc, RT2573_SEC_CSR0, 0); rum_write(sc, RT2573_SEC_CSR1, 0); rum_write(sc, RT2573_SEC_CSR5, 0); } static int rum_init(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; int i, ret; RUM_LOCK(sc); if (sc->sc_running) { ret = 0; goto end; } /* initialize MAC registers to default values */ for (i = 0; i < nitems(rum_def_mac); i++) rum_write(sc, rum_def_mac[i].reg, rum_def_mac[i].val); /* reset some WME parameters to default values */ sc->wme_params[0].wmep_aifsn = 2; sc->wme_params[0].wmep_logcwmin = 4; sc->wme_params[0].wmep_logcwmax = 10; /* set host ready */ rum_write(sc, RT2573_MAC_CSR1, RT2573_RESET_ASIC | RT2573_RESET_BBP); rum_write(sc, RT2573_MAC_CSR1, 0); /* wait for BBP/RF to wakeup */ if ((ret = rum_bbp_wakeup(sc)) != 0) goto end; if ((ret = rum_bbp_init(sc)) != 0) goto end; /* select default channel */ rum_select_band(sc, ic->ic_curchan); rum_select_antenna(sc); rum_set_chan(sc, ic->ic_curchan); /* clear STA registers */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); /* clear security registers (if required) */ if (sc->sc_clr_shkeys == 0) { rum_clr_shkey_regs(sc); sc->sc_clr_shkeys = 1; } rum_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* initialize ASIC */ rum_write(sc, RT2573_MAC_CSR1, RT2573_HOST_READY); /* * Allocate Tx and Rx xfer queues. */ rum_setup_tx_list(sc); /* update Rx filter */ tmp = rum_read(sc, RT2573_TXRX_CSR0) & 0xffff; tmp |= RT2573_DROP_PHY_ERROR | RT2573_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2573_DROP_CTL | RT2573_DROP_VER_ERROR | RT2573_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RT2573_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RT2573_DROP_NOT_TO_ME; } rum_write(sc, RT2573_TXRX_CSR0, tmp); sc->sc_running = 1; usbd_xfer_set_stall(sc->sc_xfer[RUM_BULK_WR]); usbd_transfer_start(sc->sc_xfer[RUM_BULK_RD]); end: RUM_UNLOCK(sc); if (ret != 0) rum_stop(sc); return ret; } static void rum_stop(struct rum_softc *sc) { RUM_LOCK(sc); if (!sc->sc_running) { RUM_UNLOCK(sc); return; } sc->sc_running = 0; RUM_UNLOCK(sc); /* * Drain the USB transfers, if not already drained: */ usbd_transfer_drain(sc->sc_xfer[RUM_BULK_WR]); usbd_transfer_drain(sc->sc_xfer[RUM_BULK_RD]); RUM_LOCK(sc); rum_unsetup_tx_list(sc); /* disable Rx */ rum_setbits(sc, RT2573_TXRX_CSR0, RT2573_DISABLE_RX); /* reset ASIC */ rum_write(sc, RT2573_MAC_CSR1, RT2573_RESET_ASIC | RT2573_RESET_BBP); rum_write(sc, RT2573_MAC_CSR1, 0); RUM_UNLOCK(sc); } static void rum_load_microcode(struct rum_softc *sc, const uint8_t *ucode, size_t size) { uint16_t reg = RT2573_MCU_CODE_BASE; usb_error_t err; /* copy firmware image into NIC */ for (; size >= 4; reg += 4, ucode += 4, size -= 4) { err = rum_write(sc, reg, UGETDW(ucode)); if (err) { /* firmware already loaded ? */ device_printf(sc->sc_dev, "Firmware load " "failure! 
(ignored)\n"); break; } } err = rum_do_mcu_request(sc, RT2573_MCU_RUN); if (err != USB_ERR_NORMAL_COMPLETION) { device_printf(sc->sc_dev, "could not run firmware: %s\n", usbd_errstr(err)); } /* give the chip some time to boot */ rum_pause(sc, hz / 8); } static int rum_set_sleep_time(struct rum_softc *sc, uint16_t bintval) { struct ieee80211com *ic = &sc->sc_ic; usb_error_t uerror; int exp, delay; RUM_LOCK_ASSERT(sc); exp = ic->ic_lintval / bintval; delay = ic->ic_lintval % bintval; if (exp > RT2573_TBCN_EXP_MAX) exp = RT2573_TBCN_EXP_MAX; if (delay > RT2573_TBCN_DELAY_MAX) delay = RT2573_TBCN_DELAY_MAX; uerror = rum_modbits(sc, RT2573_MAC_CSR11, RT2573_TBCN_EXP(exp) | RT2573_TBCN_DELAY(delay), RT2573_TBCN_EXP(RT2573_TBCN_EXP_MAX) | RT2573_TBCN_DELAY(RT2573_TBCN_DELAY_MAX)); if (uerror != USB_ERR_NORMAL_COMPLETION) return (EIO); sc->sc_sleep_time = IEEE80211_TU_TO_TICKS(exp * bintval + delay); return (0); } static int rum_reset(struct ieee80211vap *vap, u_long cmd) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; struct rum_softc *sc = ic->ic_softc; int error; switch (cmd) { case IEEE80211_IOC_POWERSAVE: error = 0; break; case IEEE80211_IOC_POWERSAVESLEEP: ni = ieee80211_ref_node(vap->iv_bss); RUM_LOCK(sc); error = rum_set_sleep_time(sc, ni->ni_intval); if (vap->iv_state == IEEE80211_S_SLEEP) { /* Use new values for wakeup timer. */ rum_clrbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); rum_setbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); } /* XXX send reassoc */ RUM_UNLOCK(sc); ieee80211_free_node(ni); break; default: error = ENETRESET; break; } return (error); } static int rum_set_beacon(struct rum_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rum_vap *rvp = RUM_VAP(vap); struct mbuf *m = rvp->bcn_mbuf; const struct ieee80211_txparam *tp; struct rum_tx_desc desc; RUM_LOCK_ASSERT(sc); if (m == NULL) return EINVAL; if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) return EINVAL; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; rum_setup_tx_desc(sc, &desc, NULL, RT2573_TX_TIMESTAMP, RT2573_TX_HWSEQ, 0, 0, m->m_pkthdr.len, tp->mgmtrate); /* copy the Tx descriptor into NIC memory */ if (rum_write_multi(sc, RT2573_HW_BCN_BASE(0), (uint8_t *)&desc, RT2573_TX_DESC_SIZE) != 0) return EIO; /* copy beacon header and payload into NIC memory */ if (rum_write_multi(sc, RT2573_HW_BCN_BASE(0) + RT2573_TX_DESC_SIZE, mtod(m, uint8_t *), m->m_pkthdr.len) != 0) return EIO; return 0; } static int rum_alloc_beacon(struct rum_softc *sc, struct ieee80211vap *vap) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; if (ni->ni_chan == IEEE80211_CHAN_ANYC) return EINVAL; m = ieee80211_beacon_alloc(ni); if (m == NULL) return ENOMEM; if (rvp->bcn_mbuf != NULL) m_freem(rvp->bcn_mbuf); rvp->bcn_mbuf = m; return (rum_set_beacon(sc, vap)); } static void rum_update_beacon_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211vap *vap = data->vap; rum_set_beacon(sc, vap); } static void rum_update_beacon(struct ieee80211vap *vap, int item) { struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m = rvp->bcn_mbuf; int mcast = 0; RUM_LOCK(sc); if (m == NULL) { m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate beacon frame\n", __func__); RUM_UNLOCK(sc); return; } rvp->bcn_mbuf = m; } 
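/*
 * Illustrative sketch (not driver code): how rum_set_sleep_time() above
 * derives the hardware wakeup timer from the listen interval.  The listen
 * interval is split into a whole number of beacon intervals (exp) plus a
 * remainder (delay), each clamped to a hardware maximum.  The *_MAX values
 * below are stand-ins for RT2573_TBCN_EXP_MAX/RT2573_TBCN_DELAY_MAX, not
 * the real register limits.
 */
#define TBCN_EXP_MAX	3	/* stand-in maximum */
#define TBCN_DELAY_MAX	15	/* stand-in maximum */

struct sleep_timer { int exp; int delay; };

static struct sleep_timer
sleep_timer_calc(int lintval, int bintval)
{
	struct sleep_timer t;

	t.exp = lintval / bintval;	/* whole beacon intervals */
	t.delay = lintval % bintval;	/* remaining time units */
	if (t.exp > TBCN_EXP_MAX)
		t.exp = TBCN_EXP_MAX;
	if (t.delay > TBCN_DELAY_MAX)
		t.delay = TBCN_DELAY_MAX;
	return (t);
}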
switch (item) { case IEEE80211_BEACON_ERP: rum_update_slot(ic); break; case IEEE80211_BEACON_TIM: mcast = 1; /*TODO*/ break; default: break; } RUM_UNLOCK(sc); setbit(bo->bo_flags, item); ieee80211_beacon_update(ni, m, mcast); rum_cmd_sleepable(sc, &vap, sizeof(vap), 0, rum_update_beacon_cb); } static int rum_common_key_set(struct rum_softc *sc, struct ieee80211_key *k, uint16_t base) { if (rum_write_multi(sc, base, k->wk_key, k->wk_keylen)) return EIO; if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP) { if (rum_write_multi(sc, base + IEEE80211_KEYBUF_SIZE, k->wk_txmic, 8)) return EIO; if (rum_write_multi(sc, base + IEEE80211_KEYBUF_SIZE + 8, k->wk_rxmic, 8)) return EIO; } return 0; } static void rum_group_key_set_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; uint8_t mode; if (sc->sc_clr_shkeys == 0) { rum_clr_shkey_regs(sc); sc->sc_clr_shkeys = 1; } mode = rum_crypto_mode(sc, k->wk_cipher->ic_cipher, k->wk_keylen); if (mode == 0) goto print_err; DPRINTFN(1, "setting group key %d for vap %d, mode %d " "(tx %s, rx %s)\n", k->wk_keyix, rvp_id, mode, (k->wk_flags & IEEE80211_KEY_XMIT) ? "on" : "off", (k->wk_flags & IEEE80211_KEY_RECV) ? "on" : "off"); /* Install the key. */ if (rum_common_key_set(sc, k, RT2573_SKEY(rvp_id, k->wk_keyix)) != 0) goto print_err; /* Set cipher mode. */ if (rum_modbits(sc, rvp_id < 2 ? RT2573_SEC_CSR1 : RT2573_SEC_CSR5, mode << (rvp_id % 2 + k->wk_keyix) * RT2573_SKEY_MAX, RT2573_MODE_MASK << (rvp_id % 2 + k->wk_keyix) * RT2573_SKEY_MAX) != 0) goto print_err; /* Mark this key as valid. */ if (rum_setbits(sc, RT2573_SEC_CSR0, 1 << (rvp_id * RT2573_SKEY_MAX + k->wk_keyix)) != 0) goto print_err; return; print_err: device_printf(sc->sc_dev, "%s: cannot set group key %d for vap %d\n", __func__, k->wk_keyix, rvp_id); } static void rum_group_key_del_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; DPRINTF("%s: removing group key %d for vap %d\n", __func__, k->wk_keyix, rvp_id); rum_clrbits(sc, rvp_id < 2 ? RT2573_SEC_CSR1 : RT2573_SEC_CSR5, RT2573_MODE_MASK << (rvp_id % 2 + k->wk_keyix) * RT2573_SKEY_MAX); rum_clrbits(sc, RT2573_SEC_CSR0, rvp_id * RT2573_SKEY_MAX + k->wk_keyix); } static void rum_pair_key_set_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; uint8_t buf[IEEE80211_ADDR_LEN + 1]; uint8_t mode; mode = rum_crypto_mode(sc, k->wk_cipher->ic_cipher, k->wk_keylen); if (mode == 0) goto print_err; DPRINTFN(1, "setting pairwise key %d for vap %d, mode %d " "(tx %s, rx %s)\n", k->wk_keyix, rvp_id, mode, (k->wk_flags & IEEE80211_KEY_XMIT) ? "on" : "off", (k->wk_flags & IEEE80211_KEY_RECV) ? "on" : "off"); /* Install the key. */ if (rum_common_key_set(sc, k, RT2573_PKEY(k->wk_keyix)) != 0) goto print_err; IEEE80211_ADDR_COPY(buf, k->wk_macaddr); buf[IEEE80211_ADDR_LEN] = mode; /* Set transmitter address and cipher mode. */ if (rum_write_multi(sc, RT2573_ADDR_ENTRY(k->wk_keyix), buf, sizeof buf) != 0) goto print_err; /* Enable key table lookup for this vap. */ if (sc->vap_key_count[rvp_id]++ == 0) if (rum_setbits(sc, RT2573_SEC_CSR4, 1 << rvp_id) != 0) goto print_err; /* Mark this key as valid. */ if (rum_setbits(sc, k->wk_keyix < 32 ? 
RT2573_SEC_CSR2 : RT2573_SEC_CSR3, 1 << (k->wk_keyix % 32)) != 0) goto print_err; return; print_err: device_printf(sc->sc_dev, "%s: cannot set pairwise key %d, vap %d\n", __func__, k->wk_keyix, rvp_id); } static void rum_pair_key_del_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; DPRINTF("%s: removing key %d\n", __func__, k->wk_keyix); rum_clrbits(sc, (k->wk_keyix < 32) ? RT2573_SEC_CSR2 : RT2573_SEC_CSR3, 1 << (k->wk_keyix % 32)); sc->keys_bmap &= ~(1ULL << k->wk_keyix); if (--sc->vap_key_count[rvp_id] == 0) rum_clrbits(sc, RT2573_SEC_CSR4, 1 << rvp_id); } static int rum_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct rum_softc *sc = vap->iv_ic->ic_softc; uint8_t i; if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { RUM_LOCK(sc); for (i = 0; i < RT2573_ADDR_MAX; i++) { if ((sc->keys_bmap & (1ULL << i)) == 0) { sc->keys_bmap |= (1ULL << i); *keyix = i; break; } } RUM_UNLOCK(sc); if (i == RT2573_ADDR_MAX) { device_printf(sc->sc_dev, "%s: no free space in the key table\n", __func__); return 0; } } else *keyix = 0; } else { *keyix = k - vap->iv_nw_keys; } *rxkeyix = *keyix; return 1; } static int rum_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct rum_softc *sc = vap->iv_ic->ic_softc; int group; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return 1; } group = k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; return !rum_cmd_sleepable(sc, k, sizeof(*k), 0, group ? rum_group_key_set_cb : rum_pair_key_set_cb); } static int rum_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct rum_softc *sc = vap->iv_ic->ic_softc; int group; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return 1; } group = k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; return !rum_cmd_sleepable(sc, k, sizeof(*k), 0, group ? rum_group_key_del_cb : rum_pair_key_del_cb); } static int rum_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct rum_softc *sc = ni->ni_ic->ic_softc; int ret; RUM_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!sc->sc_running) { ret = ENETDOWN; goto bad; } if (sc->tx_nfree < RUM_TX_MINFREE) { ret = EIO; goto bad; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if ((ret = rum_tx_mgt(sc, m, ni)) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
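/*
 * Illustrative sketch (not driver code): the first-free-bit scan that
 * rum_key_alloc() above uses to hand out hardware key-table slots from a
 * 64-bit bitmap.  MAX_SLOTS stands in for RT2573_ADDR_MAX; the real code
 * performs the scan under the softc lock.
 */
#include <stdint.h>

#define MAX_SLOTS	64	/* stand-in for RT2573_ADDR_MAX */

/* Returns the allocated slot, or -1 when the table is full. */
static int
key_slot_alloc(uint64_t *bmap)
{
	int i;

	for (i = 0; i < MAX_SLOTS; i++) {
		if ((*bmap & (1ULL << i)) == 0) {
			*bmap |= (1ULL << i);
			return (i);
		}
	}
	return (-1);
}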
*/ if ((ret = rum_tx_raw(sc, m, ni, params)) != 0) goto bad; } RUM_UNLOCK(sc); return 0; bad: RUM_UNLOCK(sc); m_freem(m); return ret; } static void rum_ratectl_start(struct rum_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct rum_vap *rvp = RUM_VAP(vap); /* clear statistic registers (STA_CSR0 to STA_CSR5) */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp); } static void rum_ratectl_timeout(void *arg) { struct rum_vap *rvp = arg; struct ieee80211vap *vap = &rvp->vap; struct ieee80211com *ic = vap->iv_ic; ieee80211_runtask(ic, &rvp->ratectl_task); } static void rum_ratectl_task(void *arg, int pending) { struct rum_vap *rvp = arg; struct ieee80211vap *vap = &rvp->vap; struct rum_softc *sc = vap->iv_ic->ic_softc; struct ieee80211_node *ni; int ok[3], fail; int sum, success, retrycnt; RUM_LOCK(sc); /* read and clear statistic registers (STA_CSR0 to STA_CSR5) */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof(sc->sta)); ok[0] = (le32toh(sc->sta[4]) & 0xffff); /* TX ok w/o retry */ ok[1] = (le32toh(sc->sta[4]) >> 16); /* TX ok w/ one retry */ ok[2] = (le32toh(sc->sta[5]) & 0xffff); /* TX ok w/ multiple retries */ fail = (le32toh(sc->sta[5]) >> 16); /* TX retry-fail count */ success = ok[0] + ok[1] + ok[2]; sum = success + fail; /* XXX at least */ retrycnt = ok[1] + ok[2] * 2 + fail * (rvp->maxretry + 1); if (sum != 0) { ni = ieee80211_ref_node(vap->iv_bss); ieee80211_ratectl_tx_update(vap, ni, &sum, &ok, &retrycnt); (void) ieee80211_ratectl_rate(ni, NULL, 0); ieee80211_free_node(ni); } /* count TX retry-fail as Tx errors */ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, fail); usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp); RUM_UNLOCK(sc); } static void rum_scan_start(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; RUM_LOCK(sc); rum_abort_tsf_sync(sc); rum_set_bssid(sc, ieee80211broadcastaddr); RUM_UNLOCK(sc); } static void rum_scan_end(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; if (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) { RUM_LOCK(sc); if (ic->ic_opmode != IEEE80211_M_AHDEMO) rum_enable_tsf_sync(sc); else rum_enable_tsf(sc); rum_set_bssid(sc, sc->sc_bssid); RUM_UNLOCK(sc); } } static void rum_set_channel(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; RUM_LOCK(sc); rum_set_chan(sc, ic->ic_curchan); RUM_UNLOCK(sc); } static int rum_get_rssi(struct rum_softc *sc, uint8_t raw) { struct ieee80211com *ic = &sc->sc_ic; int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No RSSI mapping * * NB: Since RSSI is relative to noise floor, -1 is * adequate for caller to know error happened. 
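/*
 * Illustrative sketch (not driver code): the retry-count estimate computed
 * in rum_ratectl_task() above from the STA_CSR4/5 counters.  The hardware
 * only distinguishes "ok after one retry" and "ok after several retries",
 * so multi-retry successes are counted as two attempts (a lower bound, as
 * the "XXX at least" comment notes) and every failed frame as
 * maxretry + 1 attempts.
 */
struct tx_stats {
	int ok_noretry;		/* TX ok without retry */
	int ok_oneretry;	/* TX ok after one retry */
	int ok_multiretry;	/* TX ok after several retries */
	int fail;		/* TX gave up after maxretry */
};

static int
estimate_retries(const struct tx_stats *st, int maxretry)
{
	return (st->ok_oneretry + st->ok_multiretry * 2 +
	    st->fail * (maxretry + 1));
}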
*/ return -1; } rssi = (2 * agc) - RT2573_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (!sc->ext_5ghz_lna && lna != 1) rssi += 4; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; } static int rum_pause(struct rum_softc *sc, int timeout) { usb_pause_mtx(&sc->sc_mtx, timeout); return (0); } static device_method_t rum_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rum_match), DEVMETHOD(device_attach, rum_attach), DEVMETHOD(device_detach, rum_detach), DEVMETHOD_END }; static driver_t rum_driver = { .name = "rum", .methods = rum_methods, .size = sizeof(struct rum_softc), }; static devclass_t rum_devclass; DRIVER_MODULE(rum, uhub, rum_driver, rum_devclass, NULL, 0); MODULE_DEPEND(rum, wlan, 1, 1, 1); MODULE_DEPEND(rum, usb, 1, 1, 1); MODULE_VERSION(rum, 1); USB_PNP_HOST_INFO(rum_devs); diff --git a/sys/dev/usb/wlan/if_ural.c b/sys/dev/usb/wlan/if_ural.c index 53cfbbdc55a2..259c1581a29b 100644 --- a/sys/dev/usb/wlan/if_ural.c +++ b/sys/dev/usb/wlan/if_ural.c @@ -1,2219 +1,2219 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Copyright (c) 2006, 2008 * Hans Petter Selasky * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2500USB chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR ural_debug #include #include #include #ifdef USB_DEBUG static int ural_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, ural, CTLFLAG_RW, 0, "USB ural"); SYSCTL_INT(_hw_usb_ural, OID_AUTO, debug, CTLFLAG_RWTUN, &ural_debug, 0, "Debug level"); #endif #define URAL_RSSI(rssi) \ ((rssi) > (RAL_NOISE_FLOOR + RAL_RSSI_CORR) ? 
\ ((rssi) - (RAL_NOISE_FLOOR + RAL_RSSI_CORR)) : 0) /* various supported device vendors/products */ static const STRUCT_USB_HOST_ID ural_devs[] = { #define URAL_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } URAL_DEV(ASUS, WL167G), URAL_DEV(ASUS, RT2570), URAL_DEV(BELKIN, F5D7050), URAL_DEV(BELKIN, F5D7051), URAL_DEV(CISCOLINKSYS, HU200TS), URAL_DEV(CISCOLINKSYS, WUSB54G), URAL_DEV(CISCOLINKSYS, WUSB54GP), URAL_DEV(CONCEPTRONIC2, C54RU), URAL_DEV(DLINK, DWLG122), URAL_DEV(GIGABYTE, GN54G), URAL_DEV(GIGABYTE, GNWBKG), URAL_DEV(GUILLEMOT, HWGUSB254), URAL_DEV(MELCO, KG54), URAL_DEV(MELCO, KG54AI), URAL_DEV(MELCO, KG54YB), URAL_DEV(MELCO, NINWIFI), URAL_DEV(MSI, RT2570), URAL_DEV(MSI, RT2570_2), URAL_DEV(MSI, RT2570_3), URAL_DEV(NOVATECH, NV902), URAL_DEV(RALINK, RT2570), URAL_DEV(RALINK, RT2570_2), URAL_DEV(RALINK, RT2570_3), URAL_DEV(SIEMENS2, WL54G), URAL_DEV(SMC, 2862WG), URAL_DEV(SPHAIRON, UB801R), URAL_DEV(SURECOM, RT2570), URAL_DEV(VTECH, RT2570), URAL_DEV(ZINWELL, RT2570), #undef URAL_DEV }; static usb_callback_t ural_bulk_read_callback; static usb_callback_t ural_bulk_write_callback; static usb_error_t ural_do_request(struct ural_softc *sc, struct usb_device_request *req, void *data); static struct ieee80211vap *ural_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void ural_vap_delete(struct ieee80211vap *); static void ural_tx_free(struct ural_tx_data *, int); static void ural_setup_tx_list(struct ural_softc *); static void ural_unsetup_tx_list(struct ural_softc *); static int ural_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *, uint32_t, int, int); static int ural_tx_bcn(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_tx_mgt(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_tx_data(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_transmit(struct ieee80211com *, struct mbuf *); static void ural_start(struct ural_softc *); static void ural_parent(struct ieee80211com *); static void ural_set_testmode(struct ural_softc *); static void ural_eeprom_read(struct ural_softc *, uint16_t, void *, int); static uint16_t ural_read(struct ural_softc *, uint16_t); static void ural_read_multi(struct ural_softc *, uint16_t, void *, int); static void ural_write(struct ural_softc *, uint16_t, uint16_t); static void ural_write_multi(struct ural_softc *, uint16_t, void *, int) __unused; static void ural_bbp_write(struct ural_softc *, uint8_t, uint8_t); static uint8_t ural_bbp_read(struct ural_softc *, uint8_t); static void ural_rf_write(struct ural_softc *, uint8_t, uint32_t); static void ural_scan_start(struct ieee80211com *); static void ural_scan_end(struct ieee80211com *); static void ural_set_channel(struct ieee80211com *); static void ural_set_chan(struct ural_softc *, struct ieee80211_channel *); static void ural_disable_rf_tune(struct ural_softc *); static void ural_enable_tsf_sync(struct ural_softc *); static void ural_enable_tsf(struct ural_softc *); static void ural_update_slot(struct ural_softc *); static void ural_set_txpreamble(struct ural_softc *); static void ural_set_basicrates(struct ural_softc *, const struct ieee80211_channel *); static void ural_set_bssid(struct ural_softc *, const uint8_t *); static void ural_set_macaddr(struct ural_softc *, const uint8_t *); static void 
ural_update_promisc(struct ieee80211com *); static void ural_setpromisc(struct ural_softc *); static const char *ural_get_rf(int); static void ural_read_eeprom(struct ural_softc *); static int ural_bbp_init(struct ural_softc *); static void ural_set_txantenna(struct ural_softc *, int); static void ural_set_rxantenna(struct ural_softc *, int); static void ural_init(struct ural_softc *); static void ural_stop(struct ural_softc *); static int ural_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void ural_ratectl_start(struct ural_softc *, struct ieee80211_node *); static void ural_ratectl_timeout(void *); static void ural_ratectl_task(void *, int); static int ural_pause(struct ural_softc *sc, int timeout); /* * Default values for MAC registers; values taken from the reference driver. */ static const struct { uint16_t reg; uint16_t val; } ural_def_mac[] = { { RAL_TXRX_CSR5, 0x8c8d }, { RAL_TXRX_CSR6, 0x8b8a }, { RAL_TXRX_CSR7, 0x8687 }, { RAL_TXRX_CSR8, 0x0085 }, { RAL_MAC_CSR13, 0x1111 }, { RAL_MAC_CSR14, 0x1e11 }, { RAL_TXRX_CSR21, 0xe78f }, { RAL_MAC_CSR9, 0xff1d }, { RAL_MAC_CSR11, 0x0002 }, { RAL_MAC_CSR22, 0x0053 }, { RAL_MAC_CSR15, 0x0000 }, { RAL_MAC_CSR8, RAL_FRAME_SIZE }, { RAL_TXRX_CSR19, 0x0000 }, { RAL_TXRX_CSR18, 0x005a }, { RAL_PHY_CSR2, 0x0000 }, { RAL_TXRX_CSR0, 0x1ec0 }, { RAL_PHY_CSR4, 0x000f } }; /* * Default values for BBP registers; values taken from the reference driver. */ static const struct { uint8_t reg; uint8_t val; } ural_def_bbp[] = { { 3, 0x02 }, { 4, 0x19 }, { 14, 0x1c }, { 15, 0x30 }, { 16, 0xac }, { 17, 0x48 }, { 18, 0x18 }, { 19, 0xff }, { 20, 0x1e }, { 21, 0x08 }, { 22, 0x08 }, { 23, 0x08 }, { 24, 0x80 }, { 25, 0x50 }, { 26, 0x08 }, { 27, 0x23 }, { 30, 0x10 }, { 31, 0x2b }, { 32, 0xb9 }, { 34, 0x12 }, { 35, 0x50 }, { 39, 0xc4 }, { 40, 0x02 }, { 41, 0x60 }, { 53, 0x10 }, { 54, 0x18 }, { 56, 0x08 }, { 57, 0x10 }, { 58, 0x08 }, { 61, 0x60 }, { 62, 0x10 }, { 75, 0xff } }; /* * Default values for RF register R2 indexed by channel numbers. */ static const uint32_t ural_rf2522_r2[] = { 0x307f6, 0x307fb, 0x30800, 0x30805, 0x3080a, 0x3080f, 0x30814, 0x30819, 0x3081e, 0x30823, 0x30828, 0x3082d, 0x30832, 0x3083e }; static const uint32_t ural_rf2523_r2[] = { 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 }; static const uint32_t ural_rf2524_r2[] = { 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 }; static const uint32_t ural_rf2525_r2[] = { 0x20327, 0x20328, 0x20329, 0x2032a, 0x2032b, 0x2032c, 0x2032d, 0x2032e, 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20346 }; static const uint32_t ural_rf2525_hi_r2[] = { 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20344, 0x20345, 0x20346, 0x20347, 0x20348, 0x20349, 0x2034a, 0x2034b, 0x2034e }; static const uint32_t ural_rf2525e_r2[] = { 0x2044d, 0x2044e, 0x2044f, 0x20460, 0x20461, 0x20462, 0x20463, 0x20464, 0x20465, 0x20466, 0x20467, 0x20468, 0x20469, 0x2046b }; static const uint32_t ural_rf2526_hi_r2[] = { 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d, 0x0022d, 0x0022e, 0x0022e, 0x0022f, 0x0022d, 0x00240, 0x00240, 0x00241 }; static const uint32_t ural_rf2526_r2[] = { 0x00226, 0x00227, 0x00227, 0x00228, 0x00228, 0x00229, 0x00229, 0x0022a, 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d }; /* * For dual-band RF, RF registers R1 and R4 also depend on channel number; * values taken from the reference driver. 
*/ static const struct { uint8_t chan; uint32_t r1; uint32_t r2; uint32_t r4; } ural_rf5222[] = { { 1, 0x08808, 0x0044d, 0x00282 }, { 2, 0x08808, 0x0044e, 0x00282 }, { 3, 0x08808, 0x0044f, 0x00282 }, { 4, 0x08808, 0x00460, 0x00282 }, { 5, 0x08808, 0x00461, 0x00282 }, { 6, 0x08808, 0x00462, 0x00282 }, { 7, 0x08808, 0x00463, 0x00282 }, { 8, 0x08808, 0x00464, 0x00282 }, { 9, 0x08808, 0x00465, 0x00282 }, { 10, 0x08808, 0x00466, 0x00282 }, { 11, 0x08808, 0x00467, 0x00282 }, { 12, 0x08808, 0x00468, 0x00282 }, { 13, 0x08808, 0x00469, 0x00282 }, { 14, 0x08808, 0x0046b, 0x00286 }, { 36, 0x08804, 0x06225, 0x00287 }, { 40, 0x08804, 0x06226, 0x00287 }, { 44, 0x08804, 0x06227, 0x00287 }, { 48, 0x08804, 0x06228, 0x00287 }, { 52, 0x08804, 0x06229, 0x00287 }, { 56, 0x08804, 0x0622a, 0x00287 }, { 60, 0x08804, 0x0622b, 0x00287 }, { 64, 0x08804, 0x0622c, 0x00287 }, { 100, 0x08804, 0x02200, 0x00283 }, { 104, 0x08804, 0x02201, 0x00283 }, { 108, 0x08804, 0x02202, 0x00283 }, { 112, 0x08804, 0x02203, 0x00283 }, { 116, 0x08804, 0x02204, 0x00283 }, { 120, 0x08804, 0x02205, 0x00283 }, { 124, 0x08804, 0x02206, 0x00283 }, { 128, 0x08804, 0x02207, 0x00283 }, { 132, 0x08804, 0x02208, 0x00283 }, { 136, 0x08804, 0x02209, 0x00283 }, { 140, 0x08804, 0x0220a, 0x00283 }, { 149, 0x08808, 0x02429, 0x00281 }, { 153, 0x08808, 0x0242b, 0x00281 }, { 157, 0x08808, 0x0242d, 0x00281 }, { 161, 0x08808, 0x0242f, 0x00281 } }; static const struct usb_config ural_config[URAL_N_TRANSFER] = { [URAL_BULK_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE + 4), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = ural_bulk_write_callback, .timeout = 5000, /* ms */ }, [URAL_BULK_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = (RAL_FRAME_SIZE + RAL_RX_DESC_SIZE), .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = ural_bulk_read_callback, }, }; static device_probe_t ural_match; static device_attach_t ural_attach; static device_detach_t ural_detach; static device_method_t ural_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ural_match), DEVMETHOD(device_attach, ural_attach), DEVMETHOD(device_detach, ural_detach), DEVMETHOD_END }; static driver_t ural_driver = { .name = "ural", .methods = ural_methods, .size = sizeof(struct ural_softc), }; static devclass_t ural_devclass; DRIVER_MODULE(ural, uhub, ural_driver, ural_devclass, NULL, 0); MODULE_DEPEND(ural, usb, 1, 1, 1); MODULE_DEPEND(ural, wlan, 1, 1, 1); MODULE_VERSION(ural, 1); USB_PNP_HOST_INFO(ural_devs); static int ural_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != RAL_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(ural_devs, sizeof(ural_devs), uaa)); } static int ural_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct ural_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)]; uint8_t iface_index; int error; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); mbufq_init(&sc->sc_snd, ifqmaxlen); iface_index = RAL_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, ural_config, URAL_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB 
transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } RAL_LOCK(sc); /* retrieve RT2570 rev. no */ sc->asic_rev = ural_read(sc, RAL_MAC_CSR0); /* retrieve MAC address and various other things from EEPROM */ ural_read_eeprom(sc); RAL_UNLOCK(sc); device_printf(self, "MAC/BBP RT2570 (rev 0x%02x), RF %s\n", sc->asic_rev, ural_get_rf(sc->rf_rev)); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(self); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_HOSTAP /* HostAp mode supported */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* bg scanning supported */ | IEEE80211_C_WPA /* 802.11i */ ; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); if (sc->rf_rev == RAL_RF_5222) setbit(bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, bands); ieee80211_ifattach(ic); ic->ic_update_promisc = ural_update_promisc; ic->ic_raw_xmit = ural_raw_xmit; ic->ic_scan_start = ural_scan_start; ic->ic_scan_end = ural_scan_end; ic->ic_set_channel = ural_set_channel; ic->ic_parent = ural_parent; ic->ic_transmit = ural_transmit; ic->ic_vap_create = ural_vap_create; ic->ic_vap_delete = ural_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RAL_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RAL_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); detach: ural_detach(self); return (ENXIO); /* failure */ } static int ural_detach(device_t self) { struct ural_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; /* prevent further ioctls */ RAL_LOCK(sc); sc->sc_detached = 1; RAL_UNLOCK(sc); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, URAL_N_TRANSFER); /* free TX list, if any */ RAL_LOCK(sc); ural_unsetup_tx_list(sc); RAL_UNLOCK(sc); if (ic->ic_softc == sc) ieee80211_ifdetach(ic); mbufq_drain(&sc->sc_snd); mtx_destroy(&sc->sc_mtx); return (0); } static usb_error_t ural_do_request(struct ural_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); if (ural_pause(sc, hz / 100)) break; } return (err); } static struct ieee80211vap * ural_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ural_softc *sc = ic->ic_softc; struct ural_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; uvp = malloc(sizeof(struct ural_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = ural_newstate; usb_callout_init_mtx(&uvp->ratectl_ch, &sc->sc_mtx, 0); TASK_INIT(&uvp->ratectl_task, 0, 
ural_ratectl_task, uvp); ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void ural_vap_delete(struct ieee80211vap *vap) { struct ural_vap *uvp = URAL_VAP(vap); struct ieee80211com *ic = vap->iv_ic; usb_callout_drain(&uvp->ratectl_ch); ieee80211_draintask(ic, &uvp->ratectl_task); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static void ural_tx_free(struct ural_tx_data *data, int txerr) { struct ural_softc *sc = data->sc; if (data->m != NULL) { ieee80211_tx_complete(data->ni, data->m, txerr); data->m = NULL; data->ni = NULL; } STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } static void ural_setup_tx_list(struct ural_softc *sc) { struct ural_tx_data *data; int i; sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); for (i = 0; i < RAL_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; data->sc = sc; STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } static void ural_unsetup_tx_list(struct ural_softc *sc) { struct ural_tx_data *data; int i; /* make sure any subsequent use of the queues will fail */ sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); /* free up all node references and mbufs */ for (i = 0; i < RAL_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int ural_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ural_vap *uvp = URAL_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ural_softc *sc = ic->ic_softc; const struct ieee80211_txparam *tp; struct ieee80211_node *ni; struct mbuf *m; DPRINTF("%s -> %s\n", ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); RAL_LOCK(sc); usb_callout_stop(&uvp->ratectl_ch); switch (nstate) { case IEEE80211_S_INIT: if (vap->iv_state == IEEE80211_S_RUN) { /* abort TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); /* force tx led to stop blinking */ ural_write(sc, RAL_MAC_CSR20, 0); } break; case IEEE80211_S_RUN: ni = ieee80211_ref_node(vap->iv_bss); if (vap->iv_opmode != IEEE80211_M_MONITOR) { if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) goto fail; ural_update_slot(sc); ural_set_txpreamble(sc); ural_set_basicrates(sc, ic->ic_bsschan); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); ural_set_bssid(sc, sc->sc_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "could not allocate beacon\n"); goto fail; } ieee80211_ref_node(ni); if (ural_tx_bcn(sc, m, ni) != 0) { device_printf(sc->sc_dev, "could not send beacon\n"); goto fail; } } /* make tx led blink on tx (controlled by ASIC) */ ural_write(sc, RAL_MAC_CSR20, 1); if (vap->iv_opmode != IEEE80211_M_MONITOR) ural_enable_tsf_sync(sc); else ural_enable_tsf(sc); /* enable automatic rate adaptation */ /* XXX should use ic_bsschan but not valid until after newstate call below */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) ural_ratectl_start(sc, ni); ieee80211_free_node(ni); break; default: break; } RAL_UNLOCK(sc); IEEE80211_LOCK(ic); return (uvp->newstate(vap, nstate, arg)); fail: RAL_UNLOCK(sc); IEEE80211_LOCK(ic); ieee80211_free_node(ni); 
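/*
 * Returning -1 reports the failed transition back to net80211;
 * unlike the success path above, the saved uvp->newstate handler
 * is not invoked here.
 */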
return (-1); } static void ural_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct ural_softc *sc = usbd_xfer_softc(xfer); struct ieee80211vap *vap; struct ural_tx_data *data; struct mbuf *m; struct usb_page_cache *pc; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete, %d bytes\n", len); /* free resources */ data = usbd_xfer_get_priv(xfer); ural_tx_free(data, 0); usbd_xfer_set_priv(xfer, NULL); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->tx_q); if (data) { STAILQ_REMOVE_HEAD(&sc->tx_q, next); m = data->m; if (m->m_pkthdr.len > (int)(RAL_FRAME_SIZE + RAL_TX_DESC_SIZE)) { DPRINTFN(0, "data overflow, %u bytes\n", m->m_pkthdr.len); m->m_pkthdr.len = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE); } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &data->desc, RAL_TX_DESC_SIZE); usbd_m_copy_in(pc, RAL_TX_DESC_SIZE, m, 0, m->m_pkthdr.len); vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct ural_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = data->rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m); } /* xfer length needs to be a multiple of two! */ len = (RAL_TX_DESC_SIZE + m->m_pkthdr.len + 1) & ~1; if ((len % 64) == 0) len += 2; DPRINTFN(11, "sending frame len=%u xferlen=%u\n", m->m_pkthdr.len, len); usbd_xfer_set_frame_len(xfer, 0, len); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); } ural_start(sc); break; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); data = usbd_xfer_get_priv(xfer); if (data != NULL) { ural_tx_free(data, error); usbd_xfer_set_priv(xfer, NULL); } if (error == USB_ERR_STALLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); break; } } static void ural_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct ural_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; struct mbuf *m = NULL; struct usb_page_cache *pc; uint32_t flags; int8_t rssi = 0, nf = 0; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(15, "rx done, actlen=%d\n", len); if (len < (int)(RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN)) { DPRINTF("%s: xfer too short %d\n", device_get_nameunit(sc->sc_dev), len); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } len -= RAL_RX_DESC_SIZE; /* rx descriptor is located at the end */ pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, len, &sc->sc_rx_desc, RAL_RX_DESC_SIZE); rssi = URAL_RSSI(sc->sc_rx_desc.rssi); nf = RAL_NOISE_FLOOR; flags = le32toh(sc->sc_rx_desc.flags); if (flags & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) { /* * This should not happen since we did not * request to receive those frames when we * filled RAL_TXRX_CSR2: */ DPRINTFN(5, "PHY or CRC error\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF("could not allocate mbuf\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } usbd_copy_out(pc, 0, mtod(m, uint8_t *), len); /* finalize mbuf */ m->m_pkthdr.len = m->m_len = (flags >> 16) & 0xfff; if (ieee80211_radiotap_active(ic)) { struct ural_rx_radiotap_header *tap = &sc->sc_rxtap; /* XXX set once */ tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate, (flags & RAL_RX_OFDM) ? 
IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } /* Strip trailing 802.11 MAC FCS. */ m_adj(m, -IEEE80211_CRC_LEN); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * At the end of a USB callback it is always safe to unlock * the private mutex of a device! That is why we do the * "ieee80211_input" here, and not some lines up! */ RAL_UNLOCK(sc); if (m) { ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); } RAL_LOCK(sc); ural_start(sc); return; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static uint8_t ural_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc, uint32_t flags, int len, int rate) { struct ieee80211com *ic = &sc->sc_ic; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(RAL_TX_NEWSEQ); desc->flags |= htole32(len << 16); desc->wme = htole16(RAL_AIFSN(2) | RAL_LOGCWMIN(3) | RAL_LOGCWMAX(5)); desc->wme |= htole16(RAL_IVOFFSET(sizeof (struct ieee80211_frame))); /* setup PLCP fields */ desc->plcp_signal = ural_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RAL_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { if (rate == 0) rate = 2; /* avoid division by zero */ - plcp_length = (16 * len + rate - 1) / rate; + plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RAL_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } desc->iv = 0; desc->eiv = 0; } #define RAL_TX_TIMEOUT 5000 static int ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_txparam *tp; struct ural_tx_data *data; if (sc->tx_nfree == 0) { m_freem(m0); ieee80211_free_node(ni); return (EIO); } if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) { m_freem(m0); ieee80211_free_node(ni); return (ENXIO); } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; ural_setup_tx_desc(sc, &data->desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending beacon frame len=%u rate=%u\n", m0->m_pkthdr.len, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); 
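/*
 * The queued descriptor/mbuf pair is consumed by
 * ural_bulk_write_callback(), which copies the descriptor followed
 * by the frame into the bulk-write transfer buffer.
 */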
usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return (0); } static int ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_txparam *tp; struct ural_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; uint32_t flags; uint16_t dur; RAL_LOCK_ASSERT(sc, MA_OWNED); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } wh = mtod(m0, struct ieee80211_frame *); } data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RAL_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, tp->mgmtrate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RAL_TX_TIMESTAMP; } ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending mgt frame len=%u rate=%u\n", m0->m_pkthdr.len, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_sendprot(struct ural_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct ural_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort; uint16_t dur; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RAL_TX_RETRY(7); if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RAL_TX_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = mprot; data->ni = ieee80211_ref_node(ni); data->rate = protrate; ural_setup_tx_desc(sc, &data->desc, flags, mprot->m_pkthdr.len, protrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_tx_raw(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ural_tx_data *data; uint32_t flags; int error; int rate; RAL_LOCK_ASSERT(sc, MA_OWNED); KASSERT(params != NULL, ("no raw xmit params")); rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RAL_TX_ACK; if (params->ibp_flags & 
(IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = ural_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RAL_TX_IFS_SIFS; } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; /* XXX need to setup descriptor ourself */ ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending raw frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ural_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; uint32_t flags = 0; uint16_t dur; int error, rate; RAL_LOCK_ASSERT(sc, MA_OWNED); wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else rate = ni->ni_txrate; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = ural_sendprot(sc, m0, ni, prot, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RAL_TX_IFS_SIFS; } } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RAL_TX_ACK; flags |= RAL_TX_RETRY(7); dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); } ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending data frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]); return 0; } static int ural_transmit(struct ieee80211com *ic, struct mbuf *m) { struct ural_softc *sc = ic->ic_softc; int error; RAL_LOCK(sc); if (!sc->sc_running) { RAL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RAL_UNLOCK(sc); return (error); } ural_start(sc); RAL_UNLOCK(sc); return (0); } static void ural_start(struct ural_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; RAL_LOCK_ASSERT(sc, MA_OWNED); if (sc->sc_running == 0) return; while (sc->tx_nfree >= RAL_TX_MINFREE && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ural_tx_data(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } } } static void ural_parent(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; int startall = 0; RAL_LOCK(sc); if (sc->sc_detached) { RAL_UNLOCK(sc); return; } if 
(ic->ic_nrunning > 0) { if (sc->sc_running == 0) { ural_init(sc); startall = 1; } else ural_setpromisc(sc); } else if (sc->sc_running) ural_stop(sc); RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void ural_set_testmode(struct ural_softc *sc) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_VENDOR_REQUEST; USETW(req.wValue, 4); USETW(req.wIndex, 1); USETW(req.wLength, 0); error = ural_do_request(sc, &req, NULL); if (error != 0) { device_printf(sc->sc_dev, "could not set test mode: %s\n", usbd_errstr(error)); } } static void ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_EEPROM; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); error = ural_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } } static uint16_t ural_read(struct ural_softc *sc, uint16_t reg) { struct usb_device_request req; usb_error_t error; uint16_t val; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, sizeof (uint16_t)); error = ural_do_request(sc, &req, &val); if (error != 0) { device_printf(sc->sc_dev, "could not read MAC register: %s\n", usbd_errstr(error)); return 0; } return le16toh(val); } static void ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = ural_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read MAC register: %s\n", usbd_errstr(error)); } } static void ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_WRITE_MAC; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); error = ural_do_request(sc, &req, NULL); if (error != 0) { device_printf(sc->sc_dev, "could not write MAC register: %s\n", usbd_errstr(error)); } } static void ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_WRITE_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = ural_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not write MAC register: %s\n", usbd_errstr(error)); } } static void ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val) { uint16_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = reg << 8 | val; ural_write(sc, RAL_PHY_CSR7, tmp); } static uint8_t ural_bbp_read(struct ural_softc *sc, uint8_t reg) { uint16_t val; int ntries; val = RAL_BBP_WRITE | reg << 8; ural_write(sc, RAL_PHY_CSR7, val); for (ntries = 0; ntries < 100; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } 
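/* The BBP register value is returned in the low byte of PHY_CSR7. */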
return ural_read(sc, RAL_PHY_CSR7) & 0xff; } static void ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff); ural_write(sc, RAL_PHY_CSR10, tmp >> 16); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff); } static void ural_scan_start(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); ural_write(sc, RAL_TXRX_CSR19, 0); ural_set_bssid(sc, ieee80211broadcastaddr); RAL_UNLOCK(sc); } static void ural_scan_end(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); ural_enable_tsf_sync(sc); ural_set_bssid(sc, sc->sc_bssid); RAL_UNLOCK(sc); } static void ural_set_channel(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); ural_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } static void ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; uint8_t power, tmp; int i, chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; if (IEEE80211_IS_CHAN_2GHZ(c)) power = min(sc->txpow[chan - 1], 31); else power = 31; /* adjust txpower using ifconfig settings */ power -= (100 - ic->ic_txpowlimit) / 8; DPRINTFN(2, "setting channel to %u, txpower to %u\n", chan, power); switch (sc->rf_rev) { case RAL_RF_2522: ural_rf_write(sc, RAL_RF1, 0x00814); ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); break; case RAL_RF_2523: ural_rf_write(sc, RAL_RF1, 0x08804); ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2524: ural_rf_write(sc, RAL_RF1, 0x0c808); ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2525: ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2525E: ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); break; case RAL_RF_2526: ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]); ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); ural_rf_write(sc, RAL_RF1, 0x08804); ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); break; /* dual-band RF */ case RAL_RF_5222: for (i = 0; ural_rf5222[i].chan != chan; i++); ural_rf_write(sc, RAL_RF1, ural_rf5222[i].r1); ural_rf_write(sc, RAL_RF2, ural_rf5222[i].r2); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); ural_rf_write(sc, RAL_RF4, ural_rf5222[i].r4); break; } if (ic->ic_opmode != IEEE80211_M_MONITOR && (ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* set Japan filter bit for channel 14 */ tmp = ural_bbp_read(sc, 70); tmp &= ~RAL_JAPAN_FILTER; if (chan == 14) tmp |= RAL_JAPAN_FILTER; ural_bbp_write(sc, 70, tmp); /* clear CRC errors */ ural_read(sc, RAL_STA_CSR0); ural_pause(sc, hz / 100); ural_disable_rf_tune(sc); } /* XXX doesn't belong here */ /* update basic rate set */ ural_set_basicrates(sc, c); /* give the hardware some time to do the switchover */ ural_pause(sc, hz / 100); } /* * Disable RF auto-tuning. */ static void ural_disable_rf_tune(struct ural_softc *sc) { uint32_t tmp; if (sc->rf_rev != RAL_RF_2523) { tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; ural_rf_write(sc, RAL_RF1, tmp); } tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; ural_rf_write(sc, RAL_RF3, tmp); DPRINTFN(2, "disabling RF autotune\n"); } /* * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF * synchronization. */ static void ural_enable_tsf_sync(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t logcwmin, preload, tmp; /* first, disable TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); tmp = (16 * vap->iv_bss->ni_intval) << 4; ural_write(sc, RAL_TXRX_CSR18, tmp); logcwmin = (ic->ic_opmode == IEEE80211_M_IBSS) ? 2 : 0; preload = (ic->ic_opmode == IEEE80211_M_IBSS) ? 320 : 6; tmp = logcwmin << 12 | preload; ural_write(sc, RAL_TXRX_CSR20, tmp); /* finally, enable TSF synchronization */ tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RAL_ENABLE_TSF_SYNC(1); else tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR; ural_write(sc, RAL_TXRX_CSR19, tmp); DPRINTF("enabling TSF synchronization\n"); } static void ural_enable_tsf(struct ural_softc *sc) { /* first, disable TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); ural_write(sc, RAL_TXRX_CSR19, RAL_ENABLE_TSF | RAL_ENABLE_TSF_SYNC(2)); } #define RAL_RXTX_TURNAROUND 5 /* us */ static void ural_update_slot(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint16_t slottime, sifs, eifs; slottime = IEEE80211_GET_SLOTTIME(ic); /* * These settings may sound a bit inconsistent but this is what the * reference driver does. 
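 *
 * (For reference: the nominal 802.11 SIFS in the 2.4GHz band is
 * 10us, and RAL_RXTX_TURNAROUND compensates for the chip's RX/TX
 * turnaround time, so the 16us base used for 11b below appears to
 * be a vendor-driver quirk rather than a timing from the standard.)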
*/ if (ic->ic_curmode == IEEE80211_MODE_11B) { sifs = 16 - RAL_RXTX_TURNAROUND; eifs = 364; } else { sifs = 10 - RAL_RXTX_TURNAROUND; eifs = 64; } ural_write(sc, RAL_MAC_CSR10, slottime); ural_write(sc, RAL_MAC_CSR11, sifs); ural_write(sc, RAL_MAC_CSR12, eifs); } static void ural_set_txpreamble(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint16_t tmp; tmp = ural_read(sc, RAL_TXRX_CSR10); tmp &= ~RAL_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RAL_SHORT_PREAMBLE; ural_write(sc, RAL_TXRX_CSR10, tmp); } static void ural_set_basicrates(struct ural_softc *sc, const struct ieee80211_channel *c) { /* XXX wrong, take from rate set */ /* update basic rate set */ if (IEEE80211_IS_CHAN_5GHZ(c)) { /* 11a basic rates: 6, 12, 24Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x150); } else if (IEEE80211_IS_CHAN_ANYG(c)) { /* 11g basic rates: 1, 2, 5.5, 11, 6, 12, 24Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x15f); } else { /* 11b basic rates: 1, 2Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x3); } } static void ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid) { uint16_t tmp; tmp = bssid[0] | bssid[1] << 8; ural_write(sc, RAL_MAC_CSR5, tmp); tmp = bssid[2] | bssid[3] << 8; ural_write(sc, RAL_MAC_CSR6, tmp); tmp = bssid[4] | bssid[5] << 8; ural_write(sc, RAL_MAC_CSR7, tmp); DPRINTF("setting BSSID to %6D\n", bssid, ":"); } static void ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr) { uint16_t tmp; tmp = addr[0] | addr[1] << 8; ural_write(sc, RAL_MAC_CSR2, tmp); tmp = addr[2] | addr[3] << 8; ural_write(sc, RAL_MAC_CSR3, tmp); tmp = addr[4] | addr[5] << 8; ural_write(sc, RAL_MAC_CSR4, tmp); DPRINTF("setting MAC address to %6D\n", addr, ":"); } static void ural_setpromisc(struct ural_softc *sc) { uint32_t tmp; tmp = ural_read(sc, RAL_TXRX_CSR2); tmp &= ~RAL_DROP_NOT_TO_ME; if (sc->sc_ic.ic_promisc == 0) tmp |= RAL_DROP_NOT_TO_ME; ural_write(sc, RAL_TXRX_CSR2, tmp); DPRINTF("%s promiscuous mode\n", sc->sc_ic.ic_promisc ? 
"entering" : "leaving"); } static void ural_update_promisc(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); if (sc->sc_running) ural_setpromisc(sc); RAL_UNLOCK(sc); } static const char * ural_get_rf(int rev) { switch (rev) { case RAL_RF_2522: return "RT2522"; case RAL_RF_2523: return "RT2523"; case RAL_RF_2524: return "RT2524"; case RAL_RF_2525: return "RT2525"; case RAL_RF_2525E: return "RT2525e"; case RAL_RF_2526: return "RT2526"; case RAL_RF_5222: return "RT5222"; default: return "unknown"; } } static void ural_read_eeprom(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint16_t val; ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2); val = le16toh(val); sc->rf_rev = (val >> 11) & 0x7; sc->hw_radio = (val >> 10) & 0x1; sc->led_mode = (val >> 6) & 0x7; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; /* read MAC address */ ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_macaddr, 6); /* read default values for BBP registers */ ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); /* read Tx power for all b/g channels */ ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14); } static int ural_bbp_init(struct ural_softc *sc) { int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(ural_def_bbp); i++) ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val); #if 0 /* initialize BBP registers to values stored in EEPROM */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0xff) continue; ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } #endif return 0; } static void ural_set_txantenna(struct ural_softc *sc, int antenna) { uint16_t tmp; uint8_t tx; tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK; if (antenna == 1) tx |= RAL_BBP_ANTA; else if (antenna == 2) tx |= RAL_BBP_ANTB; else tx |= RAL_BBP_DIVERSITY; /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 || sc->rf_rev == RAL_RF_5222) tx |= RAL_BBP_FLIPIQ; ural_bbp_write(sc, RAL_BBP_TX, tx); /* update values in PHY_CSR5 and PHY_CSR6 */ tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7; ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7)); tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7; ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7)); } static void ural_set_rxantenna(struct ural_softc *sc, int antenna) { uint8_t rx; rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK; if (antenna == 1) rx |= RAL_BBP_ANTA; else if (antenna == 2) rx |= RAL_BBP_ANTB; else rx |= RAL_BBP_DIVERSITY; /* need to force no I/Q flip for RF 2525e and 2526 */ if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526) rx &= ~RAL_BBP_FLIPIQ; ural_bbp_write(sc, RAL_BBP_RX, rx); } static void ural_init(struct ural_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t tmp; int i, ntries; RAL_LOCK_ASSERT(sc, MA_OWNED); ural_set_testmode(sc); ural_write(sc, 0x308, 0x00f0); /* XXX magic */ ural_stop(sc); /* initialize MAC registers to default values */ for (i = 0; i < nitems(ural_def_mac); i++) ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val); /* wait for BBP and RF to wake up (this can take a long time!) 
*/ for (ntries = 0; ntries < 100; ntries++) { tmp = ural_read(sc, RAL_MAC_CSR17); if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) == (RAL_BBP_AWAKE | RAL_RF_AWAKE)) break; if (ural_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wakeup\n"); goto fail; } /* we're ready! */ ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY); /* set basic rate set (will be updated later) */ ural_write(sc, RAL_TXRX_CSR11, 0x15f); if (ural_bbp_init(sc) != 0) goto fail; ural_set_chan(sc, ic->ic_curchan); /* clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); ural_set_txantenna(sc, sc->tx_ant); ural_set_rxantenna(sc, sc->rx_ant); ural_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* * Allocate Tx and Rx xfer queues. */ ural_setup_tx_list(sc); /* kick Rx */ tmp = RAL_DROP_PHY | RAL_DROP_CRC; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RAL_DROP_CTL | RAL_DROP_BAD_VERSION; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RAL_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RAL_DROP_NOT_TO_ME; } ural_write(sc, RAL_TXRX_CSR2, tmp); sc->sc_running = 1; usbd_xfer_set_stall(sc->sc_xfer[URAL_BULK_WR]); usbd_transfer_start(sc->sc_xfer[URAL_BULK_RD]); return; fail: ural_stop(sc); } static void ural_stop(struct ural_softc *sc) { RAL_LOCK_ASSERT(sc, MA_OWNED); sc->sc_running = 0; /* * Drain all the transfers, if not already drained: */ RAL_UNLOCK(sc); usbd_transfer_drain(sc->sc_xfer[URAL_BULK_WR]); usbd_transfer_drain(sc->sc_xfer[URAL_BULK_RD]); RAL_LOCK(sc); ural_unsetup_tx_list(sc); /* disable Rx */ ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX); /* reset ASIC and BBP (but won't reset MAC registers!) */ ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP); /* wait a little */ ural_pause(sc, hz / 10); ural_write(sc, RAL_MAC_CSR1, 0); /* wait a little */ ural_pause(sc, hz / 10); } static int ural_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ural_softc *sc = ic->ic_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!sc->sc_running) { RAL_UNLOCK(sc); m_freem(m); return ENETDOWN; } if (sc->tx_nfree < RAL_TX_MINFREE) { RAL_UNLOCK(sc); m_freem(m); return EIO; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (ural_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
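 * (The rate, ACK policy and RTS/CTS protection are taken from the
 * supplied ieee80211_bpf_params instead of the node's txparms.)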
*/ if (ural_tx_raw(sc, m, ni, params) != 0) goto bad; } RAL_UNLOCK(sc); return 0; bad: RAL_UNLOCK(sc); return EIO; /* XXX */ } static void ural_ratectl_start(struct ural_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ural_vap *uvp = URAL_VAP(vap); /* clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); usb_callout_reset(&uvp->ratectl_ch, hz, ural_ratectl_timeout, uvp); } static void ural_ratectl_timeout(void *arg) { struct ural_vap *uvp = arg; struct ieee80211vap *vap = &uvp->vap; struct ieee80211com *ic = vap->iv_ic; ieee80211_runtask(ic, &uvp->ratectl_task); } static void ural_ratectl_task(void *arg, int pending) { struct ural_vap *uvp = arg; struct ieee80211vap *vap = &uvp->vap; struct ieee80211com *ic = vap->iv_ic; struct ural_softc *sc = ic->ic_softc; struct ieee80211_node *ni; int ok, fail; int sum, retrycnt; ni = ieee80211_ref_node(vap->iv_bss); RAL_LOCK(sc); /* read and clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof(sc->sta)); ok = sc->sta[7] + /* TX ok w/o retry */ sc->sta[8]; /* TX ok w/ retry */ fail = sc->sta[9]; /* TX retry-fail count */ sum = ok+fail; retrycnt = sc->sta[8] + fail; ieee80211_ratectl_tx_update(vap, ni, &sum, &ok, &retrycnt); (void) ieee80211_ratectl_rate(ni, NULL, 0); /* count TX retry-fail as Tx errors */ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, fail); usb_callout_reset(&uvp->ratectl_ch, hz, ural_ratectl_timeout, uvp); RAL_UNLOCK(sc); ieee80211_free_node(ni); } static int ural_pause(struct ural_softc *sc, int timeout) { usb_pause_mtx(&sc->sc_mtx, timeout); return (0); } diff --git a/sys/dev/vt/hw/vga/vt_vga.c b/sys/dev/vt/hw/vga/vt_vga.c index 62c9bf3e176b..e81dcba7663a 100644 --- a/sys/dev/vt/hw/vga/vt_vga.c +++ b/sys/dev/vt/hw/vga/vt_vga.c @@ -1,1317 +1,1315 @@ /*- * Copyright (c) 2005 Marcel Moolenaar * All rights reserved. * * Copyright (c) 2009 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Ed Schouten * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include struct vga_softc { bus_space_tag_t vga_fb_tag; bus_space_handle_t vga_fb_handle; bus_space_tag_t vga_reg_tag; bus_space_handle_t vga_reg_handle; int vga_wmode; term_color_t vga_curfg, vga_curbg; boolean_t vga_enabled; }; /* Convenience macros. */ #define MEM_READ1(sc, ofs) \ bus_space_read_1(sc->vga_fb_tag, sc->vga_fb_handle, ofs) #define MEM_WRITE1(sc, ofs, val) \ bus_space_write_1(sc->vga_fb_tag, sc->vga_fb_handle, ofs, val) #define REG_READ1(sc, reg) \ bus_space_read_1(sc->vga_reg_tag, sc->vga_reg_handle, reg) #define REG_WRITE1(sc, reg, val) \ bus_space_write_1(sc->vga_reg_tag, sc->vga_reg_handle, reg, val) #define VT_VGA_WIDTH 640 #define VT_VGA_HEIGHT 480 #define VT_VGA_MEMSIZE (VT_VGA_WIDTH * VT_VGA_HEIGHT / 8) /* * VGA is designed to handle 8 pixels at a time (8 pixels in one byte of * memory). */ #define VT_VGA_PIXELS_BLOCK 8 /* * We use off-screen addresses to: * o store the background color; * o store the pixel pattern. * Those addresses are then loaded into the latches once. */ #define VT_VGA_BGCOLOR_OFFSET VT_VGA_MEMSIZE static vd_probe_t vga_probe; static vd_init_t vga_init; static vd_blank_t vga_blank; static vd_bitblt_text_t vga_bitblt_text; static vd_bitblt_bmp_t vga_bitblt_bitmap; static vd_drawrect_t vga_drawrect; static vd_setpixel_t vga_setpixel; static vd_postswitch_t vga_postswitch; static const struct vt_driver vt_vga_driver = { .vd_name = "vga", .vd_probe = vga_probe, .vd_init = vga_init, .vd_blank = vga_blank, .vd_bitblt_text = vga_bitblt_text, .vd_bitblt_bmp = vga_bitblt_bitmap, .vd_drawrect = vga_drawrect, .vd_setpixel = vga_setpixel, .vd_postswitch = vga_postswitch, .vd_priority = VD_PRIORITY_GENERIC, }; /* * Driver supports both text mode and graphics mode. Make sure the * buffer is always big enough to support both. */ static struct vga_softc vga_conssoftc; VT_DRIVER_DECLARE(vt_vga, vt_vga_driver); static inline void vga_setwmode(struct vt_device *vd, int wmode) { struct vga_softc *sc = vd->vd_softc; if (sc->vga_wmode == wmode) return; REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_MODE); REG_WRITE1(sc, VGA_GC_DATA, wmode); sc->vga_wmode = wmode; switch (wmode) { case 3: /* Re-enable all plans. */ REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_MAP_MASK); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_MM_EM3 | VGA_SEQ_MM_EM2 | VGA_SEQ_MM_EM1 | VGA_SEQ_MM_EM0); break; } } static inline void vga_setfg(struct vt_device *vd, term_color_t color) { struct vga_softc *sc = vd->vd_softc; vga_setwmode(vd, 3); if (sc->vga_curfg == color) return; REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, color); sc->vga_curfg = color; } static inline void vga_setbg(struct vt_device *vd, term_color_t color) { struct vga_softc *sc = vd->vd_softc; vga_setwmode(vd, 3); if (sc->vga_curbg == color) return; REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, color); /* * Write 8 pixels using the background color to an off-screen * byte in the video memory. */ MEM_WRITE1(sc, VT_VGA_BGCOLOR_OFFSET, 0xff); /* * Read those 8 pixels back to load the background color in the * latches register. */ MEM_READ1(sc, VT_VGA_BGCOLOR_OFFSET); sc->vga_curbg = color; /* * The Set/Reset register doesn't contain the fg color anymore, * store an invalid color. */ sc->vga_curfg = 0xff; } /* * Binary searchable table for Unicode to CP437 conversion. 
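 *
 * Each entry maps the inclusive range [unicode_base, unicode_base +
 * length] onto [cp437_base, cp437_base + length]; for example, the
 * { 0x2591, 0xb0, 0x02 } entry maps U+2591..U+2593 to 0xb0..0xb2.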
*/ struct unicp437 { uint16_t unicode_base; uint8_t cp437_base; uint8_t length; }; static const struct unicp437 cp437table[] = { { 0x0020, 0x20, 0x5e }, { 0x00a0, 0x20, 0x00 }, { 0x00a1, 0xad, 0x00 }, { 0x00a2, 0x9b, 0x00 }, { 0x00a3, 0x9c, 0x00 }, { 0x00a5, 0x9d, 0x00 }, { 0x00a7, 0x15, 0x00 }, { 0x00aa, 0xa6, 0x00 }, { 0x00ab, 0xae, 0x00 }, { 0x00ac, 0xaa, 0x00 }, { 0x00b0, 0xf8, 0x00 }, { 0x00b1, 0xf1, 0x00 }, { 0x00b2, 0xfd, 0x00 }, { 0x00b5, 0xe6, 0x00 }, { 0x00b6, 0x14, 0x00 }, { 0x00b7, 0xfa, 0x00 }, { 0x00ba, 0xa7, 0x00 }, { 0x00bb, 0xaf, 0x00 }, { 0x00bc, 0xac, 0x00 }, { 0x00bd, 0xab, 0x00 }, { 0x00bf, 0xa8, 0x00 }, { 0x00c4, 0x8e, 0x01 }, { 0x00c6, 0x92, 0x00 }, { 0x00c7, 0x80, 0x00 }, { 0x00c9, 0x90, 0x00 }, { 0x00d1, 0xa5, 0x00 }, { 0x00d6, 0x99, 0x00 }, { 0x00dc, 0x9a, 0x00 }, { 0x00df, 0xe1, 0x00 }, { 0x00e0, 0x85, 0x00 }, { 0x00e1, 0xa0, 0x00 }, { 0x00e2, 0x83, 0x00 }, { 0x00e4, 0x84, 0x00 }, { 0x00e5, 0x86, 0x00 }, { 0x00e6, 0x91, 0x00 }, { 0x00e7, 0x87, 0x00 }, { 0x00e8, 0x8a, 0x00 }, { 0x00e9, 0x82, 0x00 }, { 0x00ea, 0x88, 0x01 }, { 0x00ec, 0x8d, 0x00 }, { 0x00ed, 0xa1, 0x00 }, { 0x00ee, 0x8c, 0x00 }, { 0x00ef, 0x8b, 0x00 }, { 0x00f0, 0xeb, 0x00 }, { 0x00f1, 0xa4, 0x00 }, { 0x00f2, 0x95, 0x00 }, { 0x00f3, 0xa2, 0x00 }, { 0x00f4, 0x93, 0x00 }, { 0x00f6, 0x94, 0x00 }, { 0x00f7, 0xf6, 0x00 }, { 0x00f8, 0xed, 0x00 }, { 0x00f9, 0x97, 0x00 }, { 0x00fa, 0xa3, 0x00 }, { 0x00fb, 0x96, 0x00 }, { 0x00fc, 0x81, 0x00 }, { 0x00ff, 0x98, 0x00 }, { 0x0192, 0x9f, 0x00 }, { 0x0393, 0xe2, 0x00 }, { 0x0398, 0xe9, 0x00 }, { 0x03a3, 0xe4, 0x00 }, { 0x03a6, 0xe8, 0x00 }, { 0x03a9, 0xea, 0x00 }, { 0x03b1, 0xe0, 0x01 }, { 0x03b4, 0xeb, 0x00 }, { 0x03b5, 0xee, 0x00 }, { 0x03bc, 0xe6, 0x00 }, { 0x03c0, 0xe3, 0x00 }, { 0x03c3, 0xe5, 0x00 }, { 0x03c4, 0xe7, 0x00 }, { 0x03c6, 0xed, 0x00 }, { 0x03d5, 0xed, 0x00 }, { 0x2010, 0x2d, 0x00 }, { 0x2014, 0x2d, 0x00 }, { 0x2018, 0x60, 0x00 }, { 0x2019, 0x27, 0x00 }, { 0x201c, 0x22, 0x00 }, { 0x201d, 0x22, 0x00 }, { 0x2022, 0x07, 0x00 }, { 0x203c, 0x13, 0x00 }, { 0x207f, 0xfc, 0x00 }, { 0x20a7, 0x9e, 0x00 }, { 0x20ac, 0xee, 0x00 }, { 0x2126, 0xea, 0x00 }, { 0x2190, 0x1b, 0x00 }, { 0x2191, 0x18, 0x00 }, { 0x2192, 0x1a, 0x00 }, { 0x2193, 0x19, 0x00 }, { 0x2194, 0x1d, 0x00 }, { 0x2195, 0x12, 0x00 }, { 0x21a8, 0x17, 0x00 }, { 0x2202, 0xeb, 0x00 }, { 0x2208, 0xee, 0x00 }, { 0x2211, 0xe4, 0x00 }, { 0x2212, 0x2d, 0x00 }, { 0x2219, 0xf9, 0x00 }, { 0x221a, 0xfb, 0x00 }, { 0x221e, 0xec, 0x00 }, { 0x221f, 0x1c, 0x00 }, { 0x2229, 0xef, 0x00 }, { 0x2248, 0xf7, 0x00 }, { 0x2261, 0xf0, 0x00 }, { 0x2264, 0xf3, 0x00 }, { 0x2265, 0xf2, 0x00 }, { 0x2302, 0x7f, 0x00 }, { 0x2310, 0xa9, 0x00 }, { 0x2320, 0xf4, 0x00 }, { 0x2321, 0xf5, 0x00 }, { 0x2500, 0xc4, 0x00 }, { 0x2502, 0xb3, 0x00 }, { 0x250c, 0xda, 0x00 }, { 0x2510, 0xbf, 0x00 }, { 0x2514, 0xc0, 0x00 }, { 0x2518, 0xd9, 0x00 }, { 0x251c, 0xc3, 0x00 }, { 0x2524, 0xb4, 0x00 }, { 0x252c, 0xc2, 0x00 }, { 0x2534, 0xc1, 0x00 }, { 0x253c, 0xc5, 0x00 }, { 0x2550, 0xcd, 0x00 }, { 0x2551, 0xba, 0x00 }, { 0x2552, 0xd5, 0x00 }, { 0x2553, 0xd6, 0x00 }, { 0x2554, 0xc9, 0x00 }, { 0x2555, 0xb8, 0x00 }, { 0x2556, 0xb7, 0x00 }, { 0x2557, 0xbb, 0x00 }, { 0x2558, 0xd4, 0x00 }, { 0x2559, 0xd3, 0x00 }, { 0x255a, 0xc8, 0x00 }, { 0x255b, 0xbe, 0x00 }, { 0x255c, 0xbd, 0x00 }, { 0x255d, 0xbc, 0x00 }, { 0x255e, 0xc6, 0x01 }, { 0x2560, 0xcc, 0x00 }, { 0x2561, 0xb5, 0x00 }, { 0x2562, 0xb6, 0x00 }, { 0x2563, 0xb9, 0x00 }, { 0x2564, 0xd1, 0x01 }, { 0x2566, 0xcb, 0x00 }, { 0x2567, 0xcf, 0x00 }, { 0x2568, 0xd0, 0x00 }, { 0x2569, 0xca, 0x00 }, { 0x256a, 0xd8, 
0x00 }, { 0x256b, 0xd7, 0x00 }, { 0x256c, 0xce, 0x00 }, { 0x2580, 0xdf, 0x00 }, { 0x2584, 0xdc, 0x00 }, { 0x2588, 0xdb, 0x00 }, { 0x258c, 0xdd, 0x00 }, { 0x2590, 0xde, 0x00 }, { 0x2591, 0xb0, 0x02 }, { 0x25a0, 0xfe, 0x00 }, { 0x25ac, 0x16, 0x00 }, { 0x25b2, 0x1e, 0x00 }, { 0x25ba, 0x10, 0x00 }, { 0x25bc, 0x1f, 0x00 }, { 0x25c4, 0x11, 0x00 }, { 0x25cb, 0x09, 0x00 }, { 0x25d8, 0x08, 0x00 }, { 0x25d9, 0x0a, 0x00 }, { 0x263a, 0x01, 0x01 }, { 0x263c, 0x0f, 0x00 }, { 0x2640, 0x0c, 0x00 }, { 0x2642, 0x0b, 0x00 }, { 0x2660, 0x06, 0x00 }, { 0x2663, 0x05, 0x00 }, { 0x2665, 0x03, 0x01 }, { 0x266a, 0x0d, 0x00 }, { 0x266c, 0x0e, 0x00 }, }; static uint8_t vga_get_cp437(term_char_t c) { int min, mid, max; min = 0; max = (sizeof(cp437table) / sizeof(struct unicp437)) - 1; if (c < cp437table[0].unicode_base || c > cp437table[max].unicode_base + cp437table[max].length) return '?'; while (max >= min) { mid = (min + max) / 2; if (c < cp437table[mid].unicode_base) max = mid - 1; else if (c > cp437table[mid].unicode_base + cp437table[mid].length) min = mid + 1; else return (c - cp437table[mid].unicode_base + cp437table[mid].cp437_base); } return '?'; } static void vga_blank(struct vt_device *vd, term_color_t color) { struct vga_softc *sc = vd->vd_softc; u_int ofs; vga_setfg(vd, color); for (ofs = 0; ofs < VT_VGA_MEMSIZE; ofs++) MEM_WRITE1(sc, ofs, 0xff); } static inline void vga_bitblt_put(struct vt_device *vd, u_long dst, term_color_t color, uint8_t v) { struct vga_softc *sc = vd->vd_softc; /* Skip empty writes, in order to avoid palette changes. */ if (v != 0x00) { vga_setfg(vd, color); /* * When this MEM_READ1() gets disabled, all sorts of * artifacts occur. This is because this read loads the * set of 8 pixels that are about to be changed. There * is one scenario where we can avoid the read, namely * if all pixels are about to be overwritten anyway. */ if (v != 0xff) { MEM_READ1(sc, dst); /* The bg color was trashed by the reads. */ sc->vga_curbg = 0xff; } MEM_WRITE1(sc, dst, v); } } static void vga_setpixel(struct vt_device *vd, int x, int y, term_color_t color) { if (vd->vd_flags & VDF_TEXTMODE) return; vga_bitblt_put(vd, (y * VT_VGA_WIDTH / 8) + (x / 8), color, 0x80 >> (x % 8)); } static void vga_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2, int fill, term_color_t color) { int x, y; if (vd->vd_flags & VDF_TEXTMODE) return; for (y = y1; y <= y2; y++) { if (fill || (y == y1) || (y == y2)) { for (x = x1; x <= x2; x++) vga_setpixel(vd, x, y, color); } else { vga_setpixel(vd, x1, y, color); vga_setpixel(vd, x2, y, color); } } } static void vga_compute_shifted_pattern(const uint8_t *src, unsigned int bytes, unsigned int src_x, unsigned int x_count, unsigned int dst_x, uint8_t *pattern, uint8_t *mask) { unsigned int n; n = src_x / 8; /* * This mask has bits set, where a pixel (either 0 or 1) * comes from the source bitmap. */ if (mask != NULL) { *mask = (0xff >> (8 - x_count)) << (8 - x_count - dst_x); } if (n == (src_x + x_count - 1) / 8) { /* All the pixels we want are in the same byte. */ *pattern = src[n]; if (dst_x >= src_x) *pattern >>= (dst_x - src_x % 8); else *pattern <<= (src_x % 8 - dst_x); } else { /* The pixels we want are split into two bytes. 
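 * We combine the low-order bits of src[n] (its rightmost pixels)
 * with the high-order bits of src[n + 1] (its leftmost pixels),
 * shifting each part into place so the result is aligned at dst_x.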
*/ if (dst_x >= src_x % 8) { *pattern = src[n] << (8 - dst_x - src_x % 8) | src[n + 1] >> (dst_x - src_x % 8); } else { *pattern = src[n] << (src_x % 8 - dst_x) | src[n + 1] >> (8 - src_x % 8 - dst_x); } } } static void vga_copy_bitmap_portion(uint8_t *pattern_2colors, uint8_t *pattern_ncolors, const uint8_t *src, const uint8_t *src_mask, unsigned int src_width, unsigned int src_x, unsigned int dst_x, unsigned int x_count, unsigned int src_y, unsigned int dst_y, unsigned int y_count, term_color_t fg, term_color_t bg, int overwrite) { unsigned int i, bytes; uint8_t pattern, relevant_bits, mask; bytes = (src_width + 7) / 8; for (i = 0; i < y_count; ++i) { vga_compute_shifted_pattern(src + (src_y + i) * bytes, bytes, src_x, x_count, dst_x, &pattern, &relevant_bits); if (src_mask == NULL) { /* * No src mask. Consider that all wanted bits * from the source are "authoritative". */ mask = relevant_bits; } else { /* * There's an src mask. We shift it the same way * we shifted the source pattern. */ vga_compute_shifted_pattern( src_mask + (src_y + i) * bytes, bytes, src_x, x_count, dst_x, &mask, NULL); /* Now, only keep the wanted bits among them. */ mask &= relevant_bits; } /* * Clear bits from the pattern which must be * transparent, according to the source mask. */ pattern &= mask; /* Set the bits in the 2-colors array. */ if (overwrite) pattern_2colors[dst_y + i] &= ~mask; pattern_2colors[dst_y + i] |= pattern; if (pattern_ncolors == NULL) continue; /* * Set the same bits in the n-colors array. This one * supports transparency, when a given bit is cleared in * all colors. */ if (overwrite) { /* * Ensure that the pixels used by this bitmap are * cleared in other colors. */ for (int j = 0; j < 16; ++j) pattern_ncolors[(dst_y + i) * 16 + j] &= ~mask; } pattern_ncolors[(dst_y + i) * 16 + fg] |= pattern; pattern_ncolors[(dst_y + i) * 16 + bg] |= (~pattern & mask); } } static void vga_bitblt_pixels_block_2colors(struct vt_device *vd, const uint8_t *masks, term_color_t fg, term_color_t bg, unsigned int x, unsigned int y, unsigned int height) { unsigned int i, offset; struct vga_softc *sc; /* * The great advantage of Write Mode 3 is that we just need * to load the foreground in the Set/Reset register, load the * background color in the latches register (this is done * through a write in offscreen memory followed by a read of * that data), then write the pattern to video memory. This * pattern indicates if the pixel should use the foreground * color (bit set) or the background color (bit cleared). */ vga_setbg(vd, bg); vga_setfg(vd, fg); sc = vd->vd_softc; offset = (VT_VGA_WIDTH * y + x) / 8; for (i = 0; i < height; ++i, offset += VT_VGA_WIDTH / 8) { MEM_WRITE1(sc, offset, masks[i]); } } static void vga_bitblt_pixels_block_ncolors(struct vt_device *vd, const uint8_t *masks, unsigned int x, unsigned int y, unsigned int height) { unsigned int i, j, plan, color, offset; struct vga_softc *sc; uint8_t mask, plans[height * 4]; sc = vd->vd_softc; memset(plans, 0, sizeof(plans)); /* * To write a group of pixels using 3 or more colors, we select * Write Mode 0 and write one byte to each plan separately. */ /* * We first compute each byte: each plan contains one bit of the * color code for each of the 8 pixels. 
* * For example, if the 8 pixels are like this: * GBBBBBBY * where: * G (gray) = 0b0111 * B (black) = 0b0000 * Y (yellow) = 0b0011 * * The corresponding four bytes are: * GBBBBBBY * Plan 0: 10000001 = 0x81 * Plan 1: 10000001 = 0x81 * Plan 2: 10000000 = 0x80 * Plan 3: 00000000 = 0x00 * | | | * | | +-> 0b0011 (Y) * | +-----> 0b0000 (B) * +--------> 0b0111 (G) */ for (i = 0; i < height; ++i) { for (color = 0; color < 16; ++color) { mask = masks[i * 16 + color]; if (mask == 0x00) continue; for (j = 0; j < 8; ++j) { if (!((mask >> (7 - j)) & 0x1)) continue; /* The pixel "j" uses color "color". */ for (plan = 0; plan < 4; ++plan) plans[i * 4 + plan] |= ((color >> plan) & 0x1) << (7 - j); } } } /* * The bytes are ready: we now switch to Write Mode 0 and write * all bytes, one plan at a time. */ vga_setwmode(vd, 0); REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_MAP_MASK); for (plan = 0; plan < 4; ++plan) { /* Select plan. */ REG_WRITE1(sc, VGA_SEQ_DATA, 1 << plan); /* Write all bytes for this plan, from Y to Y+height. */ for (i = 0; i < height; ++i) { offset = (VT_VGA_WIDTH * (y + i) + x) / 8; MEM_WRITE1(sc, offset, plans[i * 4 + plan]); } } } static void vga_bitblt_one_text_pixels_block(struct vt_device *vd, const struct vt_window *vw, unsigned int x, unsigned int y) { const struct vt_buf *vb; const struct vt_font *vf; unsigned int i, col, row, src_x, x_count; unsigned int used_colors_list[16], used_colors; uint8_t pattern_2colors[vw->vw_font->vf_height]; uint8_t pattern_ncolors[vw->vw_font->vf_height * 16]; term_char_t c; term_color_t fg, bg; const uint8_t *src; vb = &vw->vw_buf; vf = vw->vw_font; /* * The current pixels block. * * We fill it with portions of characters, because the two "grids" * may not match. * * i is the index in this pixels block. */ i = x; used_colors = 0; memset(used_colors_list, 0, sizeof(used_colors_list)); memset(pattern_2colors, 0, sizeof(pattern_2colors)); memset(pattern_ncolors, 0, sizeof(pattern_ncolors)); if (i < vw->vw_draw_area.tr_begin.tp_col) { /* * i is in the margin used to center the text area on * the screen. */ i = vw->vw_draw_area.tr_begin.tp_col; } while (i < x + VT_VGA_PIXELS_BLOCK && i < vw->vw_draw_area.tr_end.tp_col) { /* * Find which character is drawn on this pixel in the * pixels block. * * While here, record what colors it uses. */ col = (i - vw->vw_draw_area.tr_begin.tp_col) / vf->vf_width; row = (y - vw->vw_draw_area.tr_begin.tp_row) / vf->vf_height; c = VTBUF_GET_FIELD(vb, row, col); src = vtfont_lookup(vf, c); vt_determine_colors(c, VTBUF_ISCURSOR(vb, row, col), &fg, &bg); if ((used_colors_list[fg] & 0x1) != 0x1) used_colors++; if ((used_colors_list[bg] & 0x2) != 0x2) used_colors++; used_colors_list[fg] |= 0x1; used_colors_list[bg] |= 0x2; /* * Compute the portion of the character we want to draw, * because the pixels block may start in the middle of a * character. * * The first pixel to draw in the character is * the current position - * the start position of the character * * The last pixel to draw is either * - the last pixel of the character, or * - the pixel of the character matching the end of * the pixels block * whichever comes first. This position is then * changed to be relative to the start position of the * character. 
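 * For instance, with an 8-pixel wide font and a 4-pixel centering
 * margin, the character in column 1 covers pixels 12 to 19, so the
 * pixels block starting at x = 16 draws its portion with src_x = 4
 * and x_count = 4.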
*/ src_x = i - (col * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col); x_count = min(min( (col + 1) * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col, x + VT_VGA_PIXELS_BLOCK), vw->vw_draw_area.tr_end.tp_col); x_count -= col * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col; x_count -= src_x; /* Copy a portion of the character. */ vga_copy_bitmap_portion(pattern_2colors, pattern_ncolors, src, NULL, vf->vf_width, src_x, i % VT_VGA_PIXELS_BLOCK, x_count, 0, 0, vf->vf_height, fg, bg, 0); /* We move to the next portion. */ i += x_count; } #ifndef SC_NO_CUTPASTE /* * Copy the mouse pointer bitmap if it's over the current pixels * block. * * We use the saved cursor position (saved in vt_flush()), because * the current position could be different from the one used * to mark the area dirty. */ term_rect_t drawn_area; drawn_area.tr_begin.tp_col = x; drawn_area.tr_begin.tp_row = y; drawn_area.tr_end.tp_col = x + VT_VGA_PIXELS_BLOCK; drawn_area.tr_end.tp_row = y + vf->vf_height; if (vd->vd_mshown && vt_is_cursor_in_area(vd, &drawn_area)) { struct vt_mouse_cursor *cursor; unsigned int mx, my; unsigned int dst_x, src_y, dst_y, y_count; cursor = vd->vd_mcursor; mx = vd->vd_mx_drawn + vw->vw_draw_area.tr_begin.tp_col; my = vd->vd_my_drawn + vw->vw_draw_area.tr_begin.tp_row; /* Compute the portion of the cursor we want to copy. */ src_x = x > mx ? x - mx : 0; dst_x = mx > x ? mx - x : 0; x_count = min(min(min( cursor->width - src_x, x + VT_VGA_PIXELS_BLOCK - mx), vw->vw_draw_area.tr_end.tp_col - mx), VT_VGA_PIXELS_BLOCK); /* * The cursor isn't aligned on the Y-axis with * characters, so we need to compute the vertical * start/count. */ src_y = y > my ? y - my : 0; dst_y = my > y ? my - y : 0; y_count = min( min(cursor->height - src_y, y + vf->vf_height - my), vf->vf_height); /* Copy the cursor portion. */ vga_copy_bitmap_portion(pattern_2colors, pattern_ncolors, cursor->map, cursor->mask, cursor->width, src_x, dst_x, x_count, src_y, dst_y, y_count, vd->vd_mcursor_fg, vd->vd_mcursor_bg, 1); if ((used_colors_list[vd->vd_mcursor_fg] & 0x1) != 0x1) used_colors++; if ((used_colors_list[vd->vd_mcursor_bg] & 0x2) != 0x2) used_colors++; } #endif /* * The pixels block is completed; we can now draw it on the * screen. */ if (used_colors == 2) vga_bitblt_pixels_block_2colors(vd, pattern_2colors, fg, bg, x, y, vf->vf_height); else vga_bitblt_pixels_block_ncolors(vd, pattern_ncolors, x, y, vf->vf_height); } static void vga_bitblt_text_gfxmode(struct vt_device *vd, const struct vt_window *vw, const term_rect_t *area) { const struct vt_font *vf; unsigned int col, row; unsigned int x1, y1, x2, y2, x, y; vf = vw->vw_font; /* * Compute the top-left pixel position aligned with the video * adapter pixels block size. * * This is calculated from the top-left column of the dirty area: * * 1. Compute the top-left pixel of the character: * col * font width + x offset * * NOTE: x offset is used to center the text area on the * screen. It's expressed in pixels, not in characters * col/row! * * 2. Find the pixel farther to the left marking the start of * an aligned pixels block (e.g. a chunk of 8 pixels): * character's x / blocksize * blocksize * * The division, being made on integers, achieves the * alignment. * * For the Y-axis, we need to compute the character's y * coordinate, but we don't need to align it. 
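 * For instance, with an 8-pixel wide font and a 4-pixel centering
 * margin, a dirty area starting at column 3 yields
 * x1 = ((3 * 8 + 4) / 8) * 8 = 24, the block boundary just left of
 * the character's first pixel at 28.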
*/ col = area->tr_begin.tp_col; row = area->tr_begin.tp_row; x1 = (int)((col * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col) / VT_VGA_PIXELS_BLOCK) * VT_VGA_PIXELS_BLOCK; y1 = row * vf->vf_height + vw->vw_draw_area.tr_begin.tp_row; /* * Compute the bottom right pixel position, again, aligned with * the pixels block size. * * The same rules apply, we just add 1 to base the computation * on the "right border" of the dirty area. */ col = area->tr_end.tp_col; row = area->tr_end.tp_row; - x2 = (int)((col * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col - + VT_VGA_PIXELS_BLOCK - 1) - / VT_VGA_PIXELS_BLOCK) + x2 = (int)howmany(col * vf->vf_width + vw->vw_draw_area.tr_begin.tp_col, + VT_VGA_PIXELS_BLOCK) * VT_VGA_PIXELS_BLOCK; y2 = row * vf->vf_height + vw->vw_draw_area.tr_begin.tp_row; /* Clip the area to the screen size. */ x2 = min(x2, vw->vw_draw_area.tr_end.tp_col); y2 = min(y2, vw->vw_draw_area.tr_end.tp_row); /* * Now, we take care of N pixel lines at a time (the first for * loop, N = font height), and for these lines, draw one pixels * block at a time (the second for loop), not a character at a * time. * * Therefore, on the X-axis, characters may be drawn partially if * they are not aligned on an 8-pixel boundary. * * However, the operation is repeated for the full height of the * font before moving to the next character, because it allows us * to keep the color settings and write mode, before perhaps * changing them with the next one. */ for (y = y1; y < y2; y += vf->vf_height) { for (x = x1; x < x2; x += VT_VGA_PIXELS_BLOCK) { vga_bitblt_one_text_pixels_block(vd, vw, x, y); } } } static void vga_bitblt_text_txtmode(struct vt_device *vd, const struct vt_window *vw, const term_rect_t *area) { struct vga_softc *sc; const struct vt_buf *vb; unsigned int col, row; term_char_t c; term_color_t fg, bg; uint8_t ch, attr; sc = vd->vd_softc; vb = &vw->vw_buf; for (row = area->tr_begin.tp_row; row < area->tr_end.tp_row; ++row) { for (col = area->tr_begin.tp_col; col < area->tr_end.tp_col; ++col) { /* * Get the next character and its associated fg/bg * colors. */ c = VTBUF_GET_FIELD(vb, row, col); vt_determine_colors(c, VTBUF_ISCURSOR(vb, row, col), &fg, &bg); /* * Convert the character to CP437, which is the * character set used by the VGA hardware by * default. */ ch = vga_get_cp437(TCHAR_CHARACTER(c)); /* Convert colors to VGA attributes. */ attr = bg << 4 | fg; MEM_WRITE1(sc, (row * 80 + col) * 2 + 0, ch); MEM_WRITE1(sc, (row * 80 + col) * 2 + 1, attr); } } } static void vga_bitblt_text(struct vt_device *vd, const struct vt_window *vw, const term_rect_t *area) { if (!(vd->vd_flags & VDF_TEXTMODE)) { vga_bitblt_text_gfxmode(vd, vw, area); } else { vga_bitblt_text_txtmode(vd, vw, area); } } static void vga_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw, const uint8_t *pattern, const uint8_t *mask, unsigned int width, unsigned int height, unsigned int x, unsigned int y, term_color_t fg, term_color_t bg) { unsigned int x1, y1, x2, y2, i, j, src_x, dst_x, x_count; uint8_t pattern_2colors; /* Align coordinates with the 8-pixel grid. 
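 * The roundup() used below, like the howmany() introduced above, comes
 * from sys/param.h. A minimal sketch of what these helpers expand to,
 * with a hypothetical standalone check, follows.
 */
#if 0	/* Illustrative sketch only, never compiled into the driver. */
#include <assert.h>

/* Definitions equivalent to the sys/param.h macros. */
#define	howmany(x, y)	(((x) + ((y) - 1)) / (y))	  /* ceil(x / y) */
#define	roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y)) /* next multiple */

static void
check_block_alignment(void)
{
	/* 13 pixels span two 8-pixel blocks... */
	assert(howmany(13, 8) == 2);
	/* ...so the right edge rounds up to 16... */
	assert(roundup(13, 8) == 16);
	/* ...matching the open-coded form this change replaces. */
	assert((13 + 8 - 1) / 8 * 8 == roundup(13, 8));
}
#endif
/*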
*/ x1 = x / VT_VGA_PIXELS_BLOCK * VT_VGA_PIXELS_BLOCK; y1 = y; - x2 = (x + width + VT_VGA_PIXELS_BLOCK - 1) / - VT_VGA_PIXELS_BLOCK * VT_VGA_PIXELS_BLOCK; + x2 = roundup(x + width, VT_VGA_PIXELS_BLOCK); y2 = y + height; x2 = min(x2, vd->vd_width - 1); y2 = min(y2, vd->vd_height - 1); for (j = y1; j < y2; ++j) { src_x = 0; dst_x = x - x1; x_count = VT_VGA_PIXELS_BLOCK - dst_x; for (i = x1; i < x2; i += VT_VGA_PIXELS_BLOCK) { pattern_2colors = 0; vga_copy_bitmap_portion( &pattern_2colors, NULL, pattern, mask, width, src_x, dst_x, x_count, j - y1, 0, 1, fg, bg, 0); vga_bitblt_pixels_block_2colors(vd, &pattern_2colors, fg, bg, i, j, 1); src_x += x_count; dst_x = (dst_x + x_count) % VT_VGA_PIXELS_BLOCK; x_count = min(width - src_x, VT_VGA_PIXELS_BLOCK); } } } static void vga_initialize_graphics(struct vt_device *vd) { struct vga_softc *sc = vd->vd_softc; /* Clock select. */ REG_WRITE1(sc, VGA_GEN_MISC_OUTPUT_W, VGA_GEN_MO_VSP | VGA_GEN_MO_HSP | VGA_GEN_MO_PB | VGA_GEN_MO_ER | VGA_GEN_MO_IOA); /* Set sequencer clocking and memory mode. */ REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_CLOCKING_MODE); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_CM_89); REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_MEMORY_MODE); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_MM_OE | VGA_SEQ_MM_EM); /* Set the graphics controller in graphics mode. */ REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_MISCELLANEOUS); REG_WRITE1(sc, VGA_GC_DATA, 0x04 + VGA_GC_MISC_GA); /* Program the CRT controller. */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_HORIZ_TOTAL); REG_WRITE1(sc, VGA_CRTC_DATA, 0x5f); /* 760 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_HORIZ_DISP_END); REG_WRITE1(sc, VGA_CRTC_DATA, 0x4f); /* 640 - 8 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_HORIZ_BLANK); REG_WRITE1(sc, VGA_CRTC_DATA, 0x50); /* 640 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_END_HORIZ_BLANK); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_EHB_CR + 2); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_HORIZ_RETRACE); REG_WRITE1(sc, VGA_CRTC_DATA, 0x54); /* 672 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_END_HORIZ_RETRACE); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_EHR_EHB + 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_TOTAL); REG_WRITE1(sc, VGA_CRTC_DATA, 0x0b); /* 523 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_OVERFLOW); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_OF_VT9 | VGA_CRTC_OF_LC8 | VGA_CRTC_OF_VBS8 | VGA_CRTC_OF_VRS8 | VGA_CRTC_OF_VDE8); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_MAX_SCAN_LINE); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_MSL_LC9); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_RETRACE_START); REG_WRITE1(sc, VGA_CRTC_DATA, 0xea); /* 480 + 10 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_RETRACE_END); REG_WRITE1(sc, VGA_CRTC_DATA, 0x0c); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_DISPLAY_END); REG_WRITE1(sc, VGA_CRTC_DATA, 0xdf); /* 480 - 1*/ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_OFFSET); REG_WRITE1(sc, VGA_CRTC_DATA, 0x28); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_VERT_BLANK); REG_WRITE1(sc, VGA_CRTC_DATA, 0xe7); /* 480 + 7 */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_END_VERT_BLANK); REG_WRITE1(sc, VGA_CRTC_DATA, 0x04); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_MODE_CONTROL); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_MC_WB | VGA_CRTC_MC_AW | VGA_CRTC_MC_SRS | VGA_CRTC_MC_CMS); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_LINE_COMPARE); REG_WRITE1(sc, VGA_CRTC_DATA, 0xff); /* 480 + 31 */ REG_WRITE1(sc, VGA_GEN_FEATURE_CTRL_W, 0); REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_MAP_MASK); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_MM_EM3 | VGA_SEQ_MM_EM2 | 
VGA_SEQ_MM_EM1 | VGA_SEQ_MM_EM0); REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_CHAR_MAP_SELECT); REG_WRITE1(sc, VGA_SEQ_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_ENABLE_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, 0x0f); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_COLOR_COMPARE); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_DATA_ROTATE); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_READ_MAP_SELECT); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_MODE); REG_WRITE1(sc, VGA_GC_DATA, 0); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_COLOR_DONT_CARE); REG_WRITE1(sc, VGA_GC_DATA, 0x0f); REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_BIT_MASK); REG_WRITE1(sc, VGA_GC_DATA, 0xff); } static int vga_initialize(struct vt_device *vd, int textmode) { struct vga_softc *sc = vd->vd_softc; uint8_t x; int timeout; /* Make sure the VGA adapter is not in monochrome emulation mode. */ x = REG_READ1(sc, VGA_GEN_MISC_OUTPUT_R); REG_WRITE1(sc, VGA_GEN_MISC_OUTPUT_W, x | VGA_GEN_MO_IOA); /* Unprotect CRTC registers 0-7. */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_VERT_RETRACE_END); x = REG_READ1(sc, VGA_CRTC_DATA); REG_WRITE1(sc, VGA_CRTC_DATA, x & ~VGA_CRTC_VRE_PR); /* * Wait for the vertical retrace. * NOTE: this code reads the VGA_GEN_INPUT_STAT_1 register, which has * the side-effect of clearing the internal flip-flop of the attribute * controller's write register. This means that because this code is * here, we know for sure that the first write to the attribute * controller will be a write to the address register. Removing this * code therefore also removes that guarantee and appropriate measures * need to be taken. */ timeout = 10000; do { DELAY(10); x = REG_READ1(sc, VGA_GEN_INPUT_STAT_1); x &= VGA_GEN_IS1_VR | VGA_GEN_IS1_DE; } while (x != (VGA_GEN_IS1_VR | VGA_GEN_IS1_DE) && --timeout != 0); if (timeout == 0) { printf("Timeout initializing vt_vga\n"); return (ENXIO); } /* Now, disable the sync signals. */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_MODE_CONTROL); x = REG_READ1(sc, VGA_CRTC_DATA); REG_WRITE1(sc, VGA_CRTC_DATA, x & ~VGA_CRTC_MC_HR); /* Asynchronous sequencer reset. */ REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_RESET); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_RST_SR); if (!textmode) vga_initialize_graphics(vd); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_PRESET_ROW_SCAN); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_CURSOR_START); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_CS_COO); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_CURSOR_END); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_ADDR_HIGH); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_START_ADDR_LOW); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_CURSOR_LOC_HIGH); REG_WRITE1(sc, VGA_CRTC_DATA, 0); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_CURSOR_LOC_LOW); REG_WRITE1(sc, VGA_CRTC_DATA, 0x59); REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_UNDERLINE_LOC); REG_WRITE1(sc, VGA_CRTC_DATA, VGA_CRTC_UL_UL); if (textmode) { /* Set the attribute controller to blink disable. */ REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_MODE_CONTROL); REG_WRITE1(sc, VGA_AC_WRITE, 0); } else { /* Set the attribute controller in graphics mode. 
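 * Each write to VGA_AC_WRITE below alternates between the index and
 * the data register through the controller's internal flip-flop,
 * which the status register read above left in the "index" state, so
 * every pair of writes is an index selection followed by its value.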
*/ REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_MODE_CONTROL); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_MC_GA); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_HORIZ_PIXEL_PANNING); REG_WRITE1(sc, VGA_AC_WRITE, 0); } REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(0)); REG_WRITE1(sc, VGA_AC_WRITE, 0); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(1)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_R); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(2)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_G); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(3)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SG | VGA_AC_PAL_R); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(4)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(5)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_R | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(6)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_G | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(7)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_R | VGA_AC_PAL_G | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(8)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(9)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_R); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(10)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_G); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(11)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_R | VGA_AC_PAL_G); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(12)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(13)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_R | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(14)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_G | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PALETTE(15)); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_PAL_SR | VGA_AC_PAL_SG | VGA_AC_PAL_SB | VGA_AC_PAL_R | VGA_AC_PAL_G | VGA_AC_PAL_B); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_OVERSCAN_COLOR); REG_WRITE1(sc, VGA_AC_WRITE, 0); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_COLOR_PLANE_ENABLE); REG_WRITE1(sc, VGA_AC_WRITE, 0x0f); REG_WRITE1(sc, VGA_AC_WRITE, VGA_AC_COLOR_SELECT); REG_WRITE1(sc, VGA_AC_WRITE, 0); if (!textmode) { u_int ofs; /* * Done. Clear the frame buffer. All bit planes are * enabled, so a single-paged loop should clear all * planes. */ for (ofs = 0; ofs < VT_VGA_MEMSIZE; ofs++) { MEM_WRITE1(sc, ofs, 0); } } /* Re-enable the sequencer. */ REG_WRITE1(sc, VGA_SEQ_ADDRESS, VGA_SEQ_RESET); REG_WRITE1(sc, VGA_SEQ_DATA, VGA_SEQ_RST_SR | VGA_SEQ_RST_NAR); /* Re-enable the sync signals. */ REG_WRITE1(sc, VGA_CRTC_ADDRESS, VGA_CRTC_MODE_CONTROL); x = REG_READ1(sc, VGA_CRTC_DATA); REG_WRITE1(sc, VGA_CRTC_DATA, x | VGA_CRTC_MC_HR); if (!textmode) { /* Switch to write mode 3, because we'll mainly do bitblt. */ REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_MODE); REG_WRITE1(sc, VGA_GC_DATA, 3); sc->vga_wmode = 3; /* * In Write Mode 3, Enable Set/Reset is ignored, but we * use Write Mode 0 to write a group of 8 pixels using * 3 or more colors. In this case, we want to disable * Set/Reset: set Enable Set/Reset to 0. */ REG_WRITE1(sc, VGA_GC_ADDRESS, VGA_GC_ENABLE_SET_RESET); REG_WRITE1(sc, VGA_GC_DATA, 0x00); /* * Clear the colors we think are loaded into Set/Reset or * the latches. 
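 * 0xff works as a "no color loaded" sentinel here, because valid
 * colors only range from 0 to 15, so the next vga_setfg()/vga_setbg()
 * call is guaranteed to reprogram the hardware.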
*/ sc->vga_curfg = sc->vga_curbg = 0xff; } return (0); } static int vga_probe(struct vt_device *vd) { return (CN_INTERNAL); } static int vga_init(struct vt_device *vd) { struct vga_softc *sc; int textmode; if (vd->vd_softc == NULL) vd->vd_softc = (void *)&vga_conssoftc; sc = vd->vd_softc; if (vd->vd_flags & VDF_DOWNGRADE && vd->vd_video_dev != NULL) vga_pci_repost(vd->vd_video_dev); #if defined(__amd64__) || defined(__i386__) sc->vga_fb_tag = X86_BUS_SPACE_MEM; sc->vga_reg_tag = X86_BUS_SPACE_IO; #else # error "Architecture not yet supported!" #endif bus_space_map(sc->vga_reg_tag, VGA_REG_BASE, VGA_REG_SIZE, 0, &sc->vga_reg_handle); /* * If "hw.vga.textmode" is not set, default to text mode when running * on a hypervisor: vt(4) is usually much slower in graphics mode than * in text mode there, especially on Hyper-V. */ textmode = vm_guest != VM_GUEST_NO; TUNABLE_INT_FETCH("hw.vga.textmode", &textmode); if (textmode) { vd->vd_flags |= VDF_TEXTMODE; vd->vd_width = 80; vd->vd_height = 25; bus_space_map(sc->vga_fb_tag, VGA_TXT_BASE, VGA_TXT_SIZE, 0, &sc->vga_fb_handle); } else { vd->vd_width = VT_VGA_WIDTH; vd->vd_height = VT_VGA_HEIGHT; bus_space_map(sc->vga_fb_tag, VGA_MEM_BASE, VGA_MEM_SIZE, 0, &sc->vga_fb_handle); } if (vga_initialize(vd, textmode) != 0) return (CN_DEAD); sc->vga_enabled = true; return (CN_INTERNAL); } static void vga_postswitch(struct vt_device *vd) { /* Reinitialize the VGA mode, to restore the view after an * application changed the mode. */ vga_initialize(vd, (vd->vd_flags & VDF_TEXTMODE)); /* Ask vt(9) to update the characters in the visible area. */ vd->vd_flags |= VDF_INVALID; } /* Dummy NewBus functions to reserve the resources used by the vt_vga driver */ static void vtvga_identify(driver_t *driver, device_t parent) { if (!vga_conssoftc.vga_enabled) return; if (BUS_ADD_CHILD(parent, 0, driver->name, 0) == NULL) panic("Unable to attach vt_vga console"); } static int vtvga_probe(device_t dev) { device_set_desc(dev, "VT VGA driver"); return (BUS_PROBE_NOWILDCARD); } static int vtvga_attach(device_t dev) { struct resource *pseudo_phys_res; int res_id; res_id = 0; pseudo_phys_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &res_id, VGA_MEM_BASE, VGA_MEM_BASE + VGA_MEM_SIZE - 1, VGA_MEM_SIZE, RF_ACTIVE); if (pseudo_phys_res == NULL) panic("Unable to reserve vt_vga memory"); return (0); } /*-------------------- Private Device Attachment Data -----------------------*/ static device_method_t vtvga_methods[] = { /* Device interface */ DEVMETHOD(device_identify, vtvga_identify), DEVMETHOD(device_probe, vtvga_probe), DEVMETHOD(device_attach, vtvga_attach), DEVMETHOD_END }; DEFINE_CLASS_0(vtvga, vtvga_driver, vtvga_methods, 0); devclass_t vtvga_devclass; DRIVER_MODULE(vtvga, nexus, vtvga_driver, vtvga_devclass, NULL, NULL); diff --git a/sys/dev/xen/grant_table/grant_table.c b/sys/dev/xen/grant_table/grant_table.c index 87e5676c544f..2a5c4276d302 100644 --- a/sys/dev/xen/grant_table/grant_table.c +++ b/sys/dev/xen/grant_table/grant_table.c @@ -1,702 +1,701 @@ /****************************************************************************** * gnttab.c * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. 
* (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2005, Christopher Clark * Copyright (c) 2004, K A Fraser */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static struct mtx gnttab_list_lock; /* * Resource representing allocated physical address space * for the grant table metainfo */ static struct resource *gnttab_pseudo_phys_res; /* Resource id for allocated physical address space. */ static int gnttab_pseudo_phys_res_id; static grant_entry_t *shared; static struct gnttab_free_callback *gnttab_free_callback_list = NULL; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) static int get_free_entries(int count, int *entries) { int ref, error; grant_ref_t head; mtx_lock(&gnttab_list_lock); if ((gnttab_free_count < count) && ((error = gnttab_expand(count - gnttab_free_count)) != 0)) { mtx_unlock(&gnttab_list_lock); return (error); } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; mtx_unlock(&gnttab_list_lock); *entries = ref; return (0); } static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (__predict_false(gnttab_free_callback_list != NULL)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { mtx_lock(&gnttab_list_lock); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); mtx_unlock(&gnttab_list_lock); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly, grant_ref_t *result) { int error, ref; error = get_free_entries(1, &ref); if (__predict_false(error)) return (error); shared[ref].frame = frame; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0); if (result) *result = ref; return (0); } void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0); } int gnttab_query_foreign_access(grant_ref_t ref) { uint16_t nflags; nflags = shared[ref].flags; return (nflags & (GTF_reading|GTF_writing)); } int gnttab_end_foreign_access_ref(grant_ref_t ref) { uint16_t flags, nflags; nflags = shared[ref].flags; do { if ( (flags = nflags) & (GTF_reading|GTF_writing) ) { printf("%s: WARNING: g.e. 
still in use!\n", __func__); return (0); } } while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) != flags); return (1); } void gnttab_end_foreign_access(grant_ref_t ref, void *page) { if (gnttab_end_foreign_access_ref(ref)) { put_free_entry(ref); if (page != NULL) { free(page, M_DEVBUF); } } else { /* XXX This needs to be fixed so that the ref and page are placed on a list to be freed up later. */ printf("%s: WARNING: leaking g.e. and page still in use!\n", __func__); } } void gnttab_end_foreign_access_references(u_int count, grant_ref_t *refs) { grant_ref_t *last_ref; grant_ref_t head; grant_ref_t tail; head = GNTTAB_LIST_END; tail = *refs; last_ref = refs + count; while (refs != last_ref) { if (gnttab_end_foreign_access_ref(*refs)) { gnttab_entry(*refs) = head; head = *refs; } else { /* * XXX This needs to be fixed so that the ref * is placed on a list to be freed up later. */ printf("%s: WARNING: leaking g.e. still in use!\n", __func__); count--; } refs++; } if (count != 0) { mtx_lock(&gnttab_list_lock); gnttab_free_count += count; gnttab_entry(tail) = gnttab_free_head; gnttab_free_head = head; mtx_unlock(&gnttab_list_lock); } } int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn, grant_ref_t *result) { int error, ref; error = get_free_entries(1, &ref); if (__predict_false(error)) return (error); gnttab_grant_foreign_transfer_ref(ref, domid, pfn); *result = ref; return (0); } void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; uint16_t flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if ( synch_cmpxchg(&shared[ref].flags, flags, 0) == flags ) return (0); cpu_spinwait(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_spinwait(); } /* Read the frame number /after/ reading completion status. 
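 * Without this read barrier, the CPU or the compiler could reorder
 * the load of shared[ref].frame before the GTF_transfer_completed
 * check above and hand back a stale frame number.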
*/ rmb(); frame = shared[ref].frame; KASSERT(frame != 0, ("grant table inconsistent")); return (frame); } unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return (frame); } void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; int count = 1; if (head == GNTTAB_LIST_END) return; ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } mtx_lock(&gnttab_list_lock); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); mtx_unlock(&gnttab_list_lock); } int gnttab_alloc_grant_references(uint16_t count, grant_ref_t *head) { int ref, error; error = get_free_entries(count, &ref); if (__predict_false(error)) return (error); *head = ref; return (0); } int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (__predict_false(g == GNTTAB_LIST_END)) return (g); *private_head = gnttab_entry(g); return (g); } void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, uint16_t count) { mtx_lock(&gnttab_list_lock); if (callback->next) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: mtx_unlock(&gnttab_list_lock); } void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; mtx_lock(&gnttab_list_lock); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; break; } } mtx_unlock(&gnttab_list_lock); } static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * GREFS_PER_GRANT_FRAME; for (i = nr_grant_frames; i < new_nr_grant_frames; i++) { gnttab_list[i] = (grant_ref_t *) malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT); if (!gnttab_list[i]) goto grow_nomem; } for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames; i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; check_free_callbacks(); return (0); grow_nomem: for ( ; i >= nr_grant_frames; i--) free(gnttab_list[i], M_DEVBUF); return (ENOMEM); } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return (4); /* Legacy max supported number of frames */ return (query.max_nr_frames); } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return (boot_max_nr_grant_frames); return (xen_max); } #ifdef notyet /* * XXX needed for backend support * */ static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void 
*data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } #endif static vm_paddr_t resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* * Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. */ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) panic("HYPERVISOR_memory_op failed to map gnttab"); } while (i-- > start_idx); if (shared == NULL) { vm_offset_t area; area = kva_alloc(PAGE_SIZE * max_nr_grant_frames()); KASSERT(area, ("can't allocate VM space for grant table")); shared = (grant_entry_t *)area; } for (i = start_idx; i <= end_idx; i++) { pmap_kenter((vm_offset_t) shared + i * PAGE_SIZE, resume_frames + i * PAGE_SIZE); } return (0); } int gnttab_resume(device_t dev) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return (ENOSYS); if (!resume_frames) { KASSERT(dev != NULL, ("No resume frames and no device provided")); gnttab_pseudo_phys_res = xenmem_alloc(dev, &gnttab_pseudo_phys_res_id, PAGE_SIZE * max_nr_gframes); if (gnttab_pseudo_phys_res == NULL) panic("Unable to reserve physical memory for gnttab"); resume_frames = rman_get_start(gnttab_pseudo_phys_res); } return (gnttab_map(0, nr_gframes - 1)); } static int gnttab_expand(unsigned int req_entries) { int error; unsigned int cur, extra; cur = nr_grant_frames; - extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / - GREFS_PER_GRANT_FRAME); + extra = howmany(req_entries, GREFS_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return (ENOSPC); error = gnttab_map(cur, cur + extra - 1); if (!error) error = grow_gnttab_list(extra); return (error); } MTX_SYSINIT(gnttab, &gnttab_list_lock, "GNTTAB LOCK", MTX_DEF); /*------------------ Private Device Attachment Functions --------------------*/ /** * \brief Identify instances of this device type in the system. * * \param driver The driver performing this identify action. * \param parent The NewBus parent device for any devices this method adds. */ static void granttable_identify(driver_t *driver __unused, device_t parent) { KASSERT(xen_domain(), ("Trying to attach grant-table device on non Xen domain")); /* * A single device instance for our driver is always present * in a system operating under Xen. */ if (BUS_ADD_CHILD(parent, 0, driver->name, 0) == NULL) panic("unable to attach Xen Grant-table device"); } /** * \brief Probe for the existence of the Xen Grant-table device * * \param dev NewBus device_t for this instance. * * \return Always returns 0 indicating success. */ static int granttable_probe(device_t dev) { device_set_desc(dev, "Xen Grant-table Device"); return (BUS_PROBE_NOWILDCARD); } /** * \brief Attach the Xen Grant-table device. * * \param dev NewBus device_t for this instance. * * \return On success, 0. Otherwise an errno value indicating the * type of failure. 
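 *
 * The free list initialized by this routine is addressed through the
 * two-level gnttab_entry() macro defined earlier in this file; a
 * minimal standalone sketch of that indexing (hypothetical names, not
 * part of the driver) follows.
 */
#if 0	/* Illustrative sketch only, never compiled. */
#include <stdint.h>

#define	SKETCH_PAGE_SIZE	4096
/* Grant references per page, as in the RPP macro above. */
#define	SKETCH_RPP	(SKETCH_PAGE_SIZE / sizeof(uint32_t))

static uint32_t *
sketch_entry(uint32_t **list, unsigned int entry)
{
	/* With SKETCH_RPP == 1024, entry 1500 is page 1, slot 476. */
	return (&list[entry / SKETCH_RPP][entry % SKETCH_RPP]);
}
#endif
/*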
*/ static int granttable_attach(device_t dev) { int i; unsigned int max_nr_glist_frames; unsigned int nr_init_grefs; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ max_nr_glist_frames = (boot_max_nr_grant_frames * GREFS_PER_GRANT_FRAME / (PAGE_SIZE / sizeof(grant_ref_t))); gnttab_list = malloc(max_nr_glist_frames * sizeof(grant_ref_t *), M_DEVBUF, M_NOWAIT); if (gnttab_list == NULL) return (ENOMEM); for (i = 0; i < nr_grant_frames; i++) { gnttab_list[i] = (grant_ref_t *) malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT); if (gnttab_list[i] == NULL) goto ini_nomem; } if (gnttab_resume(dev)) return (ENODEV); nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; if (bootverbose) printf("Grant table initialized\n"); return (0); ini_nomem: for (i--; i >= 0; i--) free(gnttab_list[i], M_DEVBUF); free(gnttab_list, M_DEVBUF); return (ENOMEM); } /*-------------------- Private Device Attachment Data -----------------------*/ static device_method_t granttable_methods[] = { /* Device interface */ DEVMETHOD(device_identify, granttable_identify), DEVMETHOD(device_probe, granttable_probe), DEVMETHOD(device_attach, granttable_attach), DEVMETHOD_END }; DEFINE_CLASS_0(granttable, granttable_driver, granttable_methods, 0); devclass_t granttable_devclass; DRIVER_MODULE_ORDERED(granttable, xenpv, granttable_driver, granttable_devclass, NULL, NULL, SI_ORDER_FIRST); diff --git a/sys/dev/xen/netback/netback.c b/sys/dev/xen/netback/netback.c index fb5d73c9af29..4b4fdb92bf15 100644 --- a/sys/dev/xen/netback/netback.c +++ b/sys/dev/xen/netback/netback.c @@ -1,2520 +1,2520 @@ /*- * Copyright (c) 2009-2011 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * Authors: Justin T. 
Gibbs (Spectra Logic Corporation) * Alan Somers (Spectra Logic Corporation) * John Suykerbuyk (Spectra Logic Corporation) */ #include __FBSDID("$FreeBSD$"); /** * \file netback.c * * \brief Device driver supporting the vending of network access * from this FreeBSD domain to other domains. */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_sctp.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 700000 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include /*--------------------------- Compile-time Tunables --------------------------*/ /*---------------------------------- Macros ----------------------------------*/ /** * Custom malloc type for all driver allocations. */ static MALLOC_DEFINE(M_XENNETBACK, "xnb", "Xen Net Back Driver Data"); #define XNB_SG 1 /* netback driver supports feature-sg */ #define XNB_GSO_TCPV4 0 /* netback driver supports feature-gso-tcpv4 */ #define XNB_RX_COPY 1 /* netback driver supports feature-rx-copy */ #define XNB_RX_FLIP 0 /* netback driver does not support feature-rx-flip */ #undef XNB_DEBUG #define XNB_DEBUG /* hardcode on during development */ #ifdef XNB_DEBUG #define DPRINTF(fmt, args...) \ printf("xnb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) #else #define DPRINTF(fmt, args...) do {} while (0) #endif /* Default length for stack-allocated grant tables */ #define GNTTAB_LEN (64) /* Features supported by all backends. TSO and LRO can be negotiated */ #define XNB_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) /** * Two argument version of the standard macro. Second argument is a tentative * value of req_cons */ #define RING_HAS_UNCONSUMED_REQUESTS_2(_r, cons) ({ \ unsigned int req = (_r)->sring->req_prod - cons; \ unsigned int rsp = RING_SIZE(_r) - \ (cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT) #define virt_to_offset(x) ((x) & (PAGE_SIZE - 1)) /** * Predefined array type of grant table copy descriptors. Used to pass around * statically allocated memory structures. */ typedef struct gnttab_copy gnttab_copy_table[GNTTAB_LEN]; /*--------------------------- Forward Declarations ---------------------------*/ struct xnb_softc; struct xnb_pkt; static void xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...) 
__printflike(3,4); static int xnb_shutdown(struct xnb_softc *xnb); static int create_netdev(device_t dev); static int xnb_detach(device_t dev); static int xnb_ifmedia_upd(struct ifnet *ifp); static void xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static void xnb_intr(void *arg); static int xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend, const struct mbuf *mbufc, gnttab_copy_table gnttab); static int xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc, struct ifnet *ifnet, gnttab_copy_table gnttab); static int xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring, RING_IDX start); static void xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring, int error); static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp); static int xnb_txpkt2gnttab(const struct xnb_pkt *pkt, struct mbuf *mbufc, gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb, domid_t otherend_id); static void xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab, int n_entries); static int xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt, RING_IDX start, int space); static int xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc, gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb, domid_t otherend_id); static int xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab, int n_entries, netif_rx_back_ring_t *ring); static void xnb_stop(struct xnb_softc*); static int xnb_ioctl(struct ifnet*, u_long, caddr_t); static void xnb_start_locked(struct ifnet*); static void xnb_start(struct ifnet*); static void xnb_ifinit_locked(struct xnb_softc*); static void xnb_ifinit(void*); #ifdef XNB_DEBUG static int xnb_unit_test_main(SYSCTL_HANDLER_ARGS); static int xnb_dump_rings(SYSCTL_HANDLER_ARGS); #endif #if defined(INET) || defined(INET6) static void xnb_add_mbuf_cksum(struct mbuf *mbufc); #endif /*------------------------------ Data Structures -----------------------------*/ /** * Representation of a xennet packet. Simplified version of a packet as * stored in the Xen tx ring. Applicable to both RX and TX packets */ struct xnb_pkt{ /** * Array index of the first data-bearing (eg, not extra info) entry * for this packet */ RING_IDX car; /** * Array index of the second data-bearing entry for this packet. * Invalid if the packet has only one data-bearing entry. If the * packet has more than two data-bearing entries, then the second * through the last will be sequential modulo the ring size */ RING_IDX cdr; /** * Optional extra info. Only valid if flags contains * NETTXF_extra_info. Note that extra.type will always be * XEN_NETIF_EXTRA_TYPE_GSO. Currently, no known netfront or netback * driver will ever set XEN_NETIF_EXTRA_TYPE_MCAST_* */ netif_extra_info_t extra; /** Size of entire packet in bytes. */ uint16_t size; /** The size of the first entry's data in bytes */ uint16_t car_size; /** * Either NETTXF_ or NETRXF_ flags. Note that the flag values are * not the same for TX and RX packets */ uint16_t flags; /** * The number of valid data-bearing entries (either netif_tx_request's * or netif_rx_response's) in the packet. If this is 0, it means the * entire packet is invalid. 
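 * (The car and cdr member names above follow the Lisp convention for
 * the head of a list and the remainder of a list.)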
*/ uint16_t list_len; /** There was an error processing the packet */ uint8_t error; }; /** xnb_pkt method: initialize it */ static inline void xnb_pkt_initialize(struct xnb_pkt *pxnb) { bzero(pxnb, sizeof(*pxnb)); } /** xnb_pkt method: mark the packet as valid */ static inline void xnb_pkt_validate(struct xnb_pkt *pxnb) { pxnb->error = 0; }; /** xnb_pkt method: mark the packet as invalid */ static inline void xnb_pkt_invalidate(struct xnb_pkt *pxnb) { pxnb->error = 1; }; /** xnb_pkt method: Check whether the packet is valid */ static inline int xnb_pkt_is_valid(const struct xnb_pkt *pxnb) { return (! pxnb->error); } #ifdef XNB_DEBUG /** xnb_pkt method: print the packet's contents in human-readable format */ static void __unused xnb_dump_pkt(const struct xnb_pkt *pkt) { if (pkt == NULL) { DPRINTF("Was passed a null pointer.\n"); return; } DPRINTF("pkt address= %p\n", pkt); DPRINTF("pkt->size=%d\n", pkt->size); DPRINTF("pkt->car_size=%d\n", pkt->car_size); DPRINTF("pkt->flags=0x%04x\n", pkt->flags); DPRINTF("pkt->list_len=%d\n", pkt->list_len); /* DPRINTF("pkt->extra"); TODO */ DPRINTF("pkt->car=%d\n", pkt->car); DPRINTF("pkt->cdr=%d\n", pkt->cdr); DPRINTF("pkt->error=%d\n", pkt->error); } #endif /* XNB_DEBUG */ static void xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq) { if (txreq != NULL) { DPRINTF("netif_tx_request index =%u\n", idx); DPRINTF("netif_tx_request.gref =%u\n", txreq->gref); DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset); DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags); DPRINTF("netif_tx_request.id =%hu\n", txreq->id); DPRINTF("netif_tx_request.size =%hu\n", txreq->size); } } /** * \brief Configuration data for a shared memory request ring * used to communicate with the front-end client of * this driver. */ struct xnb_ring_config { /** * Runtime structures for ring access. Unfortunately, TX and RX rings * use different data structures, and that cannot be changed since it * is part of the interdomain protocol. */ union{ netif_rx_back_ring_t rx_ring; netif_tx_back_ring_t tx_ring; } back_ring; /** * The device bus address returned by the hypervisor when * mapping the ring and required to unmap it when a connection * is torn down. */ uint64_t bus_addr; /** The pseudo-physical address where ring memory is mapped.*/ uint64_t gnt_addr; /** KVA address where ring memory is mapped. */ vm_offset_t va; /** * Grant table handles, one per-ring page, returned by the * hypervisor upon mapping of the ring and required to * unmap it when a connection is torn down. */ grant_handle_t handle; /** The number of ring pages mapped for the current connection. */ unsigned ring_pages; /** * The grant references, one per-ring page, supplied by the * front-end, allowing us to reference the ring pages in the * front-end's domain and to map these pages into our own domain. */ grant_ref_t ring_ref; }; /** * Per-instance connection state flags. */ typedef enum { /** Communication with the front-end has been established. */ XNBF_RING_CONNECTED = 0x01, /** * Front-end requests exist in the ring and are waiting for * xnb_xen_req objects to free up. */ XNBF_RESOURCE_SHORTAGE = 0x02, /** Connection teardown has started. */ XNBF_SHUTDOWN = 0x04, /** A thread is already performing shutdown processing. */ XNBF_IN_SHUTDOWN = 0x08 } xnb_flag_t; /** * Types of rings. 
Used for array indices and to identify a ring's control * data structure type */ typedef enum{ XNB_RING_TYPE_TX = 0, /* ID of TX rings, used for array indices */ XNB_RING_TYPE_RX = 1, /* ID of RX rings, used for array indices */ XNB_NUM_RING_TYPES } xnb_ring_type_t; /** * Per-instance configuration data. */ struct xnb_softc { /** NewBus device corresponding to this instance. */ device_t dev; /* Media related fields */ /** Generic network media state */ struct ifmedia sc_media; /** Media carrier info */ struct ifnet *xnb_ifp; /** Our own private carrier state */ unsigned carrier; /** Device MAC Address */ uint8_t mac[ETHER_ADDR_LEN]; /* Xen related fields */ /** * \brief The netif protocol abi in effect. * * There are situations where the back and front ends can * have a different, native abi (e.g. intel x86_64 and * 32bit x86 domains on the same machine). The back-end * always accommodates the front-end's native abi. That * value is pulled from the XenStore and recorded here. */ int abi; /** * Name of the bridge to which this VIF is connected, if any. * This field is dynamically allocated by xenbus and must be free()ed * when no longer needed. */ char *bridge; /** The interrupt-driven event channel used to signal ring events. */ evtchn_port_t evtchn; /** Xen device handle. */ long handle; /** Handle to the communication ring event channel. */ xen_intr_handle_t xen_intr_handle; /** * \brief Cached value of the front-end's domain id. * * This value is used once for each mapped page in * a transaction. We cache it to avoid incurring the * cost of an ivar access every time this is needed. */ domid_t otherend_id; /** * Undocumented frontend feature. Has something to do with * scatter/gather IO */ uint8_t can_sg; /** Undocumented frontend feature */ uint8_t gso; /** Undocumented frontend feature */ uint8_t gso_prefix; /** Can checksum TCP/UDP over IPv4 */ uint8_t ip_csum; /* Implementation related fields */ /** * Preallocated grant table copy descriptor for RX operations. * Access must be protected by rx_lock */ gnttab_copy_table rx_gnttab; /** * Preallocated grant table copy descriptor for TX operations. * Access must be protected by tx_lock */ gnttab_copy_table tx_gnttab; /** * Resource representing allocated physical address space * associated with our per-instance kva region. */ struct resource *pseudo_phys_res; /** Resource id for allocated physical address space. */ int pseudo_phys_res_id; /** Ring mapping and interrupt configuration data. */ struct xnb_ring_config ring_configs[XNB_NUM_RING_TYPES]; /** * Global pool of kva used for mapping remote domain ring * and I/O transaction data. */ vm_offset_t kva; /** Pseudo-physical address corresponding to kva. */ uint64_t gnt_base_addr; /** Various configuration and state bit flags. */ xnb_flag_t flags; /** Mutex protecting per-instance data in the receive path. */ struct mtx rx_lock; /** Mutex protecting per-instance data in the softc structure. */ struct mtx sc_lock; /** Mutex protecting per-instance data in the transmit path. */ struct mtx tx_lock; /** The size of the global kva pool. 
*/ int kva_size; /** Name of the interface */ char if_name[IFNAMSIZ]; }; /*---------------------------- Debugging functions ---------------------------*/ #ifdef XNB_DEBUG static void __unused xnb_dump_gnttab_copy(const struct gnttab_copy *entry) { if (entry == NULL) { printf("NULL grant table pointer\n"); return; } if (entry->flags & GNTCOPY_dest_gref) printf("gnttab dest ref=\t%u\n", entry->dest.u.ref); else printf("gnttab dest gmfn=\t%"PRI_xen_pfn"\n", entry->dest.u.gmfn); printf("gnttab dest offset=\t%hu\n", entry->dest.offset); printf("gnttab dest domid=\t%hu\n", entry->dest.domid); if (entry->flags & GNTCOPY_source_gref) printf("gnttab source ref=\t%u\n", entry->source.u.ref); else printf("gnttab source gmfn=\t%"PRI_xen_pfn"\n", entry->source.u.gmfn); printf("gnttab source offset=\t%hu\n", entry->source.offset); printf("gnttab source domid=\t%hu\n", entry->source.domid); printf("gnttab len=\t%hu\n", entry->len); printf("gnttab flags=\t%hu\n", entry->flags); printf("gnttab status=\t%hd\n", entry->status); } static int xnb_dump_rings(SYSCTL_HANDLER_ARGS) { static char results[720]; struct xnb_softc const* xnb = (struct xnb_softc*)arg1; netif_rx_back_ring_t const* rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring; netif_tx_back_ring_t const* txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; /* empty the result strings */ results[0] = 0; if ( !txb || !txb->sring || !rxb || !rxb->sring ) return (SYSCTL_OUT(req, results, strnlen(results, 720))); snprintf(results, 720, "\n\t%35s %18s\n" /* TX, RX */ "\t%16s %18d %18d\n" /* req_cons */ "\t%16s %18d %18d\n" /* nr_ents */ "\t%16s %18d %18d\n" /* rsp_prod_pvt */ "\t%16s %18p %18p\n" /* sring */ "\t%16s %18d %18d\n" /* req_prod */ "\t%16s %18d %18d\n" /* req_event */ "\t%16s %18d %18d\n" /* rsp_prod */ "\t%16s %18d %18d\n", /* rsp_event */ "TX", "RX", "req_cons", txb->req_cons, rxb->req_cons, "nr_ents", txb->nr_ents, rxb->nr_ents, "rsp_prod_pvt", txb->rsp_prod_pvt, rxb->rsp_prod_pvt, "sring", txb->sring, rxb->sring, "sring->req_prod", txb->sring->req_prod, rxb->sring->req_prod, "sring->req_event", txb->sring->req_event, rxb->sring->req_event, "sring->rsp_prod", txb->sring->rsp_prod, rxb->sring->rsp_prod, "sring->rsp_event", txb->sring->rsp_event, rxb->sring->rsp_event); return (SYSCTL_OUT(req, results, strnlen(results, 720))); } static void __unused xnb_dump_mbuf(const struct mbuf *m) { int len; uint8_t *d; if (m == NULL) return; printf("xnb_dump_mbuf:\n"); if (m->m_flags & M_PKTHDR) { printf(" flowid=%10d, csum_flags=%#8x, csum_data=%#8x, " "tso_segsz=%5hd\n", m->m_pkthdr.flowid, (int)m->m_pkthdr.csum_flags, m->m_pkthdr.csum_data, m->m_pkthdr.tso_segsz); printf(" rcvif=%16p, len=%19d\n", m->m_pkthdr.rcvif, m->m_pkthdr.len); } printf(" m_next=%16p, m_nextpk=%16p, m_data=%16p\n", m->m_next, m->m_nextpkt, m->m_data); printf(" m_len=%17d, m_flags=%#15x, m_type=%18u\n", m->m_len, m->m_flags, m->m_type); len = m->m_len; d = mtod(m, uint8_t*); while (len > 0) { int i; printf(" "); for (i = 0; (i < 16) && (len > 0); i++, len--) { printf("%02hhx ", *(d++)); } printf("\n"); } } #endif /* XNB_DEBUG */ /*------------------------ Inter-Domain Communication ------------------------*/ /** * Free dynamically allocated KVA or pseudo-physical address allocations. * * \param xnb Per-instance xnb configuration structure. 
*/ static void xnb_free_communication_mem(struct xnb_softc *xnb) { if (xnb->kva != 0) { if (xnb->pseudo_phys_res != NULL) { xenmem_free(xnb->dev, xnb->pseudo_phys_res_id, xnb->pseudo_phys_res); xnb->pseudo_phys_res = NULL; } } xnb->kva = 0; xnb->gnt_base_addr = 0; } /** * Cleanup all inter-domain communication mechanisms. * * \param xnb Per-instance xnb configuration structure. */ static int xnb_disconnect(struct xnb_softc *xnb) { struct gnttab_unmap_grant_ref gnts[XNB_NUM_RING_TYPES]; int error; int i; if (xnb->xen_intr_handle != NULL) xen_intr_unbind(&xnb->xen_intr_handle); /* * We may still have another thread currently processing requests. We * must acquire the rx and tx locks to make sure those threads are done, * but we can release those locks as soon as we acquire them, because no * more interrupts will be arriving. */ mtx_lock(&xnb->tx_lock); mtx_unlock(&xnb->tx_lock); mtx_lock(&xnb->rx_lock); mtx_unlock(&xnb->rx_lock); /* Free malloc'd softc member variables */ if (xnb->bridge != NULL) { free(xnb->bridge, M_XENSTORE); xnb->bridge = NULL; } /* All request processing has stopped, so unmap the rings */ for (i=0; i < XNB_NUM_RING_TYPES; i++) { gnts[i].host_addr = xnb->ring_configs[i].gnt_addr; gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr; gnts[i].handle = xnb->ring_configs[i].handle; } error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, gnts, XNB_NUM_RING_TYPES); KASSERT(error == 0, ("Grant table unmap op failed (%d)", error)); xnb_free_communication_mem(xnb); /* * Zero the ring config structs because the pointers, handles, and * grant refs contained therein are no longer valid. */ bzero(&xnb->ring_configs[XNB_RING_TYPE_TX], sizeof(struct xnb_ring_config)); bzero(&xnb->ring_configs[XNB_RING_TYPE_RX], sizeof(struct xnb_ring_config)); xnb->flags &= ~XNBF_RING_CONNECTED; return (0); } /** * Map a single shared memory ring into domain local address space and * initialize its control structure * * \param xnb Per-instance xnb configuration structure * \param ring_type Array index of this ring in the xnb's array of rings * \return An errno */ static int xnb_connect_ring(struct xnb_softc *xnb, xnb_ring_type_t ring_type) { struct gnttab_map_grant_ref gnt; struct xnb_ring_config *ring = &xnb->ring_configs[ring_type]; int error; /* TX ring type = 0, RX =1 */ ring->va = xnb->kva + ring_type * PAGE_SIZE; ring->gnt_addr = xnb->gnt_base_addr + ring_type * PAGE_SIZE; gnt.host_addr = ring->gnt_addr; gnt.flags = GNTMAP_host_map; gnt.ref = ring->ring_ref; gnt.dom = xnb->otherend_id; error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &gnt, 1); if (error != 0) panic("netback: Ring page grant table op failed (%d)", error); if (gnt.status != 0) { ring->va = 0; error = EACCES; xenbus_dev_fatal(xnb->dev, error, "Ring shared page mapping failed. " "Status %d.", gnt.status); } else { ring->handle = gnt.handle; ring->bus_addr = gnt.dev_bus_addr; if (ring_type == XNB_RING_TYPE_TX) { BACK_RING_INIT(&ring->back_ring.tx_ring, (netif_tx_sring_t*)ring->va, ring->ring_pages * PAGE_SIZE); } else if (ring_type == XNB_RING_TYPE_RX) { BACK_RING_INIT(&ring->back_ring.rx_ring, (netif_rx_sring_t*)ring->va, ring->ring_pages * PAGE_SIZE); } else { xenbus_dev_fatal(xnb->dev, error, "Unknown ring type %d", ring_type); } } return error; } /** * Setup the shared memory rings and bind an interrupt to the event channel * used to notify us of ring changes. * * \param xnb Per-instance xnb configuration structure. 
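 *
 * The rings are mapped before the event channel is bound, so the
 * interrupt handler installed here can never observe a partially
 * constructed connection; if the bind fails, the rings are torn back
 * down via xnb_disconnect().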
*/ static int xnb_connect_comms(struct xnb_softc *xnb) { int error; xnb_ring_type_t i; if ((xnb->flags & XNBF_RING_CONNECTED) != 0) return (0); /* * Kva for our rings are at the tail of the region of kva allocated * by xnb_alloc_communication_mem(). */ for (i=0; i < XNB_NUM_RING_TYPES; i++) { error = xnb_connect_ring(xnb, i); if (error != 0) return error; } xnb->flags |= XNBF_RING_CONNECTED; error = xen_intr_bind_remote_port(xnb->dev, xnb->otherend_id, xnb->evtchn, /*filter*/NULL, xnb_intr, /*arg*/xnb, INTR_TYPE_BIO | INTR_MPSAFE, &xnb->xen_intr_handle); if (error != 0) { (void)xnb_disconnect(xnb); xenbus_dev_fatal(xnb->dev, error, "binding event channel"); return (error); } DPRINTF("rings connected!\n"); return (0); } /** * Size KVA and pseudo-physical address allocations based on negotiated * values for the size and number of I/O requests, and the size of our * communication ring. * * \param xnb Per-instance xnb configuration structure. * * These address spaces are used to dynamically map pages in the * front-end's domain into our own. */ static int xnb_alloc_communication_mem(struct xnb_softc *xnb) { xnb_ring_type_t i; xnb->kva_size = 0; for (i=0; i < XNB_NUM_RING_TYPES; i++) { xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE; } /* * Reserve a range of pseudo physical memory that we can map * into kva. These pages will only be backed by machine * pages ("real memory") during the lifetime of front-end requests * via grant table operations. We will map the netif tx and rx rings * into this space. */ xnb->pseudo_phys_res_id = 0; xnb->pseudo_phys_res = xenmem_alloc(xnb->dev, &xnb->pseudo_phys_res_id, xnb->kva_size); if (xnb->pseudo_phys_res == NULL) { xnb->kva = 0; return (ENOMEM); } xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res); xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res); return (0); } /** * Collect information from the XenStore related to our device and its frontend * * \param xnb Per-instance xnb configuration structure. */ static int xnb_collect_xenstore_info(struct xnb_softc *xnb) { /** * \todo Linux collects the following info. We should collect most * of this, too: * "feature-rx-notify" */ const char *otherend_path; const char *our_path; int err; unsigned int rx_copy, bridge_len; uint8_t no_csum_offload; otherend_path = xenbus_get_otherend_path(xnb->dev); our_path = xenbus_get_node(xnb->dev); /* Collect the critical communication parameters */ err = xs_gather(XST_NIL, otherend_path, "tx-ring-ref", "%l" PRIu32, &xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref, "rx-ring-ref", "%l" PRIu32, &xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref, "event-channel", "%" PRIu32, &xnb->evtchn, NULL); if (err != 0) { xenbus_dev_fatal(xnb->dev, err, "Unable to retrieve ring information from " "frontend %s. Unable to connect.", otherend_path); return (err); } /* Collect the handle from xenstore */ err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle); if (err != 0) { xenbus_dev_fatal(xnb->dev, err, "Error reading handle from frontend %s. " "Unable to connect.", otherend_path); } /* * Collect the bridgename, if any. We do not need bridge_len; we just * throw it away */ err = xs_read(XST_NIL, our_path, "bridge", &bridge_len, (void**)&xnb->bridge); if (err != 0) xnb->bridge = NULL; /* * Does the frontend request that we use rx copy? If not, return an * error because this driver only supports rx copy. 
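 *
 * For illustration, the frontend nodes consulted below might look like
 * this in the XenStore (all values hypothetical):
 *
 *	<otherend_path>/request-rx-copy = "1"
 *	<otherend_path>/feature-sg = "1"
 *	<otherend_path>/feature-gso-tcpv4 = "1"
 *	<otherend_path>/feature-no-csum-offload = "0"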
*/ err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL, "%" PRIu32, &rx_copy); if (err == ENOENT) { err = 0; rx_copy = 0; } if (err < 0) { xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy", otherend_path); return err; } /** * \todo: figure out the exact meaning of this feature, and when * the frontend will set it to true. It should be set to true * at some point */ /* if (!rx_copy)*/ /* return EOPNOTSUPP;*/ /** \todo Collect the rx notify feature */ /* Collect the feature-sg. */ if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL, "%hhu", &xnb->can_sg) < 0) xnb->can_sg = 0; /* Collect remaining frontend features */ if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL, "%hhu", &xnb->gso) < 0) xnb->gso = 0; if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL, "%hhu", &xnb->gso_prefix) < 0) xnb->gso_prefix = 0; if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL, "%hhu", &no_csum_offload) < 0) no_csum_offload = 0; xnb->ip_csum = (no_csum_offload == 0); return (0); } /** * Supply information about the physical device to the frontend * via XenBus. * * \param xnb Per-instance xnb configuration structure. */ static int xnb_publish_backend_info(struct xnb_softc *xnb) { struct xs_transaction xst; const char *our_path; int error; our_path = xenbus_get_node(xnb->dev); do { error = xs_transaction_start(&xst); if (error != 0) { xenbus_dev_fatal(xnb->dev, error, "Error publishing backend info " "(start transaction)"); break; } error = xs_printf(xst, our_path, "feature-sg", "%d", XNB_SG); if (error != 0) break; error = xs_printf(xst, our_path, "feature-gso-tcpv4", "%d", XNB_GSO_TCPV4); if (error != 0) break; error = xs_printf(xst, our_path, "feature-rx-copy", "%d", XNB_RX_COPY); if (error != 0) break; error = xs_printf(xst, our_path, "feature-rx-flip", "%d", XNB_RX_FLIP); if (error != 0) break; error = xs_transaction_end(xst, 0); if (error != 0 && error != EAGAIN) { xenbus_dev_fatal(xnb->dev, error, "ending transaction"); break; } } while (error == EAGAIN); return (error); } /** * Connect to our netfront peer now that it has completed publishing * its configuration into the XenStore. * * \param xnb Per-instance xnb configuration structure. */ static void xnb_connect(struct xnb_softc *xnb) { int error; if (xenbus_get_state(xnb->dev) == XenbusStateConnected) return; if (xnb_collect_xenstore_info(xnb) != 0) return; xnb->flags &= ~XNBF_SHUTDOWN; /* Read front end configuration. */ /* Allocate resources whose size depends on front-end configuration. */ error = xnb_alloc_communication_mem(xnb); if (error != 0) { xenbus_dev_fatal(xnb->dev, error, "Unable to allocate communication memory"); return; } /* * Connect communication channel. */ error = xnb_connect_comms(xnb); if (error != 0) { /* Specific errors are reported by xnb_connect_comms(). */ return; } xnb->carrier = 1; /* Ready for I/O. */ xenbus_set_state(xnb->dev, XenbusStateConnected); } /*-------------------------- Device Teardown Support -------------------------*/ /** * Perform device shutdown functions. * * \param xnb Per-instance xnb configuration structure. * * Mark this instance as shutting down, wait for any active requests * to drain, disconnect from the front-end, and notify any waiters (e.g. * a thread invoking our detach method) that detach can now proceed. */ static int xnb_shutdown(struct xnb_softc *xnb) { /* * Due to the need to drop our mutex during some * xenbus operations, it is possible for two threads * to attempt to close out shutdown processing at * the same time. 
Tell the caller that hits this * race to try back later. */ if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0) return (EAGAIN); xnb->flags |= XNBF_SHUTDOWN; xnb->flags |= XNBF_IN_SHUTDOWN; mtx_unlock(&xnb->sc_lock); /* Free the network interface */ xnb->carrier = 0; if (xnb->xnb_ifp != NULL) { ether_ifdetach(xnb->xnb_ifp); if_free(xnb->xnb_ifp); xnb->xnb_ifp = NULL; } mtx_lock(&xnb->sc_lock); xnb_disconnect(xnb); mtx_unlock(&xnb->sc_lock); if (xenbus_get_state(xnb->dev) < XenbusStateClosing) xenbus_set_state(xnb->dev, XenbusStateClosing); mtx_lock(&xnb->sc_lock); xnb->flags &= ~XNBF_IN_SHUTDOWN; /* Indicate to xnb_detach() that it is safe to proceed. */ wakeup(xnb); return (0); } /** * Report an attach time error to the console and Xen, and clean up * this instance by forcing immediate detach processing. * * \param xnb Per-instance xnb configuration structure. * \param err Errno describing the error. * \param fmt Printf style format and arguments */ static void xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...) { va_list ap; va_list ap_hotplug; va_start(ap, fmt); va_copy(ap_hotplug, ap); xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev), "hotplug-error", fmt, ap_hotplug); va_end(ap_hotplug); xs_printf(XST_NIL, xenbus_get_node(xnb->dev), "hotplug-status", "error"); xenbus_dev_vfatal(xnb->dev, err, fmt, ap); va_end(ap); xs_printf(XST_NIL, xenbus_get_node(xnb->dev), "online", "0"); xnb_detach(xnb->dev); } /*---------------------------- NewBus Entrypoints ----------------------------*/ /** * Inspect a XenBus device and claim it if it is of the appropriate type. * * \param dev NewBus device object representing a candidate XenBus device. * * \return 0 for success, errno codes for failure. */ static int xnb_probe(device_t dev) { if (!strcmp(xenbus_get_type(dev), "vif")) { DPRINTF("Claiming device %d, %s\n", device_get_unit(dev), devclass_get_name(device_get_devclass(dev))); device_set_desc(dev, "Backend Virtual Network Device"); device_quiet(dev); return (0); } return (ENXIO); } /** * Set up sysctl variables to control various Network Back parameters. * * \param xnb Xen Net Back softc. */ static void xnb_setup_sysctl(struct xnb_softc *xnb) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; sysctl_ctx = device_get_sysctl_ctx(xnb->dev); if (sysctl_ctx == NULL) return; sysctl_tree = device_get_sysctl_tree(xnb->dev); if (sysctl_tree == NULL) return; #ifdef XNB_DEBUG SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "unit_test_results", CTLTYPE_STRING | CTLFLAG_RD, xnb, 0, xnb_unit_test_main, "A", "Results of builtin unit tests"); SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "dump_rings", CTLTYPE_STRING | CTLFLAG_RD, xnb, 0, xnb_dump_rings, "A", "Xennet Back Rings"); #endif /* XNB_DEBUG */ }
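/*
 * For illustration only: with XNB_DEBUG compiled in, the node registered
 * above can be read from userland with sysctl(8); the unit number in this
 * example is hypothetical:
 *
 *	# sysctl dev.xnb.0.dump_rings
 */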
/** * Create a network device. * @param dev NewBus device object representing this backend instance */ int create_netdev(device_t dev) { struct ifnet *ifp; struct xnb_softc *xnb; int err = 0; uint32_t handle; xnb = device_get_softc(dev); mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF); mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF); mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF); xnb->dev = dev; ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts); ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL); /* * Set the MAC address to a dummy value (00:00:00:00:00:00). If the * MAC address of the host-facing interface were set to the same value * as the guest-facing one (the value found in xenstore), the bridge * would stop delivering packets to us because it would see that the * destination address of the packet is the same as the interface's, * and so it would expect the packet to have already been delivered * locally (and just drop it). */ bzero(&xnb->mac[0], sizeof(xnb->mac)); /* The interface will be named using the following nomenclature: * * xnb<domid>.<handle> * * Where handle is the order of the interface as referred to by the * guest. */ err = xs_scanf(XST_NIL, xenbus_get_node(xnb->dev), "handle", NULL, "%" PRIu32, &handle); if (err != 0) return (err); snprintf(xnb->if_name, IFNAMSIZ, "xnb%" PRIu16 ".%" PRIu32, xenbus_get_otherend_id(dev), handle); if (err == 0) { /* Set up ifnet structure */ ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER); ifp->if_softc = xnb; if_initname(ifp, xnb->if_name, IF_DUNIT_NONE); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = xnb_ioctl; ifp->if_output = ether_output; ifp->if_start = xnb_start; #ifdef notyet ifp->if_watchdog = xnb_watchdog; #endif ifp->if_init = xnb_ifinit; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1; ifp->if_hwassist = XNB_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM; ifp->if_capenable = IFCAP_HWCSUM; ether_ifattach(ifp, xnb->mac); xnb->carrier = 0; } return err; } /** * Attach to a XenBus device that has been claimed by our probe routine. * * \param dev NewBus device object representing this Xen Net Back instance. * * \return 0 for success, errno codes for failure. */ static int xnb_attach(device_t dev) { struct xnb_softc *xnb; int error; xnb_ring_type_t i; error = create_netdev(dev); if (error != 0) { xenbus_dev_fatal(dev, error, "creating netdev"); return (error); } DPRINTF("Attaching to %s\n", xenbus_get_node(dev)); /* * Basic initialization. * After this block it is safe to call xnb_detach() * to clean up any allocated data for this instance. */ xnb = device_get_softc(dev); xnb->otherend_id = xenbus_get_otherend_id(dev); for (i=0; i < XNB_NUM_RING_TYPES; i++) { xnb->ring_configs[i].ring_pages = 1; } /* * Set up sysctl variables. */ xnb_setup_sysctl(xnb); /* Update hot-plug status to satisfy xend. */ error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev), "hotplug-status", "connected"); if (error != 0) { xnb_attach_failed(xnb, error, "writing %s/hotplug-status", xenbus_get_node(xnb->dev)); return (error); } if ((error = xnb_publish_backend_info(xnb)) != 0) { /* * If we can't publish our data, we cannot participate * in this connection, and waiting for a front-end state * change will not help the situation. */ xnb_attach_failed(xnb, error, "Publishing backend status for %s", xenbus_get_node(xnb->dev)); return error; } /* Tell the front end that we are ready to connect.
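 * (Entering XenbusStateInitWait prompts the frontend to publish its ring
 * references and event channel; when it then transitions to Initialised
 * or Connected, xnb_frontend_changed() calls xnb_connect() to collect
 * those values and map the rings.)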
*/ xenbus_set_state(dev, XenbusStateInitWait); return (0); } /** * Detach from a net back device instance. * * \param dev NewBus device object representing this Xen Net Back instance. * * \return 0 for success, errno codes for failure. * * \note A net back device may be detached at any time in its life-cycle, * including part way through the attach process. For this reason, * initialization order and the initialization state checks in this * routine must be carefully coupled so that attach time failures * are gracefully handled. */ static int xnb_detach(device_t dev) { struct xnb_softc *xnb; DPRINTF("\n"); xnb = device_get_softc(dev); mtx_lock(&xnb->sc_lock); while (xnb_shutdown(xnb) == EAGAIN) { msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0, "xnb_shutdown", 0); } mtx_unlock(&xnb->sc_lock); DPRINTF("\n"); mtx_destroy(&xnb->tx_lock); mtx_destroy(&xnb->rx_lock); mtx_destroy(&xnb->sc_lock); return (0); } /** * Prepare this net back device for suspension of this VM. * * \param dev NewBus device object representing this Xen Net Back instance. * * \return 0 for success, errno codes for failure. */ static int xnb_suspend(device_t dev) { return (0); } /** * Perform any processing required to recover from a suspended state. * * \param dev NewBus device object representing this Xen Net Back instance. * * \return 0 for success, errno codes for failure. */ static int xnb_resume(device_t dev) { return (0); } /** * Handle state changes expressed via the XenStore by our front-end peer. * * \param dev NewBus device object representing this Xen * Net Back instance. * \param frontend_state The new state of the front-end. */ static void xnb_frontend_changed(device_t dev, XenbusState frontend_state) { struct xnb_softc *xnb; xnb = device_get_softc(dev); DPRINTF("frontend_state=%s, xnb_state=%s\n", xenbus_strstate(frontend_state), xenbus_strstate(xenbus_get_state(xnb->dev))); switch (frontend_state) { case XenbusStateInitialising: break; case XenbusStateInitialised: case XenbusStateConnected: xnb_connect(xnb); break; case XenbusStateClosing: case XenbusStateClosed: mtx_lock(&xnb->sc_lock); xnb_shutdown(xnb); mtx_unlock(&xnb->sc_lock); if (frontend_state == XenbusStateClosed) xenbus_set_state(xnb->dev, XenbusStateClosed); break; default: xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend", frontend_state); break; } } /*---------------------------- Request Processing ----------------------------*/ /** * Interrupt handler bound to the shared ring's event channel. * Entry point for the xennet transmit path in netback. * Transfers packets from the Xen ring to the host's generic networking stack. * * \param arg Callback argument registered during event channel * binding - the xnb_softc for this instance.
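 *
 * The loop below uses the usual lockless Xen ring idiom to avoid a
 * missed-event race: after draining all pending requests it re-arms
 * frontend notifications by setting sring->req_event to req_cons + 1,
 * then compares sring->req_prod against the value sampled at the top of
 * the pass; if new requests arrived in that window, the outer do/while
 * makes another pass, so no event is lost.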
*/ static void xnb_intr(void *arg) { struct xnb_softc *xnb; struct ifnet *ifp; netif_tx_back_ring_t *txb; RING_IDX req_prod_local; xnb = (struct xnb_softc *)arg; ifp = xnb->xnb_ifp; txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; mtx_lock(&xnb->tx_lock); do { int notify; req_prod_local = txb->sring->req_prod; xen_rmb(); for (;;) { struct mbuf *mbufc; int err; err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp, xnb->tx_gnttab); if (err || (mbufc == NULL)) break; /* Send the packet to the generic network stack */ (*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc); } RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify); if (notify != 0) xen_intr_signal(xnb->xen_intr_handle); txb->sring->req_event = txb->req_cons + 1; xen_mb(); } while (txb->sring->req_prod != req_prod_local) ; mtx_unlock(&xnb->tx_lock); xnb_start(ifp); } /** * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring. * Will read exactly 0 or 1 packets from the ring; never a partial packet. * \param[out] pkt The returned packet. If there is an error building * the packet, pkt.list_len will be set to 0. * \param[in] tx_ring Pointer to the Ring that is the input to this function * \param[in] start The ring index of the first potential request * \return The number of requests consumed to build this packet */ static int xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring, RING_IDX start) { /* * Outline: * 1) Initialize pkt * 2) Read the first request of the packet * 3) Read the extras * 4) Set cdr * 5) Loop on the remainder of the packet * 6) Finalize pkt (stuff like car_size and list_len) */ int idx = start; int discard = 0; /* whether to discard the packet */ int more_data = 0; /* there are more requests past the last one */ uint16_t cdr_size = 0; /* accumulated size of requests 2 through n */ xnb_pkt_initialize(pkt); /* Read the first request */ if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx); pkt->size = tx->size; pkt->flags = tx->flags & ~NETTXF_more_data; more_data = tx->flags & NETTXF_more_data; pkt->list_len++; pkt->car = idx; idx++; } /* Read the extra info */ if ((pkt->flags & NETTXF_extra_info) && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { netif_extra_info_t *ext = (netif_extra_info_t*) RING_GET_REQUEST(tx_ring, idx); pkt->extra.type = ext->type; switch (pkt->extra.type) { case XEN_NETIF_EXTRA_TYPE_GSO: pkt->extra.u.gso = ext->u.gso; break; default: /* * The reference Linux netfront driver will * never set any other extra.type. So we don't * know what to do with it. Let's print an * error, then consume and discard the packet. */ printf("xnb(%s:%d): Unknown extra info type %d." " Discarding packet\n", __func__, __LINE__, pkt->extra.type); xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start)); xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx)); discard = 1; break; } pkt->extra.flags = ext->flags; if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) { /* * The reference Linux netfront driver never sets this * flag (nor does any other known netfront). So we * will discard the packet. */ printf("xnb(%s:%d): Request sets " "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle " "that\n", __func__, __LINE__); xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start)); xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx)); discard = 1; } idx++; }
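	/*
	 * Worked example (hypothetical values): a packet built from three
	 * tx requests starting at ring index 10, with no extra info, ends
	 * up with car = 10, cdr = 11, list_len = 3, and car_size equal to
	 * pkt->size minus the sizes of requests 11 and 12.
	 */
	/* Set cdr.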
If there is not more data, cdr is invalid */ pkt->cdr = idx; /* Loop on remainder of packet */ while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) { netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx); pkt->list_len++; cdr_size += tx->size; if (tx->flags & ~NETTXF_more_data) { /* There should be no other flags set at this point */ printf("xnb(%s:%d): Request sets unknown flags %d " "after the 1st request in the packet.\n", __func__, __LINE__, tx->flags); xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start)); xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx)); } more_data = tx->flags & NETTXF_more_data; idx++; } /* Finalize packet */ if (more_data != 0) { /* The ring ran out of requests before finishing the packet */ xnb_pkt_invalidate(pkt); idx = start; /* tell caller that we consumed no requests */ } else { /* Calculate car_size */ pkt->car_size = pkt->size - cdr_size; } if (discard != 0) { xnb_pkt_invalidate(pkt); } return idx - start; } /** * Respond to all the requests that constituted pkt. Builds the responses and * writes them to the ring, but doesn't push them to the shared ring. * \param[in] pkt the packet that needs a response * \param[in] error true if there was an error handling the packet, such * as in the hypervisor copy op or mbuf allocation * \param[out] ring Responses go here */ static void xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring, int error) { /* * Outline: * 1) Respond to the first request * 2) Respond to the extra info request * 3) Loop through every remaining request in the packet, generating * responses that copy those requests' ids and set the status * appropriately. */ netif_tx_request_t *tx; netif_tx_response_t *rsp; int i; uint16_t status; status = (xnb_pkt_is_valid(pkt) == 0) || error ? NETIF_RSP_ERROR : NETIF_RSP_OKAY; KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car), ("Cannot respond to ring requests out of order")); if (pkt->list_len >= 1) { uint16_t id; tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt); id = tx->id; rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); rsp->id = id; rsp->status = status; ring->rsp_prod_pvt++; if (pkt->flags & NETTXF_extra_info) { rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); rsp->status = NETIF_RSP_NULL; ring->rsp_prod_pvt++; } } for (i=0; i < pkt->list_len - 1; i++) { uint16_t id; tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt); id = tx->id; rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); rsp->id = id; rsp->status = status; ring->rsp_prod_pvt++; } } /** * Create an mbuf chain to represent a packet. Initializes all of the headers * in the mbuf chain, but does not copy the data. The returned chain must be * m_freem()'d when no longer needed. * \param[in] pkt A packet to model the mbuf chain after * \return A newly allocated mbuf chain, possibly with clusters attached. * NULL on failure */ static struct mbuf* xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp) { /** * \todo consider using a memory pool for mbufs instead of * reallocating them for every packet */ /** \todo handle extra data */ struct mbuf *m; m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA); if (m != NULL) { m->m_pkthdr.rcvif = ifp; if (pkt->flags & NETTXF_data_validated) { /* * We lie to the host OS and always tell it that the * checksums are ok, because the packet is unlikely to * get corrupted going across domains.
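 * Setting CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data = 0xffff
 * (below) is the conventional way to tell the stack that the L4 checksum
 * has already been verified, so it is not checked again.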
*/ m->m_pkthdr.csum_flags = ( CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR ); m->m_pkthdr.csum_data = 0xffff; } } return m; } /** * Build a gnttab_copy table that can be used to copy data from a pkt * to an mbufc. Does not actually perform the copy. Always uses gref's on * the packet side. * \param[in] pkt pkt's associated requests form the src for * the copy operation * \param[in] mbufc mbufc's storage forms the dest for the copy operation * \param[out] gnttab Storage for the returned grant table * \param[in] txb Pointer to the backend ring structure * \param[in] otherend_id The domain ID of the other end of the copy * \return The number of gnttab entries filled */ static int xnb_txpkt2gnttab(const struct xnb_pkt *pkt, struct mbuf *mbufc, gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb, domid_t otherend_id) { struct mbuf *mbuf = mbufc;/* current mbuf within the chain */ int gnt_idx = 0; /* index into grant table */ RING_IDX r_idx = pkt->car; /* index into tx ring buffer */ int r_ofs = 0; /* offset of next data within tx request's data area */ int m_ofs = 0; /* offset of next data within mbuf's data area */ /* size in bytes that still needs to be represented in the table */ uint16_t size_remaining = pkt->size; while (size_remaining > 0) { const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx); const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs; const size_t req_size = r_idx == pkt->car ? pkt->car_size : txq->size; const size_t pkt_space = req_size - r_ofs; /* * space is the largest amount of data that can be copied in the * grant table's next entry */ const size_t space = MIN(pkt_space, mbuf_space); /* TODO: handle this error condition without panicking */ KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short")); gnttab[gnt_idx].source.u.ref = txq->gref; gnttab[gnt_idx].source.domid = otherend_id; gnttab[gnt_idx].source.offset = txq->offset + r_ofs; gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn( mtod(mbuf, vm_offset_t) + m_ofs); gnttab[gnt_idx].dest.offset = virt_to_offset( mtod(mbuf, vm_offset_t) + m_ofs); gnttab[gnt_idx].dest.domid = DOMID_SELF; gnttab[gnt_idx].len = space; gnttab[gnt_idx].flags = GNTCOPY_source_gref; gnt_idx++; r_ofs += space; m_ofs += space; size_remaining -= space; if (req_size - r_ofs <= 0) { /* Must move to the next tx request */ r_ofs = 0; r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1; } if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) { /* Must move to the next mbuf */ m_ofs = 0; mbuf = mbuf->m_next; } } return gnt_idx; } /** * Check the status of the grant copy operations, and update mbufs various * non-data fields to reflect the data present. * \param[in,out] mbufc mbuf chain to update. The chain must be valid and of * the correct length, and data should already be present * \param[in] gnttab A grant table for a just completed copy op * \param[in] n_entries The number of valid entries in the grant table */ static void xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab, int n_entries) { struct mbuf *mbuf = mbufc; int i; size_t total_size = 0; for (i = 0; i < n_entries; i++) { KASSERT(gnttab[i].status == GNTST_okay, ("Some gnttab_copy entry had error status %hd\n", gnttab[i].status)); mbuf->m_len += gnttab[i].len; total_size += gnttab[i].len; if (M_TRAILINGSPACE(mbuf) <= 0) { mbuf = mbuf->m_next; } } mbufc->m_pkthdr.len = total_size; #if defined(INET) || defined(INET6) xnb_add_mbuf_cksum(mbufc); #endif } /** * Dequeue at most one packet from the shared ring * \param[in,out] txb Netif tx ring. 
A packet will be removed from it, and * its private indices will be updated. But the indices * will not be pushed to the shared ring. * \param[in] otherend Domain ID of the other end of the ring * \param[out] mbufc The assembled mbuf chain, ready to send to the generic * networking stack * \param[in] ifnet Interface to which the packet will be sent * \param[in,out] gnttab Pointer to enough memory for a grant table. We make * this a function parameter so that we will take less * stack space. * \return An error code */ static int xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc, struct ifnet *ifnet, gnttab_copy_table gnttab) { struct xnb_pkt pkt; /* number of tx requests consumed to build the last packet */ int num_consumed; int nr_ents; *mbufc = NULL; num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons); if (num_consumed == 0) return 0; /* Nothing to receive */ /* update statistics independent of errors */ if_inc_counter(ifnet, IFCOUNTER_IPACKETS, 1); /* * If we got here, then 1 or more requests were consumed, but the packet * is not necessarily valid. */ if (xnb_pkt_is_valid(&pkt) == 0) { /* got a garbage packet, respond and drop it */ xnb_txpkt2rsp(&pkt, txb, 1); txb->req_cons += num_consumed; DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n", num_consumed); if_inc_counter(ifnet, IFCOUNTER_IERRORS, 1); return EINVAL; } *mbufc = xnb_pkt2mbufc(&pkt, ifnet); if (*mbufc == NULL) { /* * Couldn't allocate mbufs. Respond and drop the packet. Do * not consume the requests. */ xnb_txpkt2rsp(&pkt, txb, 1); DPRINTF("xnb_intr: Couldn't allocate mbufs, num_consumed=%d\n", num_consumed); if_inc_counter(ifnet, IFCOUNTER_IQDROPS, 1); return ENOMEM; } nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend); if (nr_ents > 0) { int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, gnttab, nr_ents); KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n", hv_ret)); xnb_update_mbufc(*mbufc, gnttab, nr_ents); } xnb_txpkt2rsp(&pkt, txb, 0); txb->req_cons += num_consumed; return 0; } /** * Create an xnb_pkt based on the contents of an mbuf chain. * \param[in] mbufc mbuf chain to transform into a packet * \param[out] pkt Storage for the newly generated xnb_pkt * \param[in] start The ring index of the first available slot in the rx * ring * \param[in] space The number of free slots in the rx ring * \retval 0 Success * \retval EINVAL mbufc was corrupt or not convertible into a pkt * \retval EAGAIN There was not enough space in the ring to queue the * packet */ static int xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt, RING_IDX start, int space) { int retval = 0; if ((mbufc == NULL) || ( (mbufc->m_flags & M_PKTHDR) == 0) || (mbufc->m_pkthdr.len == 0)) { xnb_pkt_invalidate(pkt); retval = EINVAL; } else { int slots_required; xnb_pkt_validate(pkt); pkt->flags = 0; pkt->size = mbufc->m_pkthdr.len; pkt->car = start; pkt->car_size = mbufc->m_len; if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) { pkt->flags |= NETRXF_extra_info; pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz; pkt->extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; pkt->extra.u.gso.pad = 0; pkt->extra.u.gso.features = 0; pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO; pkt->extra.flags = 0; pkt->cdr = start + 2; } else { pkt->cdr = start + 1; } if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA)) { pkt->flags |= (NETRXF_csum_blank | NETRXF_data_validated); } /* * Each ring response can have up to PAGE_SIZE of data.
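 * For example (hypothetical numbers): a 6000-byte packet with 4096-byte
 * pages needs howmany(6000, 4096) = 2 responses, so NETRXF_more_data is
 * set on the first response.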
* Assume that we can defragment the mbuf chain efficiently * into responses so that each response but the last uses all * PAGE_SIZE bytes. */ - pkt->list_len = (pkt->size + PAGE_SIZE - 1) / PAGE_SIZE; + pkt->list_len = howmany(pkt->size, PAGE_SIZE); if (pkt->list_len > 1) { pkt->flags |= NETRXF_more_data; } slots_required = pkt->list_len + (pkt->flags & NETRXF_extra_info ? 1 : 0); if (slots_required > space) { xnb_pkt_invalidate(pkt); retval = EAGAIN; } } return retval; } /** * Build a gnttab_copy table that can be used to copy data from an mbuf chain * to the frontend's shared buffers. Does not actually perform the copy. * Always uses gref's on the other end's side. * \param[in] pkt pkt's associated responses form the dest for the copy * operation * \param[in] mbufc The source for the copy operation * \param[out] gnttab Storage for the returned grant table * \param[in] rxb Pointer to the backend ring structure * \param[in] otherend_id The domain ID of the other end of the copy * \return The number of gnttab entries filled */ static int xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc, gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb, domid_t otherend_id) { const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */ int gnt_idx = 0; /* index into grant table */ RING_IDX r_idx = pkt->car; /* index into rx ring buffer */ int r_ofs = 0; /* offset of next data within rx request's data area */ int m_ofs = 0; /* offset of next data within mbuf's data area */ /* size in bytes that still needs to be represented in the table */ uint16_t size_remaining; size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0; while (size_remaining > 0) { const netif_rx_request_t *rxq = RING_GET_REQUEST(rxb, r_idx); const size_t mbuf_space = mbuf->m_len - m_ofs; /* Xen shared pages have an implied size of PAGE_SIZE */ const size_t req_size = PAGE_SIZE; const size_t pkt_space = req_size - r_ofs; /* * space is the largest amount of data that can be copied in the * grant table's next entry */ const size_t space = MIN(pkt_space, mbuf_space); /* TODO: handle this error condition without panicking */ KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short")); gnttab[gnt_idx].dest.u.ref = rxq->gref; gnttab[gnt_idx].dest.domid = otherend_id; gnttab[gnt_idx].dest.offset = r_ofs; gnttab[gnt_idx].source.u.gmfn = virt_to_mfn( mtod(mbuf, vm_offset_t) + m_ofs); gnttab[gnt_idx].source.offset = virt_to_offset( mtod(mbuf, vm_offset_t) + m_ofs); gnttab[gnt_idx].source.domid = DOMID_SELF; gnttab[gnt_idx].len = space; gnttab[gnt_idx].flags = GNTCOPY_dest_gref; gnt_idx++; r_ofs += space; m_ofs += space; size_remaining -= space; if (req_size - r_ofs <= 0) { /* Must move to the next rx request */ r_ofs = 0; r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1; } if (mbuf->m_len - m_ofs <= 0) { /* Must move to the next mbuf */ m_ofs = 0; mbuf = mbuf->m_next; } } return gnt_idx; } /** * Generates responses for all the requests that constituted pkt. Builds * responses and writes them to the ring, but doesn't push the shared ring * indices. * \param[in] pkt the packet that needs a response * \param[in] gnttab The grant copy table corresponding to this packet. * Used to determine how many netif_rx_response_t's to * generate.
* \param[in] n_entries Number of relevant entries in the grant table * \param[out] ring Responses go here * \return The number of RX requests that were consumed to generate * the responses */ static int xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab, int n_entries, netif_rx_back_ring_t *ring) { /* * This code makes the following assumptions: * * All entries in gnttab set GNTCOPY_dest_gref * * The entries in gnttab are grouped by their grefs: any two * entries with the same gref must be adjacent */ int error = 0; int gnt_idx, i; int n_responses = 0; grant_ref_t last_gref = GRANT_REF_INVALID; RING_IDX r_idx; KASSERT(gnttab != NULL, ("Received a null grant table copy")); /* * In the event of an error, we only need to send one response to the * netfront. In that case, we mustn't write any data to the responses * after the one we send. So we must loop all the way through gnttab * looking for errors before we generate any responses. * * Since we're looping through the grant table anyway, we'll count the * number of different gref's in it, which will tell us how many * responses to generate */ for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) { int16_t status = gnttab[gnt_idx].status; if (status != GNTST_okay) { DPRINTF( "Got error %d for hypervisor gnttab_copy status\n", status); error = 1; break; } if (gnttab[gnt_idx].dest.u.ref != last_gref) { n_responses++; last_gref = gnttab[gnt_idx].dest.u.ref; } } if (error != 0) { uint16_t id; netif_rx_response_t *rsp; id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id; rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); rsp->id = id; rsp->status = NETIF_RSP_ERROR; n_responses = 1; } else { gnt_idx = 0; const int has_extra = pkt->flags & NETRXF_extra_info; if (has_extra != 0) n_responses++; for (i = 0; i < n_responses; i++) { netif_rx_request_t rxq; netif_rx_response_t *rsp; r_idx = ring->rsp_prod_pvt + i; /* * We copy the structure of rxq instead of making a * pointer because it shares the same memory as rsp. */ rxq = *(RING_GET_REQUEST(ring, r_idx)); rsp = RING_GET_RESPONSE(ring, r_idx); if (has_extra && (i == 1)) { netif_extra_info_t *ext = (netif_extra_info_t*)rsp; ext->type = XEN_NETIF_EXTRA_TYPE_GSO; ext->flags = 0; ext->u.gso.size = pkt->extra.u.gso.size; ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; ext->u.gso.pad = 0; ext->u.gso.features = 0; } else { rsp->id = rxq.id; rsp->status = GNTST_okay; rsp->offset = 0; rsp->flags = 0; if (i < pkt->list_len - 1) rsp->flags |= NETRXF_more_data; if ((i == 0) && has_extra) rsp->flags |= NETRXF_extra_info; if ((i == 0) && (pkt->flags & NETRXF_data_validated)) { rsp->flags |= NETRXF_data_validated; rsp->flags |= NETRXF_csum_blank; } rsp->status = 0; for (; gnt_idx < n_entries && gnttab[gnt_idx].dest.u.ref == rxq.gref; gnt_idx++) { rsp->status += gnttab[gnt_idx].len; } } } } ring->req_cons += n_responses; ring->rsp_prod_pvt += n_responses; return n_responses; } #if defined(INET) || defined(INET6) /** * Add IP, TCP, and/or UDP checksums to every mbuf in a chain. The first mbuf * in the chain must start with a struct ether_header. * * XXX This function will perform incorrectly on UDP packets that are split up * into multiple Ethernet frames.
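 * (Likely because the pseudo-header length here is derived from the
 * fragment's ip_len rather than from the full datagram length, and the
 * payload of later fragments is never summed.)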
*/ static void xnb_add_mbuf_cksum(struct mbuf *mbufc) { struct ether_header *eh; struct ip *iph; uint16_t ether_type; eh = mtod(mbufc, struct ether_header*); ether_type = ntohs(eh->ether_type); if (ether_type != ETHERTYPE_IP) { /* Nothing to calculate */ return; } iph = (struct ip*)(eh + 1); if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { iph->ip_sum = 0; iph->ip_sum = in_cksum_hdr(iph); } switch (iph->ip_p) { case IPPROTO_TCP: if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip); struct tcphdr *th = (struct tcphdr*)(iph + 1); th->th_sum = in_pseudo(iph->ip_src.s_addr, iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen)); th->th_sum = in_cksum_skip(mbufc, sizeof(struct ether_header) + ntohs(iph->ip_len), sizeof(struct ether_header) + (iph->ip_hl << 2)); } break; case IPPROTO_UDP: if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip); struct udphdr *uh = (struct udphdr*)(iph + 1); uh->uh_sum = in_pseudo(iph->ip_src.s_addr, iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen)); uh->uh_sum = in_cksum_skip(mbufc, sizeof(struct ether_header) + ntohs(iph->ip_len), sizeof(struct ether_header) + (iph->ip_hl << 2)); } break; default: break; } } #endif /* INET || INET6 */ static void xnb_stop(struct xnb_softc *xnb) { struct ifnet *ifp; mtx_assert(&xnb->sc_lock, MA_OWNED); ifp = xnb->xnb_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if_link_state_change(ifp, LINK_STATE_DOWN); } static int xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct xnb_softc *xnb = ifp->if_softc; struct ifreq *ifr = (struct ifreq*) data; #ifdef INET struct ifaddr *ifa = (struct ifaddr*)data; #endif int error = 0; switch (cmd) { case SIOCSIFFLAGS: mtx_lock(&xnb->sc_lock); if (ifp->if_flags & IFF_UP) { xnb_ifinit_locked(xnb); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { xnb_stop(xnb); } } /* * Note: netfront sets a variable named xn_if_flags * here, but that variable is never read */ mtx_unlock(&xnb->sc_lock); break; case SIOCSIFADDR: #ifdef INET mtx_lock(&xnb->sc_lock); if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if_link_state_change(ifp, LINK_STATE_DOWN); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if_link_state_change(ifp, LINK_STATE_UP); } arp_ifinit(ifp, ifa); mtx_unlock(&xnb->sc_lock); } else { mtx_unlock(&xnb->sc_lock); #endif error = ether_ioctl(ifp, cmd, data); #ifdef INET } #endif break; case SIOCSIFCAP: mtx_lock(&xnb->sc_lock); if (ifr->ifr_reqcap & IFCAP_TXCSUM) { ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist |= XNB_CSUM_FEATURES; } else { ifp->if_capenable &= ~(IFCAP_TXCSUM); ifp->if_hwassist &= ~(XNB_CSUM_FEATURES); } if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) { ifp->if_capenable |= IFCAP_RXCSUM; } else { ifp->if_capenable &= ~(IFCAP_RXCSUM); } /* * TODO enable TSO4 and LRO once we no longer need * to calculate checksums in software */ #if 0 if (ifr->ifr_reqcap & IFCAP_TSO4) { if (!(IFCAP_TXCSUM & ifp->if_capenable)) { printf("xnb: Xen netif requires that " "TXCSUM be enabled in order " "to use TSO4\n"); error = EINVAL; } else { ifp->if_capenable |= IFCAP_TSO4; ifp->if_hwassist |= CSUM_TSO; } } else { ifp->if_capenable &= ~(IFCAP_TSO4); ifp->if_hwassist &= ~(CSUM_TSO); } if (ifr->ifr_reqcap & IFCAP_LRO) { ifp->if_capenable |= IFCAP_LRO; } else { ifp->if_capenable &= ~(IFCAP_LRO); } #endif mtx_unlock(&xnb->sc_lock); break; case
SIOCSIFMTU: ifp->if_mtu = ifr->ifr_mtu; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xnb_ifinit(xnb); break; case SIOCADDMULTI: case SIOCDELMULTI: case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void xnb_start_locked(struct ifnet *ifp) { netif_rx_back_ring_t *rxb; struct xnb_softc *xnb; struct mbuf *mbufc; RING_IDX req_prod_local; xnb = ifp->if_softc; rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring; if (!xnb->carrier) return; do { int out_of_space = 0; int notify; req_prod_local = rxb->sring->req_prod; xen_rmb(); for (;;) { int error; IF_DEQUEUE(&ifp->if_snd, mbufc); if (mbufc == NULL) break; error = xnb_send(rxb, xnb->otherend_id, mbufc, xnb->rx_gnttab); switch (error) { case EAGAIN: /* * Insufficient space in the ring. * Requeue pkt and send when space is * available. */ IF_PREPEND(&ifp->if_snd, mbufc); /* * Perhaps the frontend missed an IRQ * and went to sleep. Notify it to wake * it up. */ out_of_space = 1; break; case EINVAL: /* OS gave a corrupt packet. Drop it.*/ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* FALLTHROUGH */ default: /* Send succeeded, or packet had error. * Free the packet */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (mbufc) m_freem(mbufc); break; } if (out_of_space != 0) break; } RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify); if ((notify != 0) || (out_of_space != 0)) xen_intr_signal(xnb->xen_intr_handle); rxb->sring->req_event = req_prod_local + 1; xen_mb(); } while (rxb->sring->req_prod != req_prod_local) ; } /** * Sends one packet to the ring. Blocks until the packet is on the ring * \param[in] mbufc Contains one packet to send. Caller must free * \param[in,out] rxb The packet will be pushed onto this ring, but the * otherend will not be notified. * \param[in] otherend The domain ID of the other end of the connection * \retval EAGAIN The ring did not have enough space for the packet. * The ring has not been modified * \param[in,out] gnttab Pointer to enough memory for a grant table. We make * this a function parameter so that we will take less * stack space. 
* \retval EINVAL mbufc was corrupt or not convertible into a pkt */ static int xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc, gnttab_copy_table gnttab) { struct xnb_pkt pkt; int error, n_entries, n_reqs; RING_IDX space; space = ring->sring->req_prod - ring->req_cons; error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space); if (error != 0) return error; n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend); if (n_entries != 0) { int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, gnttab, n_entries); KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n", hv_ret)); } n_reqs = xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring); return 0; } static void xnb_start(struct ifnet *ifp) { struct xnb_softc *xnb; xnb = ifp->if_softc; mtx_lock(&xnb->rx_lock); xnb_start_locked(ifp); mtx_unlock(&xnb->rx_lock); } /* equivalent of network_open() in Linux */ static void xnb_ifinit_locked(struct xnb_softc *xnb) { struct ifnet *ifp; ifp = xnb->xnb_ifp; mtx_assert(&xnb->sc_lock, MA_OWNED); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; xnb_stop(xnb); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if_link_state_change(ifp, LINK_STATE_UP); } static void xnb_ifinit(void *xsc) { struct xnb_softc *xnb = xsc; mtx_lock(&xnb->sc_lock); xnb_ifinit_locked(xnb); mtx_unlock(&xnb->sc_lock); } /** * Callback used by the generic networking code to tell us when our carrier * state has changed. Since we don't have a physical carrier, we don't care */ static int xnb_ifmedia_upd(struct ifnet *ifp) { return (0); } /** * Callback used by the generic networking code to ask us what our carrier * state is. Since we don't have a physical carrier, this is very simple */ static void xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; } /*---------------------------- NewBus Registration ---------------------------*/ static device_method_t xnb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xnb_probe), DEVMETHOD(device_attach, xnb_attach), DEVMETHOD(device_detach, xnb_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, xnb_suspend), DEVMETHOD(device_resume, xnb_resume), /* Xenbus interface */ DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed), { 0, 0 } }; static driver_t xnb_driver = { "xnb", xnb_methods, sizeof(struct xnb_softc), }; devclass_t xnb_devclass; DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0); /*-------------------------- Unit Tests -------------------------------------*/ #ifdef XNB_DEBUG #include "netback_unit_tests.c" #endif
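/*
 * For illustration only: the unit tests included above register the
 * dev.xnb.<unit>.unit_test_results sysctl node (see xnb_setup_sysctl()),
 * so a kernel built with XNB_DEBUG can run them from userland; the unit
 * number in this example is hypothetical:
 *
 *	# sysctl dev.xnb.0.unit_test_results
 */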