diff --git a/sys/arm/freescale/vybrid/vf_ehci.c b/sys/arm/freescale/vybrid/vf_ehci.c
index a3477c743997..3a8b48008449 100644
--- a/sys/arm/freescale/vybrid/vf_ehci.c
+++ b/sys/arm/freescale/vybrid/vf_ehci.c
@@ -1,424 +1,424 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013 Ruslan Bukin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Vybrid Family Universal Serial Bus (USB) Controller
* Chapter 44-45, Vybrid Reference Manual, Rev. 5, 07/2013
*/
#include
#include "opt_bus.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "gpio_if.h"
#include "opt_platform.h"
#define ENUTMILEVEL3 (1 << 15)
#define ENUTMILEVEL2 (1 << 14)
#define GPIO_USB_PWR 134
#define USB_ID 0x000 /* Identification register */
#define USB_HWGENERAL 0x004 /* Hardware General */
#define USB_HWHOST 0x008 /* Host Hardware Parameters */
#define USB_HWDEVICE 0x00C /* Device Hardware Parameters */
#define USB_HWTXBUF 0x010 /* TX Buffer Hardware Parameters */
#define USB_HWRXBUF 0x014 /* RX Buffer Hardware Parameters */
#define USB_HCSPARAMS 0x104 /* Host Controller Structural Parameters */
#define USBPHY_PWD 0x00 /* PHY Power-Down Register */
#define USBPHY_PWD_SET 0x04 /* PHY Power-Down Register */
#define USBPHY_PWD_CLR 0x08 /* PHY Power-Down Register */
#define USBPHY_PWD_TOG 0x0C /* PHY Power-Down Register */
#define USBPHY_TX 0x10 /* PHY Transmitter Control Register */
#define USBPHY_RX 0x20 /* PHY Receiver Control Register */
#define USBPHY_RX_SET 0x24 /* PHY Receiver Control Register */
#define USBPHY_RX_CLR 0x28 /* PHY Receiver Control Register */
#define USBPHY_RX_TOG 0x2C /* PHY Receiver Control Register */
#define USBPHY_CTRL 0x30 /* PHY General Control Register */
#define USBPHY_CTRL_SET 0x34 /* PHY General Control Register */
#define USBPHY_CTRL_CLR 0x38 /* PHY General Control Register */
#define USBPHY_CTRL_TOG 0x3C /* PHY General Control Register */
#define USBPHY_STATUS 0x40 /* PHY Status Register */
#define USBPHY_DEBUG 0x50 /* PHY Debug Register */
#define USBPHY_DEBUG_SET 0x54 /* PHY Debug Register */
#define USBPHY_DEBUG_CLR 0x58 /* PHY Debug Register */
#define USBPHY_DEBUG_TOG 0x5C /* PHY Debug Register */
#define USBPHY_DEBUG0_STATUS 0x60 /* UTMI Debug Status Register 0 */
#define USBPHY_DEBUG1 0x70 /* UTMI Debug Status Register 1 */
#define USBPHY_DEBUG1_SET 0x74 /* UTMI Debug Status Register 1 */
#define USBPHY_DEBUG1_CLR 0x78 /* UTMI Debug Status Register 1 */
#define USBPHY_DEBUG1_TOG 0x7C /* UTMI Debug Status Register 1 */
#define USBPHY_VERSION 0x80 /* UTMI RTL Version */
#define USBPHY_IP 0x90 /* PHY IP Block Register */
#define USBPHY_IP_SET 0x94 /* PHY IP Block Register */
#define USBPHY_IP_CLR 0x98 /* PHY IP Block Register */
#define USBPHY_IP_TOG 0x9C /* PHY IP Block Register */
#define USBPHY_CTRL_SFTRST (1U << 31)
#define USBPHY_CTRL_CLKGATE (1 << 30)
#define USBPHY_DEBUG_CLKGATE (1 << 30)
#define PHY_READ4(_sc, _reg) \
bus_space_read_4(_sc->bst_phy, _sc->bsh_phy, _reg)
#define PHY_WRITE4(_sc, _reg, _val) \
bus_space_write_4(_sc->bst_phy, _sc->bsh_phy, _reg, _val)
#define USBC_READ4(_sc, _reg) \
bus_space_read_4(_sc->bst_usbc, _sc->bsh_usbc, _reg)
#define USBC_WRITE4(_sc, _reg, _val) \
bus_space_write_4(_sc->bst_usbc, _sc->bsh_usbc, _reg, _val)
/* Forward declarations */
static int vybrid_ehci_attach(device_t dev);
static int vybrid_ehci_detach(device_t dev);
static int vybrid_ehci_probe(device_t dev);
/*
 * Per-device state.  The generic EHCI softc must remain the first member
 * so the generic EHCI code can treat a pointer to this as an ehci_softc_t.
 */
struct vybrid_ehci_softc {
	ehci_softc_t base;	/* Generic EHCI state; must be first. */
	device_t dev;
	/*
	 * Resources per vybrid_ehci_spec: [0] EHCI regs, [1] USBC regs,
	 * [2] PHY regs, [3] IRQ.  The array is sized 6 but only indices
	 * 0-3 are filled by bus_alloc_resources().
	 */
	struct resource *res[6];
	bus_space_tag_t bst_phy;	/* PHY register window (res[2]). */
	bus_space_handle_t bsh_phy;
	bus_space_tag_t bst_usbc;	/* USBC register window (res[1]). */
	bus_space_handle_t bsh_usbc;
};
static struct resource_spec vybrid_ehci_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ SYS_RES_MEMORY, 1, RF_ACTIVE },
{ SYS_RES_MEMORY, 2, RF_ACTIVE },
{ SYS_RES_IRQ, 0, RF_ACTIVE },
{ -1, 0 }
};
static device_method_t ehci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, vybrid_ehci_probe),
DEVMETHOD(device_attach, vybrid_ehci_attach),
DEVMETHOD(device_detach, vybrid_ehci_detach),
DEVMETHOD(device_suspend, bus_generic_suspend),
DEVMETHOD(device_resume, bus_generic_resume),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
/* Bus interface */
DEVMETHOD(bus_print_child, bus_generic_print_child),
{ 0, 0 }
};
/* kobj_class definition */
static driver_t ehci_driver = {
"ehci",
ehci_methods,
sizeof(ehci_softc_t)
};
DRIVER_MODULE(vybrid_ehci, simplebus, ehci_driver, 0, 0);
MODULE_DEPEND(vybrid_ehci, usb, 1, 1, 1);
/*
 * Vendor hook run by the generic EHCI code after a controller reset:
 * a reset clears USBMODE, so force the dual-role core back to host mode.
 */
static void
vybrid_ehci_post_reset(struct ehci_softc *ehci_softc)
{
	uint32_t mode;

	mode = EOREAD4(ehci_softc, EHCI_USBMODE_NOLPM);
	mode = (mode & ~EHCI_UM_CM) | EHCI_UM_CM_HOST;
	EOWRITE4(ehci_softc, EHCI_USBMODE_NOLPM, mode);
}
/*
* Public methods
*/
/*
 * Probe: match enabled FDT nodes compatible with the Vybrid EHCI core.
 */
static int
vybrid_ehci_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (!ofw_bus_is_compatible(dev, "fsl,mvf600-usb-ehci"))
		return (ENXIO);

	device_set_desc(dev, "Vybrid Family integrated USB controller");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Reset and power up the USB PHY and switch on board USB power.
 *
 * Returns 0 on success, or 1 if the GPIO controller needed for the USB
 * power pin is unavailable (callers only test for non-zero).
 *
 * NOTE: 'reg' must be uint32_t, not int — USBPHY_CTRL_SFTRST is bit 31
 * (1U << 31) and manipulating it in a signed int is at best
 * implementation-defined.
 */
static int
phy_init(struct vybrid_ehci_softc *esc)
{
	device_t sc_gpio_dev;
	uint32_t reg;

	/* Reset phy */
	reg = PHY_READ4(esc, USBPHY_CTRL);
	reg |= (USBPHY_CTRL_SFTRST);
	PHY_WRITE4(esc, USBPHY_CTRL, reg);

	/* Minimum reset time */
	DELAY(10000);

	/* Release reset and ungate the PHY clock. */
	reg &= ~(USBPHY_CTRL_SFTRST | USBPHY_CTRL_CLKGATE);
	PHY_WRITE4(esc, USBPHY_CTRL, reg);

	/* Enable the UTMI level 2/3 bits via the SET register. */
	reg = (ENUTMILEVEL2 | ENUTMILEVEL3);
	PHY_WRITE4(esc, USBPHY_CTRL_SET, reg);

	/* Get the GPIO device, we need this to give power to USB */
	sc_gpio_dev = devclass_get_device(devclass_find("gpio"), 0);
	if (sc_gpio_dev == NULL) {
		device_printf(esc->dev, "Error: failed to get the GPIO dev\n");
		return (1);
	}

	/* Give power to USB: drive the board's USB power pin high. */
	GPIO_PIN_SETFLAGS(sc_gpio_dev, GPIO_USB_PWR, GPIO_PIN_OUTPUT);
	GPIO_PIN_SET(sc_gpio_dev, GPIO_USB_PWR, GPIO_PIN_HIGH);

	/* Power up PHY: clear all power-down bits. */
	PHY_WRITE4(esc, USBPHY_PWD, 0x00);

	/* Ungate clocks */
	reg = PHY_READ4(esc, USBPHY_DEBUG);
	reg &= ~(USBPHY_DEBUG_CLKGATE);
	PHY_WRITE4(esc, USBPHY_DEBUG, reg);

	return (0);
}
/*
 * Attach: map the three register windows, bring up the PHY, hook the
 * interrupt and hand the controller to the generic EHCI code.
 *
 * Bug fixes vs. the original:
 *  - error paths tore down the IRQ with esc->res[5], which is never
 *    allocated; the IRQ is the fourth spec entry, res[3];
 *  - the operational-register subregion started at offset 0x100 but was
 *    created with the full window size, overrunning the mapping.
 *
 * NOTE(review): early failure paths still leak the allocated resources
 * and DMA memory — confirm whether detach is expected to clean up.
 */
static int
vybrid_ehci_attach(device_t dev)
{
	struct vybrid_ehci_softc *esc;
	ehci_softc_t *sc;
	bus_space_handle_t bsh;
	int err;
	uint32_t reg;

	esc = device_get_softc(dev);
	esc->dev = dev;

	sc = &esc->base;
	sc->sc_bus.parent = dev;
	sc->sc_bus.devices = sc->sc_devices;
	sc->sc_bus.devices_max = EHCI_MAX_DEVICES;
	sc->sc_bus.dma_bits = 32;

	if (bus_alloc_resources(dev, vybrid_ehci_spec, esc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* EHCI registers */
	sc->sc_io_tag = rman_get_bustag(esc->res[0]);
	bsh = rman_get_bushandle(esc->res[0]);
	sc->sc_io_size = rman_get_size(esc->res[0]);

	/* USBC registers */
	esc->bst_usbc = rman_get_bustag(esc->res[1]);
	esc->bsh_usbc = rman_get_bushandle(esc->res[1]);

	/* PHY registers */
	esc->bst_phy = rman_get_bustag(esc->res[2]);
	esc->bsh_phy = rman_get_bushandle(esc->res[2]);

	/* get all DMA memory */
	if (usb_bus_mem_alloc_all(&sc->sc_bus, USB_GET_DMA_TAG(dev),
	    &ehci_iterate_hw_softc))
		return (ENXIO);

	if (phy_init(esc)) {
		device_printf(dev, "Could not setup PHY\n");
		return (1);
	}

	/*
	 * Set handle to USB related registers subregion used by
	 * generic EHCI driver.  The operational registers start 0x100
	 * into the window, so the subregion is that much smaller than
	 * the full mapping.
	 */
	err = bus_space_subregion(sc->sc_io_tag, bsh, 0x100,
	    sc->sc_io_size - 0x100, &sc->sc_io_hdl);
	if (err != 0)
		return (ENXIO);

	/* Setup interrupt handler; the IRQ is the fourth resource. */
	err = bus_setup_intr(dev, esc->res[3], INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, (driver_intr_t *)ehci_interrupt, sc,
	    &sc->sc_intr_hdl);
	if (err) {
		device_printf(dev, "Could not setup irq, "
		    "%d\n", err);
		return (1);
	}

	/* Add USB device */
	sc->sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
	if (!sc->sc_bus.bdev) {
		device_printf(dev, "Could not add USB device\n");
		/* Tear down using the IRQ resource actually allocated. */
		err = bus_teardown_intr(dev, esc->res[3],
		    sc->sc_intr_hdl);
		if (err)
			device_printf(dev, "Could not tear down irq,"
			    " %d\n", err);
		return (1);
	}

	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);

	strlcpy(sc->sc_vendor, "Freescale", sizeof(sc->sc_vendor));

	/*
	 * Set host mode.  Offset 0xA8 from the operational-register
	 * subregion; the same CM field vybrid_ehci_post_reset() sets via
	 * EHCI_USBMODE_NOLPM — presumably USBMODE; confirm against RM.
	 */
	reg = bus_space_read_4(sc->sc_io_tag, sc->sc_io_hdl, 0xA8);
	reg |= 0x3;
	bus_space_write_4(sc->sc_io_tag, sc->sc_io_hdl, 0xA8, reg);

	/* Set flags and callbacks*/
	sc->sc_flags |= EHCI_SCFLG_TT | EHCI_SCFLG_NORESTERM;
	sc->sc_vendor_post_reset = vybrid_ehci_post_reset;
	sc->sc_vendor_get_port_speed = ehci_get_port_speed_portsc;

	err = ehci_init(sc);
	if (!err) {
		sc->sc_flags |= EHCI_SCFLG_DONEINIT;
		err = device_probe_and_attach(sc->sc_bus.bdev);
	} else {
		device_printf(dev, "USB init failed err=%d\n", err);

		device_delete_child(dev, sc->sc_bus.bdev);
		sc->sc_bus.bdev = NULL;

		/* Tear down using the IRQ resource actually allocated. */
		err = bus_teardown_intr(dev, esc->res[3],
		    sc->sc_intr_hdl);
		if (err)
			device_printf(dev, "Could not tear down irq,"
			    " %d\n", err);
		return (1);
	}

	return (0);
}
/*
 * Detach: undo vybrid_ehci_attach().  Children are detached first; if
 * that fails the device is left intact.  (This block also resolves the
 * raw diff markers in the source to the post-patch bus_generic_detach()
 * form, and fixes the teardown to use res[3] — the IRQ actually
 * allocated by vybrid_ehci_spec — instead of the never-allocated
 * res[5].)
 */
static int
vybrid_ehci_detach(device_t dev)
{
	struct vybrid_ehci_softc *esc;
	ehci_softc_t *sc;
	int err;

	esc = device_get_softc(dev);
	sc = &esc->base;

	/* First detach all children; we can't detach if that fails. */
	if ((err = bus_generic_detach(dev)) != 0)
		return (err);

	/*
	 * only call ehci_detach() after ehci_init()
	 */
	if (sc->sc_flags & EHCI_SCFLG_DONEINIT) {
		ehci_detach(sc);
		sc->sc_flags &= ~EHCI_SCFLG_DONEINIT;
	}

	/*
	 * Disable interrupts that might have been switched on in
	 * ehci_init.
	 */
	if (sc->sc_io_tag && sc->sc_io_hdl)
		bus_space_write_4(sc->sc_io_tag, sc->sc_io_hdl,
		    EHCI_USBINTR, 0);

	if (esc->res[3] && sc->sc_intr_hdl) {
		err = bus_teardown_intr(dev, esc->res[3],
		    sc->sc_intr_hdl);
		if (err) {
			device_printf(dev, "Could not tear down irq,"
			    " %d\n", err);
			return (err);
		}
		sc->sc_intr_hdl = NULL;
	}

	usb_bus_mem_free_all(&sc->sc_bus, &ehci_iterate_hw_softc);
	bus_release_resources(dev, vybrid_ehci_spec, esc->res);

	return (0);
}
diff --git a/sys/arm/mv/a37x0_spi.c b/sys/arm/mv/a37x0_spi.c
index bc47da2f3e6e..027dd57677a3 100644
--- a/sys/arm/mv/a37x0_spi.c
+++ b/sys/arm/mv/a37x0_spi.c
@@ -1,490 +1,490 @@
/*-
* Copyright (c) 2018, 2019 Rubicon Communications, LLC (Netgate)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "spibus_if.h"
/* Per-controller state; sc_mtx guards everything below it. */
struct a37x0_spi_softc {
	device_t sc_dev;
	struct mtx sc_mtx;	/* Serializes transfers and the intr handler. */
	struct resource *sc_mem_res;	/* Register window. */
	struct resource *sc_irq_res;	/* Controller interrupt. */
	struct spi_command *sc_cmd;	/* In-flight command, NULL when idle. */
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	uint32_t sc_len;	/* Total bytes (cmd + data) of current xfer. */
	uint32_t sc_maxfreq;	/* DT "spi-max-frequency", 0 if absent. */
	uint32_t sc_read;	/* Bytes received so far. */
	uint32_t sc_flags;	/* A37X0_SPI_BUSY while a xfer owns the HW. */
	uint32_t sc_written;	/* Bytes transmitted so far. */
	void *sc_intrhand;
};
#define A37X0_SPI_WRITE(_sc, _off, _val) \
bus_space_write_4((_sc)->sc_bst, (_sc)->sc_bsh, (_off), (_val))
#define A37X0_SPI_READ(_sc, _off) \
bus_space_read_4((_sc)->sc_bst, (_sc)->sc_bsh, (_off))
#define A37X0_SPI_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define A37X0_SPI_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define A37X0_SPI_BUSY (1 << 0)
/*
* While the A3700 utils from Marvell usually sets the QSF clock to 200MHz,
* there is no guarantee that it is correct without the proper clock framework
* to retrieve the actual TBG and PLL settings.
*/
#define A37X0_SPI_CLOCK 200000000 /* QSF Clock 200MHz */
#define A37X0_SPI_CONTROL 0x0
#define A37X0_SPI_CS_SHIFT 16
#define A37X0_SPI_CS_MASK (0xf << A37X0_SPI_CS_SHIFT)
#define A37X0_SPI_CONF 0x4
#define A37X0_SPI_WFIFO_THRS_SHIFT 28
#define A37X0_SPI_RFIFO_THRS_SHIFT 24
#define A37X0_SPI_AUTO_CS_EN (1 << 20)
#define A37X0_SPI_DMA_WR_EN (1 << 19)
#define A37X0_SPI_DMA_RD_EN (1 << 18)
#define A37X0_SPI_FIFO_MODE (1 << 17)
#define A37X0_SPI_SRST (1 << 16)
#define A37X0_SPI_XFER_START (1 << 15)
#define A37X0_SPI_XFER_STOP (1 << 14)
#define A37X0_SPI_INSTR_PIN (1 << 13)
#define A37X0_SPI_ADDR_PIN (1 << 12)
#define A37X0_SPI_DATA_PIN_MASK 0x3
#define A37X0_SPI_DATA_PIN_SHIFT 10
#define A37X0_SPI_FIFO_FLUSH (1 << 9)
#define A37X0_SPI_RW_EN (1 << 8)
#define A37X0_SPI_CLK_POL (1 << 7)
#define A37X0_SPI_CLK_PHASE (1 << 6)
#define A37X0_SPI_BYTE_LEN (1 << 5)
#define A37X0_SPI_PSC_MASK 0x1f
#define A37X0_SPI_DATA_OUT 0x8
#define A37X0_SPI_DATA_IN 0xc
#define A37X0_SPI_INTR_STAT 0x28
#define A37X0_SPI_INTR_MASK 0x2c
#define A37X0_SPI_RDY (1 << 1)
#define A37X0_SPI_XFER_DONE (1 << 0)
static struct ofw_compat_data compat_data[] = {
{ "marvell,armada-3700-spi", 1 },
{ NULL, 0 }
};
static void a37x0_spi_intr(void *);
/*
 * Poll register 'reg' until all bits in 'mask' read back as zero, with
 * 100us between polls.  Returns 0 on success, ETIMEDOUT after 'timeout'
 * attempts.
 */
static int
a37x0_spi_wait(struct a37x0_spi_softc *sc, int timeout, uint32_t reg,
    uint32_t mask)
{
	int tries;

	for (tries = 0; tries < timeout; tries++) {
		if ((A37X0_SPI_READ(sc, reg) & mask) == 0)
			return (0);
		DELAY(100);
	}

	return (ETIMEDOUT);
}
/*
 * Probe: match enabled FDT nodes listed in compat_data.
 */
static int
a37x0_spi_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) {
		device_set_desc(dev, "Armada 37x0 SPI controller");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
/*
 * Attach: map registers, quiesce/reset the controller, and register the
 * interrupt handler and spibus child.
 *
 * Bug fix vs. the original: the mutex is now initialized *before*
 * bus_setup_intr().  a37x0_spi_intr() locks sc_mtx, so an interrupt
 * firing between setup and mtx_init() would have used an uninitialized
 * mutex.  The mutex is also destroyed on the setup_intr failure path.
 */
static int
a37x0_spi_attach(device_t dev)
{
	int err, rid;
	pcell_t maxfreq;
	struct a37x0_spi_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_mem_res) {
		device_printf(dev, "cannot allocate memory window\n");
		return (ENXIO);
	}

	sc->sc_bst = rman_get_bustag(sc->sc_mem_res);
	sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res);

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->sc_irq_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
		device_printf(dev, "cannot allocate interrupt\n");
		return (ENXIO);
	}

	/* Make sure that no CS is asserted. */
	reg = A37X0_SPI_READ(sc, A37X0_SPI_CONTROL);
	A37X0_SPI_WRITE(sc, A37X0_SPI_CONTROL, reg & ~A37X0_SPI_CS_MASK);

	/* Reset FIFO. */
	reg = A37X0_SPI_READ(sc, A37X0_SPI_CONF);
	A37X0_SPI_WRITE(sc, A37X0_SPI_CONF, reg | A37X0_SPI_FIFO_FLUSH);
	err = a37x0_spi_wait(sc, 20, A37X0_SPI_CONF, A37X0_SPI_FIFO_FLUSH);
	if (err != 0) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
		device_printf(dev, "cannot flush the controller fifo.\n");
		return (ENXIO);
	}

	/* Reset the Controller. */
	reg = A37X0_SPI_READ(sc, A37X0_SPI_CONF);
	A37X0_SPI_WRITE(sc, A37X0_SPI_CONF, reg | A37X0_SPI_SRST);
	DELAY(1000);

	/* Enable the single byte IO, disable FIFO. */
	reg &= ~(A37X0_SPI_FIFO_MODE | A37X0_SPI_BYTE_LEN);
	A37X0_SPI_WRITE(sc, A37X0_SPI_CONF, reg);

	/* Disable and clear interrupts. */
	A37X0_SPI_WRITE(sc, A37X0_SPI_INTR_MASK, 0);
	reg = A37X0_SPI_READ(sc, A37X0_SPI_INTR_STAT);
	A37X0_SPI_WRITE(sc, A37X0_SPI_INTR_STAT, reg);

	/*
	 * The interrupt handler takes sc_mtx, so the mutex must exist
	 * before the handler can possibly run.
	 */
	mtx_init(&sc->sc_mtx, "a37x0_spi", NULL, MTX_DEF);

	/* Hook up our interrupt handler. */
	if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, a37x0_spi_intr, sc, &sc->sc_intrhand)) {
		mtx_destroy(&sc->sc_mtx);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
		device_printf(dev, "cannot setup the interrupt handler\n");
		return (ENXIO);
	}

	/* Read the controller max-frequency. */
	if (OF_getencprop(ofw_bus_get_node(dev), "spi-max-frequency", &maxfreq,
	    sizeof(maxfreq)) == -1)
		maxfreq = 0;
	sc->sc_maxfreq = maxfreq;

	device_add_child(dev, "spibus", DEVICE_UNIT_ANY);

	/* Probe and attach the spibus when interrupts are available. */
	bus_delayed_attach_children(dev);

	return (0);
}
/*
 * Detach: children first, then tear down the interrupt *before*
 * destroying the mutex the handler locks (the original destroyed the
 * mutex while the handler could still run).  Also resolves the raw diff
 * markers in the source to the post-patch bus_generic_detach() form.
 */
static int
a37x0_spi_detach(device_t dev)
{
	int err;
	struct a37x0_spi_softc *sc;

	if ((err = bus_generic_detach(dev)) != 0)
		return (err);
	sc = device_get_softc(dev);
	/* Quiesce the interrupt handler before destroying its mutex. */
	if (sc->sc_intrhand)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand);
	if (sc->sc_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
	if (sc->sc_mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
/*
 * Pull one byte from the data-in register into the current command:
 * command bytes land in rx_cmd, the remainder in rx_data.
 */
static __inline void
a37x0_spi_rx_byte(struct a37x0_spi_softc *sc)
{
	struct spi_command *cmd;
	uint32_t idx;
	uint8_t *buf;

	/* Whole message already received. */
	if (sc->sc_read == sc->sc_len)
		return;

	cmd = sc->sc_cmd;
	idx = sc->sc_read++;
	if (idx < cmd->rx_cmd_sz)
		buf = (uint8_t *)cmd->rx_cmd;
	else {
		buf = (uint8_t *)cmd->rx_data;
		idx -= cmd->rx_cmd_sz;
	}
	buf[idx] = A37X0_SPI_READ(sc, A37X0_SPI_DATA_IN) & 0xff;
}
/*
 * Push the next byte of the current command to the data-out register:
 * command bytes go out first, then the payload.
 */
static __inline void
a37x0_spi_tx_byte(struct a37x0_spi_softc *sc)
{
	struct spi_command *cmd;
	uint32_t idx;
	uint8_t *buf;

	/* Whole message already queued. */
	if (sc->sc_written == sc->sc_len)
		return;

	cmd = sc->sc_cmd;
	idx = sc->sc_written++;
	if (idx < cmd->tx_cmd_sz)
		buf = (uint8_t *)cmd->tx_cmd;
	else {
		buf = (uint8_t *)cmd->tx_data;
		idx -= cmd->tx_cmd_sz;
	}
	A37X0_SPI_WRITE(sc, A37X0_SPI_DATA_OUT, buf[idx]);
}
/*
 * Program the clock prescaler for the requested SPI frequency, clamped
 * to the DT-supplied controller maximum (sc_maxfreq) when one was set.
 * The prescaler is rounded up so the bus never runs faster than asked.
 */
static __inline void
a37x0_spi_set_clock(struct a37x0_spi_softc *sc, uint32_t clock)
{
	uint32_t conf, prescaler;

	if (sc->sc_maxfreq > 0 && clock > sc->sc_maxfreq)
		clock = sc->sc_maxfreq;

	prescaler = A37X0_SPI_CLOCK / clock;
	if (A37X0_SPI_CLOCK % clock != 0)
		prescaler++;

	conf = A37X0_SPI_READ(sc, A37X0_SPI_CONF);
	conf = (conf & ~A37X0_SPI_PSC_MASK) |
	    (prescaler & A37X0_SPI_PSC_MASK);
	A37X0_SPI_WRITE(sc, A37X0_SPI_CONF, conf);
}
/*
 * Select single (1), dual (2) or quad (4) data-pin mode.  The hardware
 * field encodes this as npins / 2 (0, 1 or 2).
 */
static __inline void
a37x0_spi_set_pins(struct a37x0_spi_softc *sc, uint32_t npins)
{
	uint32_t conf;

	conf = A37X0_SPI_READ(sc, A37X0_SPI_CONF);
	conf &= ~(A37X0_SPI_DATA_PIN_MASK << A37X0_SPI_DATA_PIN_SHIFT);
	conf |= A37X0_SPI_INSTR_PIN | A37X0_SPI_ADDR_PIN;
	conf |= (npins / 2) << A37X0_SPI_DATA_PIN_SHIFT;
	A37X0_SPI_WRITE(sc, A37X0_SPI_CONF, conf);
}
/*
 * Program the clock polarity/phase bits for SPI mode 0-3.  Any other
 * value leaves the configuration untouched (callers validate the mode).
 */
static __inline void
a37x0_spi_set_mode(struct a37x0_spi_softc *sc, uint32_t mode)
{
	uint32_t conf;

	conf = A37X0_SPI_READ(sc, A37X0_SPI_CONF);
	if (mode == 0)
		conf &= ~(A37X0_SPI_CLK_PHASE | A37X0_SPI_CLK_POL);
	else if (mode == 1) {
		conf &= ~A37X0_SPI_CLK_POL;
		conf |= A37X0_SPI_CLK_PHASE;
	} else if (mode == 2) {
		conf &= ~A37X0_SPI_CLK_PHASE;
		conf |= A37X0_SPI_CLK_POL;
	} else if (mode == 3)
		conf |= (A37X0_SPI_CLK_PHASE | A37X0_SPI_CLK_POL);
	A37X0_SPI_WRITE(sc, A37X0_SPI_CONF, conf);
}
/*
 * Interrupt handler: on XFER_DONE, pull in one received byte, queue the
 * next TX byte, and wake the transfer thread once the whole message has
 * moved.
 *
 * NOTE(review): a37x0_spi_transfer() polls XFER_DONE itself and both
 * attach and transfer leave A37X0_SPI_INTR_MASK at 0, so as written this
 * path appears to only filter stray interrupts — confirm intended use.
 */
static void
a37x0_spi_intr(void *arg)
{
	struct a37x0_spi_softc *sc;
	uint32_t status;

	sc = (struct a37x0_spi_softc *)arg;
	A37X0_SPI_LOCK(sc);

	/* Filter stray interrupts. */
	if ((sc->sc_flags & A37X0_SPI_BUSY) == 0) {
		A37X0_SPI_UNLOCK(sc);
		return;
	}
	status = A37X0_SPI_READ(sc, A37X0_SPI_INTR_STAT);
	if (status & A37X0_SPI_XFER_DONE)
		a37x0_spi_rx_byte(sc);

	/* Clear the interrupt status. */
	A37X0_SPI_WRITE(sc, A37X0_SPI_INTR_STAT, status);

	/* Check for end of transfer; wake the sleeper in _transfer(). */
	if (sc->sc_written == sc->sc_len && sc->sc_read == sc->sc_len)
		wakeup(sc->sc_dev);
	else
		a37x0_spi_tx_byte(sc);

	A37X0_SPI_UNLOCK(sc);
}
/*
 * Execute one SPI command for 'child' by polled, byte-at-a-time I/O.
 * Validates CS (0-3), clock (non-zero) and mode (0-3), claims the
 * controller under sc_mtx, shifts the command + data out/in, then
 * releases CS and wakes the next waiter.
 *
 * Returns 0 on success, EINVAL on bad parameters, EIO if the controller
 * never signalled XFER_DONE.
 *
 * Bug fix vs. the original: 'timeout' was read uninitialized when
 * sc_len == 0 (the transfer loop never ran) — undefined behavior.  It
 * is now initialized so an empty transfer returns success.
 */
static int
a37x0_spi_transfer(device_t dev, device_t child, struct spi_command *cmd)
{
	int timeout;
	struct a37x0_spi_softc *sc;
	uint32_t clock, cs, mode, reg;

	KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz,
	    ("TX/RX command sizes should be equal"));
	KASSERT(cmd->tx_data_sz == cmd->rx_data_sz,
	    ("TX/RX data sizes should be equal"));

	/* Get the proper data for this child. */
	spibus_get_cs(child, &cs);
	cs &= ~SPIBUS_CS_HIGH;
	if (cs > 3) {
		device_printf(dev,
		    "Invalid CS %d requested by %s\n", cs,
		    device_get_nameunit(child));
		return (EINVAL);
	}
	spibus_get_clock(child, &clock);
	if (clock == 0) {
		device_printf(dev,
		    "Invalid clock %uHz requested by %s\n", clock,
		    device_get_nameunit(child));
		return (EINVAL);
	}
	spibus_get_mode(child, &mode);
	if (mode > 3) {
		device_printf(dev,
		    "Invalid mode %u requested by %s\n", mode,
		    device_get_nameunit(child));
		return (EINVAL);
	}

	sc = device_get_softc(dev);
	A37X0_SPI_LOCK(sc);

	/* Wait until the controller is free. */
	while (sc->sc_flags & A37X0_SPI_BUSY)
		mtx_sleep(dev, &sc->sc_mtx, 0, "a37x0_spi", 0);

	/* Now we have control over SPI controller. */
	sc->sc_flags = A37X0_SPI_BUSY;

	/* Set transfer mode and clock. */
	a37x0_spi_set_mode(sc, mode);
	a37x0_spi_set_pins(sc, 1);
	a37x0_spi_set_clock(sc, clock);

	/* Set CS. */
	A37X0_SPI_WRITE(sc, A37X0_SPI_CONTROL, 1 << (A37X0_SPI_CS_SHIFT + cs));

	/* Save a pointer to the SPI command. */
	sc->sc_cmd = cmd;
	sc->sc_read = 0;
	sc->sc_written = 0;
	sc->sc_len = cmd->tx_cmd_sz + cmd->tx_data_sz;

	/* Clear interrupts. */
	reg = A37X0_SPI_READ(sc, A37X0_SPI_INTR_STAT);
	A37X0_SPI_WRITE(sc, A37X0_SPI_INTR_STAT, reg);

	/* Non-zero in case sc_len == 0 and the loop below never runs. */
	timeout = 1000;
	while ((sc->sc_len - sc->sc_written) > 0) {
		/*
		 * Write to start the transmission and read the byte
		 * back when ready.
		 */
		a37x0_spi_tx_byte(sc);
		timeout = 1000;
		while (--timeout > 0) {
			reg = A37X0_SPI_READ(sc, A37X0_SPI_CONTROL);
			if (reg & A37X0_SPI_XFER_DONE)
				break;
			DELAY(1);
		}
		if (timeout == 0)
			break;
		a37x0_spi_rx_byte(sc);
	}

	/* Stop the controller. */
	reg = A37X0_SPI_READ(sc, A37X0_SPI_CONTROL);
	A37X0_SPI_WRITE(sc, A37X0_SPI_CONTROL, reg & ~A37X0_SPI_CS_MASK);
	A37X0_SPI_WRITE(sc, A37X0_SPI_INTR_MASK, 0);

	/* Release the controller and wakeup the next thread waiting for it. */
	sc->sc_flags = 0;
	wakeup_one(dev);
	A37X0_SPI_UNLOCK(sc);

	return ((timeout == 0) ? EIO : 0);
}
/*
 * ofw_bus_get_node method: hand the controller's own FDT node to the
 * spibus child so slave devices are enumerated from our subnodes.
 */
static phandle_t
a37x0_spi_get_node(device_t bus, device_t dev)
{

	return (ofw_bus_get_node(bus));
}
static device_method_t a37x0_spi_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, a37x0_spi_probe),
DEVMETHOD(device_attach, a37x0_spi_attach),
DEVMETHOD(device_detach, a37x0_spi_detach),
/* SPI interface */
DEVMETHOD(spibus_transfer, a37x0_spi_transfer),
/* ofw_bus interface */
DEVMETHOD(ofw_bus_get_node, a37x0_spi_get_node),
DEVMETHOD_END
};
static driver_t a37x0_spi_driver = {
"spi",
a37x0_spi_methods,
sizeof(struct a37x0_spi_softc),
};
DRIVER_MODULE(a37x0_spi, simplebus, a37x0_spi_driver, 0, 0);
diff --git a/sys/arm/nvidia/tegra_ehci.c b/sys/arm/nvidia/tegra_ehci.c
index 15f086a6c3c0..59bf8646385a 100644
--- a/sys/arm/nvidia/tegra_ehci.c
+++ b/sys/arm/nvidia/tegra_ehci.c
@@ -1,315 +1,315 @@
/*-
* Copyright (c) 2016 Michal Meloun
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
/*
* EHCI driver for Tegra SoCs.
*/
#include "opt_bus.h"
#include "opt_platform.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "usbdevs.h"
#define TEGRA_EHCI_REG_OFF 0x100
#define TEGRA_EHCI_REG_SIZE 0x100
/* Compatible devices. */
#define TEGRA124_EHCI 1
#define TEGRA210_EHCI 2
static struct ofw_compat_data compat_data[] = {
{"nvidia,tegra124-ehci", (uintptr_t)TEGRA124_EHCI},
{"nvidia,tegra210-ehci", (uintptr_t)TEGRA210_EHCI},
{NULL, 0},
};
struct tegra_ehci_softc {
ehci_softc_t ehci_softc;
device_t dev;
struct resource *ehci_mem_res; /* EHCI core regs. */
struct resource *ehci_irq_res; /* EHCI core IRQ. */
int usb_alloc_called;
clk_t clk;
phy_t phy;
hwreset_t reset;
};
/*
 * Vendor hook run by the generic EHCI code after a controller reset:
 * a reset clears USBMODE, so force the dual-role core back to host mode.
 */
static void
tegra_ehci_post_reset(struct ehci_softc *ehci_softc)
{
	uint32_t mode;

	mode = EOREAD4(ehci_softc, EHCI_USBMODE_LPM);
	mode = (mode & ~EHCI_UM_CM) | EHCI_UM_CM_HOST;
	device_printf(ehci_softc->sc_bus.bdev, "set host controller mode\n");
	EOWRITE4(ehci_softc, EHCI_USBMODE_LPM, mode);
}
/*
 * Probe: match enabled FDT nodes listed in compat_data.
 */
static int
tegra_ehci_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Nvidia Tegra EHCI controller");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Detach: children first via bus_generic_detach(), then release clocks,
 * the generic EHCI state, interrupt, resources and DMA memory.  Also
 * used as the error-unwind path by tegra_ehci_attach(), so every step
 * is guarded and safe on a partially attached device.  (This block
 * resolves the raw diff markers in the source to the post-patch text.)
 */
static int
tegra_ehci_detach(device_t dev)
{
	struct tegra_ehci_softc *sc;
	ehci_softc_t *esc;
	int error;

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	sc = device_get_softc(dev);
	esc = &sc->ehci_softc;
	if (sc->clk != NULL)
		clk_release(sc->clk);
	if (esc->sc_flags & EHCI_SCFLG_DONEINIT)
		ehci_detach(esc);
	if (esc->sc_intr_hdl != NULL)
		bus_teardown_intr(dev, esc->sc_irq_res,
		    esc->sc_intr_hdl);
	if (sc->ehci_irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0,
		    sc->ehci_irq_res);
	if (sc->ehci_mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->ehci_mem_res);
	if (sc->usb_alloc_called)
		usb_bus_mem_free_all(&esc->sc_bus, &ehci_iterate_hw_softc);

	return (0);
}
/*
 * Attach: allocate resources, bring up reset/clock/PHY, and hand the
 * controller to the generic EHCI code.  All failures funnel through
 * 'out', which unwinds via tegra_ehci_detach().
 *
 * Bug fix vs. the original: when device_add_child() failed, 'rv' still
 * held 0 from the last successful call, so attach reported success on a
 * broken device.  It now sets rv = ENXIO before unwinding.
 */
static int
tegra_ehci_attach(device_t dev)
{
	struct tegra_ehci_softc *sc;
	ehci_softc_t *esc;
	int rv, rid;
	uint64_t freq;

	sc = device_get_softc(dev);
	sc->dev = dev;
	esc = &sc->ehci_softc;

	/* Allocate resources. */
	rid = 0;
	sc->ehci_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->ehci_mem_res == NULL) {
		device_printf(dev, "Cannot allocate memory resources\n");
		rv = ENXIO;
		goto out;
	}

	rid = 0;
	sc->ehci_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->ehci_irq_res == NULL) {
		device_printf(dev, "Cannot allocate IRQ resources\n");
		rv = ENXIO;
		goto out;
	}

	rv = hwreset_get_by_ofw_name(dev, 0, "usb", &sc->reset);
	if (rv != 0) {
		device_printf(dev, "Cannot get reset\n");
		rv = ENXIO;
		goto out;
	}

	rv = phy_get_by_ofw_property(sc->dev, 0, "nvidia,phy", &sc->phy);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'nvidia,phy' phy\n");
		rv = ENXIO;
		goto out;
	}

	rv = clk_get_by_ofw_index(sc->dev, 0, 0, &sc->clk);
	if (rv != 0) {
		device_printf(dev, "Cannot get clock\n");
		goto out;
	}

	rv = clk_enable(sc->clk);
	if (rv != 0) {
		device_printf(dev, "Cannot enable clock\n");
		goto out;
	}

	freq = 0;
	rv = clk_get_freq(sc->clk, &freq);
	if (rv != 0) {
		device_printf(dev, "Cannot get clock frequency\n");
		goto out;
	}

	rv = hwreset_deassert(sc->reset);
	if (rv != 0) {
		device_printf(dev, "Cannot clear reset: %d\n", rv);
		rv = ENXIO;
		goto out;
	}

	rv = phy_enable(sc->phy);
	if (rv != 0) {
		device_printf(dev, "Cannot enable phy: %d\n", rv);
		goto out;
	}

	/* Fill data for EHCI driver. */
	esc->sc_vendor_get_port_speed = ehci_get_port_speed_hostc;
	esc->sc_vendor_post_reset = tegra_ehci_post_reset;
	esc->sc_io_tag = rman_get_bustag(sc->ehci_mem_res);
	esc->sc_bus.parent = dev;
	esc->sc_bus.devices = esc->sc_devices;
	esc->sc_bus.devices_max = EHCI_MAX_DEVICES;
	esc->sc_bus.dma_bits = 32;

	/* Allocate all DMA memory. */
	rv = usb_bus_mem_alloc_all(&esc->sc_bus, USB_GET_DMA_TAG(dev),
	    &ehci_iterate_hw_softc);
	sc->usb_alloc_called = 1;
	if (rv != 0) {
		device_printf(dev, "usb_bus_mem_alloc_all() failed\n");
		rv = ENOMEM;
		goto out;
	}

	/*
	 * Set handle to USB related registers subregion used by
	 * generic EHCI driver.
	 */
	rv = bus_space_subregion(esc->sc_io_tag,
	    rman_get_bushandle(sc->ehci_mem_res),
	    TEGRA_EHCI_REG_OFF, TEGRA_EHCI_REG_SIZE, &esc->sc_io_hdl);
	if (rv != 0) {
		device_printf(dev, "Could not create USB memory subregion\n");
		rv = ENXIO;
		goto out;
	}

	/* Setup interrupt handler. */
	rv = bus_setup_intr(dev, sc->ehci_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, (driver_intr_t *)ehci_interrupt, esc, &esc->sc_intr_hdl);
	if (rv != 0) {
		device_printf(dev, "Could not setup IRQ\n");
		goto out;
	}

	/* Add USB bus device. */
	esc->sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
	if (esc->sc_bus.bdev == NULL) {
		device_printf(dev, "Could not add USB device\n");
		/* Without this, rv would still be 0 from bus_setup_intr. */
		rv = ENXIO;
		goto out;
	}
	device_set_ivars(esc->sc_bus.bdev, &esc->sc_bus);

	esc->sc_id_vendor = USB_VENDOR_FREESCALE;
	strlcpy(esc->sc_vendor, "Nvidia", sizeof(esc->sc_vendor));

	/* Set flags that affect ehci_init() behavior. */
	esc->sc_flags |= EHCI_SCFLG_TT;
	esc->sc_flags |= EHCI_SCFLG_NORESTERM;
	rv = ehci_init(esc);
	if (rv != 0) {
		device_printf(dev, "USB init failed: %d\n",
		    rv);
		goto out;
	}
	esc->sc_flags |= EHCI_SCFLG_DONEINIT;

	/* Probe the bus. */
	rv = device_probe_and_attach(esc->sc_bus.bdev);
	if (rv != 0) {
		device_printf(dev,
		    "device_probe_and_attach() failed\n");
		goto out;
	}
	return (0);

out:
	/* Unwind any partial setup; detach tolerates a half-built softc. */
	tegra_ehci_detach(dev);
	return (rv);
}
/*
 * Device method table for the Tegra EHCI driver; suspend/resume/shutdown
 * are delegated to the generic bus implementations.
 */
static device_method_t ehci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, tegra_ehci_probe),
	DEVMETHOD(device_attach, tegra_ehci_attach),
	DEVMETHOD(device_detach, tegra_ehci_detach),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),

	DEVMETHOD_END
};

/* Driver/class glue; softc is the Tegra-specific wrapper around ehci_softc. */
static DEFINE_CLASS_0(ehci, ehci_driver, ehci_methods,
    sizeof(struct tegra_ehci_softc));
DRIVER_MODULE(tegra_ehci, simplebus, ehci_driver, NULL, NULL);
MODULE_DEPEND(tegra_ehci, usb, 1, 1, 1);
diff --git a/sys/arm/nvidia/tegra_xhci.c b/sys/arm/nvidia/tegra_xhci.c
index e3b4dd483189..474e31981770 100644
--- a/sys/arm/nvidia/tegra_xhci.c
+++ b/sys/arm/nvidia/tegra_xhci.c
@@ -1,1122 +1,1126 @@
/*-
* Copyright (c) 2016 Michal Meloun
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
/*
* XHCI driver for Tegra SoCs.
*/
#include "opt_bus.h"
#include "opt_platform.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "usbdevs.h"
/* FPCI address space */
#define T_XUSB_CFG_0 0x000
#define T_XUSB_CFG_1 0x004
#define CFG_1_BUS_MASTER (1 << 2)
#define CFG_1_MEMORY_SPACE (1 << 1)
#define CFG_1_IO_SPACE (1 << 0)
#define T_XUSB_CFG_2 0x008
#define T_XUSB_CFG_3 0x00C
#define T_XUSB_CFG_4 0x010
#define CFG_4_BASE_ADDRESS(x) (((x) & 0x1FFFF) << 15)
#define T_XUSB_CFG_5 0x014
#define T_XUSB_CFG_ARU_MAILBOX_CMD 0x0E4
#define ARU_MAILBOX_CMD_INT_EN (1U << 31)
#define ARU_MAILBOX_CMD_DEST_XHCI (1 << 30)
#define ARU_MAILBOX_CMD_DEST_SMI (1 << 29)
#define ARU_MAILBOX_CMD_DEST_PME (1 << 28)
#define ARU_MAILBOX_CMD_DEST_FALC (1 << 27)
#define T_XUSB_CFG_ARU_MAILBOX_DATA_IN 0x0E8
#define ARU_MAILBOX_DATA_IN_DATA(x) (((x) & 0xFFFFFF) << 0)
#define ARU_MAILBOX_DATA_IN_TYPE(x) (((x) & 0x0000FF) << 24)
#define T_XUSB_CFG_ARU_MAILBOX_DATA_OUT 0x0EC
#define ARU_MAILBOX_DATA_OUT_DATA(x) (((x) >> 0) & 0xFFFFFF)
#define ARU_MAILBOX_DATA_OUT_TYPE(x) (((x) >> 24) & 0x0000FF)
#define T_XUSB_CFG_ARU_MAILBOX_OWNER 0x0F0
#define ARU_MAILBOX_OWNER_SW 2
#define ARU_MAILBOX_OWNER_FW 1
#define ARU_MAILBOX_OWNER_NONE 0
#define XUSB_CFG_ARU_C11_CSBRANGE 0x41C /* ! UNDOCUMENTED ! */
#define ARU_C11_CSBRANGE_PAGE(x) ((x) >> 9)
#define ARU_C11_CSBRANGE_ADDR(x) (0x800 + ((x) & 0x1FF))
#define XUSB_CFG_ARU_SMI_INTR 0x428 /* ! UNDOCUMENTED ! */
#define ARU_SMI_INTR_EN (1 << 3)
#define ARU_SMI_INTR_FW_HANG (1 << 1)
#define XUSB_CFG_ARU_RST 0x42C /* ! UNDOCUMENTED ! */
#define ARU_RST_RESET (1 << 0)
#define XUSB_HOST_CONFIGURATION 0x180
#define CONFIGURATION_CLKEN_OVERRIDE (1U<< 31)
#define CONFIGURATION_PW_NO_DEVSEL_ERR_CYA (1 << 19)
#define CONFIGURATION_INITIATOR_READ_IDLE (1 << 18)
#define CONFIGURATION_INITIATOR_WRITE_IDLE (1 << 17)
#define CONFIGURATION_WDATA_LEAD_CYA (1 << 15)
#define CONFIGURATION_WR_INTRLV_CYA (1 << 14)
#define CONFIGURATION_TARGET_READ_IDLE (1 << 11)
#define CONFIGURATION_TARGET_WRITE_IDLE (1 << 10)
#define CONFIGURATION_MSI_VEC_EMPTY (1 << 9)
#define CONFIGURATION_UFPCI_MSIAW (1 << 7)
#define CONFIGURATION_UFPCI_PWPASSPW (1 << 6)
#define CONFIGURATION_UFPCI_PASSPW (1 << 5)
#define CONFIGURATION_UFPCI_PWPASSNPW (1 << 4)
#define CONFIGURATION_DFPCI_PWPASSNPW (1 << 3)
#define CONFIGURATION_DFPCI_RSPPASSPW (1 << 2)
#define CONFIGURATION_DFPCI_PASSPW (1 << 1)
#define CONFIGURATION_EN_FPCI (1 << 0)
/* IPFS address space */
#define XUSB_HOST_FPCI_ERROR_MASKS 0x184
#define FPCI_ERROR_MASTER_ABORT (1 << 2)
#define FPCI_ERRORI_DATA_ERROR (1 << 1)
#define FPCI_ERROR_TARGET_ABORT (1 << 0)
#define XUSB_HOST_INTR_MASK 0x188
#define INTR_IP_INT_MASK (1 << 16)
#define INTR_MSI_MASK (1 << 8)
#define INTR_INT_MASK (1 << 0)
#define XUSB_HOST_CLKGATE_HYSTERESIS 0x1BC
/* CSB Falcon CPU */
#define XUSB_FALCON_CPUCTL 0x100
#define CPUCTL_STOPPED (1 << 5)
#define CPUCTL_HALTED (1 << 4)
#define CPUCTL_HRESET (1 << 3)
#define CPUCTL_SRESET (1 << 2)
#define CPUCTL_STARTCPU (1 << 1)
#define CPUCTL_IINVAL (1 << 0)
#define XUSB_FALCON_BOOTVEC 0x104
#define XUSB_FALCON_DMACTL 0x10C
#define XUSB_FALCON_IMFILLRNG1 0x154
#define IMFILLRNG1_TAG_HI(x) (((x) & 0xFFF) << 16)
#define IMFILLRNG1_TAG_LO(x) (((x) & 0xFFF) << 0)
#define XUSB_FALCON_IMFILLCTL 0x158
/* CSB mempool */
#define XUSB_CSB_MEMPOOL_APMAP 0x10181C
#define APMAP_BOOTPATH (1U << 31)
#define XUSB_CSB_MEMPOOL_ILOAD_ATTR 0x101A00
#define XUSB_CSB_MEMPOOL_ILOAD_BASE_LO 0x101A04
#define XUSB_CSB_MEMPOOL_ILOAD_BASE_HI 0x101A08
#define XUSB_CSB_MEMPOOL_L2IMEMOP_SIZE 0x101A10
#define L2IMEMOP_SIZE_OFFSET(x) (((x) & 0x3FF) << 8)
#define L2IMEMOP_SIZE_SIZE(x) (((x) & 0x0FF) << 24)
#define XUSB_CSB_MEMPOOL_L2IMEMOP_TRIG 0x101A14
#define L2IMEMOP_INVALIDATE_ALL (0x40 << 24)
#define L2IMEMOP_LOAD_LOCKED_RESULT (0x11 << 24)
#define XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT 0x101A18
#define L2IMEMOP_RESULT_VLD (1U << 31)
#define XUSB_CSB_IMEM_BLOCK_SIZE 256
#define TEGRA_XHCI_SS_HIGH_SPEED 120000000
#define TEGRA_XHCI_SS_LOW_SPEED 12000000
/* MBOX commands. */
#define MBOX_CMD_MSG_ENABLED 1
#define MBOX_CMD_INC_FALC_CLOCK 2
#define MBOX_CMD_DEC_FALC_CLOCK 3
#define MBOX_CMD_INC_SSPI_CLOCK 4
#define MBOX_CMD_DEC_SSPI_CLOCK 5
#define MBOX_CMD_SET_BW 6
#define MBOX_CMD_SET_SS_PWR_GATING 7
#define MBOX_CMD_SET_SS_PWR_UNGATING 8
#define MBOX_CMD_SAVE_DFE_CTLE_CTX 9
#define MBOX_CMD_AIRPLANE_MODE_ENABLED 10
#define MBOX_CMD_AIRPLANE_MODE_DISABLED 11
#define MBOX_CMD_START_HSIC_IDLE 12
#define MBOX_CMD_STOP_HSIC_IDLE 13
#define MBOX_CMD_DBC_WAKE_STACK 14
#define MBOX_CMD_HSIC_PRETEND_CONNECT 15
#define MBOX_CMD_RESET_SSPI 16
#define MBOX_CMD_DISABLE_SS_LFPS_DETECTION 17
#define MBOX_CMD_ENABLE_SS_LFPS_DETECTION 18
/* MBOX responses. */
#define MBOX_CMD_ACK (0x80 + 0)
#define MBOX_CMD_NAK (0x80 + 1)
#define IPFS_WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res_ipfs, (_r), (_v))
#define IPFS_RD4(_sc, _r) bus_read_4((_sc)->mem_res_ipfs, (_r))
#define FPCI_WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res_fpci, (_r), (_v))
#define FPCI_RD4(_sc, _r) bus_read_4((_sc)->mem_res_fpci, (_r))
#define LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define SLEEP(_sc, timeout) \
mtx_sleep(sc, &sc->mtx, 0, "tegra_xhci", timeout);
#define LOCK_INIT(_sc) \
mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_xhci", MTX_DEF)
#define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx)
#define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED)
#define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED)
/*
 * Header at the start of the XUSB firmware image, shared with the FALCON
 * boot code.  Field layout must not change; the structure is padded to
 * exactly 256 bytes.
 */
struct tegra_xusb_fw_hdr {
	uint32_t boot_loadaddr_in_imem;
	uint32_t boot_codedfi_offset;
	uint32_t boot_codetag;		/* Used as boot vector and IMEM tag. */
	uint32_t boot_codesize;		/* Boot code size, in bytes. */
	uint32_t phys_memaddr;
	uint16_t reqphys_memsize;
	uint16_t alloc_phys_memsize;
	uint32_t rodata_img_offset;
	uint32_t rodata_section_start;
	uint32_t rodata_section_end;
	uint32_t main_fnaddr;
	uint32_t fwimg_cksum;
	uint32_t fwimg_created_time;	/* Build timestamp (seconds). */
	uint32_t imem_resident_start;
	uint32_t imem_resident_end;
	uint32_t idirect_start;
	uint32_t idirect_end;
	uint32_t l2_imem_start;
	uint32_t l2_imem_end;
	uint32_t version_id;
	uint8_t init_ddirect;
	uint8_t reserved[3];
	uint32_t phys_addr_log_buffer;
	uint32_t total_log_entries;
	uint32_t dequeue_ptr;
	uint32_t dummy[2];
	uint32_t fwimg_len;		/* Total image length used by load_fw(). */
	uint8_t magic[8];
	uint32_t ss_low_power_entry_timeout;
	uint8_t num_hsic_port;
	uint8_t ss_portmap;
	uint8_t build;
	uint8_t padding[137]; /* Pad to 256 bytes */
};
struct xhci_soc;	/* Defined below. */

/* Per-device driver state. */
struct tegra_xhci_softc {
	struct xhci_softc xhci_softc;	/* Generic XHCI softc (first member). */
	device_t dev;
	struct xhci_soc *soc;		/* SoC config picked via compat_data. */
	struct mtx mtx;
	struct resource *mem_res_fpci;	/* FPCI register window. */
	struct resource *mem_res_ipfs;	/* IPFS register window. */
	struct resource *irq_res_mbox;	/* Firmware mailbox interrupt. */
	void *irq_hdl_mbox;
	clk_t clk_xusb_host;
	clk_t clk_xusb_gate;
	clk_t clk_xusb_falcon_src;
	clk_t clk_xusb_ss;
	clk_t clk_xusb_hs_src;
	clk_t clk_xusb_fs_src;
	hwreset_t hwreset_xusb_host;
	hwreset_t hwreset_xusb_ss;
	regulator_t regulators[16]; /* Safe maximum */
	phy_t phys[8]; /* Safe maximum */
	struct intr_config_hook irq_hook;
	bool xhci_inited;		/* True once xhci_init() succeeded. */
	void *fw_vaddr;			/* Firmware buffer, freed in detach. */
	vm_size_t fw_size;
};
/* Per-SoC configuration: firmware image name and DT resource name lists. */
struct xhci_soc {
	char *fw_name;			/* Image name for firmware_get(). */
	char **regulator_names;		/* NULL-terminated property names. */
	char **phy_names;		/* NULL-terminated phy names. */
};
/* Tegra 124 config */
static char *tegra124_reg_names[] = {	/* NULL-terminated. */
	"avddio-pex-supply",
	"dvddio-pex-supply",
	"avdd-usb-supply",
	"avdd-pll-utmip-supply",
	"avdd-pll-erefe-supply",
	"avdd-usb-ss-pll-supply",
	"hvdd-usb-ss-supply",
	"hvdd-usb-ss-pll-e-supply",
	NULL
};

static char *tegra124_phy_names[] = {	/* NULL-terminated. */
	"usb2-0",
	"usb2-1",
	"usb2-2",
	"usb3-0",
	NULL
};

static struct xhci_soc tegra124_soc =
{
	.fw_name = "tegra124_xusb_fw",
	.regulator_names = tegra124_reg_names,
	.phy_names = tegra124_phy_names,
};

/* Tegra 210 config */
static char *tegra210_reg_names[] = {	/* NULL-terminated. */
	"dvddio-pex-supply",
	"hvddio-pex-supply",
	"avdd-usb-supply",
	"avdd-pll-utmip-supply",
	"avdd-pll-uerefe-supply",
	"dvdd-usb-ss-pll-supply",
	"hvdd-usb-ss-pll-e-supply",
	NULL
};

static char *tegra210_phy_names[] = {	/* NULL-terminated. */
	"usb2-0",
	"usb2-1",
	"usb2-2",
	"usb2-3",
	"usb3-0",
	"usb3-1",
	NULL
};

static struct xhci_soc tegra210_soc =
{
	.fw_name = "tegra210_xusb_fw",
	.regulator_names = tegra210_reg_names,
	.phy_names = tegra210_phy_names,
};

/* Compatible devices. ocd_data points to the matching xhci_soc. */
static struct ofw_compat_data compat_data[] = {
	{"nvidia,tegra124-xusb", (uintptr_t)&tegra124_soc},
	{"nvidia,tegra210-xusb", (uintptr_t)&tegra210_soc},
	{NULL, 0}
};
/*
 * Indirect read through the CSB aperture: first select the page with
 * the ARU_C11_CSBRANGE register, then read within the 512-byte window.
 */
static uint32_t
CSB_RD4(struct tegra_xhci_softc *sc, uint32_t addr)
{
	FPCI_WR4(sc, XUSB_CFG_ARU_C11_CSBRANGE, ARU_C11_CSBRANGE_PAGE(addr));
	return (FPCI_RD4(sc, ARU_C11_CSBRANGE_ADDR(addr)));
}
/*
 * Indirect write through the CSB aperture; page-select must precede the
 * data write (see CSB_RD4()).
 */
static void
CSB_WR4(struct tegra_xhci_softc *sc, uint32_t addr, uint32_t val)
{
	FPCI_WR4(sc, XUSB_CFG_ARU_C11_CSBRANGE, ARU_C11_CSBRANGE_PAGE(addr));
	FPCI_WR4(sc, ARU_C11_CSBRANGE_ADDR(addr), val);
}
/*
 * Resolve all FDT-described resources (regulators, resets, phys and
 * clocks) into handles stored in the softc.  Only lookups happen here;
 * enabling is done by enable_fdt_resources().  Returns 0 or an errno.
 */
static int
get_fdt_resources(struct tegra_xhci_softc *sc, phandle_t node)
{
	int i, rv;

	/* Regulators. */
	for (i = 0; sc->soc->regulator_names[i] != NULL; i++) {
		if (i >= nitems(sc->regulators)) {
			device_printf(sc->dev,
			    "Too many regulators present in DT.\n");
			return (EOVERFLOW);
		}
		rv = regulator_get_by_ofw_property(sc->dev, 0,
		    sc->soc->regulator_names[i], sc->regulators + i);
		if (rv != 0) {
			device_printf(sc->dev,
			    "Cannot get '%s' regulator\n",
			    sc->soc->regulator_names[i]);
			return (ENXIO);
		}
	}

	/* Resets. */
	rv = hwreset_get_by_ofw_name(sc->dev, 0, "xusb_host",
	    &sc->hwreset_xusb_host);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'xusb_host' reset\n");
		return (ENXIO);
	}
	rv = hwreset_get_by_ofw_name(sc->dev, 0, "xusb_ss",
	    &sc->hwreset_xusb_ss);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'xusb_ss' reset\n");
		return (ENXIO);
	}

	/* Phys. */
	for (i = 0; sc->soc->phy_names[i] != NULL; i++) {
		if (i >= nitems(sc->phys)) {
			device_printf(sc->dev,
			    "Too many phys present in DT.\n");
			return (EOVERFLOW);
		}
		rv = phy_get_by_ofw_name(sc->dev, 0, sc->soc->phy_names[i],
		    sc->phys + i);
		/* A phy missing from the DT (ENOENT) is not fatal. */
		if (rv != 0 && rv != ENOENT) {
			device_printf(sc->dev, "Cannot get '%s' phy.\n",
			    sc->soc->phy_names[i]);
			return (ENXIO);
		}
	}

	/* Clocks. */
	rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_host",
	    &sc->clk_xusb_host);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'xusb_host' clock\n");
		return (ENXIO);
	}
	rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_falcon_src",
	    &sc->clk_xusb_falcon_src);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'xusb_falcon_src' clock\n");
		return (ENXIO);
	}
	rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_ss",
	    &sc->clk_xusb_ss);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'xusb_ss' clock\n");
		return (ENXIO);
	}
	rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_hs_src",
	    &sc->clk_xusb_hs_src);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'xusb_hs_src' clock\n");
		return (ENXIO);
	}
	rv = clk_get_by_ofw_name(sc->dev, 0, "xusb_fs_src",
	    &sc->clk_xusb_fs_src);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'xusb_fs_src' clock\n");
		return (ENXIO);
	}
	/* Clock xusb_gate is missing in mainstream DT */
	rv = clk_get_by_name(sc->dev, "xusb_gate", &sc->clk_xusb_gate);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'xusb_gate' clock\n");
		return (ENXIO);
	}
	return (0);
}
static int
enable_fdt_resources(struct tegra_xhci_softc *sc)
{
int i, rv;
rv = hwreset_assert(sc->hwreset_xusb_host);
if (rv != 0) {
device_printf(sc->dev, "Cannot reset 'xusb_host' reset\n");
return (rv);
}
rv = hwreset_assert(sc->hwreset_xusb_ss);
if (rv != 0) {
device_printf(sc->dev, "Cannot reset 'xusb_ss' reset\n");
return (rv);
}
/* Regulators. */
for (i = 0; i < nitems(sc->regulators); i++) {
if (sc->regulators[i] == NULL)
continue;
rv = regulator_enable(sc->regulators[i]);
if (rv != 0) {
device_printf(sc->dev,
"Cannot enable '%s' regulator\n",
sc->soc->regulator_names[i]);
return (rv);
}
}
/* Power off XUSB host and XUSB SS domains. */
rv = tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
if (rv != 0) {
device_printf(sc->dev, "Cannot powerdown 'xusba' domain\n");
return (rv);
}
rv = tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
if (rv != 0) {
device_printf(sc->dev, "Cannot powerdown 'xusbc' domain\n");
return (rv);
}
/* Setup XUSB ss_src clock first */
clk_set_freq(sc->clk_xusb_ss, TEGRA_XHCI_SS_HIGH_SPEED, 0);
if (rv != 0)
return (rv);
/* The XUSB gate clock must be enabled before XUSBA can be powered. */
rv = clk_enable(sc->clk_xusb_gate);
if (rv != 0) {
device_printf(sc->dev,
"Cannot enable 'xusb_gate' clock\n");
return (rv);
}
/* Power on XUSB host and XUSB SS domains. */
rv = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC,
sc->clk_xusb_host, sc->hwreset_xusb_host);
if (rv != 0) {
device_printf(sc->dev, "Cannot powerup 'xusbc' domain\n");
return (rv);
}
rv = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBA,
sc->clk_xusb_ss, sc->hwreset_xusb_ss);
if (rv != 0) {
device_printf(sc->dev, "Cannot powerup 'xusba' domain\n");
return (rv);
}
/* Enable rest of clocks */
rv = clk_enable(sc->clk_xusb_falcon_src);
if (rv != 0) {
device_printf(sc->dev,
"Cannot enable 'xusb_falcon_src' clock\n");
return (rv);
}
rv = clk_enable(sc->clk_xusb_fs_src);
if (rv != 0) {
device_printf(sc->dev,
"Cannot enable 'xusb_fs_src' clock\n");
return (rv);
}
rv = clk_enable(sc->clk_xusb_hs_src);
if (rv != 0) {
device_printf(sc->dev,
"Cannot enable 'xusb_hs_src' clock\n");
return (rv);
}
/* Phys. */
for (i = 0; i < nitems(sc->phys); i++) {
if (sc->phys[i] == NULL)
continue;
rv = phy_enable(sc->phys[i]);
if (rv != 0) {
device_printf(sc->dev, "Cannot enable '%s' phy\n",
sc->soc->phy_names[i]);
return (rv);
}
}
return (0);
}
/*
 * Respond by ACK/NAK back to FW.  Unlike mbox_send_cmd(), no ownership
 * handshake is done here: we are answering a firmware-initiated request,
 * so the mailbox is already in use.
 */
static void
mbox_send_ack(struct tegra_xhci_softc *sc, uint32_t cmd, uint32_t data)
{
	uint32_t reg;

	reg = ARU_MAILBOX_DATA_IN_TYPE(cmd) | ARU_MAILBOX_DATA_IN_DATA(data);
	FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_DATA_IN, reg);
	/* Ring the doorbell towards the FALCON cpu. */
	reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD);
	reg |= ARU_MAILBOX_CMD_DEST_FALC | ARU_MAILBOX_CMD_INT_EN;
	FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD, reg);
}
/*
 * Send a command to FW.  Acquires mailbox ownership, posts the command
 * and waits for the firmware to release ownership, which signals
 * completion.  Returns 0, EBUSY or ETIMEDOUT.
 */
static int
mbox_send_cmd(struct tegra_xhci_softc *sc, uint32_t cmd, uint32_t data)
{
	uint32_t reg;
	int i;

	reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER);
	if (reg != ARU_MAILBOX_OWNER_NONE) {
		device_printf(sc->dev,
		    "CPU mailbox is busy: 0x%08X\n", reg);
		return (EBUSY);
	}
	/* XXX Is this right? Retry loop? Wait before send? */
	FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER, ARU_MAILBOX_OWNER_SW);
	/* Read back to verify we won the ownership race. */
	reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER);
	if (reg != ARU_MAILBOX_OWNER_SW) {
		device_printf(sc->dev,
		    "Cannot acquire CPU mailbox: 0x%08X\n", reg);
		return (EBUSY);
	}
	reg = ARU_MAILBOX_DATA_IN_TYPE(cmd) | ARU_MAILBOX_DATA_IN_DATA(data);
	FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_DATA_IN, reg);
	/* Ring the doorbell towards the FALCON cpu. */
	reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD);
	reg |= ARU_MAILBOX_CMD_DEST_FALC | ARU_MAILBOX_CMD_INT_EN;
	FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD, reg);
	/* Poll for ownership release: 250 * 100us = 25ms total. */
	for (i = 250; i > 0; i--) {
		reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER);
		if (reg == ARU_MAILBOX_OWNER_NONE)
			break;
		DELAY(100);
	}
	if (i <= 0) {
		device_printf(sc->dev,
		    "Command response timeout: 0x%08X\n", reg);
		return (ETIMEDOUT);
	}

	return(0);
}
/*
 * Handle one request message from the firmware.  On return *resp_cmd
 * holds MBOX_CMD_ACK/MBOX_CMD_NAK, or is left at the caller's
 * pre-initialized 0 when no response should be sent, and *resp_data
 * holds the payload to send back.
 */
static void
process_msg(struct tegra_xhci_softc *sc, uint32_t req_cmd, uint32_t req_data,
    uint32_t *resp_cmd, uint32_t *resp_data)
{
	uint64_t freq;
	int rv;

	/* In most cases, data are echoed back. */
	*resp_data = req_data;
	switch (req_cmd) {
	case MBOX_CMD_INC_FALC_CLOCK:
	case MBOX_CMD_DEC_FALC_CLOCK:
		/* Firmware requests a FALCON clock change; req_data is kHz. */
		rv = clk_set_freq(sc->clk_xusb_falcon_src, req_data * 1000ULL,
		    0);
		if (rv == 0) {
			/* Report the actually achieved frequency back. */
			rv = clk_get_freq(sc->clk_xusb_falcon_src, &freq);
			*resp_data = (uint32_t)(freq / 1000);
		}
		*resp_cmd = rv == 0 ? MBOX_CMD_ACK: MBOX_CMD_NAK;
		break;

	case MBOX_CMD_INC_SSPI_CLOCK:
	case MBOX_CMD_DEC_SSPI_CLOCK:
		/* Same as above for the SuperSpeed clock; req_data is kHz. */
		rv = clk_set_freq(sc->clk_xusb_ss, req_data * 1000ULL,
		    0);
		if (rv == 0) {
			rv = clk_get_freq(sc->clk_xusb_ss, &freq);
			*resp_data = (uint32_t)(freq / 1000);
		}
		*resp_cmd = rv == 0 ? MBOX_CMD_ACK: MBOX_CMD_NAK;
		break;

	case MBOX_CMD_SET_BW:
		/* No response is expected. */
		*resp_cmd = 0;
		break;

	case MBOX_CMD_SET_SS_PWR_GATING:
	case MBOX_CMD_SET_SS_PWR_UNGATING:
		*resp_cmd = MBOX_CMD_NAK;
		break;

	case MBOX_CMD_SAVE_DFE_CTLE_CTX:
		/* Not implemented yet. */
		*resp_cmd = MBOX_CMD_ACK;
		break;

	case MBOX_CMD_START_HSIC_IDLE:
	case MBOX_CMD_STOP_HSIC_IDLE:
		/* Not implemented yet. */
		*resp_cmd = MBOX_CMD_NAK;
		break;

	case MBOX_CMD_DISABLE_SS_LFPS_DETECTION:
	case MBOX_CMD_ENABLE_SS_LFPS_DETECTION:
		/* Not implemented yet. */
		*resp_cmd = MBOX_CMD_NAK;
		break;

	case MBOX_CMD_AIRPLANE_MODE_ENABLED:
	case MBOX_CMD_AIRPLANE_MODE_DISABLED:
	case MBOX_CMD_DBC_WAKE_STACK:
	case MBOX_CMD_HSIC_PRETEND_CONNECT:
	case MBOX_CMD_RESET_SSPI:
		device_printf(sc->dev,
		    "Received unused/unexpected command: %u\n", req_cmd);
		*resp_cmd = 0;
		break;

	default:
		/* *resp_cmd stays at the caller's 0: mailbox gets released. */
		device_printf(sc->dev,
		    "Received unknown command: %u\n", req_cmd);
	}
}
/*
 * Mailbox/SMI interrupt handler.  Dispatches a firmware message to
 * process_msg() and either sends a response or just releases the
 * mailbox when no response is expected.
 */
static void
intr_mbox(void *arg)
{
	struct tegra_xhci_softc *sc;
	uint32_t reg, msg, resp_cmd, resp_data;

	sc = (struct tegra_xhci_softc *)arg;

	/* Clear interrupt first */
	reg = FPCI_RD4(sc, XUSB_CFG_ARU_SMI_INTR);
	FPCI_WR4(sc, XUSB_CFG_ARU_SMI_INTR, reg);
	if (reg & ARU_SMI_INTR_FW_HANG) {
		device_printf(sc->dev,
		    "XUSB CPU firmware hang!!! CPUCTL: 0x%08X\n",
		    CSB_RD4(sc, XUSB_FALCON_CPUCTL));
	}

	msg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_DATA_OUT);
	resp_cmd = 0;
	process_msg(sc, ARU_MAILBOX_DATA_OUT_TYPE(msg),
	    ARU_MAILBOX_DATA_OUT_DATA(msg), &resp_cmd, &resp_data);
	if (resp_cmd != 0)
		mbox_send_ack(sc, resp_cmd, resp_data);
	else
		/* No response needed; hand the mailbox back. */
		FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_OWNER,
		    ARU_MAILBOX_OWNER_NONE);

	/* Stop SMI delivery for this message. */
	reg = FPCI_RD4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD);
	reg &= ~ARU_MAILBOX_CMD_DEST_SMI;
	FPCI_WR4(sc, T_XUSB_CFG_ARU_MAILBOX_CMD, reg);
}
/*
 * Load the XUSB firmware into uncached contiguous memory and boot the
 * FALCON microcontroller from it.  The buffer stays mapped for the
 * controller's lifetime and is freed in tegra_xhci_detach() (including
 * on the timeout error paths below).  Returns 0 or an errno.
 */
static int
load_fw(struct tegra_xhci_softc *sc)
{
	const struct firmware *fw;
	const struct tegra_xusb_fw_hdr *fw_hdr;
	vm_paddr_t fw_paddr, fw_base;
	void *fw_vaddr;
	vm_size_t fw_size;
	uint32_t code_tags, code_size;
	struct clocktime fw_clock;
	struct timespec fw_timespec;
	int i;

	/* Reset ARU */
	FPCI_WR4(sc, XUSB_CFG_ARU_RST, ARU_RST_RESET);
	DELAY(3000);

	/* Check if FALCON already runs */
	if (CSB_RD4(sc, XUSB_CSB_MEMPOOL_ILOAD_BASE_LO) != 0) {
		device_printf(sc->dev,
		    "XUSB CPU is already loaded, CPUCTL: 0x%08X\n",
		    CSB_RD4(sc, XUSB_FALCON_CPUCTL));
		return (0);
	}

	fw = firmware_get(sc->soc->fw_name);
	if (fw == NULL) {
		device_printf(sc->dev, "Cannot read xusb firmware\n");
		return (ENOENT);
	}

	/* Allocate uncached memory and copy firmware into. */
	fw_hdr = (const struct tegra_xusb_fw_hdr *)fw->data;
	fw_size = fw_hdr->fwimg_len;
	fw_vaddr = kmem_alloc_contig(fw_size, M_WAITOK, 0, -1UL, PAGE_SIZE, 0,
	    VM_MEMATTR_UNCACHEABLE);
	fw_paddr = vtophys((uintptr_t)fw_vaddr);
	/* Re-point the header at the copy the controller will DMA from. */
	fw_hdr = (const struct tegra_xusb_fw_hdr *)fw_vaddr;
	memcpy(fw_vaddr, fw->data, fw_size);
	firmware_put(fw, FIRMWARE_UNLOAD);
	sc->fw_vaddr = fw_vaddr;
	sc->fw_size = fw_size;

	/* Setup firmware physical address and size. */
	fw_base = fw_paddr + sizeof(*fw_hdr);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_ILOAD_ATTR, fw_size);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_ILOAD_BASE_LO, fw_base & 0xFFFFFFFF);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_ILOAD_BASE_HI, (uint64_t)fw_base >> 32);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_APMAP, APMAP_BOOTPATH);

	/* Invalidate full L2IMEM context. */
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_TRIG,
	    L2IMEMOP_INVALIDATE_ALL);

	/* Program load of L2IMEM by boot code. */
	code_tags = howmany(fw_hdr->boot_codetag, XUSB_CSB_IMEM_BLOCK_SIZE);
	code_size = howmany(fw_hdr->boot_codesize, XUSB_CSB_IMEM_BLOCK_SIZE);
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_SIZE,
	    L2IMEMOP_SIZE_OFFSET(code_tags) |
	    L2IMEMOP_SIZE_SIZE(code_size));

	/* Execute L2IMEM boot code fetch. */
	CSB_WR4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_TRIG,
	    L2IMEMOP_LOAD_LOCKED_RESULT);

	/* Program FALCON auto-fill range and block count */
	CSB_WR4(sc, XUSB_FALCON_IMFILLCTL, code_size);
	CSB_WR4(sc, XUSB_FALCON_IMFILLRNG1,
	    IMFILLRNG1_TAG_LO(code_tags) |
	    IMFILLRNG1_TAG_HI(code_tags + code_size));
	CSB_WR4(sc, XUSB_FALCON_DMACTL, 0);

	/* Wait for CPU (up to 500 * 100us = 50ms). */
	for (i = 500; i > 0; i--) {
		if (CSB_RD4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT) &
		     L2IMEMOP_RESULT_VLD)
			break;
		DELAY(100);
	}
	if (i <= 0) {
		device_printf(sc->dev, "Timedout while wating for DMA, "
		    "state: 0x%08X\n",
		    CSB_RD4(sc, XUSB_CSB_MEMPOOL_L2IMEMOP_RESULT));
		return (ETIMEDOUT);
	}

	/* Boot FALCON cpu */
	CSB_WR4(sc, XUSB_FALCON_BOOTVEC, fw_hdr->boot_codetag);
	CSB_WR4(sc, XUSB_FALCON_CPUCTL, CPUCTL_STARTCPU);

	/* Wait for CPU (up to 50 * 100us = 5ms). */
	for (i = 50; i > 0; i--) {
		if (CSB_RD4(sc, XUSB_FALCON_CPUCTL) == CPUCTL_STOPPED)
			break;
		DELAY(100);
	}
	if (i <= 0) {
		device_printf(sc->dev, "Timedout while wating for FALCON cpu, "
		    "state: 0x%08X\n", CSB_RD4(sc, XUSB_FALCON_CPUCTL));
		return (ETIMEDOUT);
	}

	fw_timespec.tv_sec = fw_hdr->fwimg_created_time;
	fw_timespec.tv_nsec = 0;
	clock_ts_to_ct(&fw_timespec, &fw_clock);
	/*
	 * NOTE(review): the ">> 15" for the middle version field looks like
	 * it should be ">> 16" -- verify against the firmware header layout.
	 */
	device_printf(sc->dev,
	    " Falcon firmware version: %02X.%02X.%04X,"
	    " (%d/%d/%d %d:%02d:%02d UTC)\n",
	    (fw_hdr->version_id >> 24) & 0xFF,(fw_hdr->version_id >> 15) & 0xFF,
	    fw_hdr->version_id & 0xFFFF,
	    fw_clock.day, fw_clock.mon, fw_clock.year,
	    fw_clock.hour, fw_clock.min, fw_clock.sec);

	return (0);
}
static int
init_hw(struct tegra_xhci_softc *sc)
{
int rv;
uint32_t reg;
rman_res_t base_addr;
base_addr = rman_get_start(sc->xhci_softc.sc_io_res);
/* Enable FPCI access */
reg = IPFS_RD4(sc, XUSB_HOST_CONFIGURATION);
reg |= CONFIGURATION_EN_FPCI;
IPFS_WR4(sc, XUSB_HOST_CONFIGURATION, reg);
IPFS_RD4(sc, XUSB_HOST_CONFIGURATION);
/* Program bar for XHCI base address */
reg = FPCI_RD4(sc, T_XUSB_CFG_4);
reg &= ~CFG_4_BASE_ADDRESS(~0);
reg |= CFG_4_BASE_ADDRESS((uint32_t)base_addr >> 15);
FPCI_WR4(sc, T_XUSB_CFG_4, reg);
FPCI_WR4(sc, T_XUSB_CFG_5, (uint32_t)((uint64_t)(base_addr) >> 32));
/* Enable bus master */
reg = FPCI_RD4(sc, T_XUSB_CFG_1);
reg |= CFG_1_IO_SPACE;
reg |= CFG_1_MEMORY_SPACE;
reg |= CFG_1_BUS_MASTER;
FPCI_WR4(sc, T_XUSB_CFG_1, reg);
/* Enable Interrupts */
reg = IPFS_RD4(sc, XUSB_HOST_INTR_MASK);
reg |= INTR_IP_INT_MASK;
IPFS_WR4(sc, XUSB_HOST_INTR_MASK, reg);
/* Set hysteresis */
IPFS_WR4(sc, XUSB_HOST_CLKGATE_HYSTERESIS, 128);
rv = load_fw(sc);
if (rv != 0)
return rv;
return (0);
}
/*
 * Newbus probe: match an enabled FDT node against our compatible list.
 */
static int
tegra_xhci_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Nvidia Tegra XHCI controller");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Newbus detach: detach children (the usbus bus), stop the controller,
 * tear down interrupts and release all resources.  Every step is
 * guarded, so this is also safe to call from the attach error path
 * with partially-initialized state.
 *
 * Fix: resolved the stray diff markers that had been pasted into this
 * function (the file was not valid C), and release the FPCI/IPFS memory
 * windows and the mailbox IRQ resource, which were previously leaked.
 */
static int
tegra_xhci_detach(device_t dev)
{
	struct tegra_xhci_softc *sc;
	struct xhci_softc *xsc;
	int error;

	sc = device_get_softc(dev);
	xsc = &sc->xhci_softc;

	/* During module unload there are lots of children leftover. */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	if (sc->xhci_inited) {
		usb_callout_drain(&xsc->sc_callout);
		xhci_halt_controller(xsc);
	}
	if (xsc->sc_irq_res && xsc->sc_intr_hdl) {
		bus_teardown_intr(dev, xsc->sc_irq_res, xsc->sc_intr_hdl);
		xsc->sc_intr_hdl = NULL;
	}
	if (xsc->sc_irq_res) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(xsc->sc_irq_res), xsc->sc_irq_res);
		xsc->sc_irq_res = NULL;
	}
	if (xsc->sc_io_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(xsc->sc_io_res), xsc->sc_io_res);
		xsc->sc_io_res = NULL;
	}
	if (sc->xhci_inited)
		xhci_uninit(xsc);
	if (sc->irq_hdl_mbox != NULL) {
		bus_teardown_intr(dev, sc->irq_res_mbox, sc->irq_hdl_mbox);
		sc->irq_hdl_mbox = NULL;
	}
	if (sc->irq_res_mbox != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res_mbox), sc->irq_res_mbox);
		sc->irq_res_mbox = NULL;
	}
	if (sc->mem_res_fpci != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res_fpci), sc->mem_res_fpci);
		sc->mem_res_fpci = NULL;
	}
	if (sc->mem_res_ipfs != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res_ipfs), sc->mem_res_ipfs);
		sc->mem_res_ipfs = NULL;
	}
	if (sc->fw_vaddr != NULL) {
		kmem_free(sc->fw_vaddr, sc->fw_size);
		sc->fw_vaddr = NULL;
	}
	LOCK_DESTROY(sc);
	return (0);
}
/*
 * Newbus attach: map resources, power up the XUSB complex, load/boot
 * the FALCON firmware and hand the controller over to the generic XHCI
 * driver.  On any failure, tegra_xhci_detach() releases whatever was
 * already set up.
 *
 * Fix: removed a leftover debugging panic("XXXXX") at the error label,
 * which turned every recoverable attach failure into a kernel panic and
 * made the cleanup path unreachable.  Also fixed the copy-pasted IRQ
 * error messages.
 */
static int
tegra_xhci_attach(device_t dev)
{
	struct tegra_xhci_softc *sc;
	struct xhci_softc *xsc;
	int rv, rid;
	phandle_t node;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->soc = (struct xhci_soc *)ofw_bus_search_compatible(dev,
	    compat_data)->ocd_data;
	node = ofw_bus_get_node(dev);
	xsc = &sc->xhci_softc;
	LOCK_INIT(sc);

	rv = get_fdt_resources(sc, node);
	if (rv != 0) {
		rv = ENXIO;
		goto error;
	}
	rv = enable_fdt_resources(sc);
	if (rv != 0) {
		rv = ENXIO;
		goto error;
	}

	/* Allocate resources. */
	rid = 0;
	xsc->sc_io_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (xsc->sc_io_res == NULL) {
		device_printf(dev,
		    "Could not allocate HCD memory resources\n");
		rv = ENXIO;
		goto error;
	}
	rid = 1;
	sc->mem_res_fpci = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res_fpci == NULL) {
		device_printf(dev,
		    "Could not allocate FPCI memory resources\n");
		rv = ENXIO;
		goto error;
	}
	rid = 2;
	sc->mem_res_ipfs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res_ipfs == NULL) {
		device_printf(dev,
		    "Could not allocate IPFS memory resources\n");
		rv = ENXIO;
		goto error;
	}
	rid = 0;
	xsc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (xsc->sc_irq_res == NULL) {
		device_printf(dev, "Could not allocate HCD IRQ resources\n");
		rv = ENXIO;
		goto error;
	}
	rid = 1;
	sc->irq_res_mbox = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_mbox == NULL) {
		device_printf(dev, "Could not allocate MBOX IRQ resources\n");
		rv = ENXIO;
		goto error;
	}

	rv = init_hw(sc);
	if (rv != 0) {
		device_printf(dev, "Could not initialize XUSB hardware\n");
		goto error;
	}

	/* Wakeup and enable firmware */
	rv = mbox_send_cmd(sc, MBOX_CMD_MSG_ENABLED, 0);
	if (rv != 0) {
		device_printf(sc->dev, "Could not enable XUSB firmware\n");
		goto error;
	}

	/* Fill data for XHCI driver. */
	xsc->sc_bus.parent = dev;
	xsc->sc_bus.devices = xsc->sc_devices;
	xsc->sc_bus.devices_max = XHCI_MAX_DEVICES;
	xsc->sc_io_tag = rman_get_bustag(xsc->sc_io_res);
	xsc->sc_io_hdl = rman_get_bushandle(xsc->sc_io_res);
	xsc->sc_io_size = rman_get_size(xsc->sc_io_res);
	strlcpy(xsc->sc_vendor, "Nvidia", sizeof(xsc->sc_vendor));

	/* Add USB bus device. */
	xsc->sc_bus.bdev = device_add_child(sc->dev, "usbus", DEVICE_UNIT_ANY);
	if (xsc->sc_bus.bdev == NULL) {
		device_printf(sc->dev, "Could not add USB device\n");
		rv = ENXIO;
		goto error;
	}
	device_set_ivars(xsc->sc_bus.bdev, &xsc->sc_bus);
	device_set_desc(xsc->sc_bus.bdev, "Nvidia USB 3.0 controller");

	rv = xhci_init(xsc, sc->dev, 1);
	if (rv != 0) {
		device_printf(sc->dev, "USB init failed: %d\n", rv);
		goto error;
	}
	sc->xhci_inited = true;
	rv = xhci_start_controller(xsc);
	if (rv != 0) {
		device_printf(sc->dev,
		    "Could not start XHCI controller: %d\n", rv);
		goto error;
	}

	rv = bus_setup_intr(dev, sc->irq_res_mbox, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, intr_mbox, sc, &sc->irq_hdl_mbox);
	if (rv != 0) {
		device_printf(dev, "Could not setup MBOX IRQ: %d\n", rv);
		goto error;
	}
	rv = bus_setup_intr(dev, xsc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, (driver_intr_t *)xhci_interrupt, xsc, &xsc->sc_intr_hdl);
	if (rv != 0) {
		device_printf(dev, "Could not setup HCD IRQ: %d\n", rv);
		xsc->sc_intr_hdl = NULL;
		goto error;
	}

	/* Probe the bus. */
	rv = device_probe_and_attach(xsc->sc_bus.bdev);
	if (rv != 0) {
		device_printf(sc->dev, "Could not initialize USB: %d\n", rv);
		goto error;
	}

	return (0);

error:
	tegra_xhci_detach(dev);
	return (rv);
}
/*
 * Device method table for the Tegra XHCI driver; suspend/resume/shutdown
 * are delegated to the generic bus implementations.
 */
static device_method_t xhci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, tegra_xhci_probe),
	DEVMETHOD(device_attach, tegra_xhci_attach),
	DEVMETHOD(device_detach, tegra_xhci_detach),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),

	DEVMETHOD_END
};

/* Driver/class glue; softc is the Tegra-specific wrapper around xhci_softc. */
static DEFINE_CLASS_0(xhci, xhci_driver, xhci_methods,
    sizeof(struct tegra_xhci_softc));
DRIVER_MODULE(tegra_xhci, simplebus, xhci_driver, NULL, NULL);
MODULE_DEPEND(tegra_xhci, usb, 1, 1, 1);
diff --git a/sys/arm/ti/am335x/am335x_musb.c b/sys/arm/ti/am335x/am335x_musb.c
index 147602c4dbd3..24a204e42c9c 100644
--- a/sys/arm/ti/am335x/am335x_musb.c
+++ b/sys/arm/ti/am335x/am335x_musb.c
@@ -1,457 +1,460 @@
/*-
* Copyright (c) 2013 Oleksandr Tymoshenko
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define USB_DEBUG_VAR usbssdebug
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "syscon_if.h"
#define USBCTRL_REV 0x00
#define USBCTRL_CTRL 0x14
#define USBCTRL_STAT 0x18
#define USBCTRL_IRQ_STAT0 0x30
#define IRQ_STAT0_RXSHIFT 16
#define IRQ_STAT0_TXSHIFT 0
#define USBCTRL_IRQ_STAT1 0x34
#define IRQ_STAT1_DRVVBUS (1 << 8)
#define USBCTRL_INTEN_SET0 0x38
#define USBCTRL_INTEN_SET1 0x3C
#define USBCTRL_INTEN_USB_ALL 0x1ff
#define USBCTRL_INTEN_USB_SOF (1 << 3)
#define USBCTRL_INTEN_CLR0 0x40
#define USBCTRL_INTEN_CLR1 0x44
#define USBCTRL_UTMI 0xE0
#define USBCTRL_UTMI_FSDATAEXT (1 << 1)
#define USBCTRL_MODE 0xE8
#define USBCTRL_MODE_IDDIG (1 << 8)
#define USBCTRL_MODE_IDDIGMUX (1 << 7)
/* USBSS resource + 2 MUSB ports */
#define RES_USBCORE 0
#define RES_USBCTRL 1
#define USB_WRITE4(sc, idx, reg, val) do { \
bus_write_4((sc)->sc_mem_res[idx], (reg), (val)); \
} while (0)
#define USB_READ4(sc, idx, reg) bus_read_4((sc)->sc_mem_res[idx], (reg))
#define USBCTRL_WRITE4(sc, reg, val) \
USB_WRITE4((sc), RES_USBCTRL, (reg), (val))
#define USBCTRL_READ4(sc, reg) \
USB_READ4((sc), RES_USBCTRL, (reg))
/*
 * Two memory windows: index 0 is the MUSB core register set (RES_USBCORE),
 * index 1 is the USBSS wrapper register set (RES_USBCTRL).
 */
static struct resource_spec am335x_musbotg_mem_spec[] = {
	{ SYS_RES_MEMORY,   0,  RF_ACTIVE },
	{ SYS_RES_MEMORY,   1,  RF_ACTIVE },
	{ -1,               0,  0 }
};
#ifdef USB_DEBUG
/* Debug verbosity, exposed as the hw.usb.am335x_usbss.debug sysctl. */
static int usbssdebug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, am335x_usbss,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "AM335x USBSS");
SYSCTL_INT(_hw_usb_am335x_usbss, OID_AUTO, debug, CTLFLAG_RW,
    &usbssdebug, 0, "Debug level");
#endif

static device_probe_t musbotg_probe;
static device_attach_t musbotg_attach;
static device_detach_t musbotg_detach;

/*
 * Glue softc: embeds the generic MUSB softc plus the wrapper-specific
 * state (register windows, interrupt rid, SCM syscon handle).
 */
struct musbotg_super_softc {
	struct musbotg_softc sc_otg;	/* generic MUSB core state */
	struct resource *sc_mem_res[2];	/* RES_USBCORE / RES_USBCTRL windows */
	int sc_irq_rid;
	struct syscon *syscon;		/* SCM registers (USB_CTRL0/1) */
};
/*
 * Push the current VBUS state into the MUSB core.  In device mode VBUS is
 * simply reported as present; in host mode the level is sampled from bit 0
 * of the wrapper's STAT register.
 */
static void
musbotg_vbus_poll(struct musbotg_super_softc *sc)
{
	uint32_t level;

	if (sc->sc_otg.sc_mode != MUSB2_DEVICE_MODE)
		level = USBCTRL_READ4(sc, USBCTRL_STAT) & 1;
	else
		level = 1;
	musbotg_vbus_interrupt(&sc->sc_otg, level);
}
/*
 * Per-controller SCM USB_CTRL register offsets, indexed by sc_id.
 * NOTE(review): the previous comment claimed musbotg_clocks_on/off take a
 * "uint32_t *" argument; they actually receive a struct musbotg_softc *
 * (see sc_clocks_arg in attach) and index this table by sc_id.
 */
static uint32_t USB_CTRL[] = {SCM_USB_CTRL0, SCM_USB_CTRL1};
/*
 * Power the PHY up through the SCM USB_CTRL register for this controller:
 * clear the power-down bits and turn on VBUS-detect and session-end
 * detection.
 */
static void
musbotg_clocks_on(void *arg)
{
	struct musbotg_softc *sc = arg;
	struct musbotg_super_softc *ssc = sc->sc_platform_data;
	uint32_t ctrl;

	ctrl = SYSCON_READ_4(ssc->syscon, USB_CTRL[sc->sc_id]);
	ctrl &= ~3;				/* Enable power */
	ctrl |= (1 << 19) | (1 << 20);		/* VBUS detect + session end */
	SYSCON_WRITE_4(ssc->syscon, USB_CTRL[sc->sc_id], ctrl);
}
/*
 * Power the PHY down again by setting both power-down bits in this
 * controller's SCM USB_CTRL register.
 */
static void
musbotg_clocks_off(void *arg)
{
	struct musbotg_softc *sc = arg;
	struct musbotg_super_softc *ssc = sc->sc_platform_data;
	uint32_t ctrl;

	/* Disable power to PHY */
	ctrl = SYSCON_READ_4(ssc->syscon, USB_CTRL[sc->sc_id]) | 3;
	SYSCON_WRITE_4(ssc->syscon, USB_CTRL[sc->sc_id], ctrl);
}
/*
 * Enable or mask the wrapper-level interrupt for one endpoint.  The
 * IRQ enable register carries one bit per endpoint in each of its RX
 * (high) and TX (low) halves; set both for the endpoint at once.
 */
static void
musbotg_ep_int_set(struct musbotg_softc *sc, int ep, int on)
{
	struct musbotg_super_softc *ssc = sc->sc_platform_data;
	uint32_t mask;

	mask = (1u << (ep + IRQ_STAT0_RXSHIFT)) |
	       (1u << (ep + IRQ_STAT0_TXSHIFT));
	USBCTRL_WRITE4(ssc, on ? USBCTRL_INTEN_SET0 : USBCTRL_INTEN_CLR0,
	    mask);
}
/*
 * Glue interrupt handler: read the wrapper's endpoint (STAT0) and core
 * event (STAT1) interrupt registers, acknowledge them, and forward the
 * decoded bits to the generic MUSB interrupt code.
 */
static void
musbotg_wrapper_interrupt(void *arg)
{
	struct musbotg_softc *sc = arg;
	struct musbotg_super_softc *ssc = sc->sc_platform_data;
	uint32_t stat, stat0, stat1;

	stat = USBCTRL_READ4(ssc, USBCTRL_STAT);
	stat0 = USBCTRL_READ4(ssc, USBCTRL_IRQ_STAT0);	/* EP RX/TX bits */
	stat1 = USBCTRL_READ4(ssc, USBCTRL_IRQ_STAT1);	/* core USB events */

	/* Ack what we latched (presumably write-1-to-clear — TODO confirm
	 * against the AM335x TRM). */
	if (stat0)
		USBCTRL_WRITE4(ssc, USBCTRL_IRQ_STAT0, stat0);
	if (stat1)
		USBCTRL_WRITE4(ssc, USBCTRL_IRQ_STAT1, stat1);

	DPRINTFN(4, "port%d: stat0=%08x stat1=%08x, stat=%08x\n",
	    sc->sc_id, stat0, stat1, stat);

	/* On a DRVVBUS change, report the current VBUS level (STAT bit 0). */
	if (stat1 & IRQ_STAT1_DRVVBUS)
		musbotg_vbus_interrupt(sc, stat & 1);

	/* RX endpoints live in the high half of stat0, TX in the low half. */
	musbotg_interrupt(arg, ((stat0 >> 16) & 0xffff),
	    stat0 & 0xffff, stat1 & 0xff);
}
/*
 * Newbus probe: match the FDT "ti,musb-am33xx" compatible on an
 * enabled node.
 */
static int
musbotg_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev) ||
	    !ofw_bus_is_compatible(dev, "ti,musb-am33xx"))
		return (ENXIO);

	device_set_desc(dev, "TI AM33xx integrated USB OTG controller");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Attach: enable the OTG functional clock, grab the SCM syscon, map the
 * core and wrapper register windows, install the interrupt handler, pick
 * host/device mode, then hand control to the generic MUSB stack.
 */
static int
musbotg_attach(device_t dev)
{
	struct musbotg_super_softc *sc = device_get_softc(dev);
	char mode[16];
	int err;
	uint32_t reg;
	phandle_t opp_table;
	clk_t clk_usbotg_fck;

	/* Unit number doubles as the controller id (indexes USB_CTRL[]). */
	sc->sc_otg.sc_id = device_get_unit(dev);

	/* FIXME: The devicetree needs to be updated to get a handle to the gate
	 * usbotg_fck@47c. see TRM 8.1.12.2 CM_WKUP CM_CLKDCOLDO_DPLL_PER.
	 */
	err = clk_get_by_name(dev, "usbotg_fck@47c", &clk_usbotg_fck);
	if (err) {
		device_printf(dev, "Can not find usbotg_fck@47c\n");
		return (ENXIO);
	}

	err = clk_enable(clk_usbotg_fck);
	if (err) {
		device_printf(dev, "Can not enable usbotg_fck@47c\n");
		return (ENXIO);
	}

	/* FIXME: For now; Go and kidnap syscon from opp-table */
	opp_table = OF_finddevice("/opp-table");
	if (opp_table == -1) {
		device_printf(dev, "Cant find /opp-table\n");
		return (ENXIO);
	}
	if (!OF_hasprop(opp_table, "syscon")) {
		device_printf(dev, "/opp-table missing syscon property\n");
		return (ENXIO);
	}
	err = syscon_get_by_ofw_property(dev, opp_table, "syscon", &sc->syscon);
	if (err) {
		device_printf(dev, "Failed to get syscon\n");
		return (ENXIO);
	}

	/* Request the memory resources */
	err = bus_alloc_resources(dev, am335x_musbotg_mem_spec,
	    sc->sc_mem_res);
	if (err) {
		device_printf(dev,
		    "Error: could not allocate mem resources\n");
		return (ENXIO);
	}

	/* Request the IRQ resources */
	sc->sc_otg.sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid, RF_ACTIVE);
	if (sc->sc_otg.sc_irq_res == NULL) {
		device_printf(dev,
		    "Error: could not allocate irq resources\n");
		return (ENXIO);
	}

	/* setup MUSB OTG USB controller interface softc */
	sc->sc_otg.sc_clocks_on = &musbotg_clocks_on;
	sc->sc_otg.sc_clocks_off = &musbotg_clocks_off;
	sc->sc_otg.sc_clocks_arg = &sc->sc_otg;
	sc->sc_otg.sc_ep_int_set = musbotg_ep_int_set;

	/* initialise some bus fields */
	sc->sc_otg.sc_bus.parent = dev;
	sc->sc_otg.sc_bus.devices = sc->sc_otg.sc_devices;
	sc->sc_otg.sc_bus.devices_max = MUSB2_MAX_DEVICES;
	sc->sc_otg.sc_bus.dma_bits = 32;

	/* get all DMA memory */
	if (usb_bus_mem_alloc_all(&sc->sc_otg.sc_bus,
	    USB_GET_DMA_TAG(dev), NULL)) {
		device_printf(dev,
		    "Failed allocate bus mem for musb\n");
		return (ENOMEM);
	}
	/* The MUSB core talks to window 0; the wrapper macros use window 1. */
	sc->sc_otg.sc_io_res = sc->sc_mem_res[RES_USBCORE];
	sc->sc_otg.sc_io_tag =
	    rman_get_bustag(sc->sc_otg.sc_io_res);
	sc->sc_otg.sc_io_hdl =
	    rman_get_bushandle(sc->sc_otg.sc_io_res);
	sc->sc_otg.sc_io_size =
	    rman_get_size(sc->sc_otg.sc_io_res);

	sc->sc_otg.sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
	if (!(sc->sc_otg.sc_bus.bdev)) {
		device_printf(dev, "No busdev for musb\n");
		goto error;
	}
	device_set_ivars(sc->sc_otg.sc_bus.bdev,
	    &sc->sc_otg.sc_bus);

	err = bus_setup_intr(dev, sc->sc_otg.sc_irq_res,
	    INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, (driver_intr_t *)musbotg_wrapper_interrupt,
	    &sc->sc_otg, &sc->sc_otg.sc_intr_hdl);
	if (err) {
		sc->sc_otg.sc_intr_hdl = NULL;
		device_printf(dev,
		    "Failed to setup interrupt for musb\n");
		goto error;
	}

	sc->sc_otg.sc_platform_data = sc;

	/* Mode comes from the FDT "dr_mode" property when present. */
	if (OF_getprop(ofw_bus_get_node(dev), "dr_mode", mode,
	    sizeof(mode)) > 0) {
		if (strcasecmp(mode, "host") == 0)
			sc->sc_otg.sc_mode = MUSB2_HOST_MODE;
		else
			sc->sc_otg.sc_mode = MUSB2_DEVICE_MODE;
	} else {
		/* Beaglebone defaults: USB0 device, USB1 HOST. */
		if (sc->sc_otg.sc_id == 0)
			sc->sc_otg.sc_mode = MUSB2_DEVICE_MODE;
		else
			sc->sc_otg.sc_mode = MUSB2_HOST_MODE;
	}

	/*
	 * software-controlled function: mux the IDDIG pin to software
	 * control and drive it to force the chosen mode.
	 */

	if (sc->sc_otg.sc_mode == MUSB2_HOST_MODE) {
		/* IDDIG cleared -> host role. */
		reg = USBCTRL_READ4(sc, USBCTRL_MODE);
		reg |= USBCTRL_MODE_IDDIGMUX;
		reg &= ~USBCTRL_MODE_IDDIG;
		USBCTRL_WRITE4(sc, USBCTRL_MODE, reg);

		USBCTRL_WRITE4(sc, USBCTRL_UTMI,
		    USBCTRL_UTMI_FSDATAEXT);
	} else {
		/* IDDIG set -> device role. */
		reg = USBCTRL_READ4(sc, USBCTRL_MODE);
		reg |= USBCTRL_MODE_IDDIGMUX;
		reg |= USBCTRL_MODE_IDDIG;
		USBCTRL_WRITE4(sc, USBCTRL_MODE, reg);
	}

	/* Enable all core USB interrupts except start-of-frame. */
	reg = USBCTRL_INTEN_USB_ALL & ~USBCTRL_INTEN_USB_SOF;
	USBCTRL_WRITE4(sc, USBCTRL_INTEN_SET1, reg);

	/* Endpoint interrupts start masked; musbotg_ep_int_set() opens them. */
	USBCTRL_WRITE4(sc, USBCTRL_INTEN_CLR0, 0xffffffff);

	err = musbotg_init(&sc->sc_otg);
	if (!err)
		err = device_probe_and_attach(sc->sc_otg.sc_bus.bdev);

	if (err)
		goto error;

	/* poll VBUS one time */
	musbotg_vbus_poll(sc);

	return (0);

error:
	musbotg_detach(dev);
	return (ENXIO);
}
/*
 * Detach: detach and delete child devices first (a failing child detach
 * aborts the whole operation), then undo everything attach set up.
 *
 * Fix: the block contained unresolved diff markers ('+'/'-' lines); this
 * is the intended post-patch form using bus_generic_detach() instead of
 * device_delete_children(), honoring a child's refusal to detach.
 */
static int
musbotg_detach(device_t dev)
{
	struct musbotg_super_softc *sc = device_get_softc(dev);
	int error;

	/* during module unload there are lots of children leftover */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	if (sc->sc_otg.sc_irq_res && sc->sc_otg.sc_intr_hdl) {
		/*
		 * only call musbotg_uninit() after musbotg_init()
		 */
		musbotg_uninit(&sc->sc_otg);

		bus_teardown_intr(dev, sc->sc_otg.sc_irq_res,
		    sc->sc_otg.sc_intr_hdl);
		sc->sc_otg.sc_intr_hdl = NULL;
	}

	/* Release the DMA memory obtained in attach. */
	usb_bus_mem_free_all(&sc->sc_otg.sc_bus, NULL);

	/* Free resources if any */
	if (sc->sc_mem_res[0])
		bus_release_resources(dev, am335x_musbotg_mem_spec,
		    sc->sc_mem_res);

	if (sc->sc_otg.sc_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
		    sc->sc_otg.sc_irq_res);

	return (0);
}
/* Standard newbus glue; suspend/resume/shutdown pass through to the bus. */
static device_method_t musbotg_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, musbotg_probe),
	DEVMETHOD(device_attach, musbotg_attach),
	DEVMETHOD(device_detach, musbotg_detach),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	DEVMETHOD_END
};

static driver_t musbotg_driver = {
	.name = "musbotg",
	.methods = musbotg_methods,
	.size = sizeof(struct musbotg_super_softc),
};

/* Attaches below ti_sysc; depends on the CPPI 4.1 DMA glue and usb(4). */
DRIVER_MODULE(musbotg, ti_sysc, musbotg_driver, 0, 0);
MODULE_DEPEND(musbotg, ti_sysc, 1, 1, 1);
MODULE_DEPEND(musbotg, ti_am3359_cppi41, 1, 1, 1);
MODULE_DEPEND(usbss, usb, 1, 1, 1);
diff --git a/sys/arm/ti/usb/omap_ehci.c b/sys/arm/ti/usb/omap_ehci.c
index fee5f662963b..224c786bf9fa 100644
--- a/sys/arm/ti/usb/omap_ehci.c
+++ b/sys/arm/ti/usb/omap_ehci.c
@@ -1,464 +1,466 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2011
* Ben Gray .
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/* EHCI */
#define OMAP_USBHOST_HCCAPBASE 0x0000
#define OMAP_USBHOST_HCSPARAMS 0x0004
#define OMAP_USBHOST_HCCPARAMS 0x0008
#define OMAP_USBHOST_USBCMD 0x0010
#define OMAP_USBHOST_USBSTS 0x0014
#define OMAP_USBHOST_USBINTR 0x0018
#define OMAP_USBHOST_FRINDEX 0x001C
#define OMAP_USBHOST_CTRLDSSEGMENT 0x0020
#define OMAP_USBHOST_PERIODICLISTBASE 0x0024
#define OMAP_USBHOST_ASYNCLISTADDR 0x0028
#define OMAP_USBHOST_CONFIGFLAG 0x0050
#define OMAP_USBHOST_PORTSC(i) (0x0054 + (0x04 * (i)))
#define OMAP_USBHOST_INSNREG00 0x0090
#define OMAP_USBHOST_INSNREG01 0x0094
#define OMAP_USBHOST_INSNREG02 0x0098
#define OMAP_USBHOST_INSNREG03 0x009C
#define OMAP_USBHOST_INSNREG04 0x00A0
#define OMAP_USBHOST_INSNREG05_UTMI 0x00A4
#define OMAP_USBHOST_INSNREG05_ULPI 0x00A4
#define OMAP_USBHOST_INSNREG06 0x00A8
#define OMAP_USBHOST_INSNREG07 0x00AC
#define OMAP_USBHOST_INSNREG08 0x00B0
#define OMAP_USBHOST_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
#define OMAP_USBHOST_INSNREG05_ULPI_CONTROL_SHIFT 31
#define OMAP_USBHOST_INSNREG05_ULPI_PORTSEL_SHIFT 24
#define OMAP_USBHOST_INSNREG05_ULPI_OPSEL_SHIFT 22
#define OMAP_USBHOST_INSNREG05_ULPI_REGADD_SHIFT 16
#define OMAP_USBHOST_INSNREG05_ULPI_EXTREGADD_SHIFT 8
#define OMAP_USBHOST_INSNREG05_ULPI_WRDATA_SHIFT 0
#define ULPI_FUNC_CTRL_RESET (1 << 5)
/*-------------------------------------------------------------------------*/
/*
* Macros for Set and Clear
* See ULPI 1.1 specification to find the registers with Set and Clear offsets
*/
#define ULPI_SET(a) (a + 1)
#define ULPI_CLR(a) (a + 2)
/*-------------------------------------------------------------------------*/
/*
* Register Map
*/
#define ULPI_VENDOR_ID_LOW 0x00
#define ULPI_VENDOR_ID_HIGH 0x01
#define ULPI_PRODUCT_ID_LOW 0x02
#define ULPI_PRODUCT_ID_HIGH 0x03
#define ULPI_FUNC_CTRL 0x04
#define ULPI_IFC_CTRL 0x07
#define ULPI_OTG_CTRL 0x0a
#define ULPI_USB_INT_EN_RISE 0x0d
#define ULPI_USB_INT_EN_FALL 0x10
#define ULPI_USB_INT_STS 0x13
#define ULPI_USB_INT_LATCH 0x14
#define ULPI_DEBUG 0x15
#define ULPI_SCRATCH 0x16
#define OMAP_EHCI_HC_DEVSTR "TI OMAP USB 2.0 controller"
/*
 * Driver softc.  The shared ehci(4) softc must be the first member so
 * the generic EHCI code can use our softc as its own.
 */
struct omap_ehci_softc {
	ehci_softc_t base;	/* storage for EHCI code */
	device_t sc_dev;	/* this device, kept for diagnostics */
};

static device_attach_t omap_ehci_attach;
static device_detach_t omap_ehci_detach;
/**
* omap_ehci_read_4 - read a 32-bit value from the EHCI registers
* omap_ehci_write_4 - write a 32-bit value from the EHCI registers
* @sc: omap ehci device context
* @off: byte offset within the register set to read from
* @val: the value to write into the register
*
*
* LOCKING:
* None
*
* RETURNS:
* nothing in case of write function, if read function returns the value read.
*/
/* 32-bit accessors for the EHCI register window (base.sc_io_res). */
static inline uint32_t
omap_ehci_read_4(struct omap_ehci_softc *sc, bus_size_t off)
{
	return (bus_read_4(sc->base.sc_io_res, off));
}

static inline void
omap_ehci_write_4(struct omap_ehci_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->base.sc_io_res, off, val);
}
/**
* omap_ehci_soft_phy_reset - resets the phy using the reset command
* @isc: omap ehci device context
* @port: port to send the reset over
*
*
* LOCKING:
* none
*
* RETURNS:
* nothing
*/
static void
omap_ehci_soft_phy_reset(struct omap_ehci_softc *isc, unsigned int port)
{
	/* Roughly 100ms worth of ticks, but always at least one tick. */
	unsigned long timeout = (hz < 10) ? 1 : ((100 * hz) / 1000);
	uint32_t reg;

	/* Build an INSNREG05 ULPI command: write RESET to FUNC_CTRL_SET. */
	reg = ULPI_FUNC_CTRL_RESET
		/* FUNCTION_CTRL_SET register */
		| (ULPI_SET(ULPI_FUNC_CTRL) << OMAP_USBHOST_INSNREG05_ULPI_REGADD_SHIFT)
		/* Write */
		| (2 << OMAP_USBHOST_INSNREG05_ULPI_OPSEL_SHIFT)
		/* PORTn (register field is 1-based) */
		| ((port + 1) << OMAP_USBHOST_INSNREG05_ULPI_PORTSEL_SHIFT)
		/* start ULPI access*/
		| (1 << OMAP_USBHOST_INSNREG05_ULPI_CONTROL_SHIFT);

	/* NOTE(review): CONTROL_SHIFT is 31, so "1 << 31" on a signed int is
	 * formally undefined behavior; consider 1U << shift. */
	omap_ehci_write_4(isc, OMAP_USBHOST_INSNREG05_ULPI, reg);

	/* Wait for ULPI access completion (hardware clears the CONTROL bit) */
	while ((omap_ehci_read_4(isc, OMAP_USBHOST_INSNREG05_ULPI)
		& (1 << OMAP_USBHOST_INSNREG05_ULPI_CONTROL_SHIFT))) {
		/* Sleep for a tick */
		pause("USBPHY_RESET", 1);

		if (timeout-- == 0) {
			device_printf(isc->sc_dev, "PHY reset operation timed out\n");
			break;
		}
	}
}
/**
* omap_ehci_init - initialises the USB host EHCI controller
* @isc: omap ehci device context
*
* This initialisation routine is quite heavily based on the work done by the
* OMAP Linux team (for which I thank them very much). The init sequence is
* almost identical, diverging only for the FreeBSD specifics.
*
* LOCKING:
* none
*
* RETURNS:
* 0 on success, a negative error code on failure.
*/
static int
omap_ehci_init(struct omap_ehci_softc *isc)
{
	uint32_t reg = 0;
	int i;
	device_t uhh_dev;

	/* The parent omap_uhh device knows each port's PHY/TLL/HSIC mode. */
	uhh_dev = device_get_parent(isc->sc_dev);
	device_printf(isc->sc_dev, "Starting TI EHCI USB Controller\n");

	/* Set the interrupt threshold control, it controls the maximum rate at
	 * which the host controller issues interrupts. We set it to 1 microframe
	 * at startup - the default is 8 mircoframes (equates to 1ms).
	 */
	reg = omap_ehci_read_4(isc, OMAP_USBHOST_USBCMD);
	reg &= 0xff00ffff;	/* clear the ITC field (bits 23:16) */
	reg |= (1 << 16);
	omap_ehci_write_4(isc, OMAP_USBHOST_USBCMD, reg);

	/* Soft reset the PHY using PHY reset command over ULPI */
	for (i = 0; i < OMAP_HS_USB_PORTS; i++) {
		if (omap_usb_port_mode(uhh_dev, i) == EHCI_HCD_OMAP_MODE_PHY)
			omap_ehci_soft_phy_reset(isc, i);
	}

	return(0);
}
/**
 * omap_ehci_probe - checks whether this driver matches the device
 * @dev: the device being probed
 *
 * Matches the FDT "ti,ehci-omap" compatible on an enabled node and sets
 * the device description.
 *
 * LOCKING:
 * none
 *
 * RETURNS:
 * BUS_PROBE_DEFAULT on a match, ENXIO otherwise.
 */
static int
omap_ehci_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev) ||
	    !ofw_bus_is_compatible(dev, "ti,ehci-omap"))
		return (ENXIO);

	device_set_desc(dev, OMAP_EHCI_HC_DEVSTR);
	return (BUS_PROBE_DEFAULT);
}
/**
* omap_ehci_attach - driver entry point, sets up the ECHI controller/driver
* @dev: the new device handle
*
* Sets up bus spaces, interrupt handles, etc for the EHCI controller. It also
* parses the resource hints and calls omap_ehci_init() to initialise the
* H/W.
*
* LOCKING:
* none
*
* RETURNS:
* 0 on success or a positive error code on failure.
*/
static int
omap_ehci_attach(device_t dev)
{
	struct omap_ehci_softc *isc = device_get_softc(dev);
	ehci_softc_t *sc = &isc->base;
#ifdef SOC_OMAP4
	phandle_t root;
#endif
	int err;
	int rid;

#ifdef SOC_OMAP4
	/*
	 * If we're running a Pandaboard, run Pandaboard-specific
	 * init code.
	 */
	root = OF_finddevice("/");
	if (ofw_bus_node_is_compatible(root, "ti,omap4-panda"))
		pandaboard_usb_hub_init();
#endif

	/* initialise some bus fields */
	sc->sc_bus.parent = dev;
	sc->sc_bus.devices = sc->sc_devices;
	sc->sc_bus.devices_max = EHCI_MAX_DEVICES;
	sc->sc_bus.dma_bits = 32;

	sprintf(sc->sc_vendor, "Texas Instruments");

	/* save the device */
	isc->sc_dev = dev;

	/* get all DMA memory.  A failure here returns directly (nothing else
	 * has been set up yet), so the error path below may assume the bus
	 * memory exists. */
	if (usb_bus_mem_alloc_all(&sc->sc_bus, USB_GET_DMA_TAG(dev),
	    &ehci_iterate_hw_softc)) {
		return (ENOMEM);
	}

	/* Allocate resource for the EHCI register set */
	rid = 0;
	sc->sc_io_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->sc_io_res) {
		device_printf(dev, "Error: Could not map EHCI memory\n");
		goto error;
	}

	/* Request an interrupt resource */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "Error: could not allocate irq\n");
		goto error;
	}

	/* Add this device as a child of the USBus device */
	sc->sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
	if (!sc->sc_bus.bdev) {
		device_printf(dev, "Error: could not add USB device\n");
		goto error;
	}

	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);
	device_set_desc(sc->sc_bus.bdev, OMAP_EHCI_HC_DEVSTR);

	/* Initialise the ECHI registers */
	err = omap_ehci_init(isc);
	if (err) {
		device_printf(dev, "Error: could not setup OMAP EHCI, %d\n", err);
		goto error;
	}

	/* Set the tag and size of the register set in the EHCI context */
	sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
	sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
	sc->sc_io_size = rman_get_size(sc->sc_io_res);

	/* Setup the interrupt */
	err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, (driver_intr_t *)ehci_interrupt, sc, &sc->sc_intr_hdl);
	if (err) {
		device_printf(dev, "Error: could not setup irq, %d\n", err);
		sc->sc_intr_hdl = NULL;
		goto error;
	}

	/* Finally we are ready to kick off the ECHI host controller */
	err = ehci_init(sc);
	if (err == 0) {
		err = device_probe_and_attach(sc->sc_bus.bdev);
	}
	if (err) {
		device_printf(dev, "Error: USB init failed err=%d\n", err);
		goto error;
	}

	return (0);

error:
	/* omap_ehci_detach() is safe on a partially-attached device. */
	omap_ehci_detach(dev);
	return (ENXIO);
}
/**
* omap_ehci_detach - detach the device and cleanup the driver
* @dev: device handle
*
* Clean-up routine where everything initialised in omap_ehci_attach is
* freed and cleaned up. This function calls omap_ehci_fini() to shutdown
* the on-chip module.
*
* LOCKING:
* none
*
* RETURNS:
* Always returns 0 (success).
*/
/*
 * Detach the device and clean up everything omap_ehci_attach() set up.
 *
 * Fixes: (1) the block contained unresolved diff markers; this is the
 * intended post-patch form using bus_generic_detach(), which aborts if a
 * child refuses to detach.  (2) the DMA bus memory allocated with
 * usb_bus_mem_alloc_all() in attach was never released — free it here.
 */
static int
omap_ehci_detach(device_t dev)
{
	struct omap_ehci_softc *isc = device_get_softc(dev);
	ehci_softc_t *sc = &isc->base;
	int err;

	/* during module unload there are lots of children leftover */
	err = bus_generic_detach(dev);
	if (err != 0)
		return (err);

	/*
	 * disable interrupts that might have been switched on in ehci_init
	 */
	if (sc->sc_io_res) {
		EWRITE4(sc, EHCI_USBINTR, 0);
	}

	if (sc->sc_irq_res && sc->sc_intr_hdl) {
		/*
		 * only call ehci_detach() after ehci_init()
		 */
		ehci_detach(sc);

		err = bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intr_hdl);
		if (err)
			device_printf(dev, "Error: could not tear down irq, %d\n", err);
		sc->sc_intr_hdl = NULL;
	}

	/* Release the DMA memory obtained in attach (attach fails early if
	 * the allocation failed, so it always exists when we get here). */
	usb_bus_mem_free_all(&sc->sc_bus, &ehci_iterate_hw_softc);

	/* Free the resources stored in the base EHCI handler */
	if (sc->sc_irq_res) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
		sc->sc_irq_res = NULL;
	}
	if (sc->sc_io_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_io_res);
		sc->sc_io_res = NULL;
	}

	return (0);
}
/* Newbus glue for the EHCI instance that hangs off the omap_uhh module. */
static device_method_t ehci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, omap_ehci_probe),
	DEVMETHOD(device_attach, omap_ehci_attach),
	DEVMETHOD(device_detach, omap_ehci_detach),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),

	{0, 0}
};

static driver_t ehci_driver = {
	"ehci",
	ehci_methods,
	sizeof(struct omap_ehci_softc),
};

DRIVER_MODULE(omap_ehci, omap_uhh, ehci_driver, 0, 0);
diff --git a/sys/arm/ti/usb/omap_host.c b/sys/arm/ti/usb/omap_host.c
index b7c387c00601..c336a25eabf3 100644
--- a/sys/arm/ti/usb/omap_host.c
+++ b/sys/arm/ti/usb/omap_host.c
@@ -1,464 +1,467 @@
/*-
* Copyright (c) 2015 Oleksandr Tymoshenko
* Copyright (c) 2011 Ben Gray .
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/*
* USB Host Module
*/
/* UHH */
#define OMAP_USBHOST_UHH_REVISION 0x0000
#define OMAP_USBHOST_UHH_SYSCONFIG 0x0010
#define OMAP_USBHOST_UHH_SYSSTATUS 0x0014
#define OMAP_USBHOST_UHH_HOSTCONFIG 0x0040
#define OMAP_USBHOST_UHH_DEBUG_CSR 0x0044
/* UHH Register Set */
#define UHH_SYSCONFIG_MIDLEMODE_MASK (3UL << 12)
#define UHH_SYSCONFIG_MIDLEMODE_SMARTSTANDBY (2UL << 12)
#define UHH_SYSCONFIG_MIDLEMODE_NOSTANDBY (1UL << 12)
#define UHH_SYSCONFIG_MIDLEMODE_FORCESTANDBY (0UL << 12)
#define UHH_SYSCONFIG_CLOCKACTIVITY (1UL << 8)
#define UHH_SYSCONFIG_SIDLEMODE_MASK (3UL << 3)
#define UHH_SYSCONFIG_SIDLEMODE_SMARTIDLE (2UL << 3)
#define UHH_SYSCONFIG_SIDLEMODE_NOIDLE (1UL << 3)
#define UHH_SYSCONFIG_SIDLEMODE_FORCEIDLE (0UL << 3)
#define UHH_SYSCONFIG_ENAWAKEUP (1UL << 2)
#define UHH_SYSCONFIG_SOFTRESET (1UL << 1)
#define UHH_SYSCONFIG_AUTOIDLE (1UL << 0)
#define UHH_HOSTCONFIG_APP_START_CLK (1UL << 31)
#define UHH_HOSTCONFIG_P3_CONNECT_STATUS (1UL << 10)
#define UHH_HOSTCONFIG_P2_CONNECT_STATUS (1UL << 9)
#define UHH_HOSTCONFIG_P1_CONNECT_STATUS (1UL << 8)
#define UHH_HOSTCONFIG_ENA_INCR_ALIGN (1UL << 5)
#define UHH_HOSTCONFIG_ENA_INCR16 (1UL << 4)
#define UHH_HOSTCONFIG_ENA_INCR8 (1UL << 3)
#define UHH_HOSTCONFIG_ENA_INCR4 (1UL << 2)
#define UHH_HOSTCONFIG_AUTOPPD_ON_OVERCUR_EN (1UL << 1)
#define UHH_HOSTCONFIG_P1_ULPI_BYPASS (1UL << 0)
/* The following are on rev2 (OMAP44xx) of the EHCI only */
#define UHH_SYSCONFIG_IDLEMODE_MASK (3UL << 2)
#define UHH_SYSCONFIG_IDLEMODE_NOIDLE (1UL << 2)
#define UHH_SYSCONFIG_STANDBYMODE_MASK (3UL << 4)
#define UHH_SYSCONFIG_STANDBYMODE_NOSTDBY (1UL << 4)
#define UHH_HOSTCONFIG_P1_MODE_MASK (3UL << 16)
#define UHH_HOSTCONFIG_P1_MODE_ULPI_PHY (0UL << 16)
#define UHH_HOSTCONFIG_P1_MODE_UTMI_PHY (1UL << 16)
#define UHH_HOSTCONFIG_P1_MODE_HSIC (3UL << 16)
#define UHH_HOSTCONFIG_P2_MODE_MASK (3UL << 18)
#define UHH_HOSTCONFIG_P2_MODE_ULPI_PHY (0UL << 18)
#define UHH_HOSTCONFIG_P2_MODE_UTMI_PHY (1UL << 18)
#define UHH_HOSTCONFIG_P2_MODE_HSIC (3UL << 18)
/*
* Values of UHH_REVISION - Note: these are not given in the TRM but taken
* from the linux OMAP EHCI driver (thanks guys). It has been verified on
* a Panda and Beagle board.
*/
#define OMAP_UHH_REV1 0x00000010 /* OMAP3 */
#define OMAP_UHH_REV2 0x50700100 /* OMAP4 */
/*
 * UHH glue softc.  Embeds a simplebus softc because the UHH FDT node
 * parents the EHCI/OHCI/TLL child controllers.
 */
struct omap_uhh_softc {
	struct simplebus_softc simplebus_sc;
	device_t sc_dev;

	/* UHH register set */
	struct resource* uhh_mem_res;

	/* The revision of the HS USB HOST read from UHH_REVISION */
	uint32_t uhh_rev;

	/* The following details are provided by conf hints */
	int port_mode[3];	/* EHCI_HCD_OMAP_MODE_* per port */
};

static device_attach_t omap_uhh_attach;
static device_detach_t omap_uhh_detach;
/* 32-bit accessors for the UHH register window. */
static inline uint32_t
omap_uhh_read_4(struct omap_uhh_softc *sc, bus_size_t off)
{
	return bus_read_4(sc->uhh_mem_res, off);
}

static inline void
omap_uhh_write_4(struct omap_uhh_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->uhh_mem_res, off, val);
}
/*
 * One-time UHH bring-up: enable the module clocks, select the correct
 * idle/standby mode for the detected revision, program burst/bypass and
 * per-port mode settings, then enable UTMI on any TLL-mode ports.
 */
static int
omap_uhh_init(struct omap_uhh_softc *isc)
{
	uint8_t tll_ch_mask;
	uint32_t reg;
	int i;

	/* Enable Clocks for high speed USBHOST */
	ti_sysc_clock_enable(device_get_parent(isc->sc_dev));

	/* Read the UHH revision (distinguishes OMAP3 REV1 from OMAP4 REV2) */
	isc->uhh_rev = omap_uhh_read_4(isc, OMAP_USBHOST_UHH_REVISION);
	device_printf(isc->sc_dev, "UHH revision 0x%08x\n", isc->uhh_rev);

	/* FIXME */
#if 0
	if (isc->uhh_rev == OMAP_UHH_REV2) {
		/* For OMAP44xx devices you have to enable the per-port clocks:
		 *  PHY_MODE  - External ULPI clock
		 *  TTL_MODE  - Internal UTMI clock
		 *  HSIC_MODE - Internal 480Mhz and 60Mhz clocks
		 */
		switch(isc->port_mode[0]) {
		case EHCI_HCD_OMAP_MODE_UNKNOWN:
			break;
		case EHCI_HCD_OMAP_MODE_PHY:
			if (ti_prcm_clk_set_source(USBP1_PHY_CLK, EXT_CLK))
				device_printf(isc->sc_dev,
				    "failed to set clock source for port 0\n");
			if (ti_prcm_clk_enable(USBP1_PHY_CLK))
				device_printf(isc->sc_dev,
				    "failed to set clock USBP1_PHY_CLK source for port 0\n");
			break;
		case EHCI_HCD_OMAP_MODE_TLL:
			if (ti_prcm_clk_enable(USBP1_UTMI_CLK))
				device_printf(isc->sc_dev,
				    "failed to set clock USBP1_PHY_CLK source for port 0\n");
			break;
		case EHCI_HCD_OMAP_MODE_HSIC:
			if (ti_prcm_clk_enable(USBP1_HSIC_CLK))
				device_printf(isc->sc_dev,
				    "failed to set clock USBP1_PHY_CLK source for port 0\n");
			break;
		default:
			device_printf(isc->sc_dev, "unknown port mode %d for port 0\n", isc->port_mode[0]);
		}
		switch(isc->port_mode[1]) {
		case EHCI_HCD_OMAP_MODE_UNKNOWN:
			break;
		case EHCI_HCD_OMAP_MODE_PHY:
			if (ti_prcm_clk_set_source(USBP2_PHY_CLK, EXT_CLK))
				device_printf(isc->sc_dev,
				    "failed to set clock source for port 0\n");
			if (ti_prcm_clk_enable(USBP2_PHY_CLK))
				device_printf(isc->sc_dev,
				    "failed to set clock USBP2_PHY_CLK source for port 1\n");
			break;
		case EHCI_HCD_OMAP_MODE_TLL:
			if (ti_prcm_clk_enable(USBP2_UTMI_CLK))
				device_printf(isc->sc_dev,
				    "failed to set clock USBP2_UTMI_CLK source for port 1\n");
			break;
		case EHCI_HCD_OMAP_MODE_HSIC:
			if (ti_prcm_clk_enable(USBP2_HSIC_CLK))
				device_printf(isc->sc_dev,
				    "failed to set clock USBP2_HSIC_CLK source for port 1\n");
			break;
		default:
			device_printf(isc->sc_dev, "unknown port mode %d for port 1\n", isc->port_mode[1]);
		}
	}
#endif

	/* Put UHH in SmartIdle/SmartStandby mode */
	reg = omap_uhh_read_4(isc, OMAP_USBHOST_UHH_SYSCONFIG);
	if (isc->uhh_rev == OMAP_UHH_REV1) {
		reg &= ~(UHH_SYSCONFIG_SIDLEMODE_MASK |
		    UHH_SYSCONFIG_MIDLEMODE_MASK);
		reg |= (UHH_SYSCONFIG_ENAWAKEUP |
		    UHH_SYSCONFIG_AUTOIDLE |
		    UHH_SYSCONFIG_CLOCKACTIVITY |
		    UHH_SYSCONFIG_SIDLEMODE_SMARTIDLE |
		    UHH_SYSCONFIG_MIDLEMODE_SMARTSTANDBY);
	} else if (isc->uhh_rev == OMAP_UHH_REV2) {
		/* REV2 (OMAP4) uses the newer idle/standby mode fields. */
		reg &= ~UHH_SYSCONFIG_IDLEMODE_MASK;
		reg |= UHH_SYSCONFIG_IDLEMODE_NOIDLE;
		reg &= ~UHH_SYSCONFIG_STANDBYMODE_MASK;
		reg |= UHH_SYSCONFIG_STANDBYMODE_NOSTDBY;
	}
	omap_uhh_write_4(isc, OMAP_USBHOST_UHH_SYSCONFIG, reg);
	device_printf(isc->sc_dev, "OMAP_UHH_SYSCONFIG: 0x%08x\n", reg);

	reg = omap_uhh_read_4(isc, OMAP_USBHOST_UHH_HOSTCONFIG);

	/* Setup ULPI bypass and burst configurations */
	reg |= (UHH_HOSTCONFIG_ENA_INCR4 |
	    UHH_HOSTCONFIG_ENA_INCR8 |
	    UHH_HOSTCONFIG_ENA_INCR16);
	reg &= ~UHH_HOSTCONFIG_ENA_INCR_ALIGN;

	if (isc->uhh_rev == OMAP_UHH_REV1) {
		/* Ports without a configured mode have connect status cleared. */
		if (isc->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
			reg &= ~UHH_HOSTCONFIG_P1_CONNECT_STATUS;
		if (isc->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
			reg &= ~UHH_HOSTCONFIG_P2_CONNECT_STATUS;
		if (isc->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
			reg &= ~UHH_HOSTCONFIG_P3_CONNECT_STATUS;

		/* Bypass the TLL module for PHY mode operation */
		if ((isc->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
		    (isc->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
		    (isc->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
			reg &= ~UHH_HOSTCONFIG_P1_ULPI_BYPASS;
		else
			reg |= UHH_HOSTCONFIG_P1_ULPI_BYPASS;
	} else if (isc->uhh_rev == OMAP_UHH_REV2) {
		reg |= UHH_HOSTCONFIG_APP_START_CLK;

		/* Clear port mode fields for PHY mode*/
		reg &= ~UHH_HOSTCONFIG_P1_MODE_MASK;
		reg &= ~UHH_HOSTCONFIG_P2_MODE_MASK;

		if (isc->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
			reg |= UHH_HOSTCONFIG_P1_MODE_UTMI_PHY;
		else if (isc->port_mode[0] == EHCI_HCD_OMAP_MODE_HSIC)
			reg |= UHH_HOSTCONFIG_P1_MODE_HSIC;

		if (isc->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
			reg |= UHH_HOSTCONFIG_P2_MODE_UTMI_PHY;
		else if (isc->port_mode[1] == EHCI_HCD_OMAP_MODE_HSIC)
			reg |= UHH_HOSTCONFIG_P2_MODE_HSIC;
	}

	omap_uhh_write_4(isc, OMAP_USBHOST_UHH_HOSTCONFIG, reg);
	device_printf(isc->sc_dev, "UHH setup done, uhh_hostconfig=0x%08x\n", reg);

	/* I found the code and comments in the Linux EHCI driver - thanks guys :)
	 *
	 * "An undocumented "feature" in the OMAP3 EHCI controller, causes suspended
	 * ports to be taken out of suspend when the USBCMD.Run/Stop bit is cleared
	 * (for example when we do omap_uhh_bus_suspend). This breaks suspend-resume if
	 * the root-hub is allowed to suspend. Writing 1 to this undocumented
	 * register bit disables this feature and restores normal behavior."
	 */
#if 0
	omap_uhh_write_4(isc, OMAP_USBHOST_INSNREG04,
	    OMAP_USBHOST_INSNREG04_DISABLE_UNSUSPEND);
#endif

	/* Collect a bitmask of TLL-mode ports and switch them to UTMI. */
	tll_ch_mask = 0;
	for (i = 0; i < OMAP_HS_USB_PORTS; i++) {
		if (isc->port_mode[i] == EHCI_HCD_OMAP_MODE_TLL)
			tll_ch_mask |= (1 << i);
	}
	if (tll_ch_mask)
		omap_tll_utmi_enable(tll_ch_mask);

	return(0);
}
/**
 * omap_uhh_fini - shutdown the USB host module
 * @isc: omap uhh device context
 *
 * Soft-resets the UHH, OHCI and EHCI modules and then gates their
 * functional and interface clocks.
 *
 * LOCKING:
 * none
 *
 * RETURNS:
 * nothing (void)
 */
static void
omap_uhh_fini(struct omap_uhh_softc *isc)
{
	unsigned long timeout;

	device_printf(isc->sc_dev, "Stopping TI EHCI USB Controller\n");

	/* Set the timeout: ~100ms worth of ticks, minimum one tick */
	if (hz < 10)
		timeout = 1;
	else
		timeout = (100 * hz) / 1000;

	/* Reset the UHH, OHCI and EHCI modules */
	omap_uhh_write_4(isc, OMAP_USBHOST_UHH_SYSCONFIG, 0x0002);
	/* Poll SYSSTATUS until any of the three reset-done bits rises. */
	while ((omap_uhh_read_4(isc, OMAP_USBHOST_UHH_SYSSTATUS) & 0x07) == 0x00) {
		/* Sleep for a tick */
		pause("USBRESET", 1);

		if (timeout-- == 0) {
			device_printf(isc->sc_dev, "operation timed out\n");
			break;
		}
	}

	/* Disable functional and interface clocks for the TLL and HOST modules */
	ti_sysc_clock_disable(device_get_parent(isc->sc_dev));

	device_printf(isc->sc_dev, "Clock to USB host has been disabled\n");
}
/*
 * Return the configured EHCI_HCD_OMAP_MODE_* for the given port of a
 * UHH device, or -1 when the port index is out of range.
 */
int
omap_usb_port_mode(device_t dev, int port)
{
	struct omap_uhh_softc *isc = device_get_softc(dev);

	if (port < 0 || port >= OMAP_HS_USB_PORTS)
		return (-1);

	return (isc->port_mode[port]);
}
/* Newbus probe: match "ti,usbhs-host" on an enabled FDT node. */
static int
omap_uhh_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev) ||
	    !ofw_bus_is_compatible(dev, "ti,usbhs-host"))
		return (ENXIO);

	device_set_desc(dev, "TI OMAP USB 2.0 Host module");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Attach the UHH host module: map its registers, read each port's mode
 * from the FDT, program the host configuration, then enumerate the child
 * controllers (EHCI/OHCI/TLL) as a simplebus.
 *
 * Fix: the string returned by OF_getprop_alloc() was never freed, leaking
 * one allocation per configured port on every attach.
 */
static int
omap_uhh_attach(device_t dev)
{
	struct omap_uhh_softc *isc = device_get_softc(dev);
	int err;
	int rid;
	int i;
	phandle_t node;
	char propname[16];
	char *mode;

	/* save the device */
	isc->sc_dev = dev;

	/* Allocate resource for the UHH register set */
	rid = 0;
	isc->uhh_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!isc->uhh_mem_res) {
		device_printf(dev, "Error: Could not map UHH memory\n");
		goto error;
	}

	node = ofw_bus_get_node(dev);
	if (node == -1)
		goto error;

	/* Get port modes from FDT ("portN-mode" properties, 1-based) */
	for (i = 0; i < OMAP_HS_USB_PORTS; i++) {
		isc->port_mode[i] = EHCI_HCD_OMAP_MODE_UNKNOWN;
		snprintf(propname, sizeof(propname),
		    "port%d-mode", i+1);

		if (OF_getprop_alloc(node, propname, (void**)&mode) <= 0)
			continue;
		if (strcmp(mode, "ehci-phy") == 0)
			isc->port_mode[i] = EHCI_HCD_OMAP_MODE_PHY;
		else if (strcmp(mode, "ehci-tll") == 0)
			isc->port_mode[i] = EHCI_HCD_OMAP_MODE_TLL;
		else if (strcmp(mode, "ehci-hsic") == 0)
			isc->port_mode[i] = EHCI_HCD_OMAP_MODE_HSIC;
		/* OF_getprop_alloc() handed us a copy; don't leak it. */
		OF_prop_free(mode);
	}

	/* Initialise the ECHI registers */
	err = omap_uhh_init(isc);
	if (err) {
		device_printf(dev, "Error: could not setup OMAP EHCI, %d\n", err);
		goto error;
	}

	simplebus_init(dev, node);

	/*
	 * Allow devices to identify.
	 */
	bus_identify_children(dev);

	/*
	 * Now walk the OFW tree and attach top-level devices.
	 */
	for (node = OF_child(node); node > 0; node = OF_peer(node))
		simplebus_add_device(dev, node, 0, NULL, -1, NULL);

	bus_attach_children(dev);
	return (0);

error:
	omap_uhh_detach(dev);
	return (ENXIO);
}
/*
 * device_detach: detach/delete the children, release the UHH register
 * resource and shut the controller down.  The hunk below converts
 * device_delete_children() to bus_generic_detach(), which detaches the
 * children first and may veto the detach by returning an error.
 */
static int
omap_uhh_detach(device_t dev)
{
	struct omap_uhh_softc *isc = device_get_softc(dev);
+	int error;

	/* during module unload there are lots of children leftover */
-	device_delete_children(dev);
+	error = bus_generic_detach(dev);
+	if (error != 0)
+		return (error);

	if (isc->uhh_mem_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0, isc->uhh_mem_res);
		isc->uhh_mem_res = NULL;
	}

	omap_uhh_fini(isc);

	return (0);
}
/* device_t method table for the OMAP USB host (UHH) driver. */
static device_method_t omap_uhh_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, omap_uhh_probe),
	DEVMETHOD(device_attach, omap_uhh_attach),
	DEVMETHOD(device_detach, omap_uhh_detach),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),
	DEVMETHOD_END
};

/* Subclass of simplebus: FDT children enumerated in attach live below us. */
DEFINE_CLASS_1(omap_uhh, omap_uhh_driver, omap_uhh_methods,
    sizeof(struct omap_uhh_softc), simplebus_driver);
DRIVER_MODULE(omap_uhh, simplebus, omap_uhh_driver, 0, 0);
diff --git a/sys/arm/xilinx/zy7_ehci.c b/sys/arm/xilinx/zy7_ehci.c
index f2e1d8a9ec2b..545e2a9bce16 100644
--- a/sys/arm/xilinx/zy7_ehci.c
+++ b/sys/arm/xilinx/zy7_ehci.c
@@ -1,365 +1,368 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2012-2013 Thomas Skibo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* A host-controller driver for Zynq-7000's USB OTG controller.
*
* Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
* (v1.4) November 16, 2012. Xilinx doc UG585. Ch. 15 covers the USB
* controller and register definitions are in appendix B.34.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/* Register definitions. */
#define ZY7_USB_ID 0x0000
#define ZY7_USB_HWGENERAL 0x0004
#define ZY7_USB_HWHOST 0x0008
#define ZY7_USB_HWDEVICE 0x000c
#define ZY7_USB_HWTXBUF 0x0010
#define ZY7_USB_HWRXBUF 0x0014
#define ZY7_USB_GPTIMER0LD 0x0080
#define ZY7_USB_GPTIMER0CTRL 0x0084
#define ZY7_USB_GPTIMER1LD 0x0088
#define ZY7_USB_GPTIMER1CTRL 0x008c
#define ZY7_USB_SBUSCFG 0x0090
#define ZY7_USB_CAPLENGTH_HCIVERSION 0x0100
#define ZY7_USB_HCSPARAMS 0x0104
#define ZY7_USB_HCCPARAMS 0x0108
#define ZY7_USB_DCIVERSION 0x0120
#define ZY7_USB_DCCPARAMS 0x0124
#define ZY7_USB_USBCMD 0x0140
#define ZY7_USB_USBSTS 0x0144
#define ZY7_USB_USBINTR 0x0148
#define ZY7_USB_FRINDEX 0x014c
#define ZY7_USB_PERIODICLISTBASE_DEICEADDR 0x0154
#define ZY7_USB_ASYNCLISTADDR_ENDPOINTLISTADDR 0x0158
#define ZY7_USB_TTCTRL 0x015c
#define ZY7_USB_BURSTSIZE 0x0160
#define ZY7_USB_TXFILLTUNING 0x0164
#define ZY7_USB_TXFILLTUNING_TXFIFOTHRES_SHFT 16
#define ZY7_USB_TXFILLTUNING_TXFIFOTHRES_MASK (0x3f<<16)
#define ZY7_USB_TXTFILLTUNING 0x0168
#define ZY7_USB_IC_USB 0x016c
#define ZY7_USB_ULPI_VIEWPORT 0x0170
#define ZY7_USB_ULPI_VIEWPORT_WU (1<<31)
#define ZY7_USB_ULPI_VIEWPORT_RUN (1<<30)
#define ZY7_USB_ULPI_VIEWPORT_RW (1<<29)
#define ZY7_USB_ULPI_VIEWPORT_SS (1<<27)
#define ZY7_USB_ULPI_VIEWPORT_PORT_MASK (7<<24)
#define ZY7_USB_ULPI_VIEWPORT_PORT_SHIFT 24
#define ZY7_USB_ULPI_VIEWPORT_ADDR_MASK (0xff<<16)
#define ZY7_USB_ULPI_VIEWPORT_ADDR_SHIFT 16
#define ZY7_USB_ULPI_VIEWPORT_DATARD_MASK (0xff<<8)
#define ZY7_USB_ULPI_VIEWPORT_DATARD_SHIFT 8
#define ZY7_USB_ULPI_VIEWPORT_DATAWR_MASK (0xff<<0)
#define ZY7_USB_ULPI_VIEWPORT_DATAWR_SHIFT 0
#define ZY7_USB_ENDPTNAK 0x0178
#define ZY7_USB_ENDPTNAKEN 0x017c
#define ZY7_USB_CONFIGFLAG 0x0180
#define ZY7_USB_PORTSC(n) (0x0180+4*(n))
#define ZY7_USB_PORTSC_PTS_MASK (3<<30)
#define ZY7_USB_PORTSC_PTS_SHIFT 30
#define ZY7_USB_PORTSC_PTS_UTMI (0<<30)
#define ZY7_USB_PORTSC_PTS_ULPI (2<<30)
#define ZY7_USB_PORTSC_PTS_SERIAL (3<<30)
#define ZY7_USB_PORTSC_PTW (1<<28)
#define ZY7_USB_PORTSC_PTS2 (1<<25)
#define ZY7_USB_OTGSC 0x01a4
#define ZY7_USB_USBMODE 0x01a8
#define ZY7_USB_ENDPTSETUPSTAT 0x01ac
#define ZY7_USB_ENDPTPRIME 0x01b0
#define ZY7_USB_ENDPTFLUSH 0x01b4
#define ZY7_USB_ENDPTSTAT 0x01b8
#define ZY7_USB_ENDPTCOMPLETE 0x01bc
#define ZY7_USB_ENDPTCTRL(n) (0x01c0+4*(n))
#define EHCI_REG_OFFSET ZY7_USB_CAPLENGTH_HCIVERSION
#define EHCI_REG_SIZE 0x100
/*
 * Vendor post-reset hook: after a controller reset the USBMODE register
 * must be put back into host mode.
 */
static void
zy7_ehci_post_reset(struct ehci_softc *ehci_softc)
{
	uint32_t mode;

	/* Force HOST mode */
	mode = EOREAD4(ehci_softc, EHCI_USBMODE_NOLPM);
	mode = (mode & ~EHCI_UM_CM) | EHCI_UM_CM_HOST;
	EOWRITE4(ehci_softc, EHCI_USBMODE_NOLPM, mode);
}
/*
 * Apply PHY-related FDT options: select the PHY interface named by the
 * "phy_type" property and, when "phy_vbus_ext" is present (even empty),
 * tell the ULPI PHY that VBUS is supplied externally.
 *
 * Returns 0 on success, -1 if the ULPI viewport write never completes.
 */
static int
zy7_phy_config(device_t dev, bus_space_tag_t io_tag, bus_space_handle_t bsh)
{
	phandle_t node;
	char buf[64];
	uint32_t reg;
	int countdown;

	node = ofw_bus_get_node(dev);

	if (OF_getprop(node, "phy_type", buf, sizeof(buf)) > 0) {
		reg = bus_space_read_4(io_tag, bsh, ZY7_USB_PORTSC(1));
		reg &= ~(ZY7_USB_PORTSC_PTS_MASK | ZY7_USB_PORTSC_PTW |
		    ZY7_USB_PORTSC_PTS2);
		if (strcmp(buf, "ulpi") == 0)
			reg |= ZY7_USB_PORTSC_PTS_ULPI;
		else if (strcmp(buf, "utmi") == 0)
			reg |= ZY7_USB_PORTSC_PTS_UTMI;
		else if (strcmp(buf, "utmi-wide") == 0)
			reg |= ZY7_USB_PORTSC_PTS_UTMI | ZY7_USB_PORTSC_PTW;
		else if (strcmp(buf, "serial") == 0)
			reg |= ZY7_USB_PORTSC_PTS_SERIAL;
		bus_space_write_4(io_tag, bsh, ZY7_USB_PORTSC(1), reg);
	}

	/* Note: ">= 0" on purpose — a zero-length property still counts. */
	if (OF_getprop(node, "phy_vbus_ext", buf, sizeof(buf)) >= 0) {
		/* Tell PHY that VBUS is supplied externally. */
		bus_space_write_4(io_tag, bsh, ZY7_USB_ULPI_VIEWPORT,
		    ZY7_USB_ULPI_VIEWPORT_RUN |
		    ZY7_USB_ULPI_VIEWPORT_RW |
		    (0 << ZY7_USB_ULPI_VIEWPORT_PORT_SHIFT) |
		    (0x0b << ZY7_USB_ULPI_VIEWPORT_ADDR_SHIFT) |
		    (0x60 << ZY7_USB_ULPI_VIEWPORT_DATAWR_SHIFT)
		    );

		/* Wait for the RUN bit to self-clear (up to ~100us). */
		for (countdown = 100; ; countdown--) {
			if ((bus_space_read_4(io_tag, bsh,
			    ZY7_USB_ULPI_VIEWPORT) &
			    ZY7_USB_ULPI_VIEWPORT_RUN) == 0)
				break;
			if (countdown <= 0)
				return (-1);
			DELAY(1);
		}
	}

	return (0);
}
/*
 * device_probe: accept only enabled nodes compatible with "xlnx,zy7_ehci".
 */
static int
zy7_ehci_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev) ||
	    !ofw_bus_is_compatible(dev, "xlnx,zy7_ehci"))
		return (ENXIO);

	device_set_desc(dev, "Zynq-7000 EHCI USB 2.0 controller");
	return (0);
}
static int zy7_ehci_detach(device_t dev);
/*
 * device_attach: allocate DMA memory and bus resources, add the "usbus"
 * child, wire the interrupt, apply Zynq-specific customization (FIFO
 * threshold, PHY options) and initialise the generic EHCI layer.
 *
 * All failure paths clean up through zy7_ehci_detach().
 */
static int
zy7_ehci_attach(device_t dev)
{
	ehci_softc_t *sc = device_get_softc(dev);
	bus_space_handle_t bsh;
	int err, rid;

	/* initialize some bus fields */
	sc->sc_bus.parent = dev;
	sc->sc_bus.devices = sc->sc_devices;
	sc->sc_bus.devices_max = EHCI_MAX_DEVICES;
	sc->sc_bus.dma_bits = 32;

	/* get all DMA memory */
	if (usb_bus_mem_alloc_all(&sc->sc_bus,
	    USB_GET_DMA_TAG(dev), &ehci_iterate_hw_softc))
		return (ENOMEM);

	/* Allocate memory. */
	rid = 0;
	sc->sc_io_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->sc_io_res == NULL) {
		/* Message previously lacked its terminating newline. */
		device_printf(dev, "Can't allocate memory\n");
		zy7_ehci_detach(dev);
		return (ENOMEM);
	}
	sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
	bsh = rman_get_bushandle(sc->sc_io_res);
	sc->sc_io_size = EHCI_REG_SIZE;

	/* EHCI registers start at a fixed offset inside the OTG block. */
	if (bus_space_subregion(sc->sc_io_tag, bsh, EHCI_REG_OFFSET,
	    sc->sc_io_size, &sc->sc_io_hdl) != 0)
		panic("%s: unable to subregion USB host registers",
		    device_get_name(dev));

	/* Allocate IRQ. */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "Can't allocate IRQ\n");
		zy7_ehci_detach(dev);
		return (ENOMEM);
	}

	/* Add USB device */
	sc->sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
	if (!sc->sc_bus.bdev) {
		device_printf(dev, "Could not add USB device\n");
		zy7_ehci_detach(dev);
		return (ENXIO);
	}
	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);
	device_set_desc(sc->sc_bus.bdev, "Zynq-7000 ehci USB 2.0 controller");

	strcpy(sc->sc_vendor, "Xilinx"); /* or IP vendor? */

	/* Activate the interrupt */
	err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, (driver_intr_t *)ehci_interrupt, sc,
	    &sc->sc_intr_hdl);
	if (err) {
		device_printf(dev, "Cannot setup IRQ\n");
		zy7_ehci_detach(dev);
		return (err);
	}

	/* Customization. */
	sc->sc_flags |= EHCI_SCFLG_TT | EHCI_SCFLG_NORESTERM;
	sc->sc_vendor_post_reset = zy7_ehci_post_reset;
	sc->sc_vendor_get_port_speed = ehci_get_port_speed_portsc;

	/* Modify FIFO burst threshold from 2 to 8. */
	bus_space_write_4(sc->sc_io_tag, bsh,
	    ZY7_USB_TXFILLTUNING,
	    8 << ZY7_USB_TXFILLTUNING_TXFIFOTHRES_SHFT);

	/* Handle PHY options. */
	if (zy7_phy_config(dev, sc->sc_io_tag, bsh) < 0) {
		device_printf(dev, "Cannot config phy!\n");
		zy7_ehci_detach(dev);
		return (EIO);
	}

	/* Init ehci. */
	err = ehci_init(sc);
	if (!err) {
		sc->sc_flags |= EHCI_SCFLG_DONEINIT;
		err = device_probe_and_attach(sc->sc_bus.bdev);
	}
	if (err) {
		device_printf(dev, "USB init failed err=%d\n", err);
		zy7_ehci_detach(dev);
		return (err);
	}

	return (0);
}
/*
 * device_detach: detach children, tear down the EHCI layer if it was
 * initialised, then release IRQ, memory and DMA resources.  The hunk
 * below converts device_delete_children() to bus_generic_detach(), which
 * detaches first and may fail, so its result is checked.
 */
static int
zy7_ehci_detach(device_t dev)
{
	ehci_softc_t *sc = device_get_softc(dev);
+	int error;

	/* during module unload there are lots of children leftover */
-	device_delete_children(dev);
+	error = bus_generic_detach(dev);
+	if (error != 0)
+		return (error);

	/* Only tear the host controller down if init actually completed. */
	if ((sc->sc_flags & EHCI_SCFLG_DONEINIT) != 0) {
		ehci_detach(sc);
		sc->sc_flags &= ~EHCI_SCFLG_DONEINIT;
	}

	if (sc->sc_irq_res) {
		if (sc->sc_intr_hdl != NULL)
			bus_teardown_intr(dev, sc->sc_irq_res,
			    sc->sc_intr_hdl);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
	}

	if (sc->sc_io_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_io_res), sc->sc_io_res);
	usb_bus_mem_free_all(&sc->sc_bus, &ehci_iterate_hw_softc);

	return (0);
}
/* device_t method table for the Zynq-7000 EHCI controller. */
static device_method_t ehci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, zy7_ehci_probe),
	DEVMETHOD(device_attach, zy7_ehci_attach),
	DEVMETHOD(device_detach, zy7_ehci_detach),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),

	DEVMETHOD_END
};

static driver_t ehci_driver = {
	"ehci",
	ehci_methods,
	sizeof(struct ehci_softc),
};

/* Attaches under simplebus; requires the usb module. */
DRIVER_MODULE(zy7_ehci, simplebus, ehci_driver, NULL, NULL);
MODULE_DEPEND(zy7_ehci, usb, 1, 1, 1);
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index d64ec8caa13f..d5ce503f62ee 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -1,2909 +1,2911 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2009-2012 Alexander Motin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "ahci.h"
#include
#include
#include
#include
#include
/* local prototypes */
static void ahci_intr(void *data);
static void ahci_intr_one(void *data);
static void ahci_intr_one_edge(void *data);
static int ahci_ch_init(device_t dev);
static int ahci_ch_deinit(device_t dev);
static int ahci_ch_suspend(device_t dev);
static int ahci_ch_resume(device_t dev);
static void ahci_ch_pm(void *arg);
static void ahci_ch_intr(void *arg);
static void ahci_ch_intr_direct(void *arg);
static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus);
static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb);
static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
static void ahci_execute_transaction(struct ahci_slot *slot);
static void ahci_timeout(void *arg);
static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et);
static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag);
static void ahci_dmainit(device_t dev);
static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static void ahci_dmafini(device_t dev);
static void ahci_slotsalloc(device_t dev);
static void ahci_slotsfree(device_t dev);
static void ahci_reset(struct ahci_channel *ch);
static void ahci_start(struct ahci_channel *ch, int fbs);
static void ahci_stop(struct ahci_channel *ch);
static void ahci_clo(struct ahci_channel *ch);
static void ahci_start_fr(struct ahci_channel *ch);
static void ahci_stop_fr(struct ahci_channel *ch);
static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr);
static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val);
static int ahci_sata_connect(struct ahci_channel *ch);
static int ahci_sata_phy_reset(struct ahci_channel *ch);
static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0);
static void ahci_issue_recovery(struct ahci_channel *ch);
static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb);
static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb);
static void ahciaction(struct cam_sim *sim, union ccb *ccb);
static void ahcipoll(struct cam_sim *sim);
static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers");
#define recovery_type spriv_field0
#define RECOVERY_NONE 0
#define RECOVERY_READ_LOG 1
#define RECOVERY_REQUEST_SENSE 2
#define recovery_slot spriv_field1
/*
 * Return the SControl DET value to program: the caller's value normally,
 * or DET_DISABLE when the channel's PHY has been administratively disabled.
 */
static uint32_t
ahci_ch_detval(struct ahci_channel *ch, uint32_t val)
{
	if (ch->disablephy)
		return (ATA_SC_DET_DISABLE);
	return (val);
}
/*
 * Program controller-global interrupt settings: clear pending port
 * interrupt status, optionally enable command-completion coalescing
 * (CCC), then turn on global interrupt delivery.  Always returns 0.
 */
int
ahci_ctlr_setup(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);

	/* Clear interrupts */
	ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS));
	/* Configure CCC */
	if (ctlr->ccc) {
		/* Coalesce over every implemented port (mirror PI). */
		ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI));
		/* ctlr->ccc is the timeout in ms; fire after 4 commands. */
		ATA_OUTL(ctlr->r_mem, AHCI_CCCC,
		    (ctlr->ccc << AHCI_CCCC_TV_SHIFT) |
		    (4 << AHCI_CCCC_CC_SHIFT) |
		    AHCI_CCCC_EN);
		/* Read back which interrupt vector the HBA assigned to CCC. */
		ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) &
		    AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT;
		if (bootverbose) {
			device_printf(dev,
			    "CCC with %dms/4cmd enabled on vector %d\n",
			    ctlr->ccc, ctlr->cccv);
		}
	}
	/* Enable AHCI interrupts */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC,
	    ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE);
	return (0);
}
/*
 * Perform the AHCI 1.2+ BIOS/OS handoff (when the HBA advertises BOH)
 * and a full controller reset, then re-enable AHCI mode.
 *
 * Returns 0 on success, ENXIO if the reset bit never self-clears.
 */
int
ahci_ctlr_reset(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	uint32_t v;
	int timeout;

	/* BIOS/OS Handoff */
	if ((ATA_INL(ctlr->r_mem, AHCI_VS) >= 0x00010200) &&
	    (ATA_INL(ctlr->r_mem, AHCI_CAP2) & AHCI_CAP2_BOH) &&
	    ((v = ATA_INL(ctlr->r_mem, AHCI_BOHC)) & AHCI_BOHC_OOS) == 0) {
		/* Request OS ownership. */
		ATA_OUTL(ctlr->r_mem, AHCI_BOHC, v | AHCI_BOHC_OOS);

		/* Wait up to 2s for BIOS ownership release. */
		for (timeout = 0; timeout < 80; timeout++) {
			DELAY(25000);
			v = ATA_INL(ctlr->r_mem, AHCI_BOHC);
			if ((v & AHCI_BOHC_BOS) == 0)
				break;
			/* Also stop waiting once BIOS is no longer busy. */
			if ((v & AHCI_BOHC_BB) == 0)
				break;
		}
	}

	/* Enable AHCI mode */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);

	/* Reset AHCI controller */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR);
	/* HR self-clears when the reset completes; poll up to 1s. */
	for (timeout = 1000; timeout > 0; timeout--) {
		DELAY(1000);
		if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0)
			break;
	}
	if (timeout == 0) {
		device_printf(dev, "AHCI controller reset failure\n");
		return (ENXIO);
	}

	/* Reenable AHCI mode */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);

	if (ctlr->quirks & AHCI_Q_RESTORE_CAP) {
		/*
		 * Restore capability field.
		 * This is write to a read-only register to restore its state.
		 * On fully standard-compliant hardware this is not needed and
		 * this operation shall not take place. See ahci_pci.c for
		 * platforms using this quirk.
		 */
		ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps);
	}

	return (0);
}
/*
 * Generic AHCI attach: set up the per-channel resource manager, read and
 * sanitize the HBA capabilities (applying per-chipset quirks), create the
 * controller-wide DMA tag, hook up interrupts, and add one child device
 * per SATA channel plus optional remapped-NVMe and enclosure-management
 * children.
 *
 * Fix: the "failed to add remapped NVMe device" message was missing its
 * terminating newline.
 *
 * Returns 0 on success or an errno on failure.
 */
int
ahci_attach(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int error, i, speed, unit;
	uint32_t u, version;
	device_t child;

	ctlr->dev = dev;
	ctlr->ccc = 0;
	resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "ccc", &ctlr->ccc);
	mtx_init(&ctlr->ch_mtx, "AHCI channels lock", NULL, MTX_DEF);

	/* Setup our own memory management for channels. */
	ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem);
	ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem);
	ctlr->sc_iomem.rm_type = RMAN_ARRAY;
	ctlr->sc_iomem.rm_descr = "I/O memory addresses";
	if ((error = rman_init(&ctlr->sc_iomem)) != 0) {
		ahci_free_mem(dev);
		return (error);
	}
	if ((error = rman_manage_region(&ctlr->sc_iomem,
	    rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) {
		ahci_free_mem(dev);
		rman_fini(&ctlr->sc_iomem);
		return (error);
	}
	/* Get the HW capabilities */
	version = ATA_INL(ctlr->r_mem, AHCI_VS);
	ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP);
	if (version >= 0x00010200)
		ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2);
	if (ctlr->caps & AHCI_CAP_EMS)
		ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL);

	if (ctlr->quirks & AHCI_Q_FORCE_PI) {
		/*
		 * Enable ports.
		 * The spec says that BIOS sets up bits corresponding to
		 * available ports. On platforms where this information
		 * is missing, the driver can define available ports on its own.
		 */
		int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1;
		int nmask = (1 << nports) - 1;

		ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask);
		device_printf(dev, "Forcing PI to %d ports (mask = %x)\n",
		    nports, nmask);
	}

	ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI);

	/* Identify and set separate quirks for HBA and RAID f/w Marvells. */
	if ((ctlr->quirks & AHCI_Q_ALTSIG) &&
	    (ctlr->caps & AHCI_CAP_SPM) == 0)
		ctlr->quirks |= AHCI_Q_NOBSYRES;

	/* Quirk-based clamping of channel count and implemented-port mask. */
	if (ctlr->quirks & AHCI_Q_1CH) {
		ctlr->caps &= ~AHCI_CAP_NPMASK;
		ctlr->ichannels &= 0x01;
	}
	if (ctlr->quirks & AHCI_Q_2CH) {
		ctlr->caps &= ~AHCI_CAP_NPMASK;
		ctlr->caps |= 1;
		ctlr->ichannels &= 0x03;
	}
	if (ctlr->quirks & AHCI_Q_4CH) {
		ctlr->caps &= ~AHCI_CAP_NPMASK;
		ctlr->caps |= 3;
		ctlr->ichannels &= 0x0f;
	}
	ctlr->channels = MAX(flsl(ctlr->ichannels),
	    (ctlr->caps & AHCI_CAP_NPMASK) + 1);
	if (ctlr->quirks & AHCI_Q_NOPMP)
		ctlr->caps &= ~AHCI_CAP_SPM;
	if (ctlr->quirks & AHCI_Q_NONCQ)
		ctlr->caps &= ~AHCI_CAP_SNCQ;
	if ((ctlr->caps & AHCI_CAP_CCCS) == 0)
		ctlr->ccc = 0;
	ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC);

	/* Create controller-wide DMA tag. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR :
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE,
	    ctlr->dma_coherent ? BUS_DMA_COHERENT : 0, NULL, NULL,
	    &ctlr->dma_tag)) {
		ahci_free_mem(dev);
		rman_fini(&ctlr->sc_iomem);
		return (ENXIO);
	}

	ahci_ctlr_setup(dev);

	/* Setup interrupts. */
	if ((error = ahci_setup_interrupt(dev)) != 0) {
		bus_dma_tag_destroy(ctlr->dma_tag);
		ahci_free_mem(dev);
		rman_fini(&ctlr->sc_iomem);
		return (error);
	}

	/* Count implemented ports to decide on direct interrupt dispatch. */
	i = 0;
	for (u = ctlr->ichannels; u != 0; u >>= 1)
		i += (u & 1);
	ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3));
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "direct", &ctlr->direct);
	/* Announce HW capabilities. */
	speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT;
	device_printf(dev,
	    "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n",
	    ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f),
	    ((version >> 4) & 0xf0) + (version & 0x0f),
	    (ctlr->caps & AHCI_CAP_NPMASK) + 1,
	    ((speed == 1) ? "1.5":((speed == 2) ? "3":
	    ((speed == 3) ? "6":"?"))),
	    (ctlr->caps & AHCI_CAP_SPM) ?
	    "supported" : "not supported",
	    (ctlr->caps & AHCI_CAP_FBSS) ?
	    " with FBS" : "");
	if (ctlr->quirks != 0) {
		device_printf(dev, "quirks=0x%b\n", ctlr->quirks,
		    AHCI_Q_BIT_STRING);
	}
	if (bootverbose) {
		device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps",
		    (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"",
		    (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"",
		    (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"",
		    (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"",
		    (ctlr->caps & AHCI_CAP_SSS) ? " SS":"",
		    (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"",
		    (ctlr->caps & AHCI_CAP_SAL) ? " AL":"",
		    (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"",
		    ((speed == 1) ? "1.5":((speed == 2) ? "3":
		    ((speed == 3) ? "6":"?"))));
		printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n",
		    (ctlr->caps & AHCI_CAP_SAM) ? " AM":"",
		    (ctlr->caps & AHCI_CAP_SPM) ? " PM":"",
		    (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"",
		    (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"",
		    (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"",
		    (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"",
		    ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1,
		    (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"",
		    (ctlr->caps & AHCI_CAP_EMS) ? " EM":"",
		    (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"",
		    (ctlr->caps & AHCI_CAP_NPMASK) + 1);
	}
	if (bootverbose && version >= 0x00010200) {
		device_printf(dev, "Caps2:%s%s%s%s%s%s\n",
		    (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"",
		    (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"",
		    (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"",
		    (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"",
		    (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"",
		    (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":"");
	}

	/* Attach all channels on this controller */
	for (unit = 0; unit < ctlr->channels; unit++) {
		child = device_add_child(dev, "ahcich", DEVICE_UNIT_ANY);
		if (child == NULL) {
			device_printf(dev, "failed to add channel device\n");
			continue;
		}
		device_set_ivars(child, (void *)(intptr_t)unit);
		if ((ctlr->ichannels & (1 << unit)) == 0)
			device_disable(child);
	}
	/* Attach any remapped NVME device */
	for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) {
		child = device_add_child(dev, "nvme", DEVICE_UNIT_ANY);
		if (child == NULL) {
			device_printf(dev, "failed to add remapped NVMe device\n");
			continue;
		}
		device_set_ivars(child, (void *)(intptr_t)(unit | AHCI_REMAPPED_UNIT));
	}

	int em = (ctlr->caps & AHCI_CAP_EMS) != 0;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "em", &em);
	if (em) {
		child = device_add_child(dev, "ahciem", DEVICE_UNIT_ANY);
		if (child == NULL)
			device_printf(dev, "failed to add enclosure device\n");
		else
			device_set_ivars(child, (void *)(intptr_t)AHCI_EM_UNIT);
	}
	bus_attach_children(dev);
	return (0);
}
/*
 * Generic AHCI detach: detach children, free interrupt vectors, DMA tag,
 * the private rman and the memory resources.  The hunk below converts
 * device_delete_children() to bus_generic_detach(), which detaches the
 * children first and may veto the detach by returning an error.
 */
int
ahci_detach(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
-	int i;
+	int error, i;

	/* Detach & delete all children */
-	device_delete_children(dev);
+	error = bus_generic_detach(dev);
+	if (error != 0)
+		return (error);

	/* Free interrupts. */
	for (i = 0; i < ctlr->numirqs; i++) {
		if (ctlr->irqs[i].r_irq) {
			bus_teardown_intr(dev, ctlr->irqs[i].r_irq,
			    ctlr->irqs[i].handle);
			bus_release_resource(dev, SYS_RES_IRQ,
			    ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq);
		}
	}
	bus_dma_tag_destroy(ctlr->dma_tag);
	/* Free memory. */
	rman_fini(&ctlr->sc_iomem);
	ahci_free_mem(dev);
	mtx_destroy(&ctlr->ch_mtx);
	return (0);
}
/*
 * Release the controller's memory-mapped resources (main BAR plus the
 * MSI-X table/PBA mappings, when present) and clear the pointers so a
 * second call is harmless.
 */
void
ahci_free_mem(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);

	if (ctlr->r_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid,
		    ctlr->r_mem);
	if (ctlr->r_msix_table != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctlr->r_msix_tab_rid, ctlr->r_msix_table);
	if (ctlr->r_msix_pba != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctlr->r_msix_pba_rid, ctlr->r_msix_pba);

	ctlr->r_mem = NULL;
	ctlr->r_msix_table = NULL;
	ctlr->r_msix_pba = NULL;
}
/*
 * Allocate and wire up the controller's interrupt vectors, assigning each
 * vector a dispatch mode (ONE: single channel; ALL: scan every channel;
 * AFTER: last shared vector covers the remaining channels).
 *
 * Fix: on bus_setup_intr() failure the freshly allocated IRQ resource was
 * leaked (the old "SOS XXX release r_irq" note); it is now released.
 *
 * Returns 0 on success or ENXIO on allocation/setup failure.
 */
int
ahci_setup_interrupt(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int i;

	/* Check for single MSI vector fallback. */
	if (ctlr->numirqs > 1 &&
	    (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) {
		device_printf(dev, "Falling back to one MSI\n");
		ctlr->numirqs = 1;
	}

	/* Ensure we don't overrun irqs. */
	if (ctlr->numirqs > AHCI_MAX_IRQS) {
		device_printf(dev, "Too many irqs %d > %d (clamping)\n",
		    ctlr->numirqs, AHCI_MAX_IRQS);
		ctlr->numirqs = AHCI_MAX_IRQS;
	}

	/* Allocate all IRQs. */
	for (i = 0; i < ctlr->numirqs; i++) {
		ctlr->irqs[i].ctlr = ctlr;
		/* MSI rids start at 1; legacy interrupt uses rid 0. */
		ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0);
		if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi)
			ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
		else if (ctlr->numirqs == 1 || i >= ctlr->channels ||
		    (ctlr->ccc && i == ctlr->cccv))
			ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL;
		else if (ctlr->channels > ctlr->numirqs &&
		    i == ctlr->numirqs - 1)
			ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER;
		else
			ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
		if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) {
			device_printf(dev, "unable to map interrupt\n");
			return (ENXIO);
		}
		if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL,
		    (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr :
		    ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge :
		    ahci_intr_one),
		    &ctlr->irqs[i], &ctlr->irqs[i].handle))) {
			device_printf(dev, "unable to setup interrupt\n");
			/* Release the vector we just allocated. */
			bus_release_resource(dev, SYS_RES_IRQ,
			    ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq);
			ctlr->irqs[i].r_irq = NULL;
			return (ENXIO);
		}
		if (ctlr->numirqs > 1) {
			bus_describe_intr(dev, ctlr->irqs[i].r_irq,
			    ctlr->irqs[i].handle,
			    ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ?
			    "ch%d" : "%d", i);
		}
	}
	return (0);
}
/*
 * Common case interrupt handler.
 *
 * Services either every channel (AHCI_IRQ_MODE_ALL) or the tail range of
 * channels that share the last vector (AHCI_IRQ_MODE_AFTER), then calls
 * the handlers of any remapped NVMe children unconditionally, since those
 * have no bit in the IS register.
 */
static void
ahci_intr(void *data)
{
	struct ahci_controller_irq *irq = data;
	struct ahci_controller *ctlr = irq->ctlr;
	u_int32_t is, ise = 0;
	void *arg;
	int unit;

	if (irq->mode == AHCI_IRQ_MODE_ALL) {
		unit = 0;
		/* With CCC the hardware IS is unreliable; scan all ports. */
		if (ctlr->ccc)
			is = ctlr->ichannels;
		else
			is = ATA_INL(ctlr->r_mem, AHCI_IS);
	} else {	/* AHCI_IRQ_MODE_AFTER */
		unit = irq->r_irq_rid - 1;
		is = ATA_INL(ctlr->r_mem, AHCI_IS);
		is &= (0xffffffff << unit);
	}
	/* CCC interrupt is edge triggered. */
	if (ctlr->ccc)
		ise = 1 << ctlr->cccv;
	/* Some controllers have edge triggered IS. */
	if (ctlr->quirks & AHCI_Q_EDGEIS)
		ise |= is;
	/* Edge-triggered bits must be acknowledged before dispatching. */
	if (ise != 0)
		ATA_OUTL(ctlr->r_mem, AHCI_IS, ise);
	for (; unit < ctlr->channels; unit++) {
		if ((is & (1 << unit)) != 0 &&
		    (arg = ctlr->interrupt[unit].argument)) {
			ctlr->interrupt[unit].function(arg);
		}
	}
	/* Remapped NVMe devices have no IS bit; always call their handlers. */
	for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) {
		if ((arg = ctlr->interrupt[unit].argument)) {
			ctlr->interrupt[unit].function(arg);
		}
	}
	/* AHCI declares level triggered IS. */
	if (!(ctlr->quirks & AHCI_Q_EDGEIS))
		ATA_OUTL(ctlr->r_mem, AHCI_IS, is);
	ATA_RBL(ctlr->r_mem, AHCI_IS);
}
/*
* Simplified interrupt handler for multivector MSI mode.
*/
static void
ahci_intr_one(void *data)
{
struct ahci_controller_irq *irq = data;
struct ahci_controller *ctlr = irq->ctlr;
void *arg;
int unit;
unit = irq->r_irq_rid - 1;
if ((arg = ctlr->interrupt[unit].argument))
ctlr->interrupt[unit].function(arg);
/* AHCI declares level triggered IS. */
ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
ATA_RBL(ctlr->r_mem, AHCI_IS);
}
static void
ahci_intr_one_edge(void *data)
{
struct ahci_controller_irq *irq = data;
struct ahci_controller *ctlr = irq->ctlr;
void *arg;
int unit;
unit = irq->r_irq_rid - 1;
/* Some controllers have edge triggered IS. */
ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
if ((arg = ctlr->interrupt[unit].argument))
ctlr->interrupt[unit].function(arg);
ATA_RBL(ctlr->r_mem, AHCI_IS);
}
/*
 * bus_alloc_resource method for ahcich/nvme/ahciem children.  Memory
 * ranges are carved out of the controller's BAR through the private rman;
 * the IRQ "resource" is a reference to vector 0.
 *
 * Fix: the bus_space subregion is created with the reserved length
 * instead of a hard-coded 128 bytes, so enclosure-management and
 * remapped-NVMe windows (which exceed a channel's 128-byte block) get a
 * handle that covers their whole range.
 */
struct resource *
ahci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	struct resource *res;
	rman_res_t st;
	int offset, size, unit;
	bool is_em, is_remapped;

	unit = (intptr_t)device_get_ivars(child);
	is_em = is_remapped = false;
	if (unit & AHCI_REMAPPED_UNIT) {
		unit &= AHCI_UNIT;
		unit -= ctlr->channels;
		is_remapped = true;
	} else if (unit & AHCI_EM_UNIT) {
		unit &= AHCI_UNIT;
		is_em = true;
	}
	res = NULL;
	switch (type) {
	case SYS_RES_MEMORY:
		if (is_remapped) {
			offset = ctlr->remap_offset + unit * ctlr->remap_size;
			size = ctlr->remap_size;
		} else if (!is_em) {
			/* Each AHCI port owns a 128-byte register block. */
			offset = AHCI_OFFSET + (unit << 7);
			size = 128;
		} else if ((ctlr->caps & AHCI_CAP_EMS) == 0) {
			break;
		} else if (*rid == 0) {
			offset = AHCI_EM_CTL;
			size = 4;
		} else {
			offset = (ctlr->emloc & 0xffff0000) >> 14;
			size = (ctlr->emloc & 0x0000ffff) << 2;
			if (*rid != 1) {
				if (*rid == 2 && (ctlr->capsem &
				    (AHCI_EM_XMT | AHCI_EM_SMB)) == 0)
					offset += size;
				else
					break;
			}
		}
		st = rman_get_start(ctlr->r_mem);
		res = rman_reserve_resource(&ctlr->sc_iomem, st + offset,
		    st + offset + size - 1, size, RF_ACTIVE, child);
		if (res) {
			bus_space_handle_t bsh;
			bus_space_tag_t bst;
			bsh = rman_get_bushandle(ctlr->r_mem);
			bst = rman_get_bustag(ctlr->r_mem);
			/* Subregion covers the reserved length, not 128. */
			bus_space_subregion(bst, bsh, offset, size, &bsh);
			rman_set_bushandle(res, bsh);
			rman_set_bustag(res, bst);
		}
		break;
	case SYS_RES_IRQ:
		if (*rid == ATA_IRQ_RID)
			res = ctlr->irqs[0].r_irq;
		break;
	}
	return (res);
}
/*
 * bus_release_resource method: memory ranges go back to the private rman;
 * the shared IRQ reference is not actually released (only rid-checked).
 */
int
ahci_release_resource(device_t dev, device_t child, struct resource *r)
{
	int type = rman_get_type(r);

	if (type == SYS_RES_MEMORY) {
		rman_release_resource(r);
		return (0);
	}
	if (type == SYS_RES_IRQ) {
		if (rman_get_rid(r) != ATA_IRQ_RID)
			return (ENOENT);
		return (0);
	}
	return (EINVAL);
}
/*
 * bus_setup_intr method: record the child's handler so the controller's
 * own interrupt handlers can demultiplex to it.  Filters are rejected.
 */
int
ahci_setup_intr(device_t dev, device_t child, struct resource *irq,
    int flags, driver_filter_t *filter, driver_intr_t *function,
    void *argument, void **cookiep)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	intptr_t ivars = (intptr_t)device_get_ivars(child);
	int unit = ivars & AHCI_UNIT;

	if (filter != NULL) {
		printf("ahci.c: we cannot use a filter here\n");
		return (EINVAL);
	}
	ctlr->interrupt[unit].function = function;
	ctlr->interrupt[unit].argument = argument;
	return (0);
}
/*
 * Unregister a child's interrupt handler: clear the per-unit entry set
 * by ahci_setup_intr() so the controller stops dispatching to it.
 */
int
ahci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie)
{
	struct ahci_controller *ctlr;
	int slot;

	ctlr = device_get_softc(dev);
	slot = (intptr_t)device_get_ivars(child) & AHCI_UNIT;
	ctlr->interrupt[slot].function = NULL;
	ctlr->interrupt[slot].argument = NULL;
	return (0);
}
/*
 * Print a child device announcement; channel children get their channel
 * number appended, enclosure-management children do not.
 */
int
ahci_print_child(device_t dev, device_t child)
{
	intptr_t ivars = (intptr_t)device_get_ivars(child);
	int n;

	n = bus_print_child_header(dev, child);
	if (!(ivars & AHCI_EM_UNIT))
		n += printf(" at channel %d", (int)ivars & AHCI_UNIT);
	n += bus_print_child_footer(dev, child);
	return (n);
}
/*
 * Emit a child's bus location string.  Only channel children (not the
 * enclosure-management child) have a "channel=N" location.
 */
int
ahci_child_location(device_t dev, device_t child, struct sbuf *sb)
{
	intptr_t ivars = (intptr_t)device_get_ivars(child);

	if (!(ivars & AHCI_EM_UNIT))
		sbuf_printf(sb, "channel=%d", (int)ivars & AHCI_UNIT);
	return (0);
}
/* All children share the controller-wide busdma tag. */
bus_dma_tag_t
ahci_get_dma_tag(device_t dev, device_t child)
{
	struct ahci_controller *sc = device_get_softc(dev);

	return (sc->dma_tag);
}
/*
 * Publish a newly attached channel in the controller's channel table so
 * ahci_getch() can find it.  The table is protected by ch_mtx.
 */
void
ahci_attached(device_t dev, struct ahci_channel *ch)
{
	struct ahci_controller *ctlr = device_get_softc(dev);

	mtx_lock(&ctlr->ch_mtx);
	ctlr->ch[ch->unit] = ch;
	mtx_unlock(&ctlr->ch_mtx);
}
/*
 * Remove a channel from the controller's channel table.  ch_mtx is taken
 * before the channel lock (same order as ahci_getch()), which guarantees
 * no one can look the channel up and lock it while it is being cleared.
 */
void
ahci_detached(device_t dev, struct ahci_channel *ch)
{
	struct ahci_controller *ctlr = device_get_softc(dev);

	mtx_lock(&ctlr->ch_mtx);
	mtx_lock(&ch->mtx);
	ctlr->ch[ch->unit] = NULL;
	mtx_unlock(&ch->mtx);
	mtx_unlock(&ctlr->ch_mtx);
}
/*
 * Look up channel n and return it with its lock held, or NULL if it is
 * not (or no longer) attached.  Taking ch_mtx around the lookup and lock
 * acquisition interlocks against ahci_detached().  The caller releases
 * the channel with ahci_putch().
 */
struct ahci_channel *
ahci_getch(device_t dev, int n)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	struct ahci_channel *ch;

	KASSERT(n >= 0 && n < AHCI_MAX_PORTS, ("Bad channel number %d", n));
	mtx_lock(&ctlr->ch_mtx);
	ch = ctlr->ch[n];
	if (ch != NULL)
		mtx_lock(&ch->mtx);
	mtx_unlock(&ctlr->ch_mtx);
	return (ch);
}
/* Release a channel obtained from ahci_getch(): drop its lock. */
void
ahci_putch(struct ahci_channel *ch)
{
	mtx_unlock(&ch->mtx);
}
/*
 * Probe for a channel child.  Channels are created explicitly by the
 * controller attach code, so this always matches.
 */
static int
ahci_ch_probe(device_t dev)
{
	device_set_desc(dev, "AHCI channel");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Sysctl handler for the per-channel "disable_phy" knob.  Writing 1
 * powers the port down via ahci_ch_deinit(); writing 0 re-initializes
 * it and simulates a PHY event so attached devices are rediscovered.
 * Only the values 0 and 1 are accepted; others are silently ignored.
 */
static int
ahci_ch_disablephy_proc(SYSCTL_HANDLER_ARGS)
{
	struct ahci_channel *ch;
	int error, value;

	ch = arg1;
	value = ch->disablephy;
	error = sysctl_handle_int(oidp, &value, 0, req);
	/* Bail on read-only access, handler error, or out-of-range input. */
	if (error != 0 || req->newptr == NULL || (value != 0 && value != 1))
		return (error);

	mtx_lock(&ch->mtx);
	ch->disablephy = value;
	if (value) {
		ahci_ch_deinit(ch->dev);
	} else {
		ahci_ch_init(ch->dev);
		/* Force a rescan as if the device was just plugged in. */
		ahci_phy_check_events(ch, ATA_SE_PHY_CHANGED | ATA_SE_EXCHANGED);
	}
	mtx_unlock(&ch->mtx);
	return (0);
}
/*
 * Attach one AHCI channel: copy controller capabilities/quirks into the
 * channel softc, map the per-port register window, allocate DMA work
 * areas and command slots, hook the shared interrupt and register a CAM
 * SIM/bus for the port.  On failure, resources are unwound via the goto
 * cleanup chain at the bottom.
 */
static int
ahci_ch_attach(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ahci_channel *ch = device_get_softc(dev);
	struct cam_devq *devq;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	int rid, error, i, sata_rev = 0;
	u_int32_t version;

	ch->dev = dev;
	ch->unit = (intptr_t)device_get_ivars(dev);
	/* Inherit controller-wide capabilities, quirks and IDs. */
	ch->caps = ctlr->caps;
	ch->caps2 = ctlr->caps2;
	ch->start = ctlr->ch_start;
	ch->quirks = ctlr->quirks;
	ch->vendorid = ctlr->vendorid;
	ch->deviceid = ctlr->deviceid;
	ch->subvendorid = ctlr->subvendorid;
	ch->subdeviceid = ctlr->subdeviceid;
	/* Number of command slots is CAP.NCS + 1. */
	ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1;
	mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF);
	/* Power management level is a per-device tunable hint. */
	ch->pm_level = 0;
	resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "pm_level", &ch->pm_level);
	STAILQ_INIT(&ch->doneq);
	/* Levels above 3 drive link power states from a software timer. */
	if (ch->pm_level > 3)
		callout_init_mtx(&ch->pm_timer, &ch->mtx, 0);
	callout_init_mtx(&ch->reset_timer, &ch->mtx, 0);
	/* JMicron external ports (0) sometimes limited */
	if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0)
		sata_rev = 1;
	if (ch->quirks & AHCI_Q_SATA2)
		sata_rev = 2;
	/* Allow the hint to override the quirk-derived SATA revision cap. */
	resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "sata_rev", &sata_rev);
	/* Default transfer settings for all 16 possible PMP targets. */
	for (i = 0; i < 16; i++) {
		ch->user[i].revision = sata_rev;
		ch->user[i].mode = 0;
		ch->user[i].bytecount = 8192;
		ch->user[i].tags = ch->numslots;
		ch->user[i].caps = 0;
		ch->curr[i] = ch->user[i];
		if (ch->pm_level) {
			ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ |
			    CTS_SATA_CAPS_H_APST |
			    CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST;
		}
		ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA |
		    CTS_SATA_CAPS_H_AN;
	}
	/* Map this port's register window (sub-allocated by the parent). */
	rid = 0;
	if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE)))
		return (ENXIO);
	ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD);
	version = ATA_INL(ctlr->r_mem, AHCI_VS);
	/* Pre-1.2 HBAs report FBS support globally, not per port. */
	if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS))
		ch->chcaps |= AHCI_P_CMD_FBSCP;
	if (ch->caps2 & AHCI_CAP2_SDS)
		ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP);
	if (bootverbose) {
		device_printf(dev, "Caps:%s%s%s%s%s%s\n",
		    (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"",
		    (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"",
		    (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"",
		    (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"",
		    (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"",
		    (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":"");
	}
	ahci_dmainit(dev);
	ahci_slotsalloc(dev);
	mtx_lock(&ch->mtx);
	ahci_ch_init(dev);
	rid = ATA_IRQ_RID;
	if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE))) {
		device_printf(dev, "Unable to map interrupt\n");
		error = ENXIO;
		goto err0;
	}
	if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
	    ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr,
	    ch, &ch->ih))) {
		device_printf(dev, "Unable to setup interrupt\n");
		error = ENXIO;
		goto err1;
	}
	/* Create the device queue for our SIM. */
	devq = cam_simq_alloc(ch->numslots);
	if (devq == NULL) {
		device_printf(dev, "Unable to allocate simq\n");
		error = ENOMEM;
		goto err1;
	}
	/* Construct SIM entry */
	ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch,
	    device_get_unit(dev), (struct mtx *)&ch->mtx,
	    (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots),
	    (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0,
	    devq);
	if (ch->sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "unable to allocate sim\n");
		error = ENOMEM;
		goto err1;
	}
	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
		device_printf(dev, "unable to register xpt bus\n");
		error = ENXIO;
		goto err2;
	}
	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "unable to create path\n");
		error = ENXIO;
		goto err3;
	}
	if (ch->pm_level > 3) {
		callout_reset(&ch->pm_timer,
		    (ch->pm_level == 4) ? hz / 1000 : hz / 8,
		    ahci_ch_pm, ch);
	}
	mtx_unlock(&ch->mtx);
	ahci_attached(device_get_parent(dev), ch);
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "disable_phy",
	    CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, ch,
	    0, ahci_ch_disablephy_proc, "IU", "Disable PHY");
	return (0);

err3:
	xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
	/* Freeing the SIM also frees the devq. */
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
err1:
	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
err0:
	/*
	 * NOTE(review): rid here is ch->unit while the memory resource was
	 * allocated with rid 0; harmless in practice since the parent's
	 * release method ignores the rid for memory — confirm.
	 */
	bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
	mtx_unlock(&ch->mtx);
	mtx_destroy(&ch->mtx);
	return (error);
}
/*
 * Detach a channel: unpublish it from the controller, tear down the CAM
 * SIM and path, stop callouts, release the interrupt, power the port
 * down and free the DMA areas, slots and register mapping.
 */
static int
ahci_ch_detach(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	/* Remove from the controller's table first, so no new lookups. */
	ahci_detached(device_get_parent(dev), ch);
	mtx_lock(&ch->mtx);
	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
	/* Forget about reset. */
	if (ch->resetting) {
		ch->resetting = 0;
		xpt_release_simq(ch->sim, TRUE);
	}
	xpt_free_path(ch->path);
	xpt_bus_deregister(cam_sim_path(ch->sim));
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
	mtx_unlock(&ch->mtx);

	if (ch->pm_level > 3)
		callout_drain(&ch->pm_timer);
	callout_drain(&ch->reset_timer);
	/* Stop interrupts before touching the hardware state. */
	bus_teardown_intr(dev, ch->r_irq, ch->ih);
	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);

	ahci_ch_deinit(dev);
	ahci_slotsfree(dev);
	ahci_dmafini(dev);
	/*
	 * NOTE(review): rid is ch->unit but the resource was allocated with
	 * rid 0; harmless since the parent ignores the rid for memory.
	 */
	bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
	mtx_destroy(&ch->mtx);
	return (0);
}
/*
 * Bring a port into operating state: program the command-list and FIS
 * receive area addresses, activate/power/spin-up the port (optionally
 * with aggressive link power management per pm_level) and start FIS
 * reception and command processing.
 */
static int
ahci_ch_init(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);
	uint64_t work;

	/* Disable port interrupts */
	ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
	/* Setup work areas */
	work = ch->dma.work_bus + AHCI_CL_OFFSET;
	ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff);
	ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32);
	work = ch->dma.rfis_bus;
	ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff);
	ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32);
	/* Activate the channel and power/spin up device */
	/* ALPE for pm_level 2/3; ASP selects slumber (not partial) above 2. */
	ATA_OUTL(ch->r_mem, AHCI_P_CMD,
	    (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD |
	     ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) |
	     ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 )));
	ahci_start_fr(ch);
	ahci_start(ch, 1);
	return (0);
}
/*
 * Quiesce and power a port down: stop command processing and FIS
 * reception, push the link toward slumber, then disable the PHY.
 */
static int
ahci_ch_deinit(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	/* Disable port interrupts. */
	ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
	/* Reset command register. */
	ahci_stop(ch);
	ahci_stop_fr(ch);
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0);
	/* Allow everything, including partial and slumber modes. */
	ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0);
	/* Request slumber mode transition and give some time to get there. */
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER);
	DELAY(100);
	/* Disable PHY. */
	ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
	return (0);
}
/*
 * Suspend a channel: freeze the SIM queue (released in resume), cancel
 * any pending reset, wait for in-flight commands to drain, then power
 * the port down.
 */
static int
ahci_ch_suspend(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	mtx_lock(&ch->mtx);
	xpt_freeze_simq(ch->sim, 1);
	/* Forget about reset. */
	if (ch->resetting) {
		ch->resetting = 0;
		callout_stop(&ch->reset_timer);
		xpt_release_simq(ch->sim, TRUE);
	}
	/* Wait until all occupied slots complete before deinit. */
	while (ch->oslots)
		msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100);
	ahci_ch_deinit(dev);
	mtx_unlock(&ch->mtx);
	return (0);
}
/*
 * Resume a channel: re-initialize the port hardware, start a reset to
 * rediscover attached devices, and release the SIM queue frozen by
 * ahci_ch_suspend().
 */
static int
ahci_ch_resume(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	mtx_lock(&ch->mtx);
	ahci_ch_init(dev);
	ahci_reset(ch);
	xpt_release_simq(ch->sim, TRUE);
	mtx_unlock(&ch->mtx);
	return (0);
}
/* Newbus method table for per-channel child devices. */
static device_method_t ahcich_methods[] = {
	DEVMETHOD(device_probe,     ahci_ch_probe),
	DEVMETHOD(device_attach,    ahci_ch_attach),
	DEVMETHOD(device_detach,    ahci_ch_detach),
	DEVMETHOD(device_suspend,   ahci_ch_suspend),
	DEVMETHOD(device_resume,    ahci_ch_resume),
	DEVMETHOD_END
};
/* Driver for "ahcich" channel devices, attached under the "ahci" bus. */
static driver_t ahcich_driver = {
	"ahcich",
	ahcich_methods,
	sizeof(struct ahci_channel)
};
DRIVER_MODULE(ahcich, ahci, ahcich_driver, NULL, NULL);
/* Result of a bus_dmamap_load() callback: loaded bus address or error. */
struct ahci_dc_cb_args {
	bus_addr_t maddr;	/* bus address of the loaded segment */
	int error;		/* 0 on success, busdma error otherwise */
};
/*
 * Allocate the channel's DMA areas: the command list/tables work area,
 * the FIS receive area (4KB when FIS-based switching is supported, 256
 * bytes otherwise) and a tag for data transfers.  On any failure, emits
 * a warning and unwinds everything via ahci_dmafini().
 */
static void
ahci_dmainit(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);
	struct ahci_dc_cb_args dcba;
	size_t rfsize;
	int error;

	/* Command area. */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE,
	    0, NULL, NULL, &ch->dma.work_tag);
	if (error != 0)
		goto error;
	error = bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
	    BUS_DMA_ZERO, &ch->dma.work_map);
	if (error != 0)
		goto error;
	/* Load synchronously; the callback stores the bus address in dcba. */
	error = bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
	    AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT);
	if (error != 0 || (error = dcba.error) != 0) {
		bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
		goto error;
	}
	ch->dma.work_bus = dcba.maddr;
	/* FIS receive area. */
	if (ch->chcaps & AHCI_P_CMD_FBSCP)
		rfsize = 4096;
	else
		rfsize = 256;
	error = bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, rfsize, 1, rfsize,
	    0, NULL, NULL, &ch->dma.rfis_tag);
	if (error != 0)
		goto error;
	error = bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0,
	    &ch->dma.rfis_map);
	if (error != 0)
		goto error;
	error = bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis,
	    rfsize, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT);
	if (error != 0 || (error = dcba.error) != 0) {
		bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
		goto error;
	}
	ch->dma.rfis_bus = dcba.maddr;
	/* Data area. */
	/* Uses the channel mutex for deferred-load serialization. */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    AHCI_SG_ENTRIES * PAGE_SIZE, AHCI_SG_ENTRIES, AHCI_PRD_MAX,
	    0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag);
	if (error != 0)
		goto error;
	return;

error:
	device_printf(dev, "WARNING - DMA initialization failed, error %d\n",
	    error);
	ahci_dmafini(dev);
}
/*
 * busdma load callback for the work/RFIS areas: report the load result
 * back through the ahci_dc_cb_args cookie.  The tags are created with a
 * single segment, so only segs[0] is consumed.
 */
static void
ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ahci_dc_cb_args *args = xsc;

	args->error = error;
	if (error == 0)
		args->maddr = segs[0].ds_addr;
}
/*
 * Free the channel DMA resources allocated by ahci_dmainit().  Safe to
 * call with a partially initialized state: each resource is checked and
 * cleared, in reverse order of creation.
 */
static void
ahci_dmafini(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	if (ch->dma.data_tag) {
		bus_dma_tag_destroy(ch->dma.data_tag);
		ch->dma.data_tag = NULL;
	}
	if (ch->dma.rfis_bus) {
		bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map);
		bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
		ch->dma.rfis_bus = 0;
		ch->dma.rfis = NULL;
	}
	if (ch->dma.work_bus) {
		bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map);
		bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
		ch->dma.work_bus = 0;
		ch->dma.work = NULL;
	}
	if (ch->dma.work_tag) {
		bus_dma_tag_destroy(ch->dma.work_tag);
		ch->dma.work_tag = NULL;
	}
}
/*
 * Initialize the per-channel command slot array: clear all bookkeeping,
 * set each usable slot's identity, command-table offset and timeout
 * callout, and create its data DMA map.
 */
static void
ahci_slotsalloc(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);
	struct ahci_slot *slot;
	int n;

	bzero(ch->slot, sizeof(ch->slot));
	for (n = 0; n < ch->numslots; n++) {
		slot = &ch->slot[n];
		slot->ch = ch;
		slot->slot = n;
		slot->state = AHCI_SLOT_EMPTY;
		slot->ct_offset = AHCI_CT_OFFSET + AHCI_CT_SIZE * n;
		slot->ccb = NULL;
		callout_init_mtx(&slot->timeout, &ch->mtx, 0);
		if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map))
			device_printf(ch->dev, "FAILURE - create data_map\n");
	}
}
/*
 * Tear down the per-channel command slots: drain each timeout callout
 * and destroy the data DMA map created by ahci_slotsalloc().
 */
static void
ahci_slotsfree(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);
	struct ahci_slot *slot;
	int n;

	for (n = 0; n < ch->numslots; n++) {
		slot = &ch->slot[n];
		callout_drain(&slot->timeout);
		if (slot->dma.data_map != NULL) {
			bus_dmamap_destroy(ch->dma.data_tag,
			    slot->dma.data_map);
			slot->dma.data_map = NULL;
		}
	}
}
/*
 * Handle PHY events reported via SError.  With power management off a
 * PHY-ready change means hot-plug; with PM on (or while listening) the
 * device-exchanged bit is used instead.  Resets the channel and queues
 * a bus rescan.  Returns 1 if a reset was performed, 0 otherwise.
 */
static int
ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr)
{
	if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) ||
	    ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) {
		u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
		union ccb *ccb;

		if (bootverbose) {
			if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
				device_printf(ch->dev, "CONNECT requested\n");
			else
				device_printf(ch->dev, "DISCONNECT requested\n");
		}
		ahci_reset(ch);
		/* Best effort: if no CCB is available, skip the rescan. */
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
			return (0);
		if (xpt_create_path(&ccb->ccb_h.path, NULL,
		    cam_sim_path(ch->sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_free_ccb(ccb);
			return (0);
		}
		xpt_rescan(ccb);
		return (1);
	}
	return (0);
}
/*
 * Handle cold presence detection events (only meaningful when power
 * management is enabled and the port supports CPD).  Resets the channel
 * and queues a bus rescan to pick up the connect/disconnect.
 */
static void
ahci_cpd_check_events(struct ahci_channel *ch)
{
	u_int32_t status;
	union ccb *ccb;
	device_t dev;

	if (ch->pm_level == 0)
		return;
	status = ATA_INL(ch->r_mem, AHCI_P_CMD);
	if ((status & AHCI_P_CMD_CPD) == 0)
		return;

	if (bootverbose) {
		dev = ch->dev;
		if (status & AHCI_P_CMD_CPS) {
			device_printf(dev, "COLD CONNECT requested\n");
		} else
			device_printf(dev, "COLD DISCONNECT requested\n");
	}
	ahci_reset(ch);
	/* Best effort: if no CCB is available, skip the rescan. */
	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
		return;
	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}
	xpt_rescan(ccb);
}
/*
 * Deliver SDB asynchronous-notification events: acknowledge the SNTF
 * register when the HBA implements it, then send an AC_SCSI_AEN to
 * every PMP target whose bit is set in the notification mask.
 */
static void
ahci_notify_events(struct ahci_channel *ch, u_int32_t status)
{
	struct cam_path *path;
	int tgt;

	if (ch->caps & AHCI_CAP_SSNTF)
		ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status);
	if (bootverbose)
		device_printf(ch->dev, "SNTF 0x%04x\n", status);
	for (tgt = 0; tgt < 16; tgt++) {
		if (((status >> tgt) & 1) != 0 &&
		    xpt_create_path(&path, NULL, xpt_path_path_id(ch->path),
		    tgt, 0) == CAM_REQ_CMP) {
			xpt_async(AC_SCSI_AEN, path, NULL);
			xpt_free_path(path);
		}
	}
}
/*
 * Complete a CCB.  Outside batched (direct interrupt) context, or for
 * non-queued function codes, hand it to CAM immediately; during batched
 * interrupt processing queue it on doneq so ahci_ch_intr_direct() can
 * deliver it after dropping the channel lock.
 */
static void
ahci_done(struct ahci_channel *ch, union ccb *ccb)
{
	mtx_assert(&ch->mtx, MA_OWNED);
	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 ||
	    ch->batch == 0) {
		xpt_done(ccb);
		return;
	}

	STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe);
}
/*
 * Non-direct per-channel interrupt handler: sample the port interrupt
 * status and process it with the channel lock held.
 */
static void
ahci_ch_intr(void *arg)
{
	struct ahci_channel *ch = arg;
	uint32_t is;

	/* Read interrupt statuses. */
	is = ATA_INL(ch->r_mem, AHCI_P_IS);

	mtx_lock(&ch->mtx);
	ahci_ch_intr_main(ch, is);
	mtx_unlock(&ch->mtx);
}
/*
 * Direct-dispatch per-channel interrupt handler.  Runs the main handler
 * in "batch" mode so completions accumulate on doneq, then delivers
 * them via xpt_done_direct() after dropping the channel lock.
 */
static void
ahci_ch_intr_direct(void *arg)
{
	struct ahci_channel *ch = (struct ahci_channel *)arg;
	struct ccb_hdr *ccb_h;
	uint32_t istatus;
	STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq);

	/* Read interrupt statuses. */
	istatus = ATA_INL(ch->r_mem, AHCI_P_IS);

	mtx_lock(&ch->mtx);
	ch->batch = 1;
	ahci_ch_intr_main(ch, istatus);
	ch->batch = 0;
	/*
	 * Prevent the possibility of issues caused by processing the queue
	 * while unlocked below by moving the contents to a local queue.
	 */
	STAILQ_CONCAT(&tmp_doneq, &ch->doneq);
	mtx_unlock(&ch->mtx);
	while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe);
		xpt_done_direct((union ccb *)ccb_h);
	}
}
/*
 * Power-management timer callback (pm_level > 3): when the channel is
 * idle, request a PARTIAL (level 4) or SLUMBER (level 5+) link power
 * state transition via the port command register.
 */
static void
ahci_ch_pm(void *arg)
{
	struct ahci_channel *ch = arg;
	uint32_t cmd;

	/* Leave the link alone while requests are still running. */
	if (ch->numrslots != 0)
		return;
	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	cmd |= (ch->pm_level == 4) ? AHCI_P_CMD_PARTIAL : AHCI_P_CMD_SLUMBER;
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd);
}
/*
 * Main per-port interrupt processing; called with the channel lock held.
 * Acknowledges the interrupt, reads command completion status, collects
 * SDB notifications, processes PHY / cold-presence events, completes
 * finished commands and fails those implicated in reported errors.
 */
static void
ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus)
{
	uint32_t cstatus, serr = 0, sntf = 0, ok, err;
	enum ahci_err_type et;
	int i, ccs, port, reset = 0;

	/* Clear interrupt statuses. */
	ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus);
	/* Read command statuses. */
	/* cstatus ends up with a bit set for every still-busy slot. */
	if (ch->numtslots != 0)
		cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
	else
		cstatus = 0;
	if (ch->numrslots != ch->numtslots)
		cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI);
	/* Read SNTF in one of possible ways. */
	if ((istatus & AHCI_P_IX_SDB) &&
	    (ch->pm_present || ch->curr[0].atapi != 0)) {
		if (ch->caps & AHCI_CAP_SSNTF)
			sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF);
		else if (ch->fbs_enabled) {
			/* With FBS each PMP port has its own 256-byte RFIS copy. */
			u_int8_t *fis = ch->dma.rfis + 0x58;

			for (i = 0; i < 16; i++) {
				if (fis[1] & 0x80) {
					fis[1] &= 0x7f;
					sntf |= 1 << i;
				}
				fis += 256;
			}
		} else {
			u_int8_t *fis = ch->dma.rfis + 0x58;

			if (fis[1] & 0x80)
				sntf = (1 << (fis[1] & 0x0f));
		}
	}
	/* Process PHY events */
	if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF |
	    AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
		serr = ATA_INL(ch->r_mem, AHCI_P_SERR);
		if (serr) {
			ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr);
			reset = ahci_phy_check_events(ch, serr);
		}
	}
	/* Process cold presence detection events */
	if ((istatus & AHCI_P_IX_CPD) && !reset)
		ahci_cpd_check_events(ch);
	/* Process command errors */
	if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF |
	    AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
		if (ch->quirks & AHCI_Q_NOCCS) {
			/*
			 * ASMedia chips sometimes report failed commands as
			 * completed.  Count all running commands as failed.
			 */
			cstatus |= ch->rslots;

			/* They also report wrong CCS, so try to guess one. */
			ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1;
		} else {
			/* CCS identifies the command that took the error. */
			ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) &
			    AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT;
		}
//device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n",
//    __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD),
//    serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs);
		/*
		 * With FBS, narrow the failure down to a single PMP port if
		 * possible: port >= 0 means one specific port, -1 unknown,
		 * -2 means several ports had commands running.
		 */
		port = -1;
		if (ch->fbs_enabled) {
			uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS);
			if (fbs & AHCI_P_FBS_SDE) {
				port = (fbs & AHCI_P_FBS_DWE)
				    >> AHCI_P_FBS_DWE_SHIFT;
			} else {
				for (i = 0; i < 16; i++) {
					if (ch->numrslotspd[i] == 0)
						continue;
					if (port == -1)
						port = i;
					else if (port != i) {
						port = -2;
						break;
					}
				}
			}
		}
		err = ch->rslots & cstatus;
	} else {
		ccs = 0;
		err = 0;
		port = -1;
	}
	/* Complete all successful commands. */
	ok = ch->rslots & ~cstatus;
	for (i = 0; i < ch->numslots; i++) {
		if ((ok >> i) & 1)
			ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE);
	}
	/* On error, complete the rest of commands with error statuses. */
	if (err) {
		/* Requeue any command held back waiting for a free slot. */
		if (ch->frozen) {
			union ccb *fccb = ch->frozen;
			ch->frozen = NULL;
			fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
			if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
				xpt_freeze_devq(fccb->ccb_h.path, 1);
				fccb->ccb_h.status |= CAM_DEV_QFRZN;
			}
			ahci_done(ch, fccb);
		}
		for (i = 0; i < ch->numslots; i++) {
			/* XXX: requests in loading state. */
			if (((err >> i) & 1) == 0)
				continue;
			/* Skip commands for other PMP ports when known. */
			if (port >= 0 &&
			    ch->slot[i].ccb->ccb_h.target_id != port)
				continue;
			if (istatus & AHCI_P_IX_TFE) {
				if (port != -2) {
					/* Task File Error */
					if (ch->numtslotspd[
					    ch->slot[i].ccb->ccb_h.target_id] == 0) {
						/* Untagged operation. */
						if (i == ccs)
							et = AHCI_ERR_TFE;
						else
							et = AHCI_ERR_INNOCENT;
					} else {
						/* Tagged operation. */
						et = AHCI_ERR_NCQ;
					}
				} else {
					et = AHCI_ERR_TFE;
					ch->fatalerr = 1;
				}
			} else if (istatus & AHCI_P_IX_IF) {
				if (ch->numtslots == 0 && i != ccs && port != -2)
					et = AHCI_ERR_INNOCENT;
				else
					et = AHCI_ERR_SATA;
			} else
				et = AHCI_ERR_INVALID;
			ahci_end_transaction(&ch->slot[i], et);
		}
		/*
		 * We can't reinit port if there are some other
		 * commands active, use resume to complete them.
		 */
		if (ch->rslots != 0 && !ch->recoverycmd)
			ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC);
	}
	/* Process NOTIFY events */
	if (sntf)
		ahci_notify_events(ch, sntf);
}
/* Must be called with channel locked. */
/*
 * Decide whether a new CCB can be issued right now.  Returns 1 if it
 * must wait (slot/ordering conflict), 0 if it may proceed.  NCQ (FPDMA)
 * commands cannot mix with untagged ones — per PMP port when FBS is
 * enabled, channel-wide otherwise — and atomic (control/need-result)
 * commands must run alone.
 */
static int
ahci_check_collision(struct ahci_channel *ch, union ccb *ccb)
{
	int t = ccb->ccb_h.target_id;

	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		/* Tagged command while we have no supported tag free. */
		if (((~ch->oslots) & (0xffffffff >> (32 -
		    ch->curr[t].tags))) == 0)
			return (1);
		/* If we have FBS */
		if (ch->fbs_enabled) {
			/* Tagged command while untagged are active. */
			if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0)
				return (1);
		} else {
			/* Tagged command while untagged are active. */
			if (ch->numrslots != 0 && ch->numtslots == 0)
				return (1);
			/* Tagged command while tagged to other target is active. */
			if (ch->numtslots != 0 &&
			    ch->taggedtarget != ccb->ccb_h.target_id)
				return (1);
		}
	} else {
		/* If we have FBS */
		if (ch->fbs_enabled) {
			/* Untagged command while tagged are active. */
			if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0)
				return (1);
		} else {
			/* Untagged command while tagged are active. */
			if (ch->numrslots != 0 && ch->numtslots != 0)
				return (1);
		}
	}
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) {
		/* Atomic command while anything active. */
		if (ch->numrslots != 0)
			return (1);
	}
	/* We have some atomic command running. */
	if (ch->aslots != 0)
		return (1);
	return (0);
}
/* Must be called with channel locked. */
/*
 * Start executing a CCB: pick a free command slot (round-robin after
 * lastslot to spread tag usage), account for it in the channel state,
 * then either load the data buffer via busdma or execute immediately
 * for data-less commands.  ahci_check_collision() must have approved
 * the CCB first, which guarantees a free slot exists.
 */
static void
ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb)
{
	struct ahci_slot *slot;
	int tag, tags;

	/* Choose empty slot. */
	/* NCQ commands are limited to the target's negotiated tag count. */
	tags = ch->numslots;
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA))
		tags = ch->curr[ccb->ccb_h.target_id].tags;
	/* Search upward from lastslot first; wrap to the lowest free slot. */
	if (ch->lastslot + 1 < tags)
		tag = ffs(~(ch->oslots >> (ch->lastslot + 1)));
	else
		tag = 0;
	if (tag == 0 || tag + ch->lastslot >= tags)
		tag = ffs(~ch->oslots) - 1;
	else
		tag += ch->lastslot;
	ch->lastslot = tag;
	/* Occupy chosen slot. */
	slot = &ch->slot[tag];
	slot->ccb = ccb;
	/* Stop PM timer. */
	if (ch->numrslots == 0 && ch->pm_level > 3)
		callout_stop(&ch->pm_timer);
	/* Update channel stats. */
	ch->oslots |= (1 << tag);
	ch->numrslots++;
	ch->numrslotspd[ccb->ccb_h.target_id]++;
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ch->numtslots++;
		ch->numtslotspd[ccb->ccb_h.target_id]++;
		ch->taggedtarget = ccb->ccb_h.target_id;
	}
	/* Atomic commands are tracked in aslots to serialize against. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)))
		ch->aslots |= (1 << tag);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		slot->state = AHCI_SLOT_LOADING;
		/* ahci_dmasetprd() continues execution once loaded. */
		bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb,
		    ahci_dmasetprd, slot, 0);
	} else {
		slot->dma.nsegs = 0;
		ahci_execute_transaction(slot);
	}
}
/* Locked by busdma engine. */
/*
 * busdma load callback for a command's data buffer: fill the slot's PRD
 * (scatter/gather) table in the command table, sync the data map for
 * the transfer direction and hand the slot to ahci_execute_transaction().
 * On a load error the transaction is failed immediately.
 */
static void
ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ahci_slot *slot = arg;
	struct ahci_channel *ch = slot->ch;
	struct ahci_cmd_tab *ctp;
	struct ahci_dma_prd *prd;
	int seg;

	if (error) {
		device_printf(ch->dev, "DMA load error\n");
		ahci_end_transaction(slot, AHCI_ERR_INVALID);
		return;
	}
	KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n"));
	/* Locate this slot's command table inside the work area. */
	ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset);
	prd = ctp->prd_tab;
	for (seg = 0; seg < nsegs; seg++) {
		prd[seg].dba = htole64(segs[seg].ds_addr);
		prd[seg].dbc = htole32((segs[seg].ds_len - 1) & AHCI_PRD_MASK);
	}
	slot->dma.nsegs = nsegs;
	/* Make the data buffer device-visible before issuing the command. */
	bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
	    (slot->ccb->ccb_h.flags & CAM_DIR_IN) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	ahci_execute_transaction(slot);
}
/* Must be called with channel locked. */
/*
 * Build the command FIS and command-list entry for a slot and issue it
 * to the hardware.  Soft reset and DEVICE RESET commands do not raise a
 * completion interrupt, so they are polled to completion here, including
 * several controller-specific quirk workarounds.
 */
static void
ahci_execute_transaction(struct ahci_slot *slot)
{
	struct ahci_channel *ch = slot->ch;
	struct ahci_cmd_tab *ctp;
	struct ahci_cmd_list *clp;
	union ccb *ccb = slot->ccb;
	int port = ccb->ccb_h.target_id & 0x0f;
	int fis_size, i, softreset;
	/* D2H register FIS copy in the receive area, used for BSY polling. */
	uint8_t *fis = ch->dma.rfis + 0x40;
	uint8_t val;
	uint16_t cmd_flags;

	/* Get a piece of the workspace for this request */
	ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset);
	/* Setup the FIS for this request */
	if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) {
		device_printf(ch->dev, "Setting up SATA FIS failed\n");
		ahci_end_transaction(slot, AHCI_ERR_INVALID);
		return;
	}
	/* Setup the command list entry */
	clp = (struct ahci_cmd_list *)
	    (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
	/* Flags: direction, ATAPI/prefetch, FIS length in dwords, PMP port. */
	cmd_flags =
	    (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) |
	    (ccb->ccb_h.func_code == XPT_SCSI_IO ?
	     (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) |
	    (fis_size / sizeof(u_int32_t)) |
	    (port << 12);
	clp->prd_length = htole16(slot->dma.nsegs);
	/* Special handling for Soft Reset command. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
		if (ccb->ataio.cmd.control & ATA_A_RESET) {
			/* Phase 1: assert SRST. */
			softreset = 1;
			/* Kick controller into sane state */
			ahci_stop(ch);
			ahci_clo(ch);
			ahci_start(ch, 0);
			cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY;
		} else {
			/* Phase 2: deassert SRST, then poll for readiness. */
			softreset = 2;
			/* Prepare FIS receive area for check. */
			for (i = 0; i < 20; i++)
				fis[i] = 0xff;
		}
	} else
		softreset = 0;
	clp->bytecount = 0;
	clp->cmd_flags = htole16(cmd_flags);
	clp->cmd_table_phys = htole64(ch->dma.work_bus + slot->ct_offset);
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
	    BUS_DMASYNC_PREREAD);
	/* Set ACTIVE bit for NCQ commands. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot);
	}
	/* If FBS is enabled, set PMP port. */
	if (ch->fbs_enabled) {
		ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN |
		    (port << AHCI_P_FBS_DEV_SHIFT));
	}
	/* Issue command to the controller. */
	slot->state = AHCI_SLOT_RUNNING;
	ch->rslots |= (1 << slot->slot);
	ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot));
	/* Device reset commands doesn't interrupt. Poll them. */
	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
	    (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) {
		/* Poll in 10us steps; timeout is in milliseconds. */
		int count, timeout = ccb->ccb_h.timeout * 100;
		enum ahci_err_type et = AHCI_ERR_NONE;

		for (count = 0; count < timeout; count++) {
			DELAY(10);
			if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot)))
				break;
			if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) &&
			    softreset != 1) {
#if 0
				device_printf(ch->dev,
				    "Poll error on slot %d, TFD: %04x\n",
				    slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD));
#endif
				et = AHCI_ERR_TFE;
				break;
			}
			/* Workaround for ATI SB600/SB700 chipsets. */
			if (ccb->ccb_h.target_id == 15 &&
			    (ch->quirks & AHCI_Q_ATI_PMP_BUG) &&
			    (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) {
				et = AHCI_ERR_TIMEOUT;
				break;
			}
		}

		/*
		 * Some Marvell controllers require additional time
		 * after soft reset to work properly. Setup delay
		 * to 50ms after soft reset.
		 */
		if (ch->quirks & AHCI_Q_MRVL_SR_DEL)
			DELAY(50000);

		/*
		 * Marvell HBAs with non-RAID firmware do not wait for
		 * readiness after soft reset, so we have to wait here.
		 * Marvell RAIDs do not have this problem, but instead
		 * sometimes forget to update FIS receive area, breaking
		 * this wait.
		 */
		if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 &&
		    (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 &&
		    softreset == 2 && et == AHCI_ERR_NONE) {
			for ( ; count < timeout; count++) {
				/* Re-sync around each status-byte read. */
				bus_dmamap_sync(ch->dma.rfis_tag,
				    ch->dma.rfis_map, BUS_DMASYNC_POSTREAD);
				val = fis[2];
				bus_dmamap_sync(ch->dma.rfis_tag,
				    ch->dma.rfis_map, BUS_DMASYNC_PREREAD);
				if ((val & ATA_S_BUSY) == 0)
					break;
				DELAY(10);
			}
		}

		if (timeout && (count >= timeout)) {
			device_printf(ch->dev, "Poll timeout on slot %d port %d\n",
			    slot->slot, port);
			device_printf(ch->dev, "is %08x cs %08x ss %08x "
			    "rs %08x tfd %02x serr %08x cmd %08x\n",
			    ATA_INL(ch->r_mem, AHCI_P_IS),
			    ATA_INL(ch->r_mem, AHCI_P_CI),
			    ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
			    ATA_INL(ch->r_mem, AHCI_P_TFD),
			    ATA_INL(ch->r_mem, AHCI_P_SERR),
			    ATA_INL(ch->r_mem, AHCI_P_CMD));
			et = AHCI_ERR_TIMEOUT;
		}

		/* Kick controller into sane state and enable FBS. */
		if (softreset == 2)
			ch->eslots |= (1 << slot->slot);
		ahci_end_transaction(slot, et);
		return;
	}
	/* Start command execution timeout */
	callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2,
	    0, ahci_timeout, slot, 0);
	return;
}
/* Must be called with channel locked. */
/*
 * Complete every still-running command on the channel with a timeout
 * error; used once all outstanding slots have been declared timed out.
 */
static void
ahci_process_timeout(struct ahci_channel *ch)
{
	int n;

	mtx_assert(&ch->mtx, MA_OWNED);
	for (n = 0; n < ch->numslots; n++) {
		/* Do we have a running request on slot? */
		if (ch->slot[n].state >= AHCI_SLOT_RUNNING)
			ahci_end_transaction(&ch->slot[n], AHCI_ERR_TIMEOUT);
	}
}
/* Must be called with channel locked. */
/*
 * Restart the timeout callout on every running slot that has already
 * been marked timed out (toslots), giving the remaining commands more
 * time to finish.
 */
static void
ahci_rearm_timeout(struct ahci_channel *ch)
{
	struct ahci_slot *slot;
	int n;

	mtx_assert(&ch->mtx, MA_OWNED);
	for (n = 0; n < ch->numslots; n++) {
		slot = &ch->slot[n];
		/* Only running slots already flagged as timed out. */
		if (slot->state < AHCI_SLOT_RUNNING ||
		    (ch->toslots & (1 << n)) == 0)
			continue;
		callout_reset_sbt(&slot->timeout,
		    SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0,
		    ahci_timeout, slot, 0);
	}
}
/* Locked by callout mechanism. */
/*
 * Command timeout handler.  The callout fires at half the CCB timeout:
 * on the first firing, if the command has not even started executing
 * yet, just mark it executing and rearm; on the second, declare a real
 * timeout.  Without FBS the timing-out slot is known, so it is failed
 * and the rest completed as innocent; with FBS (or unreliable CCS) we
 * must wait for every outstanding slot to time out before recovering.
 */
static void
ahci_timeout(void *arg)
{
	struct ahci_slot *slot = arg;
	struct ahci_channel *ch = slot->ch;
	device_t dev = ch->dev;
	uint32_t sstatus;
	int ccs;
	int i;

	/* Check for stale timeout. */
	if (slot->state < AHCI_SLOT_RUNNING)
		return;

	/* Check if slot was not being executed last time we checked. */
	if (slot->state < AHCI_SLOT_EXECUTING) {
		/* Check if slot started executing. */
		sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
		ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
		    >> AHCI_P_CMD_CCS_SHIFT;
		if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot ||
		    ch->fbs_enabled || ch->wrongccs)
			slot->state = AHCI_SLOT_EXECUTING;
		else if ((ch->rslots & (1 << ccs)) == 0) {
			/* CCS points at an idle slot: the HBA lies about it. */
			ch->wrongccs = 1;
			slot->state = AHCI_SLOT_EXECUTING;
		}

		callout_reset_sbt(&slot->timeout,
		    SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0,
		    ahci_timeout, slot, 0);
		return;
	}

	device_printf(dev, "Timeout on slot %d port %d\n",
	    slot->slot, slot->ccb->ccb_h.target_id & 0x0f);
	device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x "
	    "serr %08x cmd %08x\n",
	    ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI),
	    ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
	    ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR),
	    ATA_INL(ch->r_mem, AHCI_P_CMD));

	/* Handle frozen command. */
	if (ch->frozen) {
		union ccb *fccb = ch->frozen;
		ch->frozen = NULL;
		fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
		if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
			xpt_freeze_devq(fccb->ccb_h.path, 1);
			fccb->ccb_h.status |= CAM_DEV_QFRZN;
		}
		ahci_done(ch, fccb);
	}
	if (!ch->fbs_enabled && !ch->wrongccs) {
		/* Without FBS we know real timeout source. */
		ch->fatalerr = 1;
		/* Handle command with timeout. */
		ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT);
		/* Handle the rest of commands. */
		for (i = 0; i < ch->numslots; i++) {
			/* Do we have a running request on slot? */
			if (ch->slot[i].state < AHCI_SLOT_RUNNING)
				continue;
			ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
		}
	} else {
		/* With FBS we wait for other commands timeout and pray. */
		if (ch->toslots == 0)
			xpt_freeze_simq(ch->sim, 1);
		ch->toslots |= (1 << slot->slot);
		if ((ch->rslots & ~ch->toslots) == 0)
			ahci_process_timeout(ch);
		else
			device_printf(dev, " ... waiting for slots %08x\n",
			    ch->rslots & ~ch->toslots);
	}
}
/*
 * Finish the command occupying 'slot' with result type 'et': collect the
 * task-file result where needed, unload data DMA maps, translate 'et' into
 * a CAM status, free the slot, and trigger any follow-up action (second
 * step of a reset sequence, recovery command, port reinit, restart of a
 * frozen command).  Must be called with channel locked.
 */
static void
ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
{
struct ahci_channel *ch = slot->ch;
union ccb *ccb = slot->ccb;
struct ahci_cmd_list *clp;
int lastto;
uint32_t sig;
bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
clp = (struct ahci_cmd_list *)
(ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
/* Read result registers to the result struct
* May be incorrect if several commands finished same time,
* so read only when sure or have to.
*/
if (ccb->ccb_h.func_code == XPT_ATA_IO) {
struct ata_res *res = &ccb->ataio.res;
if ((et == AHCI_ERR_TFE) ||
(ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) {
/* D2H Register FIS lives at offset 0x40 of the RX FIS area. */
u_int8_t *fis = ch->dma.rfis + 0x40;
bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
BUS_DMASYNC_POSTREAD);
if (ch->fbs_enabled) {
/* With FBS each PM target has its own 256-byte RX FIS area. */
fis += ccb->ccb_h.target_id * 256;
res->status = fis[2];
res->error = fis[3];
} else {
/* Without FBS, status and error are mirrored in PxTFD. */
uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD);
res->status = tfd;
res->error = tfd >> 8;
}
res->lba_low = fis[4];
res->lba_mid = fis[5];
res->lba_high = fis[6];
res->device = fis[7];
res->lba_low_exp = fis[8];
res->lba_mid_exp = fis[9];
res->lba_high_exp = fis[10];
res->sector_count = fis[12];
res->sector_count_exp = fis[13];
/*
* Some weird controllers do not return signature in
* FIS receive area. Read it from PxSIG register.
*/
if ((ch->quirks & AHCI_Q_ALTSIG) &&
(ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
(ccb->ataio.cmd.control & ATA_A_RESET) == 0) {
sig = ATA_INL(ch->r_mem, AHCI_P_SIG);
res->lba_high = sig >> 24;
res->lba_mid = sig >> 16;
res->lba_low = sig >> 8;
res->sector_count = sig;
}
} else
bzero(res, sizeof(*res));
/* Residual from the command list byte count, unless quirked off. */
if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 &&
(ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
(ch->quirks & AHCI_Q_NOCOUNT) == 0) {
ccb->ataio.resid =
ccb->ataio.dxfer_len - le32toh(clp->bytecount);
}
} else {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
(ch->quirks & AHCI_Q_NOCOUNT) == 0) {
ccb->csio.resid =
ccb->csio.dxfer_len - le32toh(clp->bytecount);
}
}
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
(ccb->ccb_h.flags & CAM_DIR_IN) ?
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map);
}
/* Remember slots that errored; they need port reinit/CLO later. */
if (et != AHCI_ERR_NONE)
ch->eslots |= (1 << slot->slot);
/* In case of error, freeze device for proper recovery. */
if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) &&
!(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
xpt_freeze_devq(ccb->ccb_h.path, 1);
ccb->ccb_h.status |= CAM_DEV_QFRZN;
}
/* Set proper result status. */
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
switch (et) {
case AHCI_ERR_NONE:
ccb->ccb_h.status |= CAM_REQ_CMP;
if (ccb->ccb_h.func_code == XPT_SCSI_IO)
ccb->csio.scsi_status = SCSI_STATUS_OK;
break;
case AHCI_ERR_INVALID:
ch->fatalerr = 1;
ccb->ccb_h.status |= CAM_REQ_INVALID;
break;
case AHCI_ERR_INNOCENT:
ccb->ccb_h.status |= CAM_REQUEUE_REQ;
break;
case AHCI_ERR_TFE:
case AHCI_ERR_NCQ:
if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
} else {
ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
}
break;
case AHCI_ERR_SATA:
ch->fatalerr = 1;
if (!ch->recoverycmd) {
xpt_freeze_simq(ch->sim, 1);
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
ccb->ccb_h.status |= CAM_UNCOR_PARITY;
break;
case AHCI_ERR_TIMEOUT:
if (!ch->recoverycmd) {
xpt_freeze_simq(ch->sim, 1);
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
break;
default:
ch->fatalerr = 1;
ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
}
/* Free slot. */
ch->oslots &= ~(1 << slot->slot);
ch->rslots &= ~(1 << slot->slot);
ch->aslots &= ~(1 << slot->slot);
slot->state = AHCI_SLOT_EMPTY;
slot->ccb = NULL;
/* Update channel stats. */
ch->numrslots--;
ch->numrslotspd[ccb->ccb_h.target_id]--;
if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
(ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
ch->numtslots--;
ch->numtslotspd[ccb->ccb_h.target_id]--;
}
/* Cancel timeout state if request completed normally. */
if (et != AHCI_ERR_TIMEOUT) {
/* Release the SIM queue when the last timed-out slot clears. */
lastto = (ch->toslots == (1 << slot->slot));
ch->toslots &= ~(1 << slot->slot);
if (lastto)
xpt_release_simq(ch->sim, TRUE);
}
/* If it was first request of reset sequence and there is no error,
* proceed to second request. */
if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
(ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
(ccb->ataio.cmd.control & ATA_A_RESET) &&
et == AHCI_ERR_NONE) {
ccb->ataio.cmd.control &= ~ATA_A_RESET;
ahci_begin_transaction(ch, ccb);
return;
}
/* If it was our READ LOG command - process it. */
if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) {
ahci_process_read_log(ch, ccb);
/* If it was our REQUEST SENSE command - process it. */
} else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) {
ahci_process_request_sense(ch, ccb);
/* If it was NCQ or ATAPI command error, put result on hold. */
} else if (et == AHCI_ERR_NCQ ||
((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
(ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) {
ch->hold[slot->slot] = ccb;
ch->numhslots++;
} else
ahci_done(ch, ccb);
/* If we have no other active commands, ... */
if (ch->rslots == 0) {
/* if there was fatal error - reset port. */
if (ch->toslots != 0 || ch->fatalerr) {
ahci_reset(ch);
} else {
/* if we have slots in error, we can reinit port. */
if (ch->eslots != 0) {
ahci_stop(ch);
ahci_clo(ch);
ahci_start(ch, 1);
}
/* if there commands on hold, we can do READ LOG. */
if (!ch->recoverycmd && ch->numhslots)
ahci_issue_recovery(ch);
}
/* If all the rest of commands are in timeout - give them chance. */
} else if ((ch->rslots & ~ch->toslots) == 0 &&
et != AHCI_ERR_TIMEOUT)
ahci_rearm_timeout(ch);
/* Unfreeze frozen command. */
if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) {
union ccb *fccb = ch->frozen;
ch->frozen = NULL;
ahci_begin_transaction(ch, fccb);
xpt_release_simq(ch->sim, TRUE);
}
/* Start PM timer. */
if (ch->numrslots == 0 && ch->pm_level > 3 &&
(ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) {
callout_schedule(&ch->pm_timer,
(ch->pm_level == 4) ? hz / 1000 : hz / 8);
}
}
/*
 * Issue a recovery command on behalf of commands held after an error:
 * READ LOG EXT (to fetch NCQ error details) if the held command was ATA,
 * or REQUEST SENSE if it was ATAPI.  The SIM queue is frozen until the
 * recovery command completes; on allocation failure all held commands
 * are completed with CAM_RESRC_UNAVAIL and the port is reset.
 */
static void
ahci_issue_recovery(struct ahci_channel *ch)
{
union ccb *ccb;
struct ccb_ataio *ataio;
struct ccb_scsiio *csio;
int i;
/* Find some held command. */
for (i = 0; i < ch->numslots; i++) {
if (ch->hold[i])
break;
}
ccb = xpt_alloc_ccb_nowait();
if (ccb == NULL) {
device_printf(ch->dev, "Unable to allocate recovery command\n");
completeall:
/* We can't do anything -- complete held commands. */
for (i = 0; i < ch->numslots; i++) {
if (ch->hold[i] == NULL)
continue;
ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL;
ahci_done(ch, ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
}
ahci_reset(ch);
return;
}
/* Clone path/priority from the first held command found above. */
xpt_setup_ccb(&ccb->ccb_h, ch->hold[i]->ccb_h.path,
ch->hold[i]->ccb_h.pinfo.priority);
if (ch->hold[i]->ccb_h.func_code == XPT_ATA_IO) {
/* READ LOG */
ccb->ccb_h.recovery_type = RECOVERY_READ_LOG;
ccb->ccb_h.func_code = XPT_ATA_IO;
ccb->ccb_h.flags = CAM_DIR_IN;
ccb->ccb_h.timeout = 1000; /* 1s should be enough. */
ataio = &ccb->ataio;
ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT);
if (ataio->data_ptr == NULL) {
xpt_free_ccb(ccb);
device_printf(ch->dev,
"Unable to allocate memory for READ LOG command\n");
goto completeall;
}
ataio->dxfer_len = 512;
bzero(&ataio->cmd, sizeof(ataio->cmd));
ataio->cmd.flags = CAM_ATAIO_48BIT;
ataio->cmd.command = 0x2F; /* READ LOG EXT */
ataio->cmd.sector_count = 1;
ataio->cmd.sector_count_exp = 0;
/* Log address 10h: the NCQ Command Error log. */
ataio->cmd.lba_low = 0x10;
ataio->cmd.lba_mid = 0;
ataio->cmd.lba_mid_exp = 0;
} else {
/* REQUEST SENSE */
ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE;
/* Remember which held slot the sense data belongs to. */
ccb->ccb_h.recovery_slot = i;
ccb->ccb_h.func_code = XPT_SCSI_IO;
ccb->ccb_h.flags = CAM_DIR_IN;
ccb->ccb_h.status = 0;
ccb->ccb_h.timeout = 1000; /* 1s should be enough. */
csio = &ccb->csio;
/* Sense data is written straight into the held command's CCB. */
csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data;
csio->dxfer_len = ch->hold[i]->csio.sense_len;
csio->cdb_len = 6;
bzero(&csio->cdb_io, sizeof(csio->cdb_io));
csio->cdb_io.cdb_bytes[0] = 0x03;
csio->cdb_io.cdb_bytes[4] = csio->dxfer_len;
}
/* Freeze SIM while doing recovery. */
ch->recoverycmd = 1;
xpt_freeze_simq(ch->sim, 1);
ahci_begin_transaction(ch, ccb);
}
/*
 * Completion handler for the READ LOG EXT recovery command: parse the
 * NCQ Command Error log, deliver the failed tag its task-file result,
 * requeue the innocent held NCQ commands, and release the resources and
 * SIM queue taken by ahci_issue_recovery().
 */
static void
ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb)
{
uint8_t *data;
struct ata_res *res;
int i;
ch->recoverycmd = 0;
data = ccb->ataio.data_ptr;
/* Byte 0 bit 7 (NQ) set would mean a non-queued command error. */
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
(data[0] & 0x80) == 0) {
for (i = 0; i < ch->numslots; i++) {
if (!ch->hold[i])
continue;
if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
continue;
/* Low 5 bits of byte 0 hold the failing command's tag. */
if ((data[0] & 0x1F) == i) {
res = &ch->hold[i]->ataio.res;
res->status = data[2];
res->error = data[3];
res->lba_low = data[4];
res->lba_mid = data[5];
res->lba_high = data[6];
res->device = data[7];
res->lba_low_exp = data[8];
res->lba_mid_exp = data[9];
res->lba_high_exp = data[10];
res->sector_count = data[12];
res->sector_count_exp = data[13];
} else {
/* Other held commands were innocent - requeue them. */
ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ;
}
ahci_done(ch, ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
}
} else {
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
device_printf(ch->dev, "Error while READ LOG EXT\n");
else if ((data[0] & 0x80) == 0) {
device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n");
}
/* Can't attribute the error; complete all held ATA commands as-is. */
for (i = 0; i < ch->numslots; i++) {
if (!ch->hold[i])
continue;
if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
continue;
ahci_done(ch, ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
}
}
free(ccb->ataio.data_ptr, M_AHCI);
xpt_free_ccb(ccb);
xpt_release_simq(ch->sim, TRUE);
}
/*
 * Completion handler for the REQUEST SENSE recovery command issued by
 * ahci_issue_recovery(): finish the held ATAPI command whose sense data
 * was being gathered, then free the recovery CCB and unfreeze the SIM.
 */
static void
ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb)
{
	union ccb *hccb;
	int slot;

	ch->recoverycmd = 0;

	slot = ccb->ccb_h.recovery_slot;
	hccb = ch->hold[slot];
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		/* Sense data was fetched into the held CCB successfully. */
		hccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	} else {
		hccb->ccb_h.status &= ~CAM_STATUS_MASK;
		hccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
	}
	ahci_done(ch, hccb);
	ch->hold[slot] = NULL;
	ch->numhslots--;
	xpt_free_ccb(ccb);
	xpt_release_simq(ch->sim, TRUE);
}
static void
ahci_start(struct ahci_channel *ch, int fbs)
{
u_int32_t cmd;
/* Run the channel start callback, if any. */
if (ch->start)
ch->start(ch);
/* Clear SATA error register */
ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF);
/* Clear any interrupts pending on this channel */
ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF);
/* Configure FIS-based switching if supported. */
if (ch->chcaps & AHCI_P_CMD_FBSCP) {
ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0;
ATA_OUTL(ch->r_mem, AHCI_P_FBS,
ch->fbs_enabled ? AHCI_P_FBS_EN : 0);
}
/* Start operations on this channel */
cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
cmd &= ~AHCI_P_CMD_PMA;
ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST |
(ch->pm_present ? AHCI_P_CMD_PMA : 0));
}
/*
 * Stop command processing on the channel: clear ST, then poll until the
 * command list DMA engine (CR) reports idle, giving up after ~500ms.
 */
static void
ahci_stop(struct ahci_channel *ch)
{
	u_int32_t cmd;
	int i;

	/* Clear ST to halt command processing on this port. */
	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST);

	/* Poll for CR to clear; bail out after the timeout budget. */
	for (i = 0; ; i++) {
		DELAY(10);
		if (i > 50000) {
			device_printf(ch->dev, "stopping AHCI engine failed\n");
			break;
		}
		if ((ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR) == 0)
			break;
	}
	ch->eslots = 0;
}
/*
 * Issue a Command List Override to clear BSY/DRQ from the port, when the
 * controller advertises that capability.  No-op otherwise.
 */
static void
ahci_clo(struct ahci_channel *ch)
{
	u_int32_t cmd;
	int i;

	/* Nothing to do if the HBA cannot override BSY/DRQ. */
	if ((ch->caps & AHCI_CAP_SCLO) == 0)
		return;

	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	cmd |= AHCI_P_CMD_CLO;
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd);

	/* The HBA clears CLO when the override has completed. */
	for (i = 0; ; i++) {
		DELAY(10);
		if (i > 50000) {
			device_printf(ch->dev, "executing CLO failed\n");
			break;
		}
		if ((ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO) == 0)
			break;
	}
}
/*
 * Stop FIS reception on the channel: clear FRE, then poll until the FIS
 * receive DMA engine (FR) reports idle, giving up after ~500ms.
 */
static void
ahci_stop_fr(struct ahci_channel *ch)
{
	u_int32_t cmd;
	int i;

	/* Clear FRE to stop posting received FISes to memory. */
	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE);

	/* Poll for FR to clear; bail out after the timeout budget. */
	for (i = 0; ; i++) {
		DELAY(10);
		if (i > 50000) {
			device_printf(ch->dev, "stopping AHCI FR engine failed\n");
			break;
		}
		if ((ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR) == 0)
			break;
	}
}
/*
 * Start FIS reception on the channel by setting FRE in PxCMD.
 */
static void
ahci_start_fr(struct ahci_channel *ch)
{
	ATA_OUTL(ch->r_mem, AHCI_P_CMD,
	    ATA_INL(ch->r_mem, AHCI_P_CMD) | AHCI_P_CMD_FRE);
}
/*
 * Wait up to 't' milliseconds for the device to drop BSY and DRQ in
 * PxTFD.  't0' is time already spent before this call, used only for
 * reporting.  Returns 0 when ready, EBUSY on timeout (silently if t==0).
 */
static int
ahci_wait_ready(struct ahci_channel *ch, int t, int t0)
{
	uint32_t tfd;
	int waited;

	for (waited = 0; ; waited++) {
		tfd = ATA_INL(ch->r_mem, AHCI_P_TFD);
		if ((tfd & (ATA_S_BUSY | ATA_S_DRQ)) == 0)
			break;
		if (waited > t) {
			if (t != 0) {
				device_printf(ch->dev,
				    "AHCI reset: device not ready after %dms "
				    "(tfd = %08x)\n",
				    MAX(t, 0) + t0, tfd);
			}
			return (EBUSY);
		}
		DELAY(1000);
	}
	if (bootverbose)
		device_printf(ch->dev, "AHCI reset: device ready after %dms\n",
		    waited + t0);
	return (0);
}
/*
 * 100ms callout continuing a port reset started by ahci_reset():
 * ch->resetting counts remaining ticks (started at 310, ~31s total).
 * When the device becomes ready, or on final timeout (falling back to
 * CLO), restart the channel and release the SIM queue.
 */
static void
ahci_reset_to(void *arg)
{
struct ahci_channel *ch = arg;
if (ch->resetting == 0)
return;
ch->resetting--;
/* t0 below is elapsed ms; t == -1 on the last tick forces failure. */
if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0,
(310 - ch->resetting) * 100) == 0) {
ch->resetting = 0;
ahci_start(ch, 1);
xpt_release_simq(ch->sim, TRUE);
return;
}
if (ch->resetting == 0) {
/* Give up waiting; try Command List Override as a last resort. */
ahci_clo(ch);
ahci_start(ch, 1);
xpt_release_simq(ch->sim, TRUE);
return;
}
callout_schedule(&ch->reset_timer, hz / 10);
}
/*
 * Hard-reset the channel: abort any reset in progress, requeue frozen
 * and running commands, stop the engine, reset the PHY, and wait for
 * device readiness (deferring the long wait to the reset_timer callout
 * unless dumping).  Re-enables the wanted port interrupts at the end.
 */
static void
ahci_reset(struct ahci_channel *ch)
{
struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev));
int i;
xpt_freeze_simq(ch->sim, 1);
if (bootverbose)
device_printf(ch->dev, "AHCI reset...\n");
/* Forget about previous reset. */
if (ch->resetting) {
ch->resetting = 0;
callout_stop(&ch->reset_timer);
xpt_release_simq(ch->sim, TRUE);
}
/* Requeue freezed command. */
if (ch->frozen) {
union ccb *fccb = ch->frozen;
ch->frozen = NULL;
fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
xpt_freeze_devq(fccb->ccb_h.path, 1);
fccb->ccb_h.status |= CAM_DEV_QFRZN;
}
ahci_done(ch, fccb);
}
/* Kill the engine and requeue all running commands. */
ahci_stop(ch);
for (i = 0; i < ch->numslots; i++) {
/* Do we have a running request on slot? */
if (ch->slot[i].state < AHCI_SLOT_RUNNING)
continue;
/* XXX; Commands in loading state. */
ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
}
/* Complete any commands still held for recovery. */
for (i = 0; i < ch->numslots; i++) {
if (!ch->hold[i])
continue;
ahci_done(ch, ch->hold[i]);
ch->hold[i] = NULL;
ch->numhslots--;
}
/* Timed-out slots held an extra SIM freeze; drop it. */
if (ch->toslots != 0)
xpt_release_simq(ch->sim, TRUE);
ch->eslots = 0;
ch->toslots = 0;
ch->wrongccs = 0;
ch->fatalerr = 0;
/* Tell the XPT about the event */
xpt_async(AC_BUS_RESET, ch->path, NULL);
/* Disable port interrupts */
ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
/* Reset and reconnect PHY, */
if (!ahci_sata_phy_reset(ch)) {
if (bootverbose)
device_printf(ch->dev,
"AHCI reset: device not found\n");
ch->devices = 0;
/* Enable wanted port interrupts */
ATA_OUTL(ch->r_mem, AHCI_P_IE,
(((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
AHCI_P_IX_PRC | AHCI_P_IX_PC));
xpt_release_simq(ch->sim, TRUE);
return;
}
if (bootverbose)
device_printf(ch->dev, "AHCI reset: device found\n");
/* Wait for clearing busy status. */
/* While dumping we must wait synchronously; otherwise defer to the
* reset_timer callout (310 ticks * 100ms). */
if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) {
if (dumping)
ahci_clo(ch);
else
ch->resetting = 310;
}
ch->devices = 1;
/* Enable wanted port interrupts */
ATA_OUTL(ch->r_mem, AHCI_P_IE,
(((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
AHCI_P_IX_TFE | AHCI_P_IX_HBF |
AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF |
((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC |
AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) |
AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR)));
if (ch->resetting)
callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch);
else {
ahci_start(ch, 1);
xpt_release_simq(ch->sim, TRUE);
}
}
/*
 * Build the 20-byte host-to-device register FIS for 'ccb' into command
 * table 'ctp'.  'tag' is the NCQ tag folded into the sector count field
 * for FPDMA commands.  For ATAPI, the CDB is copied into the command
 * table's ACMD area.  Returns the FIS length in bytes (always 20).
 */
static int
ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag)
{
u_int8_t *fis = &ctp->cfis[0];
bzero(fis, 20);
fis[0] = 0x27; /* host to device */
fis[1] = (ccb->ccb_h.target_id & 0x0f);
if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
/* Bit 7 of byte 1 is the C bit: this FIS carries a command. */
fis[1] |= 0x80;
fis[2] = ATA_PACKET_CMD;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
fis[3] = ATA_F_DMA;
else {
/* PIO ATAPI: byte count limit goes into the LBA mid/high. */
fis[5] = ccb->csio.dxfer_len;
fis[6] = ccb->csio.dxfer_len >> 8;
}
fis[7] = ATA_D_LBA;
fis[15] = ATA_A_4BIT;
bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
ctp->acmd, ccb->csio.cdb_len);
bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len);
} else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) {
fis[1] |= 0x80;
fis[2] = ccb->ataio.cmd.command;
fis[3] = ccb->ataio.cmd.features;
fis[4] = ccb->ataio.cmd.lba_low;
fis[5] = ccb->ataio.cmd.lba_mid;
fis[6] = ccb->ataio.cmd.lba_high;
fis[7] = ccb->ataio.cmd.device;
fis[8] = ccb->ataio.cmd.lba_low_exp;
fis[9] = ccb->ataio.cmd.lba_mid_exp;
fis[10] = ccb->ataio.cmd.lba_high_exp;
fis[11] = ccb->ataio.cmd.features_exp;
fis[12] = ccb->ataio.cmd.sector_count;
if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
/* NCQ: tag occupies bits 7:3 of the sector count. */
fis[12] &= 0x07;
fis[12] |= tag << 3;
}
fis[13] = ccb->ataio.cmd.sector_count_exp;
if (ccb->ataio.ata_flags & ATA_FLAG_ICC)
fis[14] = ccb->ataio.icc;
fis[15] = ATA_A_4BIT;
if (ccb->ataio.ata_flags & ATA_FLAG_AUX) {
fis[16] = ccb->ataio.aux & 0xff;
fis[17] = (ccb->ataio.aux >> 8) & 0xff;
fis[18] = (ccb->ataio.aux >> 16) & 0xff;
fis[19] = (ccb->ataio.aux >> 24) & 0xff;
}
} else {
/* Control FIS (e.g. SRST): only the device control byte is sent. */
fis[15] = ccb->ataio.cmd.control;
}
return (20);
}
/*
 * Wait for the SATA PHY to report an established, active connection in
 * PxSSTS.  Polls in 100us steps.  Returns 1 on success (and clears the
 * SATA error register), 0 on offline status or timeout.
 */
static int
ahci_sata_connect(struct ahci_channel *ch)
{
u_int32_t status;
int timeout, timeoutslot, found = 0;
/*
* Wait for "connect well", up to 100ms by default and
* up to 500ms for devices with the SLOWDEV quirk.
*/
timeoutslot = ((ch->quirks & AHCI_Q_SLOWDEV) ? 5000 : 1000);
for (timeout = 0; timeout < timeoutslot; timeout++) {
status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
found = 1;
if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) &&
((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) &&
((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE))
break;
if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) {
if (bootverbose) {
device_printf(ch->dev, "SATA offline status=%08x\n",
status);
}
return (0);
}
/* If no device showed any presence within 10ms, stop early. */
if (found == 0 && timeout >= 100)
break;
DELAY(100);
}
if (timeout >= timeoutslot || !found) {
if (bootverbose) {
device_printf(ch->dev,
"SATA connect timeout time=%dus status=%08x\n",
timeout * 100, status);
}
return (0);
}
if (bootverbose) {
device_printf(ch->dev, "SATA connect time=%dus status=%08x\n",
timeout * 100, status);
}
/* Clear SATA error register */
ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff);
return (1);
}
/*
 * Reset the SATA PHY (COMRESET via SControl DET), apply the user's speed
 * limit, and wait for reconnection.  Returns 1 if a device connected,
 * 0 otherwise (possibly putting the port back to listen/disabled state).
 */
static int
ahci_sata_phy_reset(struct ahci_channel *ch)
{
int sata_rev;
uint32_t val, detval;
/* If staggered spin-up left the port down, spin it up first. */
if (ch->listening) {
val = ATA_INL(ch->r_mem, AHCI_P_CMD);
val |= AHCI_P_CMD_SUD;
ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
ch->listening = 0;
}
sata_rev = ch->user[ch->pm_present ? 15 : 0].revision;
if (sata_rev == 1)
val = ATA_SC_SPD_SPEED_GEN1;
else if (sata_rev == 2)
val = ATA_SC_SPD_SPEED_GEN2;
else if (sata_rev == 3)
val = ATA_SC_SPD_SPEED_GEN3;
else
val = 0;
/* Assert COMRESET, hold it for 1ms, then return DET to idle. */
detval = ahci_ch_detval(ch, ATA_SC_DET_RESET);
ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
detval | val |
ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER);
DELAY(1000);
detval = ahci_ch_detval(ch, ATA_SC_DET_IDLE);
ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
detval | val | ((ch->pm_level > 0) ? 0 :
(ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER)));
if (!ahci_sata_connect(ch)) {
/* No device: return to listen mode (SSS) or disable the PHY. */
if (ch->caps & AHCI_CAP_SSS) {
val = ATA_INL(ch->r_mem, AHCI_P_CMD);
val &= ~AHCI_P_CMD_SUD;
ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
ch->listening = 1;
} else if (ch->pm_level > 0)
ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
return (0);
}
return (1);
}
/*
 * Validate a CCB's target/LUN against the channel's capabilities.
 * Targets above 0 are only valid with port-multiplier support, and SATA
 * has no LUNs.  On failure the CCB is completed with an error status and
 * -1 is returned; 0 means the ids are acceptable.
 */
static int
ahci_check_ids(struct ahci_channel *ch, union ccb *ccb)
{
	int maxtarget;

	maxtarget = (ch->caps & AHCI_CAP_SPM) ? 15 : 0;
	if (ccb->ccb_h.target_id > maxtarget) {
		ccb->ccb_h.status = CAM_TID_INVALID;
		ahci_done(ch, ccb);
		return (-1);
	}
	if (ccb->ccb_h.target_lun != 0) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		ahci_done(ch, ccb);
		return (-1);
	}
	return (0);
}
/*
 * CAM SIM action entry point for the channel: dispatch the CCB by
 * function code.  I/O CCBs either start immediately, get frozen on
 * collision, or fail with selection timeout; everything else is handled
 * synchronously and completed via ahci_done() at the bottom.
 */
static void
ahciaction(struct cam_sim *sim, union ccb *ccb)
{
struct ahci_channel *ch;
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n",
ccb->ccb_h.func_code));
ch = (struct ahci_channel *)cam_sim_softc(sim);
switch (ccb->ccb_h.func_code) {
/* Common cases first */
case XPT_ATA_IO: /* Execute the requested I/O operation */
case XPT_SCSI_IO:
if (ahci_check_ids(ch, ccb))
return;
/* Targets 1..14 exist only behind a port multiplier. */
if (ch->devices == 0 ||
(ch->pm_present == 0 &&
ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) {
ccb->ccb_h.status = CAM_SEL_TIMEOUT;
break;
}
ccb->ccb_h.recovery_type = RECOVERY_NONE;
/* Check for command collision. */
if (ahci_check_collision(ch, ccb)) {
/* Freeze command. */
ch->frozen = ccb;
/* We have only one frozen slot, so freeze simq also. */
xpt_freeze_simq(ch->sim, 1);
return;
}
ahci_begin_transaction(ch, ccb);
return;
case XPT_ABORT: /* Abort the specified CCB */
/* XXX Implement */
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
case XPT_SET_TRAN_SETTINGS:
{
struct ccb_trans_settings *cts = &ccb->cts;
struct ahci_device *d;
if (ahci_check_ids(ch, ccb))
return;
if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
d = &ch->curr[ccb->ccb_h.target_id];
else
d = &ch->user[ccb->ccb_h.target_id];
if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
d->revision = cts->xport_specific.sata.revision;
if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE)
d->mode = cts->xport_specific.sata.mode;
if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS)
d->tags = min(ch->numslots, cts->xport_specific.sata.tags);
if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM)
ch->pm_present = cts->xport_specific.sata.pm_present;
if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
d->atapi = cts->xport_specific.sata.atapi;
if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
d->caps = cts->xport_specific.sata.caps;
ccb->ccb_h.status = CAM_REQ_CMP;
break;
}
case XPT_GET_TRAN_SETTINGS:
/* Get default/user set transfer settings for the target */
{
struct ccb_trans_settings *cts = &ccb->cts;
struct ahci_device *d;
uint32_t status;
if (ahci_check_ids(ch, ccb))
return;
if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
d = &ch->curr[ccb->ccb_h.target_id];
else
d = &ch->user[ccb->ccb_h.target_id];
cts->protocol = PROTO_UNSPECIFIED;
cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
cts->transport = XPORT_SATA;
cts->transport_version = XPORT_VERSION_UNSPECIFIED;
cts->proto_specific.valid = 0;
cts->xport_specific.sata.valid = 0;
/* For the directly attached device report live PHY speed. */
if (cts->type == CTS_TYPE_CURRENT_SETTINGS &&
(ccb->ccb_h.target_id == 15 ||
(ccb->ccb_h.target_id == 0 && !ch->pm_present))) {
status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK;
if (status & 0x0f0) {
cts->xport_specific.sata.revision =
(status & 0x0f0) >> 4;
cts->xport_specific.sata.valid |=
CTS_SATA_VALID_REVISION;
}
cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D;
if (ch->pm_level) {
if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC))
cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ;
if (ch->caps2 & AHCI_CAP2_APST)
cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST;
}
if ((ch->caps & AHCI_CAP_SNCQ) &&
(ch->quirks & AHCI_Q_NOAA) == 0)
cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA;
cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN;
cts->xport_specific.sata.caps &=
ch->user[ccb->ccb_h.target_id].caps;
cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
} else {
cts->xport_specific.sata.revision = d->revision;
cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
cts->xport_specific.sata.caps = d->caps;
cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
}
cts->xport_specific.sata.mode = d->mode;
cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
cts->xport_specific.sata.bytecount = d->bytecount;
cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
cts->xport_specific.sata.pm_present = ch->pm_present;
cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM;
cts->xport_specific.sata.tags = d->tags;
cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS;
cts->xport_specific.sata.atapi = d->atapi;
cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
ccb->ccb_h.status = CAM_REQ_CMP;
break;
}
case XPT_RESET_BUS: /* Reset the specified SCSI bus */
case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
ahci_reset(ch);
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_TERM_IO: /* Terminate the I/O process */
/* XXX Implement */
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
case XPT_PATH_INQ: /* Path routing inquiry */
{
struct ccb_pathinq *cpi = &ccb->cpi;
cpi->version_num = 1; /* XXX??? */
cpi->hba_inquiry = PI_SDTR_ABLE;
if (ch->caps & AHCI_CAP_SNCQ)
cpi->hba_inquiry |= PI_TAG_ABLE;
if (ch->caps & AHCI_CAP_SPM)
cpi->hba_inquiry |= PI_SATAPM;
cpi->target_sprt = 0;
cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
if ((ch->quirks & AHCI_Q_NOAUX) == 0)
cpi->hba_misc |= PIM_ATA_EXT;
cpi->hba_eng_cnt = 0;
if (ch->caps & AHCI_CAP_SPM)
cpi->max_target = 15;
else
cpi->max_target = 0;
cpi->max_lun = 0;
cpi->initiator_id = 0;
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = 150000;
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strlcpy(cpi->hba_vid, "AHCI", HBA_IDLEN);
strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->transport = XPORT_SATA;
cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
cpi->protocol = PROTO_ATA;
cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
cpi->maxio = ctob(AHCI_SG_ENTRIES - 1);
/* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */
if (ch->quirks & AHCI_Q_MAXIO_64K)
cpi->maxio = min(cpi->maxio, 128 * 512);
cpi->hba_vendor = ch->vendorid;
cpi->hba_device = ch->deviceid;
cpi->hba_subvendor = ch->subvendorid;
cpi->hba_subdevice = ch->subdeviceid;
cpi->ccb_h.status = CAM_REQ_CMP;
break;
}
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
}
ahci_done(ch, ccb);
}
/*
 * CAM SIM poll routine: service pending port interrupts by hand, and
 * keep an in-progress reset moving since callouts may not fire while
 * polling (e.g. during a dump).
 */
static void
ahcipoll(struct cam_sim *sim)
{
	struct ahci_channel *ch;
	uint32_t is;

	ch = (struct ahci_channel *)cam_sim_softc(sim);
	is = ATA_INL(ch->r_mem, AHCI_P_IS);
	if (is != 0)
		ahci_ch_intr_main(ch, is);

	/* Emulate the 100ms reset callout roughly every 1000 polls. */
	if (ch->resetting != 0 &&
	    (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) {
		ch->resetpolldiv = 1000;
		ahci_reset_to(ch);
	}
}
MODULE_VERSION(ahci, 1);
MODULE_DEPEND(ahci, cam, 1, 1, 1);
diff --git a/sys/dev/ata/ata-pci.c b/sys/dev/ata/ata-pci.c
index 8c22bb6ff427..6d8b8fb3aad1 100644
--- a/sys/dev/ata/ata-pci.c
+++ b/sys/dev/ata/ata-pci.c
@@ -1,918 +1,921 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 1998 - 2008 Søren Schmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
MALLOC_DEFINE(M_ATAPCI, "ata_pci", "ATA driver PCI");
/* misc defines */
#define IOMASK 0xfffffffc
/*
* generic PCI ATA device probe
*/
/*
 * Generic PCI ATA device probe: match any PCI storage-class device with
 * the IDE subclass, set a vendor-based description, and register the
 * generic chipinit hook at low priority so chipset-specific drivers can
 * outbid us.
 */
int
ata_pci_probe(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);

	/* Only storage-class, IDE-subclass devices are handled here. */
	if (pci_get_class(dev) != PCIC_STORAGE ||
	    pci_get_subclass(dev) != PCIS_STORAGE_IDE)
		return (ENXIO);

	device_set_descf(dev, "%s ATA controller", ata_pcivendor2str(dev));
	ctlr->chipinit = ata_generic_chipinit;
	/* We are a low priority handler. */
	return (BUS_PROBE_GENERIC);
}
/*
 * Generic PCI ATA controller attach: detect channel count, set default
 * channel attach/detach methods, grab the busmaster I/O range when
 * busmastering is enabled, run the chipset init hook, and add a child
 * "ata" device per enabled channel.
 */
int
ata_pci_attach(device_t dev)
{
struct ata_pci_controller *ctlr = device_get_softc(dev);
device_t child;
u_int32_t cmd;
int unit;
/* do chipset specific setups only needed once */
/* A populated BAR(2) implies a second channel on non-legacy parts. */
ctlr->legacy = ata_legacy(dev);
if (ctlr->legacy || pci_read_config(dev, PCIR_BAR(2), 4) & IOMASK)
ctlr->channels = 2;
else
ctlr->channels = 1;
/* All channels enabled by default; chipinit may mask some off. */
ctlr->ichannels = -1;
ctlr->ch_attach = ata_pci_ch_attach;
ctlr->ch_detach = ata_pci_ch_detach;
ctlr->dev = dev;
/* if needed try to enable busmastering */
pci_enable_busmaster(dev);
cmd = pci_read_config(dev, PCIR_COMMAND, 2);
/* if busmastering mode "stuck" use it */
if ((cmd & PCIM_CMD_BUSMASTEREN) == PCIM_CMD_BUSMASTEREN) {
ctlr->r_type1 = SYS_RES_IOPORT;
ctlr->r_rid1 = ATA_BMADDR_RID;
ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1,
RF_ACTIVE);
}
if (ctlr->chipinit(dev)) {
if (ctlr->r_res1)
bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1,
ctlr->r_res1);
return ENXIO;
}
/* attach all channels on this controller */
for (unit = 0; unit < ctlr->channels; unit++) {
if ((ctlr->ichannels & (1 << unit)) == 0)
continue;
/* Legacy channels keep units 0/1; others get a free unit >= 2. */
child = device_add_child(dev, "ata",
((unit == 0 || unit == 1) && ctlr->legacy) ?
unit : devclass_find_free_unit(ata_devclass, 2));
if (child == NULL)
device_printf(dev, "failed to add ata child device\n");
else
device_set_ivars(child, (void *)(intptr_t)unit);
}
bus_attach_children(dev);
return 0;
}
int
ata_pci_detach(device_t dev)
{
struct ata_pci_controller *ctlr = device_get_softc(dev);
+ int error;
/* detach & delete all children */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
if (ctlr->r_irq) {
bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle);
bus_release_resource(dev, SYS_RES_IRQ, ctlr->r_irq_rid, ctlr->r_irq);
if (ctlr->r_irq_rid != ATA_IRQ_RID)
pci_release_msi(dev);
}
if (ctlr->chipdeinit != NULL)
ctlr->chipdeinit(dev);
if (ctlr->r_res2) {
bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2);
}
if (ctlr->r_res1) {
bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1);
}
return 0;
}
/*
 * Suspend handler: suspend the child channels first, then run the
 * chipset-specific suspend hook if one is installed.
 */
int
ata_pci_suspend(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);
	int error;

	error = 0;
	bus_generic_suspend(dev);
	if (ctlr->suspend != NULL)
		error = ctlr->suspend(dev);
	return (error);
}
/*
 * Resume handler: run the chipset-specific resume hook first (the
 * hardware must be reprogrammed before the channels come back), then
 * resume the child channels.
 */
int
ata_pci_resume(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);
	int error;

	error = 0;
	if (ctlr->resume != NULL)
		error = ctlr->resume(dev);
	bus_generic_resume(dev);
	return (error);
}
/*
 * Forward instance-variable reads to our parent bus on our own behalf.
 */
int
ata_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	device_t parent = device_get_parent(dev);

	return (BUS_READ_IVAR(parent, dev, which, result));
}
/*
 * Forward instance-variable writes to our parent bus on our own behalf.
 */
int
ata_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	device_t parent = device_get_parent(dev);

	return (BUS_WRITE_IVAR(parent, dev, which, value));
}
/*
 * Let children read the controller's PCI config space (not their own).
 */
uint32_t
ata_pci_read_config(device_t dev, device_t child, int reg, int width)
{
	uint32_t val;

	val = pci_read_config(dev, reg, width);
	return (val);
}
/*
 * Let children write the controller's PCI config space (not their own).
 */
void
ata_pci_write_config(device_t dev, device_t child, int reg,
    uint32_t val, int width)
{
	pci_write_config(dev, reg, val, width);
}
/*
 * Resource allocation for the atapci bus.
 *
 * Channel children (the "ata" devclass) get their I/O ranges decoded out
 * of the controller's PCI BARs (or the fixed legacy addresses) and share
 * the controller's interrupt in native mode; other children either get
 * the shared IRQ or fall through to the parent bus.
 */
struct resource *
ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count,
    u_int flags)
{
	struct ata_pci_controller *controller = device_get_softc(dev);
	struct resource *res = NULL;

	if (device_get_devclass(child) == ata_devclass) {
		int unit = ((struct ata_channel *)device_get_softc(child))->unit;
		int myrid;

		if (type == SYS_RES_IOPORT) {
			switch (*rid) {
			case ATA_IOADDR_RID:
				/* Legacy mode decodes the fixed compatibility command block. */
				if (controller->legacy) {
					start = (unit ? ATA_SECONDARY : ATA_PRIMARY);
					count = ATA_IOSIZE;
					end = start + count - 1;
				}
				/* Each channel consumes a pair of BARs (8 bytes of cfg space). */
				myrid = PCIR_BAR(0) + (unit << 3);
				res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
				    SYS_RES_IOPORT, &myrid,
				    start, end, count, flags);
				break;
			case ATA_CTLADDR_RID:
				if (controller->legacy) {
					start = (unit ? ATA_SECONDARY : ATA_PRIMARY) +
					    ATA_CTLOFFSET;
					count = ATA_CTLIOSIZE;
					end = start + count - 1;
				}
				myrid = PCIR_BAR(1) + (unit << 3);
				res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
				    SYS_RES_IOPORT, &myrid,
				    start, end, count, flags);
				break;
			}
		}
		if (type == SYS_RES_IRQ && *rid == ATA_IRQ_RID) {
			if (controller->legacy) {
				/* Legacy channels use the fixed ISA IRQs 14/15. */
				int irq = (unit == 0 ? 14 : 15);

				res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
				    SYS_RES_IRQ, rid, irq, irq, 1, flags);
			} else
				/* Native mode: hand out the shared controller IRQ. */
				res = controller->r_irq;
		}
	} else {
		if (type == SYS_RES_IRQ) {
			if (*rid != ATA_IRQ_RID)
				return (NULL);
			res = controller->r_irq;
		} else {
			res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
			    type, rid, start, end, count, flags);
		}
	}
	return (res);
}
/*
 * Release a resource previously handed out by ata_pci_alloc_resource().
 * Mirrors its allocation policy: channel I/O ranges go back to the
 * parent bus, while the shared native-mode IRQ is never released here
 * (the controller keeps it until detach).
 */
int
ata_pci_release_resource(device_t dev, device_t child, struct resource *r)
{
	int rid = rman_get_rid(r);
	int type = rman_get_type(r);

	if (device_get_devclass(child) == ata_devclass) {
		struct ata_pci_controller *controller = device_get_softc(dev);

		if (type == SYS_RES_IOPORT) {
			switch (rid) {
			case ATA_IOADDR_RID:
			case ATA_CTLADDR_RID:
				/* These were allocated on behalf of "dev", not "child". */
				return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev,
				    r);
			default:
				return ENOENT;
			}
		}
		if (type == SYS_RES_IRQ) {
			if (rid != ATA_IRQ_RID)
				return ENOENT;
			if (controller->legacy) {
				/* Legacy IRQs were allocated per-child. */
				return BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
				    r);
			} else
				/* Shared controller IRQ: nothing to release. */
				return 0;
		}
	} else {
		if (type == SYS_RES_IRQ) {
			if (rid != ATA_IRQ_RID)
				return (ENOENT);
			return (0);
		} else {
			return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
			    r));
		}
	}
	return (EINVAL);
}
/*
 * Set up an interrupt handler for a child.
 *
 * Legacy controllers route each channel's IRQ through the parent bus, so
 * hand the request straight up.  Native controllers share one controller
 * interrupt: record the handler in the per-channel dispatch table that
 * ata_generic_intr() consumes.  Filters are rejected in that mode.
 *
 * Fix: the original redeclared (shadowed) "controller" inside the else
 * branch with the identical initializer; the duplicate is removed.
 */
int
ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq,
    int flags, driver_filter_t *filter, driver_intr_t *function,
    void *argument, void **cookiep)
{
	struct ata_pci_controller *controller = device_get_softc(dev);
	int unit;

	if (controller->legacy) {
		return BUS_SETUP_INTR(device_get_parent(dev), child, irq,
		    flags, filter, function, argument, cookiep);
	}
	if (filter != NULL) {
		printf("ata-pci.c: we cannot use a filter here\n");
		return (EINVAL);
	}
	/* Non-channel children share the last dispatch slot. */
	if (device_get_devclass(child) == ata_devclass)
		unit = ((struct ata_channel *)device_get_softc(child))->unit;
	else
		unit = ATA_PCI_MAX_CH - 1;
	controller->interrupt[unit].function = function;
	controller->interrupt[unit].argument = argument;
	*cookiep = controller;
	return 0;
}
/*
 * Tear down an interrupt handler installed by ata_pci_setup_intr().
 * Legacy controllers pass the request up to the parent bus; native
 * controllers simply clear the per-channel dispatch slot.
 *
 * Fix: the original redeclared (shadowed) "controller" inside the else
 * branch with the identical initializer; the duplicate is removed.
 */
int
ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie)
{
	struct ata_pci_controller *controller = device_get_softc(dev);
	int unit;

	if (controller->legacy) {
		return BUS_TEARDOWN_INTR(device_get_parent(dev), child, irq,
		    cookie);
	}
	/* Non-channel children share the last dispatch slot. */
	if (device_get_devclass(child) == ata_devclass)
		unit = ((struct ata_channel *)device_get_softc(child))->unit;
	else
		unit = ATA_PCI_MAX_CH - 1;
	controller->interrupt[unit].function = NULL;
	controller->interrupt[unit].argument = NULL;
	return 0;
}
/*
 * Generic transfer-mode negotiation: clamp the requested mode to UDMA2.
 */
int
ata_generic_setmode(device_t dev, int target, int mode)
{

	if (mode > ATA_UDMA2)
		mode = ATA_UDMA2;
	return (mode);
}
/*
 * Chipset init for controllers with no dedicated support: hook up the
 * generic interrupt handler and the generic setmode method.
 */
int
ata_generic_chipinit(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);

	if (ata_setup_interrupt(dev, ata_generic_intr) != 0)
		return (ENXIO);
	ctlr->setmode = ata_generic_setmode;
	return (0);
}
/*
 * Generic channel attach: map the channel's command and control I/O
 * ranges, initialize DMA, and wire up the register layout plus the
 * busmaster registers (when the controller attach mapped a busmaster
 * resource into r_res1).  Returns 0 on success or ENXIO.
 */
int
ata_pci_ch_attach(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);
	struct resource *io = NULL, *ctlio = NULL;
	int i, rid;

	rid = ATA_IOADDR_RID;
	if (!(io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE)))
		return ENXIO;

	rid = ATA_CTLADDR_RID;
	if (!(ctlio = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE))) {
		bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, io);
		return ENXIO;
	}

	ata_pci_dmainit(dev);

	/* Command-block registers sit at consecutive offsets within "io". */
	for (i = ATA_DATA; i <= ATA_COMMAND; i ++) {
		ch->r_io[i].res = io;
		ch->r_io[i].offset = i;
	}
	ch->r_io[ATA_CONTROL].res = ctlio;
	/* Legacy control port is at offset 0; native BARs use offset 2. */
	ch->r_io[ATA_CONTROL].offset = ctlr->legacy ? 0 : 2;
	ch->r_io[ATA_IDX_ADDR].res = io;
	ata_default_registers(dev);

	/* Busmaster registers: one ATA_BMIOSIZE window per channel. */
	if (ctlr->r_res1) {
		for (i = ATA_BMCMD_PORT; i <= ATA_BMDTP_PORT; i++) {
			ch->r_io[i].res = ctlr->r_res1;
			ch->r_io[i].offset = (i - ATA_BMCMD_PORT) + (ch->unit*ATA_BMIOSIZE);
		}
	}

	ata_pci_hw(dev);
	return 0;
}
/*
 * Generic channel detach: undo ata_pci_ch_attach() by tearing down DMA
 * and returning both I/O ranges.
 */
int
ata_pci_ch_detach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);
	struct resource *ctlio, *io;

	ata_pci_dmafini(dev);

	ctlio = ch->r_io[ATA_CONTROL].res;
	io = ch->r_io[ATA_IDX_ADDR].res;
	bus_release_resource(dev, SYS_RES_IOPORT, ATA_CTLADDR_RID, ctlio);
	bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, io);

	return (0);
}
/*
 * Decide whether this channel is the source of a pending interrupt.
 * Returns 1 when the channel needs servicing, 0 otherwise.
 */
int
ata_pci_status(device_t dev)
{
	struct ata_pci_controller *controller =
	    device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);

	if ((dumping || !controller->legacy) &&
	    ((ch->flags & ATA_ALWAYS_DMASTAT) ||
	     (ch->dma.flags & ATA_DMA_ACTIVE))) {
		int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;

		/* Busmaster interrupt bit clear: this interrupt is not ours. */
		if ((bmstat & ATA_BMSTAT_INTERRUPT) == 0)
			return 0;
		/*
		 * Write the status back with the error bit masked off,
		 * leaving the error indication latched for the stop path
		 * (busmaster status bits appear to be write-1-to-clear —
		 * NOTE(review): confirm against the chipset datasheet).
		 */
		ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR);
		DELAY(1);
	}
	/* Re-check BUSY once after a short settle before giving up. */
	if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) {
		DELAY(100);
		if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY)
			return 0;
	}
	return 1;
}
/*
 * Install the generic register access methods, overriding the status
 * routine with the busmaster-aware PCI variant.
 */
void
ata_pci_hw(device_t dev)
{
	struct ata_channel *ch;

	ata_generic_hw(dev);
	ch = device_get_softc(dev);
	ch->hw.status = ata_pci_status;
}
/*
 * Kick off a busmaster DMA transfer for "request".  Always returns 0.
 */
static int
ata_pci_dmastart(struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(request->parent);

	ATA_DEBUG_RQ(request, "dmastart");

	/* Clear any stale interrupt/error status before starting. */
	ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) |
	    (ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR)));
	/* Point the engine at the scatter/gather list. */
	ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, request->dma->sg_bus);
	ch->dma.flags |= ATA_DMA_ACTIVE;
	/* Program the transfer direction and set the start bit last. */
	ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
	    (ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_WRITE_READ) |
	    ((request->flags & ATA_R_READ) ? ATA_BMCMD_WRITE_READ : 0) |
	    ATA_BMCMD_START_STOP);
	return 0;
}
/*
 * Stop the busmaster engine after a transfer.  Returns the final
 * (masked) busmaster status bits so the caller can inspect errors.
 */
static int
ata_pci_dmastop(struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(request->parent);
	int error;

	ATA_DEBUG_RQ(request, "dmastop");

	/* Clear the start/stop bit to halt the engine. */
	ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
	    ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
	ch->dma.flags &= ~ATA_DMA_ACTIVE;
	/* Latch the status before clearing interrupt/error indications. */
	error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;
	ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR);
	return error;
}
/*
 * Forcibly stop the busmaster engine (channel reset path) and unload
 * the DMA map of any request still marked as running.
 */
static void
ata_pci_dmareset(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);
	struct ata_request *request;

	ATA_IDX_OUTB(ch, ATA_BMCMD_PORT,
	    ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP);
	ch->dma.flags &= ~ATA_DMA_ACTIVE;
	ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR);
	if ((request = ch->running)) {
		device_printf(dev, "DMA reset calling unload\n");
		ch->dma.unload(request);
	}
}
/*
 * Set up channel DMA: run the generic initialization, then install the
 * PCI busmaster start/stop/reset methods.
 */
void
ata_pci_dmainit(device_t dev)
{
	struct ata_channel *ch;

	ata_dmainit(dev);
	ch = device_get_softc(dev);
	ch->dma.start = ata_pci_dmastart;
	ch->dma.stop = ata_pci_dmastop;
	ch->dma.reset = ata_pci_dmareset;
}
/*
 * Channel DMA teardown: only the generic cleanup is needed; the method
 * pointers installed by ata_pci_dmainit() live in the softc.
 */
void
ata_pci_dmafini(device_t dev)
{

	ata_dmafini(dev);
}
/*
 * Print a child channel announcement, including the channel number
 * stashed in the child's ivars.  Returns the character count printed.
 */
int
ata_pci_print_child(device_t dev, device_t child)
{
	int chan = (int)(intptr_t)device_get_ivars(child);
	int n;

	n = bus_print_child_header(dev, child);
	n += printf(" at channel %d", chan);
	n += bus_print_child_footer(dev, child);
	return (n);
}
/*
 * Report a child's bus location ("channel=N") for devinfo and friends.
 */
int
ata_pci_child_location(device_t dev, device_t child, struct sbuf *sb)
{
	int chan;

	chan = (int)(intptr_t)device_get_ivars(child);
	sbuf_printf(sb, "channel=%d", chan);
	return (0);
}
/*
 * All children share the controller's own DMA tag.
 */
static bus_dma_tag_t
ata_pci_get_dma_tag(device_t bus, device_t child)
{

	return (bus_get_dma_tag(bus));
}
/*
 * Kobj method table for the "atapci" controller driver: standard device
 * lifecycle hooks plus the bus methods that service channel children.
 */
static device_method_t ata_pci_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe, ata_pci_probe),
	DEVMETHOD(device_attach, ata_pci_attach),
	DEVMETHOD(device_detach, ata_pci_detach),
	DEVMETHOD(device_suspend, ata_pci_suspend),
	DEVMETHOD(device_resume, ata_pci_resume),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),

	/* bus methods */
	DEVMETHOD(bus_read_ivar, ata_pci_read_ivar),
	DEVMETHOD(bus_write_ivar, ata_pci_write_ivar),
	DEVMETHOD(bus_alloc_resource, ata_pci_alloc_resource),
	DEVMETHOD(bus_release_resource, ata_pci_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr, ata_pci_setup_intr),
	DEVMETHOD(bus_teardown_intr, ata_pci_teardown_intr),
	DEVMETHOD(pci_read_config, ata_pci_read_config),
	DEVMETHOD(pci_write_config, ata_pci_write_config),
	DEVMETHOD(bus_print_child, ata_pci_print_child),
	DEVMETHOD(bus_child_location, ata_pci_child_location),
	DEVMETHOD(bus_get_dma_tag, ata_pci_get_dma_tag),

	DEVMETHOD_END
};
/*
 * Driver glue: register "atapci" on the pci bus; it depends on (and
 * versions against) the core ata module.
 */
static driver_t ata_pci_driver = {
	"atapci",
	ata_pci_methods,
	sizeof(struct ata_pci_controller),
};

DRIVER_MODULE(atapci, pci, ata_pci_driver, NULL, NULL);
MODULE_VERSION(atapci, 1);
MODULE_DEPEND(atapci, ata, 1, 1, 1);
/*
 * Probe a channel child.  A negative ivar marks a slot with no channel
 * behind it, so refuse it; otherwise defer to the generic ATA probe.
 */
static int
ata_pcichannel_probe(device_t dev)
{
	intptr_t unit = (intptr_t)device_get_ivars(dev);

	if (unit < 0)
		return (ENXIO);

	device_set_desc(dev, "ATA channel");
	return (ata_probe(dev));
}
/*
 * Attach a channel child: record identity in the softc, pick up the
 * optional pm_level hint, run the chipset channel attach, then the
 * generic ATA attach.
 */
static int
ata_pcichannel_attach(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);
	int error;

	/* Guard against a repeated attach of the same channel. */
	if (ch->attached)
		return (0);
	ch->attached = 1;

	ch->dev = dev;
	ch->unit = (intptr_t)device_get_ivars(dev);

	/* Optional tunable: hint.ata.<unit>.pm_level. */
	resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "pm_level", &ch->pm_level);

	if ((error = ctlr->ch_attach(dev)))
		return error;

	return ata_attach(dev);
}
/*
 * Detach a channel child: generic ATA detach first, then let the
 * chipset driver release its per-channel resources.
 */
static int
ata_pcichannel_detach(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);
	int error;

	if (!ch->attached)
		return (0);
	ch->attached = 0;

	if ((error = ata_detach(dev)))
		return error;

	if (ctlr->ch_detach)
		return (ctlr->ch_detach(dev));

	return (0);
}
/*
 * Suspend a channel: generic ATA suspend first, then the chipset's
 * per-channel suspend hook if one is registered.
 */
static int
ata_pcichannel_suspend(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);
	int error;

	if (!ch->attached)
		return (0);

	error = ata_suspend(dev);
	if (error != 0)
		return (error);

	if (ctlr->ch_suspend != NULL) {
		error = ctlr->ch_suspend(dev);
		if (error != 0)
			return (error);
	}

	return (0);
}
/*
 * Resume a channel: the chipset's per-channel resume hook first (if
 * registered), then the generic ATA resume.
 */
static int
ata_pcichannel_resume(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);
	int error;

	if (!ch->attached)
		return (0);

	if (ctlr->ch_resume != NULL) {
		error = ctlr->ch_resume(dev);
		if (error != 0)
			return (error);
	}

	return (ata_resume(dev));
}
/*
 * Reset a channel: quiesce the DMA engine first (if present), then run
 * the chipset-specific reset, falling back to the generic one.
 */
static void
ata_pcichannel_reset(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);

	/* if DMA engine present reset it */
	if (ch->dma.reset != NULL)
		ch->dma.reset(dev);

	/* reset the controller HW */
	if (ctlr->reset != NULL)
		ctlr->reset(dev);
	else
		ata_generic_reset(dev);
}
/*
 * Negotiate a transfer mode: prefer the chipset's setmode method,
 * falling back to the generic clamp.
 */
static int
ata_pcichannel_setmode(device_t dev, int target, int mode)
{
	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));

	if (ctlr->setmode == NULL)
		return (ata_generic_setmode(dev, target, mode));
	return (ctlr->setmode(dev, target, mode));
}
/*
 * Report the SATA revision for a target.  Non-SATA channels report 0;
 * SATA channels without a chipset getrev method report 0xff.
 */
static int
ata_pcichannel_getrev(device_t dev, int target)
{
	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);

	if ((ch->flags & ATA_SATA) == 0)
		return (0);
	if (ctlr->getrev != NULL)
		return (ctlr->getrev(dev, target));
	return (0xff);
}
/*
 * Kobj method table for "ata" channel children of atapci controllers.
 */
static device_method_t ata_pcichannel_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe, ata_pcichannel_probe),
	DEVMETHOD(device_attach, ata_pcichannel_attach),
	DEVMETHOD(device_detach, ata_pcichannel_detach),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),
	DEVMETHOD(device_suspend, ata_pcichannel_suspend),
	DEVMETHOD(device_resume, ata_pcichannel_resume),

	/* ATA methods */
	DEVMETHOD(ata_setmode, ata_pcichannel_setmode),
	DEVMETHOD(ata_getrev, ata_pcichannel_getrev),
	DEVMETHOD(ata_reset, ata_pcichannel_reset),

	DEVMETHOD_END
};
/*
 * Driver glue: register the "ata" channel driver on the atapci bus.
 * Non-static: chipset modules reference this driver.
 */
driver_t ata_pcichannel_driver = {
	"ata",
	ata_pcichannel_methods,
	sizeof(struct ata_channel),
};

DRIVER_MODULE(ata, atapci, ata_pcichannel_driver, NULL, NULL);
/*
* misc support functions
*/
/*
 * Decide whether a controller must be driven in legacy (compatibility)
 * mode: true when the PCI class/progif mark it as a master IDE device
 * with at least one channel not in native mode, or when none of the
 * relevant BARs (0-3, 5) are programmed at all.
 */
int
ata_legacy(device_t dev)
{

	return (((pci_read_config(dev, PCIR_SUBCLASS, 1) == PCIS_STORAGE_IDE) &&
	    (pci_read_config(dev, PCIR_PROGIF, 1) & PCIP_STORAGE_IDE_MASTERDEV) &&
	    ((pci_read_config(dev, PCIR_PROGIF, 1) &
	      (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC)) !=
	     (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC))) ||
	    (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	     !pci_read_config(dev, PCIR_BAR(1), 4) &&
	     !pci_read_config(dev, PCIR_BAR(2), 4) &&
	     !pci_read_config(dev, PCIR_BAR(3), 4) &&
	     !pci_read_config(dev, PCIR_BAR(5), 4)));
}
void
ata_generic_intr(void *data)
{
struct ata_pci_controller *ctlr = data;
struct ata_channel *ch;
int unit;
for (unit = 0; unit < ATA_PCI_MAX_CH; unit++) {
if ((ch = ctlr->interrupt[unit].argument))
ctlr->interrupt[unit].function(ch);
}
}
/*
 * Allocate the controller interrupt and install "intr_func" as its
 * handler.  Only native-mode controllers get an interrupt here; legacy
 * controllers allocate per-channel IRQs through the channels instead.
 * MSI is tried when the "msi" hint is set and the device advertises
 * MSI support; on any failure it falls back to the INTx RID.
 * Returns 0 on success or ENXIO, releasing MSI on the error paths.
 */
int
ata_setup_interrupt(device_t dev, void *intr_func)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);
	int i, msi = 0;

	if (!ctlr->legacy) {
		/* hint.<driver>.<unit>.msi=1 opts in to MSI. */
		if (resource_int_value(device_get_name(dev),
		    device_get_unit(dev), "msi", &i) == 0 && i != 0)
			msi = 1;
		if (msi && pci_msi_count(dev) > 0 && pci_alloc_msi(dev, &msi) == 0) {
			ctlr->r_irq_rid = 0x1;
		} else {
			msi = 0;
			ctlr->r_irq_rid = ATA_IRQ_RID;
		}
		if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &ctlr->r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) {
			device_printf(dev, "unable to map interrupt\n");
			if (msi)
				pci_release_msi(dev);
			return ENXIO;
		}
		if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, NULL,
		    intr_func, ctlr, &ctlr->handle))) {
			device_printf(dev, "unable to setup interrupt\n");
			bus_release_resource(dev,
			    SYS_RES_IRQ, ctlr->r_irq_rid, ctlr->r_irq);
			if (msi)
				pci_release_msi(dev);
			return ENXIO;
		}
	}
	return 0;
}
/*
 * Build the controller's verbose description from the vendor name, the
 * chip table text, and the maximum supported DMA mode.
 */
void
ata_set_desc(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);
	const char *vendor, *mode;

	vendor = ata_pcivendor2str(dev);
	mode = ata_mode2str(ctlr->chip->max_dma);
	device_set_descf(dev, "%s %s %s controller", vendor, ctlr->chip->text,
	    mode);
}
/*
 * Look "dev" up in a chip table.  An entry matches when the device id
 * is equal and the device revision is at least the entry's revision.
 * The table is terminated by a zero chipid.  Returns the entry or NULL.
 */
const struct ata_chip_id *
ata_match_chip(device_t dev, const struct ata_chip_id *index)
{
	const struct ata_chip_id *entry;
	uint32_t devid;
	uint8_t revid;

	devid = pci_get_devid(dev);
	revid = pci_get_revid(dev);

	for (entry = index; entry->chipid != 0; entry++) {
		if (entry->chipid == devid && revid >= entry->chiprev)
			return (entry);
	}
	return (NULL);
}
/*
 * Scan the PCI siblings of "dev" for a chip matching the table "index".
 * slot >= 0 requires an exact slot match; a negative slot accepts any
 * slot number up to -slot.  Returns the matching table entry or NULL;
 * the temporary children array is freed on every path.
 */
const struct ata_chip_id *
ata_find_chip(device_t dev, const struct ata_chip_id *index, int slot)
{
	const struct ata_chip_id *idx;
	device_t *children;
	int nchildren, i;
	uint8_t s;

	if (device_get_children(device_get_parent(dev), &children, &nchildren))
		return (NULL);

	for (i = 0; i < nchildren; i++) {
		s = pci_get_slot(children[i]);
		if ((slot >= 0 && s == slot) || (slot < 0 && s <= -slot)) {
			idx = ata_match_chip(children[i], index);
			if (idx != NULL) {
				free(children, M_TEMP);
				return (idx);
			}
		}
	}
	free(children, M_TEMP);
	return (NULL);
}
/*
 * Map a PCI vendor id to a human-readable vendor name; unknown vendors
 * read as "Generic".
 */
const char *
ata_pcivendor2str(device_t dev)
{
	static const struct {
		u_int id;
		const char *name;
	} vendors[] = {
		{ ATA_ACARD_ID,		"Acard" },
		{ ATA_ACER_LABS_ID,	"AcerLabs" },
		{ ATA_AMD_ID,		"AMD" },
		{ ATA_ADAPTEC_ID,	"Adaptec" },
		{ ATA_ATI_ID,		"ATI" },
		{ ATA_CYRIX_ID,		"Cyrix" },
		{ ATA_CYPRESS_ID,	"Cypress" },
		{ ATA_HIGHPOINT_ID,	"HighPoint" },
		{ ATA_INTEL_ID,		"Intel" },
		{ ATA_ITE_ID,		"ITE" },
		{ ATA_JMICRON_ID,	"JMicron" },
		{ ATA_MARVELL_ID,	"Marvell" },
		{ ATA_MARVELL2_ID,	"Marvell" },
		{ ATA_NATIONAL_ID,	"National" },
		{ ATA_NETCELL_ID,	"Netcell" },
		{ ATA_NVIDIA_ID,	"nVidia" },
		{ ATA_PROMISE_ID,	"Promise" },
		{ ATA_SERVERWORKS_ID,	"ServerWorks" },
		{ ATA_SILICON_IMAGE_ID,	"SiI" },
		{ ATA_SIS_ID,		"SiS" },
		{ ATA_VIA_ID,		"VIA" },
		{ ATA_CENATEK_ID,	"Cenatek" },
		{ ATA_MICRON_ID,	"Micron" },
	};
	u_int vendor;
	u_int i;

	vendor = pci_get_vendor(dev);
	for (i = 0; i < nitems(vendors); i++) {
		if (vendors[i].id == vendor)
			return (vendors[i].name);
	}
	return ("Generic");
}
/*
 * Convert an ATA mode value into a flat table index: UDMA modes map
 * past the WDMA modes, which map past the PIO modes.
 */
int
ata_mode2idx(int mode)
{
	int base = mode & ATA_MODE_MASK;

	switch (mode & ATA_DMA_MASK) {
	case ATA_UDMA0:
		return (base + 8);
	case ATA_WDMA0:
		return (base + 5);
	default:
		return (base - ATA_PIO0);
	}
}
diff --git a/sys/dev/atopcase/atopcase.c b/sys/dev/atopcase/atopcase.c
index e4e248f7ce0a..9e64b389c9e3 100644
--- a/sys/dev/atopcase/atopcase.c
+++ b/sys/dev/atopcase/atopcase.c
@@ -1,723 +1,723 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021-2023 Val Packett
* Copyright (c) 2023 Vladimir Kondratyev
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_hid.h"
#include "opt_spi.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include