Index: head/sys/arm/allwinner/a10_gpio.c =================================================================== --- head/sys/arm/allwinner/a10_gpio.c (revision 306901) +++ head/sys/arm/allwinner/a10_gpio.c (revision 306902) @@ -1,782 +1,780 @@ /*- * Copyright (c) 2013 Ganbold Tsagaankhuu * Copyright (c) 2012 Oleksandr Tymoshenko * Copyright (c) 2012 Luiz Otavio O Souza. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include #include #include #if defined(__aarch64__) #include "opt_soc.h" #endif #include "gpio_if.h" #define A10_GPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \ GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN) #define A10_GPIO_NONE 0 #define A10_GPIO_PULLUP 1 #define A10_GPIO_PULLDOWN 2 #define A10_GPIO_INPUT 0 #define A10_GPIO_OUTPUT 1 #define AW_GPIO_DRV_MASK 0x3 #define AW_GPIO_PUD_MASK 0x3 #define AW_PINCTRL 1 #define AW_R_PINCTRL 2 /* Defined in a10_padconf.c */ #ifdef SOC_ALLWINNER_A10 extern const struct allwinner_padconf a10_padconf; #endif /* Defined in a13_padconf.c */ #ifdef SOC_ALLWINNER_A13 extern const struct allwinner_padconf a13_padconf; #endif /* Defined in a20_padconf.c */ #ifdef SOC_ALLWINNER_A20 extern const struct allwinner_padconf a20_padconf; #endif /* Defined in a31_padconf.c */ #ifdef SOC_ALLWINNER_A31 extern const struct allwinner_padconf a31_padconf; #endif /* Defined in a31s_padconf.c */ #ifdef SOC_ALLWINNER_A31S extern const struct allwinner_padconf a31s_padconf; #endif #if defined(SOC_ALLWINNER_A31) || defined(SOC_ALLWINNER_A31S) extern const struct allwinner_padconf a31_r_padconf; #endif /* Defined in h3_padconf.c */ #ifdef SOC_ALLWINNER_H3 extern const struct allwinner_padconf h3_padconf; extern const struct allwinner_padconf h3_r_padconf; #endif /* Defined in a83t_padconf.c */ #ifdef SOC_ALLWINNER_A83T extern const struct allwinner_padconf a83t_padconf; extern const struct allwinner_padconf a83t_r_padconf; #endif /* Defined in a64_padconf.c */ #ifdef SOC_ALLWINNER_A64 extern const struct allwinner_padconf a64_padconf; extern const struct allwinner_padconf a64_r_padconf; #endif static struct ofw_compat_data compat_data[] = { 
#ifdef SOC_ALLWINNER_A10 {"allwinner,sun4i-a10-pinctrl", (uintptr_t)&a10_padconf}, #endif #ifdef SOC_ALLWINNER_A13 {"allwinner,sun5i-a13-pinctrl", (uintptr_t)&a13_padconf}, #endif #ifdef SOC_ALLWINNER_A20 {"allwinner,sun7i-a20-pinctrl", (uintptr_t)&a20_padconf}, #endif #ifdef SOC_ALLWINNER_A31 {"allwinner,sun6i-a31-pinctrl", (uintptr_t)&a31_padconf}, #endif #ifdef SOC_ALLWINNER_A31S {"allwinner,sun6i-a31s-pinctrl", (uintptr_t)&a31s_padconf}, #endif #if defined(SOC_ALLWINNER_A31) || defined(SOC_ALLWINNER_A31S) {"allwinner,sun6i-a31-r-pinctrl", (uintptr_t)&a31_r_padconf}, #endif #ifdef SOC_ALLWINNER_A83T {"allwinner,sun8i-a83t-pinctrl", (uintptr_t)&a83t_padconf}, {"allwinner,sun8i-a83t-r-pinctrl", (uintptr_t)&a83t_r_padconf}, #endif #ifdef SOC_ALLWINNER_H3 {"allwinner,sun8i-h3-pinctrl", (uintptr_t)&h3_padconf}, {"allwinner,sun8i-h3-r-pinctrl", (uintptr_t)&h3_r_padconf}, #endif #ifdef SOC_ALLWINNER_A64 {"allwinner,sun50i-a64-pinctrl", (uintptr_t)&a64_padconf}, {"allwinner,sun50i-a64-r-pinctrl", (uintptr_t)&a64_r_padconf}, #endif {NULL, 0} }; struct a10_gpio_softc { device_t sc_dev; device_t sc_busdev; struct mtx sc_mtx; struct resource * sc_mem_res; struct resource * sc_irq_res; bus_space_tag_t sc_bst; bus_space_handle_t sc_bsh; void * sc_intrhand; const struct allwinner_padconf * padconf; }; #define A10_GPIO_LOCK(_sc) mtx_lock_spin(&(_sc)->sc_mtx) #define A10_GPIO_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->sc_mtx) #define A10_GPIO_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED) #define A10_GPIO_GP_CFG(_bank, _idx) 0x00 + ((_bank) * 0x24) + ((_idx) << 2) #define A10_GPIO_GP_DAT(_bank) 0x10 + ((_bank) * 0x24) #define A10_GPIO_GP_DRV(_bank, _idx) 0x14 + ((_bank) * 0x24) + ((_idx) << 2) #define A10_GPIO_GP_PUL(_bank, _idx) 0x1c + ((_bank) * 0x24) + ((_idx) << 2) #define A10_GPIO_GP_INT_CFG0 0x200 #define A10_GPIO_GP_INT_CFG1 0x204 #define A10_GPIO_GP_INT_CFG2 0x208 #define A10_GPIO_GP_INT_CFG3 0x20c #define A10_GPIO_GP_INT_CTL 0x210 #define A10_GPIO_GP_INT_STA 0x214 #define A10_GPIO_GP_INT_DEB 0x218 #define A10_GPIO_WRITE(_sc, _off, _val) \ bus_space_write_4(_sc->sc_bst, _sc->sc_bsh, _off, _val) #define A10_GPIO_READ(_sc, _off) \ bus_space_read_4(_sc->sc_bst, _sc->sc_bsh, _off) static uint32_t a10_gpio_get_function(struct a10_gpio_softc *sc, uint32_t pin) { uint32_t bank, func, offset; /* Must be called with lock held. */ A10_GPIO_LOCK_ASSERT(sc); if (pin > sc->padconf->npins) return (0); bank = sc->padconf->pins[pin].port; pin = sc->padconf->pins[pin].pin; offset = ((pin & 0x07) << 2); func = A10_GPIO_READ(sc, A10_GPIO_GP_CFG(bank, pin >> 3)); return ((func >> offset) & 0x7); } static int a10_gpio_set_function(struct a10_gpio_softc *sc, uint32_t pin, uint32_t f) { uint32_t bank, data, offset; /* Check if the function exists in the padconf data */ if (sc->padconf->pins[pin].functions[f] == NULL) return (EINVAL); /* Must be called with lock held. */ A10_GPIO_LOCK_ASSERT(sc); bank = sc->padconf->pins[pin].port; pin = sc->padconf->pins[pin].pin; offset = ((pin & 0x07) << 2); data = A10_GPIO_READ(sc, A10_GPIO_GP_CFG(bank, pin >> 3)); data &= ~(7 << offset); data |= (f << offset); A10_GPIO_WRITE(sc, A10_GPIO_GP_CFG(bank, pin >> 3), data); return (0); } static uint32_t a10_gpio_get_pud(struct a10_gpio_softc *sc, uint32_t pin) { uint32_t bank, offset, val; /* Must be called with lock held. 
*/ A10_GPIO_LOCK_ASSERT(sc); bank = sc->padconf->pins[pin].port; pin = sc->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = A10_GPIO_READ(sc, A10_GPIO_GP_PUL(bank, pin >> 4)); return ((val >> offset) & AW_GPIO_PUD_MASK); } static void a10_gpio_set_pud(struct a10_gpio_softc *sc, uint32_t pin, uint32_t state) { uint32_t bank, offset, val; /* Must be called with lock held. */ A10_GPIO_LOCK_ASSERT(sc); bank = sc->padconf->pins[pin].port; pin = sc->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = A10_GPIO_READ(sc, A10_GPIO_GP_PUL(bank, pin >> 4)); val &= ~(AW_GPIO_PUD_MASK << offset); val |= (state << offset); A10_GPIO_WRITE(sc, A10_GPIO_GP_PUL(bank, pin >> 4), val); } static uint32_t a10_gpio_get_drv(struct a10_gpio_softc *sc, uint32_t pin) { uint32_t bank, offset, val; /* Must be called with lock held. */ A10_GPIO_LOCK_ASSERT(sc); bank = sc->padconf->pins[pin].port; pin = sc->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = A10_GPIO_READ(sc, A10_GPIO_GP_DRV(bank, pin >> 4)); return ((val >> offset) & AW_GPIO_DRV_MASK); } static void a10_gpio_set_drv(struct a10_gpio_softc *sc, uint32_t pin, uint32_t drive) { uint32_t bank, offset, val; /* Must be called with lock held. */ A10_GPIO_LOCK_ASSERT(sc); bank = sc->padconf->pins[pin].port; pin = sc->padconf->pins[pin].pin; offset = ((pin & 0x0f) << 1); val = A10_GPIO_READ(sc, A10_GPIO_GP_DRV(bank, pin >> 4)); val &= ~(AW_GPIO_DRV_MASK << offset); val |= (drive << offset); A10_GPIO_WRITE(sc, A10_GPIO_GP_DRV(bank, pin >> 4), val); } static int a10_gpio_pin_configure(struct a10_gpio_softc *sc, uint32_t pin, uint32_t flags) { int err = 0; /* Must be called with lock held. */ A10_GPIO_LOCK_ASSERT(sc); /* Manage input/output. */ if (flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) { if (flags & GPIO_PIN_OUTPUT) err = a10_gpio_set_function(sc, pin, A10_GPIO_OUTPUT); else err = a10_gpio_set_function(sc, pin, A10_GPIO_INPUT); } if (err) return (err); /* Manage Pull-up/pull-down. 
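 * When neither pull flag is requested, the pull resistor is disabled
 * (A10_GPIO_NONE) by the else branch below.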
*/ if (flags & (GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN)) { if (flags & GPIO_PIN_PULLUP) a10_gpio_set_pud(sc, pin, A10_GPIO_PULLUP); else a10_gpio_set_pud(sc, pin, A10_GPIO_PULLDOWN); } else a10_gpio_set_pud(sc, pin, A10_GPIO_NONE); return (0); } static device_t a10_gpio_get_bus(device_t dev) { struct a10_gpio_softc *sc; sc = device_get_softc(dev); return (sc->sc_busdev); } static int a10_gpio_pin_max(device_t dev, int *maxpin) { struct a10_gpio_softc *sc; sc = device_get_softc(dev); *maxpin = sc->padconf->npins - 1; return (0); } static int a10_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { struct a10_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->padconf->npins) return (EINVAL); *caps = A10_GPIO_DEFAULT_CAPS; return (0); } static int a10_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct a10_gpio_softc *sc; uint32_t func; uint32_t pud; sc = device_get_softc(dev); if (pin >= sc->padconf->npins) return (EINVAL); A10_GPIO_LOCK(sc); func = a10_gpio_get_function(sc, pin); switch (func) { case A10_GPIO_INPUT: *flags = GPIO_PIN_INPUT; break; case A10_GPIO_OUTPUT: *flags = GPIO_PIN_OUTPUT; break; default: *flags = 0; break; } pud = a10_gpio_get_pud(sc, pin); switch (pud) { case A10_GPIO_PULLDOWN: *flags |= GPIO_PIN_PULLDOWN; break; case A10_GPIO_PULLUP: *flags |= GPIO_PIN_PULLUP; break; default: break; } A10_GPIO_UNLOCK(sc); return (0); } static int a10_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct a10_gpio_softc *sc; sc = device_get_softc(dev); if (pin >= sc->padconf->npins) return (EINVAL); snprintf(name, GPIOMAXNAME - 1, "%s", sc->padconf->pins[pin].name); name[GPIOMAXNAME - 1] = '\0'; return (0); } static int a10_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct a10_gpio_softc *sc; int err; sc = device_get_softc(dev); if (pin > sc->padconf->npins) return (EINVAL); A10_GPIO_LOCK(sc); err = a10_gpio_pin_configure(sc, pin, flags); A10_GPIO_UNLOCK(sc); return (err); } static int a10_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { struct a10_gpio_softc *sc; uint32_t bank, data; sc = device_get_softc(dev); if (pin > sc->padconf->npins) return (EINVAL); bank = sc->padconf->pins[pin].port; pin = sc->padconf->pins[pin].pin; A10_GPIO_LOCK(sc); data = A10_GPIO_READ(sc, A10_GPIO_GP_DAT(bank)); if (value) data |= (1 << pin); else data &= ~(1 << pin); A10_GPIO_WRITE(sc, A10_GPIO_GP_DAT(bank), data); A10_GPIO_UNLOCK(sc); return (0); } static int a10_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val) { struct a10_gpio_softc *sc; uint32_t bank, reg_data; sc = device_get_softc(dev); if (pin > sc->padconf->npins) return (EINVAL); bank = sc->padconf->pins[pin].port; pin = sc->padconf->pins[pin].pin; A10_GPIO_LOCK(sc); reg_data = A10_GPIO_READ(sc, A10_GPIO_GP_DAT(bank)); A10_GPIO_UNLOCK(sc); *val = (reg_data & (1 << pin)) ? 
1 : 0; return (0); } static int a10_gpio_pin_toggle(device_t dev, uint32_t pin) { struct a10_gpio_softc *sc; uint32_t bank, data; sc = device_get_softc(dev); if (pin > sc->padconf->npins) return (EINVAL); bank = sc->padconf->pins[pin].port; pin = sc->padconf->pins[pin].pin; A10_GPIO_LOCK(sc); data = A10_GPIO_READ(sc, A10_GPIO_GP_DAT(bank)); if (data & (1 << pin)) data &= ~(1 << pin); else data |= (1 << pin); A10_GPIO_WRITE(sc, A10_GPIO_GP_DAT(bank), data); A10_GPIO_UNLOCK(sc); return (0); } static int aw_find_pinnum_by_name(struct a10_gpio_softc *sc, const char *pinname) { int i; for (i = 0; i < sc->padconf->npins; i++) if (!strcmp(pinname, sc->padconf->pins[i].name)) return i; return (-1); } static int aw_find_pin_func(struct a10_gpio_softc *sc, int pin, const char *func) { int i; for (i = 0; i < AW_MAX_FUNC_BY_PIN; i++) if (sc->padconf->pins[pin].functions[i] && !strcmp(func, sc->padconf->pins[pin].functions[i])) return (i); return (-1); } static int aw_fdt_configure_pins(device_t dev, phandle_t cfgxref) { struct a10_gpio_softc *sc; phandle_t node; const char **pinlist = NULL; char *pin_function = NULL; uint32_t pin_drive, pin_pull; int pins_nb, pin_num, pin_func, i, ret; sc = device_get_softc(dev); node = OF_node_from_xref(cfgxref); ret = 0; /* Getting all prop for configuring pins */ pins_nb = ofw_bus_string_list_to_array(node, "allwinner,pins", &pinlist); if (pins_nb <= 0) return (ENOENT); if (OF_getprop_alloc(node, "allwinner,function", sizeof(*pin_function), (void **)&pin_function) == -1) { ret = ENOENT; goto out; } if (OF_getencprop(node, "allwinner,drive", &pin_drive, sizeof(pin_drive)) == -1) { ret = ENOENT; goto out; } if (OF_getencprop(node, "allwinner,pull", &pin_pull, sizeof(pin_pull)) == -1) { ret = ENOENT; goto out; } /* Configure each pin to the correct function, drive and pull */ for (i = 0; i < pins_nb; i++) { pin_num = aw_find_pinnum_by_name(sc, pinlist[i]); if (pin_num == -1) { ret = ENOENT; goto out; } pin_func = aw_find_pin_func(sc, pin_num, pin_function); if (pin_func == -1) { ret = ENOENT; goto out; } A10_GPIO_LOCK(sc); if (a10_gpio_get_function(sc, pin_num) != pin_func) a10_gpio_set_function(sc, pin_num, pin_func); if (a10_gpio_get_drv(sc, pin_num) != pin_drive) a10_gpio_set_drv(sc, pin_num, pin_drive); if (a10_gpio_get_pud(sc, pin_num) != pin_pull && (pin_pull == A10_GPIO_PULLUP || pin_pull == A10_GPIO_PULLDOWN)) a10_gpio_set_pud(sc, pin_num, pin_pull); A10_GPIO_UNLOCK(sc); } out: OF_prop_free(pinlist); OF_prop_free(pin_function); return (ret); } static int a10_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner GPIO/Pinmux controller"); return (BUS_PROBE_DEFAULT); } static int a10_gpio_attach(device_t dev) { int rid, error; phandle_t gpio; struct a10_gpio_softc *sc; clk_t clk; hwreset_t rst; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "a10 gpio", "gpio", MTX_SPIN); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); goto fail; } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); goto fail; } /* Find our node. 
*/ gpio = ofw_bus_get_node(sc->sc_dev); if (!OF_hasprop(gpio, "gpio-controller")) /* Node is not a GPIO controller. */ goto fail; /* Use the right pin data for the current SoC */ sc->padconf = (struct allwinner_padconf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) == 0) { error = hwreset_deassert(rst); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); return (error); } } if (clk_get_by_ofw_index(dev, 0, 0, &clk) == 0) { error = clk_enable(clk); if (error != 0) { device_printf(dev, "could not enable clock\n"); return (error); } } sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) goto fail; /* * Register as a pinctrl device */ fdt_pinctrl_register(dev, "allwinner,pins"); fdt_pinctrl_configure_tree(dev); return (0); fail: if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); mtx_destroy(&sc->sc_mtx); return (ENXIO); } static int a10_gpio_detach(device_t dev) { return (EBUSY); } static phandle_t a10_gpio_get_node(device_t dev, device_t bus) { /* We only have one child, the GPIO bus, which needs our own node. */ return (ofw_bus_get_node(dev)); } static int a10_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells, pcell_t *gpios, uint32_t *pin, uint32_t *flags) { struct a10_gpio_softc *sc; int i; sc = device_get_softc(bus); /* The GPIO pins are mapped as: . */ for (i = 0; i < sc->padconf->npins; i++) if (sc->padconf->pins[i].port == gpios[0] && sc->padconf->pins[i].pin == gpios[1]) { *pin = i; break; } *flags = gpios[gcells - 1]; return (0); } static device_method_t a10_gpio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, a10_gpio_probe), DEVMETHOD(device_attach, a10_gpio_attach), DEVMETHOD(device_detach, a10_gpio_detach), /* GPIO protocol */ DEVMETHOD(gpio_get_bus, a10_gpio_get_bus), DEVMETHOD(gpio_pin_max, a10_gpio_pin_max), DEVMETHOD(gpio_pin_getname, a10_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, a10_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, a10_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, a10_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, a10_gpio_pin_get), DEVMETHOD(gpio_pin_set, a10_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, a10_gpio_pin_toggle), DEVMETHOD(gpio_map_gpios, a10_gpio_map_gpios), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, a10_gpio_get_node), /* fdt_pinctrl interface */ DEVMETHOD(fdt_pinctrl_configure,aw_fdt_configure_pins), DEVMETHOD_END }; static devclass_t a10_gpio_devclass; static driver_t a10_gpio_driver = { "gpio", a10_gpio_methods, sizeof(struct a10_gpio_softc), }; EARLY_DRIVER_MODULE(a10_gpio, simplebus, a10_gpio_driver, a10_gpio_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); Index: head/sys/arm/allwinner/aw_wdog.c =================================================================== --- head/sys/arm/allwinner/aw_wdog.c (revision 306901) +++ head/sys/arm/allwinner/aw_wdog.c (revision 306902) @@ -1,277 +1,276 @@ /*- * Copyright (c) 2013 Oleksandr Tymoshenko * Copyright (c) 2016 Emmanuel Vadot * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #define READ(_sc, _r) bus_read_4((_sc)->res, (_r)) #define WRITE(_sc, _r, _v) bus_write_4((_sc)->res, (_r), (_v)) #define A10_WDOG_CTRL 0x00 #define A31_WDOG_CTRL 0x10 #define WDOG_CTRL_RESTART (1 << 0) #define A31_WDOG_CTRL_KEY (0xa57 << 1) #define A10_WDOG_MODE 0x04 #define A31_WDOG_MODE 0x18 #define A10_WDOG_MODE_INTVL_SHIFT 3 #define A31_WDOG_MODE_INTVL_SHIFT 4 #define A10_WDOG_MODE_RST_EN (1 << 1) #define WDOG_MODE_EN (1 << 0) #define A31_WDOG_CONFIG 0x14 #define A31_WDOG_CONFIG_RST_EN_SYSTEM (1 << 0) #define A31_WDOG_CONFIG_RST_EN_INT (2 << 0) struct aw_wdog_interval { uint64_t milliseconds; unsigned int value; }; struct aw_wdog_interval wd_intervals[] = { { 500, 0 }, { 1000, 1 }, { 2000, 2 }, { 3000, 3 }, { 4000, 4 }, { 5000, 5 }, { 6000, 6 }, { 8000, 7 }, { 10000, 8 }, { 12000, 9 }, { 14000, 10 }, { 16000, 11 }, { 0, 0 } /* sentinel */ }; static struct aw_wdog_softc *aw_wdog_sc = NULL; struct aw_wdog_softc { device_t dev; struct resource * res; struct mtx mtx; uint8_t wdog_ctrl; uint32_t wdog_ctrl_key; uint8_t wdog_mode; uint8_t wdog_mode_intvl_shift; uint8_t wdog_mode_en; uint8_t wdog_config; uint8_t wdog_config_value; }; #define A10_WATCHDOG 1 #define A31_WATCHDOG 2 static struct ofw_compat_data compat_data[] = { {"allwinner,sun4i-a10-wdt", A10_WATCHDOG}, {"allwinner,sun6i-a31-wdt", A31_WATCHDOG}, {NULL, 0} }; static void aw_wdog_watchdog_fn(void *, u_int, int *); static void aw_wdog_shutdown_fn(void *, int); static int aw_wdog_probe(device_t dev) { struct aw_wdog_softc *sc; sc = device_get_softc(dev); if (!ofw_bus_status_okay(dev)) return (ENXIO); switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) { case A10_WATCHDOG: device_set_desc(dev, "Allwinner A10 Watchdog"); return (BUS_PROBE_DEFAULT); case A31_WATCHDOG: device_set_desc(dev, "Allwinner A31 Watchdog"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int aw_wdog_attach(device_t dev) { struct aw_wdog_softc *sc; int rid; if (aw_wdog_sc != NULL) return (ENXIO); sc = device_get_softc(dev); sc->dev = dev; rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } aw_wdog_sc = sc; switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) { case A10_WATCHDOG: sc->wdog_ctrl = A10_WDOG_CTRL; sc->wdog_mode = A10_WDOG_MODE; 
sc->wdog_mode_intvl_shift = A10_WDOG_MODE_INTVL_SHIFT; sc->wdog_mode_en = A10_WDOG_MODE_RST_EN | WDOG_MODE_EN; break; case A31_WATCHDOG: sc->wdog_ctrl = A31_WDOG_CTRL; sc->wdog_ctrl_key = A31_WDOG_CTRL_KEY; sc->wdog_mode = A31_WDOG_MODE; sc->wdog_mode_intvl_shift = A31_WDOG_MODE_INTVL_SHIFT; sc->wdog_mode_en = WDOG_MODE_EN; sc->wdog_config = A31_WDOG_CONFIG; sc->wdog_config_value = A31_WDOG_CONFIG_RST_EN_SYSTEM; break; default: bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res); return (ENXIO); } mtx_init(&sc->mtx, "AW Watchdog", "aw_wdog", MTX_DEF); EVENTHANDLER_REGISTER(watchdog_list, aw_wdog_watchdog_fn, sc, 0); EVENTHANDLER_REGISTER(shutdown_final, aw_wdog_shutdown_fn, sc, SHUTDOWN_PRI_LAST - 1); return (0); } static void aw_wdog_watchdog_fn(void *private, u_int cmd, int *error) { struct aw_wdog_softc *sc; uint64_t ms; int i; sc = private; mtx_lock(&sc->mtx); cmd &= WD_INTERVAL; if (cmd > 0) { ms = ((uint64_t)1 << (cmd & WD_INTERVAL)) / 1000000; i = 0; while (wd_intervals[i].milliseconds && (ms > wd_intervals[i].milliseconds)) i++; if (wd_intervals[i].milliseconds) { WRITE(sc, sc->wdog_mode, (wd_intervals[i].value << sc->wdog_mode_intvl_shift) | sc->wdog_mode_en); WRITE(sc, sc->wdog_ctrl, WDOG_CTRL_RESTART | sc->wdog_ctrl_key); if (sc->wdog_config) WRITE(sc, sc->wdog_config, sc->wdog_config_value); *error = 0; } else { /* * Can't arm * disable watchdog as watchdog(9) requires */ device_printf(sc->dev, "Can't arm, timeout is more than 16 sec\n"); mtx_unlock(&sc->mtx); WRITE(sc, sc->wdog_mode, 0); return; } } else WRITE(sc, sc->wdog_mode, 0); mtx_unlock(&sc->mtx); } static void aw_wdog_shutdown_fn(void *private, int howto) { if ((howto & (RB_POWEROFF|RB_HALT)) == 0) aw_wdog_watchdog_reset(); } void aw_wdog_watchdog_reset() { if (aw_wdog_sc == NULL) { printf("Reset: watchdog device has not been initialized\n"); return; } WRITE(aw_wdog_sc, aw_wdog_sc->wdog_mode, (wd_intervals[0].value << aw_wdog_sc->wdog_mode_intvl_shift) | aw_wdog_sc->wdog_mode_en); if (aw_wdog_sc->wdog_config) WRITE(aw_wdog_sc, aw_wdog_sc->wdog_config, aw_wdog_sc->wdog_config_value); WRITE(aw_wdog_sc, aw_wdog_sc->wdog_ctrl, WDOG_CTRL_RESTART | aw_wdog_sc->wdog_ctrl_key); while(1) ; } static device_method_t aw_wdog_methods[] = { DEVMETHOD(device_probe, aw_wdog_probe), DEVMETHOD(device_attach, aw_wdog_attach), DEVMETHOD_END }; static driver_t aw_wdog_driver = { "aw_wdog", aw_wdog_methods, sizeof(struct aw_wdog_softc), }; static devclass_t aw_wdog_devclass; DRIVER_MODULE(aw_wdog, simplebus, aw_wdog_driver, aw_wdog_devclass, 0, 0); Index: head/sys/arm/amlogic/aml8726/aml8726_machdep.c =================================================================== --- head/sys/arm/amlogic/aml8726/aml8726_machdep.c (revision 306901) +++ head/sys/arm/amlogic/aml8726/aml8726_machdep.c (revision 306902) @@ -1,197 +1,196 @@ /*- * Copyright 2013-2015 John Wehle * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_global.h" #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #if defined(SOCDEV_PA) && defined(SOCDEV_VA) vm_offset_t aml8726_aobus_kva_base = SOCDEV_VA; #else vm_offset_t aml8726_aobus_kva_base; #endif static void aml8726_fixup_busfreq() { phandle_t node; pcell_t freq, prop; ssize_t len; /* * Set the bus-frequency for the SoC simple-bus if it * needs updating (meaning the current frequency is zero). */ if ((freq = aml8726_clkmsr_bus_frequency()) == 0 || (node = OF_finddevice("/soc")) == 0 || fdt_is_compatible_strict(node, "simple-bus") == 0) while (1); freq = cpu_to_fdt32(freq); len = OF_getencprop(node, "bus-frequency", &prop, sizeof(prop)); if ((len / sizeof(prop)) == 1 && prop == 0) OF_setprop(node, "bus-frequency", (void *)&freq, sizeof(freq)); } vm_offset_t platform_lastaddr(void) { return (devmap_lastaddr()); } void platform_probe_and_attach(void) { } void platform_gpio_init(void) { /* * The UART console driver used for debugging early boot code * needs to know the virtual base address of the aobus. It's * expected to equal SOCDEV_VA prior to initarm calling setttb * ... afterwards it needs to be updated due to the new page * tables. * * This means there's a deadzone in initarm between setttb * and platform_gpio_init during which printf can't be used. */ aml8726_aobus_kva_base = (vm_offset_t)devmap_ptov(0xc8100000, 0x100000); /* * The hardware mux used by clkmsr is unique to the SoC (though * currently clk81 is at a fixed location, however that might * change in the future). */ aml8726_identify_soc(); /* * My aml8726-m3 development box which identifies the CPU as * a Cortex A9-r2 rev 4 randomly locks up during boot when WFI * is used. */ switch (aml8726_soc_hw_rev) { case AML_SOC_HW_REV_M3: cpufuncs.cf_sleep = (void *)cpufunc_nullop; break; default: break; } /* * This FDT fixup should arguably be called through fdt_fixup_table, * however currently there's no mechanism to specify a fixup which * should always be invoked. * * It needs to be called prior to the console being initialized which * is why it's called here, rather than from platform_late_init. */ aml8726_fixup_busfreq(); } void platform_late_init(void) { } /* * Construct static devmap entries to map out the core * peripherals using 1mb section mappings. 
*/ int platform_devmap_init(void) { devmap_add_entry(0xc1100000, 0x200000); /* cbus */ devmap_add_entry(0xc4200000, 0x100000); /* pl310 */ devmap_add_entry(0xc4300000, 0x100000); /* periph */ devmap_add_entry(0xc8000000, 0x100000); /* apbbus */ devmap_add_entry(0xc8100000, 0x100000); /* aobus */ devmap_add_entry(0xc9000000, 0x800000); /* ahbbus */ devmap_add_entry(0xd9000000, 0x100000); /* ahb */ devmap_add_entry(0xda000000, 0x100000); /* secbus */ return (0); } #ifndef INTRNG #ifndef DEV_GIC static int fdt_pic_decode_ic(phandle_t node, pcell_t *intr, int *interrupt, int *trig, int *pol) { /* * The single core chips have just an Amlogic PIC. */ if (!fdt_is_compatible_strict(node, "amlogic,aml8726-pic")) return (ENXIO); *interrupt = fdt32_to_cpu(intr[1]); *trig = INTR_TRIGGER_EDGE; *pol = INTR_POLARITY_HIGH; return (0); } #endif fdt_pic_decode_t fdt_pic_table[] = { #ifdef DEV_GIC &gic_decode_fdt, #else &fdt_pic_decode_ic, #endif NULL }; #endif /* INTRNG */ Index: head/sys/arm/amlogic/aml8726/aml8726_wdt.c =================================================================== --- head/sys/arm/amlogic/aml8726/aml8726_wdt.c (revision 306901) +++ head/sys/arm/amlogic/aml8726/aml8726_wdt.c (revision 306902) @@ -1,308 +1,307 @@ /*- * Copyright 2013-2015 John Wehle * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Amlogic aml8726 watchdog driver. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include -#include #include #include #include #include struct aml8726_wdt_softc { device_t dev; struct resource * res[2]; struct mtx mtx; void * ih_cookie; }; static struct resource_spec aml8726_wdt_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static struct { uint32_t ctrl_cpu_mask; uint32_t ctrl_en; uint32_t term_cnt_mask; uint32_t reset_cnt_mask; } aml8726_wdt_soc_params; /* * devclass_get_device / device_get_softc could be used * to dynamically locate this, however the wdt is a * required device which can't be unloaded so there's * no need for the overhead. 
*/ static struct aml8726_wdt_softc *aml8726_wdt_sc = NULL; #define AML_WDT_LOCK(sc) mtx_lock_spin(&(sc)->mtx) #define AML_WDT_UNLOCK(sc) mtx_unlock_spin(&(sc)->mtx) #define AML_WDT_LOCK_INIT(sc) \ mtx_init(&(sc)->mtx, device_get_nameunit((sc)->dev), \ "wdt", MTX_SPIN) #define AML_WDT_LOCK_DESTROY(sc) mtx_destroy(&(sc)->mtx); #define AML_WDT_CTRL_REG 0 #define AML_WDT_CTRL_CPU_WDRESET_MASK aml8726_wdt_soc_params.ctrl_cpu_mask #define AML_WDT_CTRL_CPU_WDRESET_SHIFT 24 #define AML_WDT_CTRL_IRQ_EN (1 << 23) #define AML_WDT_CTRL_EN aml8726_wdt_soc_params.ctrl_en #define AML_WDT_CTRL_TERMINAL_CNT_MASK aml8726_wdt_soc_params.term_cnt_mask #define AML_WDT_CTRL_TERMINAL_CNT_SHIFT 0 #define AML_WDT_RESET_REG 4 #define AML_WDT_RESET_CNT_MASK aml8726_wdt_soc_params.reset_cnt_mask #define AML_WDT_RESET_CNT_SHIFT 0 #define CSR_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], reg, (val)) #define CSR_READ_4(sc, reg) bus_read_4((sc)->res[0], reg) #define CSR_BARRIER(sc, reg) bus_barrier((sc)->res[0], reg, 4, \ (BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)) static void aml8726_wdt_watchdog(void *private, u_int cmd, int *error) { struct aml8726_wdt_softc *sc = (struct aml8726_wdt_softc *)private; uint32_t wcr; uint64_t tens_of_usec; AML_WDT_LOCK(sc); tens_of_usec = (((uint64_t)1 << (cmd & WD_INTERVAL)) + 9999) / 10000; if (cmd != 0 && tens_of_usec <= (AML_WDT_CTRL_TERMINAL_CNT_MASK >> AML_WDT_CTRL_TERMINAL_CNT_SHIFT)) { wcr = AML_WDT_CTRL_CPU_WDRESET_MASK | AML_WDT_CTRL_EN | ((uint32_t)tens_of_usec << AML_WDT_CTRL_TERMINAL_CNT_SHIFT); CSR_WRITE_4(sc, AML_WDT_RESET_REG, 0); CSR_WRITE_4(sc, AML_WDT_CTRL_REG, wcr); *error = 0; } else CSR_WRITE_4(sc, AML_WDT_CTRL_REG, (CSR_READ_4(sc, AML_WDT_CTRL_REG) & ~(AML_WDT_CTRL_IRQ_EN | AML_WDT_CTRL_EN))); AML_WDT_UNLOCK(sc); } static int aml8726_wdt_intr(void *arg) { struct aml8726_wdt_softc *sc = (struct aml8726_wdt_softc *)arg; /* * Normally a timeout causes a hardware reset, however * the watchdog timer can be configured to cause an * interrupt instead by setting AML_WDT_CTRL_IRQ_EN * and clearing AML_WDT_CTRL_CPU_WDRESET_MASK. */ AML_WDT_LOCK(sc); CSR_WRITE_4(sc, AML_WDT_CTRL_REG, (CSR_READ_4(sc, AML_WDT_CTRL_REG) & ~(AML_WDT_CTRL_IRQ_EN | AML_WDT_CTRL_EN))); CSR_BARRIER(sc, AML_WDT_CTRL_REG); AML_WDT_UNLOCK(sc); device_printf(sc->dev, "timeout expired\n"); return (FILTER_HANDLED); } static int aml8726_wdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "amlogic,meson6-wdt")) return (ENXIO); device_set_desc(dev, "Amlogic aml8726 WDT"); return (BUS_PROBE_DEFAULT); } static int aml8726_wdt_attach(device_t dev) { struct aml8726_wdt_softc *sc = device_get_softc(dev); /* There should be exactly one instance. */ if (aml8726_wdt_sc != NULL) return (ENXIO); sc->dev = dev; if (bus_alloc_resources(dev, aml8726_wdt_spec, sc->res)) { device_printf(dev, "can not allocate resources for device\n"); return (ENXIO); } /* * Certain bitfields are dependent on the hardware revision. 
*/ switch (aml8726_soc_hw_rev) { case AML_SOC_HW_REV_M8: aml8726_wdt_soc_params.ctrl_cpu_mask = 0xf << AML_WDT_CTRL_CPU_WDRESET_SHIFT; switch (aml8726_soc_metal_rev) { case AML_SOC_M8_METAL_REV_M2_A: aml8726_wdt_soc_params.ctrl_en = 1 << 19; aml8726_wdt_soc_params.term_cnt_mask = 0x07ffff << AML_WDT_CTRL_TERMINAL_CNT_SHIFT; aml8726_wdt_soc_params.reset_cnt_mask = 0x07ffff << AML_WDT_RESET_CNT_SHIFT; break; default: aml8726_wdt_soc_params.ctrl_en = 1 << 22; aml8726_wdt_soc_params.term_cnt_mask = 0x3fffff << AML_WDT_CTRL_TERMINAL_CNT_SHIFT; aml8726_wdt_soc_params.reset_cnt_mask = 0x3fffff << AML_WDT_RESET_CNT_SHIFT; break; } break; case AML_SOC_HW_REV_M8B: aml8726_wdt_soc_params.ctrl_cpu_mask = 0xf << AML_WDT_CTRL_CPU_WDRESET_SHIFT; aml8726_wdt_soc_params.ctrl_en = 1 << 19; aml8726_wdt_soc_params.term_cnt_mask = 0x07ffff << AML_WDT_CTRL_TERMINAL_CNT_SHIFT; aml8726_wdt_soc_params.reset_cnt_mask = 0x07ffff << AML_WDT_RESET_CNT_SHIFT; break; default: aml8726_wdt_soc_params.ctrl_cpu_mask = 3 << AML_WDT_CTRL_CPU_WDRESET_SHIFT; aml8726_wdt_soc_params.ctrl_en = 1 << 22; aml8726_wdt_soc_params.term_cnt_mask = 0x3fffff << AML_WDT_CTRL_TERMINAL_CNT_SHIFT; aml8726_wdt_soc_params.reset_cnt_mask = 0x3fffff << AML_WDT_RESET_CNT_SHIFT; break; } /* * Disable the watchdog. */ CSR_WRITE_4(sc, AML_WDT_CTRL_REG, (CSR_READ_4(sc, AML_WDT_CTRL_REG) & ~(AML_WDT_CTRL_IRQ_EN | AML_WDT_CTRL_EN))); /* * Initialize the mutex prior to installing the interrupt handler * in case of a spurious interrupt. */ AML_WDT_LOCK_INIT(sc); if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, aml8726_wdt_intr, NULL, sc, &sc->ih_cookie)) { device_printf(dev, "could not setup interrupt handler\n"); bus_release_resources(dev, aml8726_wdt_spec, sc->res); AML_WDT_LOCK_DESTROY(sc); return (ENXIO); } aml8726_wdt_sc = sc; EVENTHANDLER_REGISTER(watchdog_list, aml8726_wdt_watchdog, sc, 0); return (0); } static int aml8726_wdt_detach(device_t dev) { return (EBUSY); } static device_method_t aml8726_wdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aml8726_wdt_probe), DEVMETHOD(device_attach, aml8726_wdt_attach), DEVMETHOD(device_detach, aml8726_wdt_detach), DEVMETHOD_END }; static driver_t aml8726_wdt_driver = { "wdt", aml8726_wdt_methods, sizeof(struct aml8726_wdt_softc), }; static devclass_t aml8726_wdt_devclass; EARLY_DRIVER_MODULE(wdt, simplebus, aml8726_wdt_driver, aml8726_wdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); void cpu_reset() { /* Watchdog has not yet been initialized */ if (aml8726_wdt_sc == NULL) printf("Reset hardware has not yet been initialized.\n"); else { CSR_WRITE_4(aml8726_wdt_sc, AML_WDT_RESET_REG, 0); CSR_WRITE_4(aml8726_wdt_sc, AML_WDT_CTRL_REG, (AML_WDT_CTRL_CPU_WDRESET_MASK | AML_WDT_CTRL_EN | (10 << AML_WDT_CTRL_TERMINAL_CNT_SHIFT))); } while (1); } Index: head/sys/arm/at91/at91_aic.c =================================================================== --- head/sys/arm/at91/at91_aic.c (revision 306901) +++ head/sys/arm/at91/at91_aic.c (revision 306902) @@ -1,185 +1,183 @@ /*- * Copyright (c) 2014 Warner Losh. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #ifdef FDT #include #include #include #endif static struct aic_softc { struct resource *mem_res; /* Memory resource */ void *intrhand; /* Interrupt handle */ device_t sc_dev; } *sc; static inline uint32_t RD4(struct aic_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static inline void WR4(struct aic_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off, val); } void arm_mask_irq(uintptr_t nb) { WR4(sc, IC_IDCR, 1 << nb); } int arm_get_next_irq(int last __unused) { int status; int irq; irq = RD4(sc, IC_IVR); status = RD4(sc, IC_ISR); if (status == 0) { WR4(sc, IC_EOICR, 1); return (-1); } return (irq); } void arm_unmask_irq(uintptr_t nb) { WR4(sc, IC_IECR, 1 << nb); WR4(sc, IC_EOICR, 0); } static int at91_aic_probe(device_t dev) { #ifdef FDT if (!ofw_bus_is_compatible(dev, "atmel,at91rm9200-aic")) return (ENXIO); #endif device_set_desc(dev, "AIC"); return (0); } static int at91_aic_attach(device_t dev) { int i, rid, err = 0; device_printf(dev, "Attach %d\n", bus_current_pass); sc = device_get_softc(dev); sc->sc_dev = dev; rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) panic("couldn't allocate register resources"); /* * Setup the interrupt table. */ if (soc_info.soc_data == NULL || soc_info.soc_data->soc_irq_prio == NULL) panic("Interrupt priority table missing\n"); for (i = 0; i < 32; i++) { WR4(sc, IC_SVR + i * 4, i); /* Priority. */ WR4(sc, IC_SMR + i * 4, soc_info.soc_data->soc_irq_prio[i]); if (i < 8) WR4(sc, IC_EOICR, 1); } WR4(sc, IC_SPU, 32); /* No debug. */ WR4(sc, IC_DCR, 0); /* Disable and clear all interrupts. 
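 * Writing all ones to IC_IDCR masks every source and to IC_ICCR clears
 * anything already pending.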
*/ WR4(sc, IC_IDCR, 0xffffffff); WR4(sc, IC_ICCR, 0xffffffff); enable_interrupts(PSR_I | PSR_F); return (err); } static void at91_aic_new_pass(device_t dev) { device_printf(dev, "Pass %d\n", bus_current_pass); } static device_method_t at91_aic_methods[] = { DEVMETHOD(device_probe, at91_aic_probe), DEVMETHOD(device_attach, at91_aic_attach), DEVMETHOD(bus_new_pass, at91_aic_new_pass), DEVMETHOD_END }; static driver_t at91_aic_driver = { "at91_aic", at91_aic_methods, sizeof(struct aic_softc), }; static devclass_t at91_aic_devclass; #ifdef FDT EARLY_DRIVER_MODULE(at91_aic, simplebus, at91_aic_driver, at91_aic_devclass, NULL, NULL, BUS_PASS_INTERRUPT); #else EARLY_DRIVER_MODULE(at91_aic, atmelarm, at91_aic_driver, at91_aic_devclass, NULL, NULL, BUS_PASS_INTERRUPT); #endif Index: head/sys/arm/at91/at91_cfata.c =================================================================== --- head/sys/arm/at91/at91_cfata.c (revision 306901) +++ head/sys/arm/at91/at91_cfata.c (revision 306902) @@ -1,281 +1,279 @@ /*- * Copyright (c) 2008 Deglitch Networks, Stanislav Sedov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Driver for the AT91RM9200 CompactFlash controller operating in a * common memory mode. Interrupts are driven by polling. The driver * implements an ATA bridge and attached ATA channel driver on top * of it. * NOTE WELL: this driver uses polling mode. To achieve an acceptable * operating speed you will probably want to use HZ=2000 in kernel * config. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include struct at91_cfata_softc { device_t dev; struct resource *mem_res; struct resource irq; void (*isr_cb)(void *); void *isr_arg; struct callout tick; }; static int at91_cfata_detach(device_t dev); static void at91_cfata_callout(void *arg); static int at91_cfata_probe(device_t dev) { device_set_desc_copy(dev, "AT91RM9200 CompactFlash controller"); return (0); } static int at91_cfata_attach(device_t dev) { struct at91_cfata_softc *sc; int rid, error; sc = device_get_softc(dev); sc->dev = dev; rid = 0; error = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) return (ENOMEM); /* XXX: init CF controller? 
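 * Nothing is programmed here today; the callout below, armed from
 * ata_at91_setup_intr(), polls the device in lieu of a real interrupt line.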
*/ callout_init(&sc->tick, 1); /* Callout to poll the device. */ device_add_child(dev, "ata", -1); bus_generic_attach(dev); return (0); } static int at91_cfata_detach(device_t dev) { struct at91_cfata_softc *sc; sc = device_get_softc(dev); bus_generic_detach(sc->dev); if (sc->mem_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); sc->mem_res = NULL; } return (0); } static struct resource * ata_at91_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct at91_cfata_softc *sc = device_get_softc(dev); KASSERT(type == SYS_RES_IRQ && *rid == ATA_IRQ_RID, ("[at91_cfata, %d]: illegal resource request (type %u rid %u)", __LINE__, type, *rid)); return (&sc->irq); } static int ata_at91_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { KASSERT(type == SYS_RES_IRQ && rid == ATA_IRQ_RID, ("[at91_cfata, %d]: illegal resource request (type %u rid %u)", __LINE__, type, rid)); return (0); } static int ata_at91_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *function, void *argument, void **cookiep) { struct at91_cfata_softc *sc = device_get_softc(dev); KASSERT(sc->isr_cb == NULL, ("[at91_cfata, %d]: overwriting the old handler", __LINE__)); sc->isr_cb = function; sc->isr_arg = argument; *cookiep = sc; callout_reset(&sc->tick, 1, at91_cfata_callout, sc); return (0); } static int ata_at91_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct at91_cfata_softc *sc = device_get_softc(dev); sc->isr_cb = NULL; sc->isr_arg = NULL; return (0); } static void at91_cfata_callout(void *arg) { struct at91_cfata_softc *sc; sc = (struct at91_cfata_softc *)arg; if (sc->isr_cb != NULL) sc->isr_cb(sc->isr_arg); callout_reset(&sc->tick, 1, at91_cfata_callout, sc); } static int at91_channel_probe(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ch->unit = 0; ch->flags |= ATA_USE_16BIT | ATA_NO_SLAVE; device_set_desc_copy(dev, "ATA channel 0"); return (ata_probe(dev)); } static int at91_channel_attach(device_t dev) { struct at91_cfata_softc *sc = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int i; for (i = 0; i < ATA_MAX_RES; i++) ch->r_io[i].res = sc->mem_res; /* * CF+ Specification. * 6.1.3 Memory Mapped Addressing. */ ch->r_io[ATA_DATA].offset = 0x00; ch->r_io[ATA_FEATURE].offset = 0x01; ch->r_io[ATA_COUNT].offset = 0x02; ch->r_io[ATA_SECTOR].offset = 0x03; ch->r_io[ATA_CYL_LSB].offset = 0x04; ch->r_io[ATA_CYL_MSB].offset = 0x05; ch->r_io[ATA_DRIVE].offset = 0x06; ch->r_io[ATA_COMMAND].offset = 0x07; ch->r_io[ATA_ERROR].offset = 0x01; ch->r_io[ATA_IREASON].offset = 0x02; ch->r_io[ATA_STATUS].offset = 0x07; ch->r_io[ATA_ALTSTAT].offset = 0x0e; ch->r_io[ATA_CONTROL].offset = 0x0e; /* Should point at the base of registers. */ ch->r_io[ATA_IDX_ADDR].offset = 0x0; ata_generic_hw(dev); return (ata_attach(dev)); } static device_method_t at91_cfata_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, at91_cfata_probe), DEVMETHOD(device_attach, at91_cfata_attach), DEVMETHOD(device_detach, at91_cfata_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* ATA bus methods. 
*/ DEVMETHOD(bus_alloc_resource, ata_at91_alloc_resource), DEVMETHOD(bus_release_resource, ata_at91_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ata_at91_setup_intr), DEVMETHOD(bus_teardown_intr, ata_at91_teardown_intr), { 0, 0 } }; devclass_t at91_cfata_devclass; static driver_t at91_cfata_driver = { "at91_cfata", at91_cfata_methods, sizeof(struct at91_cfata_softc), }; DRIVER_MODULE(at91_cfata, atmelarm, at91_cfata_driver, at91_cfata_devclass, 0, 0); MODULE_VERSION(at91_cfata, 1); MODULE_DEPEND(at91_cfata, ata, 1, 1, 1); /* * ATA channel driver. */ static device_method_t at91_channel_methods[] = { /* device interface */ DEVMETHOD(device_probe, at91_channel_probe), DEVMETHOD(device_attach, at91_channel_attach), DEVMETHOD(device_detach, ata_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, ata_suspend), DEVMETHOD(device_resume, ata_resume), { 0, 0 } }; driver_t at91_channel_driver = { "ata", at91_channel_methods, sizeof(struct ata_channel), }; DRIVER_MODULE(ata, at91_cfata, at91_channel_driver, ata_devclass, 0, 0); Index: head/sys/arm/at91/at91_mci.c =================================================================== --- head/sys/arm/at91/at91_mci.c (revision 306901) +++ head/sys/arm/at91/at91_mci.c (revision 306902) @@ -1,1415 +1,1413 @@ /*- * Copyright (c) 2006 Bernd Walter. All rights reserved. * Copyright (c) 2006 M. Warner Losh. All rights reserved. * Copyright (c) 2010 Greg Ansley. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif #include "mmcbr_if.h" #include "opt_at91.h" /* * About running the MCI bus above 25MHz * * Historically, the MCI bus has been run at 30MHz on systems with a 60MHz * master clock, in part due to a bug in dev/mmc.c making always request * 30MHz, and in part over clocking the bus because 15MHz was too slow. 
* Fixing that bug causes the mmc driver to request a 25MHz clock (as it * should) and the logic in at91_mci_update_ios() picks the highest speed that * doesn't exceed that limit. With a 60MHz MCK that would be 15MHz, and * that's a real performance buzzkill when you've been getting away with 30MHz * all along. * * By defining AT91_MCI_ALLOW_OVERCLOCK (or setting the allow_overclock=1 * device hint or sysctl) you can enable logic in at91_mci_update_ios() to * overclock the SD bus a little by running it at MCK / 2 when the requested * speed is 25MHz and the next highest speed is 15MHz or less. This appears * to work on virtually all SD cards, since it is what this driver has been * doing prior to the introduction of this option, where the overclocking vs * underclocking decision was automatically "overclock". Modern SD cards can * run at 45MHz/1-bit in standard mode (high speed mode enable commands not * sent) without problems. * * Speaking of high-speed mode, the rm9200 manual says the MCI device supports * the SD v1.0 specification and can run up to 50MHz. This is interesting in * that the SD v1.0 spec caps the speed at 25MHz; high speed mode was added in * the v1.10 spec. Furthermore, high speed mode doesn't just crank up the * clock, it alters the signal timing. The rm9200 MCI device doesn't support * these altered timings. So while speeds over 25MHz may work, they only work * in what the SD spec calls "default" speed mode, and it amounts to violating * the spec by overclocking the bus. * * If you also enable 4-wire mode it's possible transfers faster than 25MHz * will fail. On the AT91RM9200, due to bugs in the bus contention logic, * transfers will fail if you have the USB host device and OHCI driver enabled. Even * underclocking to 15MHz, intermittent overrun and underrun errors occur. * Note that you don't even need to have usb devices attached to the system; * the errors begin to occur as soon as the OHCI driver sets the register bit * to enable periodic transfers. It appears (based on brief investigation) * that the usb host controller uses so much ASB bandwidth that sometimes the * DMA for MCI transfers doesn't get a bus grant in time and data gets * dropped. Adding even a modicum of network activity changes the symptom * from intermittent to very frequent. Members of the AT91SAM9 family have * corrected this problem, or are at least better about their use of the bus. */ #ifndef AT91_MCI_ALLOW_OVERCLOCK #define AT91_MCI_ALLOW_OVERCLOCK 1 #endif /* * Allocate 2 bounce buffers we'll use to endian-swap the data due to the rm9200 * erratum. We use a pair of buffers because when reading that lets us begin * endian-swapping the data in the first buffer while the DMA is reading into * the second buffer. (We can't use the same trick for writing because we might * not get all the data in the 2nd buffer swapped before the hardware needs it; * dealing with that would add complexity to the driver.) * * The buffers are sized at 16K each due to the way the busdma cache sync * operations work on arm. A dcache_inv_range() operation on a range larger * than 16K gets turned into a dcache_wbinv_all(). That needlessly flushes the * entire data cache, impacting overall system performance.
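 * With two 16K buffers, a single request is also bounded to MAX_BLOCKS
 * (64) 512-byte blocks, as computed below.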
*/ #define BBCOUNT 2 #define BBSIZE (16*1024) #define MAX_BLOCKS ((BBSIZE*BBCOUNT)/512) static int mci_debug; struct at91_mci_softc { void *intrhand; /* Interrupt handle */ device_t dev; int sc_cap; #define CAP_HAS_4WIRE 1 /* Has 4 wire bus */ #define CAP_NEEDS_BYTESWAP 2 /* broken hardware needing bounce */ #define CAP_MCI1_REV2XX 4 /* MCI 1 rev 2.x */ int flags; #define PENDING_CMD 0x01 #define PENDING_STOP 0x02 #define CMD_MULTIREAD 0x10 #define CMD_MULTIWRITE 0x20 int has_4wire; int allow_overclock; struct resource *irq_res; /* IRQ resource */ struct resource *mem_res; /* Memory resource */ struct mtx sc_mtx; bus_dma_tag_t dmatag; struct mmc_host host; int bus_busy; struct mmc_request *req; struct mmc_command *curcmd; bus_dmamap_t bbuf_map[BBCOUNT]; char * bbuf_vaddr[BBCOUNT]; /* bounce bufs in KVA space */ uint32_t bbuf_len[BBCOUNT]; /* len currently queued for bounce buf */ uint32_t bbuf_curidx; /* which bbuf is the active DMA buffer */ uint32_t xfer_offset; /* offset so far into caller's buf */ }; /* bus entry points */ static int at91_mci_probe(device_t dev); static int at91_mci_attach(device_t dev); static int at91_mci_detach(device_t dev); static void at91_mci_intr(void *); /* helper routines */ static int at91_mci_activate(device_t dev); static void at91_mci_deactivate(device_t dev); static int at91_mci_is_mci1rev2xx(void); #define AT91_MCI_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define AT91_MCI_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define AT91_MCI_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ "mci", MTX_DEF) #define AT91_MCI_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define AT91_MCI_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define AT91_MCI_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); static inline uint32_t RD4(struct at91_mci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static inline void WR4(struct at91_mci_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off, val); } static void at91_bswap_buf(struct at91_mci_softc *sc, void * dptr, void * sptr, uint32_t memsize) { uint32_t * dst = (uint32_t *)dptr; uint32_t * src = (uint32_t *)sptr; uint32_t i; /* * If the hardware doesn't need byte-swapping, let bcopy() do the * work. Use bounce buffer even if we don't need byteswap, since * buffer may straddle a page boundary, and we don't handle * multi-segment transfers in hardware. Seen from 'bsdlabel -w' which * uses raw geom access to the volume. Greg Ansley (gja (at) * ansley.com) */ if (!(sc->sc_cap & CAP_NEEDS_BYTESWAP)) { memcpy(dptr, sptr, memsize); return; } /* * Nice performance boost for slightly unrolling this loop. * (But very little extra boost for further unrolling it.) */ for (i = 0; i < memsize; i += 16) { *dst++ = bswap32(*src++); *dst++ = bswap32(*src++); *dst++ = bswap32(*src++); *dst++ = bswap32(*src++); } /* Mop up the last 1-3 words, if any. */ for (i = 0; i < (memsize & 0x0F); i += 4) { *dst++ = bswap32(*src++); } } static void at91_mci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs[0].ds_addr; } static void at91_mci_pdc_disable(struct at91_mci_softc *sc) { WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS); WR4(sc, PDC_RPR, 0); WR4(sc, PDC_RCR, 0); WR4(sc, PDC_RNPR, 0); WR4(sc, PDC_RNCR, 0); WR4(sc, PDC_TPR, 0); WR4(sc, PDC_TCR, 0); WR4(sc, PDC_TNPR, 0); WR4(sc, PDC_TNCR, 0); } /* * Reset the controller, then restore most of the current state. * * This is called after detecting an error. 
It's also called after stopping a * multi-block write, to un-wedge the device so that it will handle the NOTBUSY * signal correctly. See comments in at91_mci_stop_done() for more details. */ static void at91_mci_reset(struct at91_mci_softc *sc) { uint32_t mr; uint32_t sdcr; uint32_t dtor; uint32_t imr; at91_mci_pdc_disable(sc); /* save current state */ imr = RD4(sc, MCI_IMR); mr = RD4(sc, MCI_MR) & 0x7fff; sdcr = RD4(sc, MCI_SDCR); dtor = RD4(sc, MCI_DTOR); /* reset the controller */ WR4(sc, MCI_IDR, 0xffffffff); WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* restore state */ WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN); WR4(sc, MCI_MR, mr); WR4(sc, MCI_SDCR, sdcr); WR4(sc, MCI_DTOR, dtor); WR4(sc, MCI_IER, imr); /* * Make sure sdio interrupts will fire. Not sure why reading * SR ensures that, but this is in the linux driver. */ RD4(sc, MCI_SR); } static void at91_mci_init(device_t dev) { struct at91_mci_softc *sc = device_get_softc(dev); uint32_t val; WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */ WR4(sc, MCI_IDR, 0xffffffff); /* Turn off interrupts */ WR4(sc, MCI_DTOR, MCI_DTOR_DTOMUL_1M | 1); val = MCI_MR_PDCMODE; val |= 0x34a; /* PWSDIV = 3; CLKDIV = 74 */ // if (sc->sc_cap & CAP_MCI1_REV2XX) // val |= MCI_MR_RDPROOF | MCI_MR_WRPROOF; WR4(sc, MCI_MR, val); #ifndef AT91_MCI_SLOT_B WR4(sc, MCI_SDCR, 0); /* SLOT A, 1 bit bus */ #else /* * XXX Really should add second "unit" but nobody using using * a two slot card that we know of. XXX */ WR4(sc, MCI_SDCR, 1); /* SLOT B, 1 bit bus */ #endif /* * Enable controller, including power-save. The slower clock * of the power-save mode is only in effect when there is no * transfer in progress, so it can be left in this mode all * the time. */ WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN); } static void at91_mci_fini(device_t dev) { struct at91_mci_softc *sc = device_get_softc(dev); WR4(sc, MCI_IDR, 0xffffffff); /* Turn off interrupts */ at91_mci_pdc_disable(sc); WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */ } static int at91_mci_probe(device_t dev) { #ifdef FDT if (!ofw_bus_is_compatible(dev, "atmel,hsmci")) return (ENXIO); #endif device_set_desc(dev, "MCI mmc/sd host bridge"); return (0); } static int at91_mci_attach(device_t dev) { struct at91_mci_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; device_t child; int err, i; sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); sc->dev = dev; sc->sc_cap = 0; if (at91_is_rm92()) sc->sc_cap |= CAP_NEEDS_BYTESWAP; /* * MCI1 Rev 2 controllers need some workarounds, flag if so. */ if (at91_mci_is_mci1rev2xx()) sc->sc_cap |= CAP_MCI1_REV2XX; err = at91_mci_activate(dev); if (err) goto out; AT91_MCI_LOCK_INIT(sc); at91_mci_fini(dev); at91_mci_init(dev); /* * Allocate DMA tags and maps and bounce buffers. * * The parms in the tag_create call cause the dmamem_alloc call to * create each bounce buffer as a single contiguous buffer of BBSIZE * bytes aligned to a 4096 byte boundary. * * Do not use DMA_COHERENT for these buffers because that maps the * memory as non-cachable, which prevents cache line burst fills/writes, * which is something we need since we're trying to overlap the * byte-swapping with the DMA operations. 
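 * * In the bus_dma_tag_create() call just below, that intent shows up as: an alignment of 4096, a single segment (nsegments = 1) of at most BBSIZE bytes (maxsize and maxsegsz are both BBSIZE), and no BUS_DMA_COHERENT flag at bus_dmamem_alloc() time (only BUS_DMA_NOWAIT), so the bounce buffers remain cacheable.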
*/ err = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BBSIZE, 1, BBSIZE, 0, NULL, NULL, &sc->dmatag); if (err != 0) goto out; for (i = 0; i < BBCOUNT; ++i) { err = bus_dmamem_alloc(sc->dmatag, (void **)&sc->bbuf_vaddr[i], BUS_DMA_NOWAIT, &sc->bbuf_map[i]); if (err != 0) goto out; } /* * Activate the interrupt */ err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, at91_mci_intr, sc, &sc->intrhand); if (err) { AT91_MCI_LOCK_DESTROY(sc); goto out; } /* * Allow 4-wire to be initially set via #define. * Allow a device hint to override that. * Allow a sysctl to override that. */ #if defined(AT91_MCI_HAS_4WIRE) && AT91_MCI_HAS_4WIRE != 0 sc->has_4wire = 1; #endif resource_int_value(device_get_name(dev), device_get_unit(dev), "4wire", &sc->has_4wire); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "4wire", CTLFLAG_RW, &sc->has_4wire, 0, "has 4 wire SD Card bus"); if (sc->has_4wire) sc->sc_cap |= CAP_HAS_4WIRE; sc->allow_overclock = AT91_MCI_ALLOW_OVERCLOCK; resource_int_value(device_get_name(dev), device_get_unit(dev), "allow_overclock", &sc->allow_overclock); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "allow_overclock", CTLFLAG_RW, &sc->allow_overclock, 0, "Allow up to 30MHz clock for 25MHz request when next highest speed 15MHz or less."); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "debug", CTLFLAG_RWTUN, &mci_debug, 0, "enable debug output"); /* * Our real min freq is master_clock/512, but upper driver layers are * going to set the min speed during card discovery, and the right speed * for that is 400kHz, so advertise a safe value just under that. * * For max speed, while the rm9200 manual says the max is 50mhz, it also * says it supports only the SD v1.0 spec, which means the real limit is * 25mhz. On the other hand, historical use has been to slightly violate * the standard by running the bus at 30MHz. For more information on * that, see the comments at the top of this file. 
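 * * Worked example, assuming a hypothetical 60MHz MCK (the figure used in the overclocking discussion at the top of this file): the true minimum would be 60MHz/512, roughly 117kHz, but f_min is set to 375kHz, just under the 400kHz used during card discovery; f_max starts at 60MHz/2 = 30MHz and is clamped to 25MHz, and at91_mci_update_ios() then selects MCK/((clkdiv+1)*2), i.e. 15MHz with clkdiv = 1, or 30MHz with clkdiv = 0 when overclocking is allowed.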
*/ sc->host.f_min = 375000; sc->host.f_max = at91_master_clock / 2; if (sc->host.f_max > 25000000) sc->host.f_max = 25000000; sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; sc->host.caps = 0; if (sc->sc_cap & CAP_HAS_4WIRE) sc->host.caps |= MMC_CAP_4_BIT_DATA; child = device_add_child(dev, "mmc", 0); device_set_ivars(dev, &sc->host); err = bus_generic_attach(dev); out: if (err) at91_mci_deactivate(dev); return (err); } static int at91_mci_detach(device_t dev) { struct at91_mci_softc *sc = device_get_softc(dev); at91_mci_fini(dev); at91_mci_deactivate(dev); bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[0], sc->bbuf_map[0]); bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[1], sc->bbuf_map[1]); bus_dma_tag_destroy(sc->dmatag); return (EBUSY); /* XXX */ } static int at91_mci_activate(device_t dev) { struct at91_mci_softc *sc; int rid; sc = device_get_softc(dev); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) goto errout; rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) goto errout; return (0); errout: at91_mci_deactivate(dev); return (ENOMEM); } static void at91_mci_deactivate(device_t dev) { struct at91_mci_softc *sc; sc = device_get_softc(dev); if (sc->intrhand) bus_teardown_intr(dev, sc->irq_res, sc->intrhand); sc->intrhand = NULL; bus_generic_detach(sc->dev); if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); sc->mem_res = NULL; if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); sc->irq_res = NULL; return; } static int at91_mci_is_mci1rev2xx(void) { switch (soc_info.type) { case AT91_T_SAM9260: case AT91_T_SAM9263: case AT91_T_CAP9: case AT91_T_SAM9G10: case AT91_T_SAM9G20: case AT91_T_SAM9RL: return(1); default: return (0); } } static int at91_mci_update_ios(device_t brdev, device_t reqdev) { struct at91_mci_softc *sc; struct mmc_ios *ios; uint32_t clkdiv; uint32_t freq; sc = device_get_softc(brdev); ios = &sc->host.ios; /* * Calculate our closest available clock speed that doesn't exceed the * requested speed. * * When overclocking is allowed, the requested clock is 25MHz, the * computed frequency is 15MHz or smaller and clockdiv is 1, use * clockdiv of 0 to double that. If less than 12.5MHz, double * regardless of the overclocking setting. * * Whatever we come up with, store it back into ios->clock so that the * upper layer drivers can report the actual speed of the bus. */ if (ios->clock == 0) { WR4(sc, MCI_CR, MCI_CR_MCIDIS); clkdiv = 0; } else { WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN); if ((at91_master_clock % (ios->clock * 2)) == 0) clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; else clkdiv = (at91_master_clock / ios->clock) / 2; freq = at91_master_clock / ((clkdiv+1) * 2); if (clkdiv == 1 && ios->clock == 25000000 && freq <= 15000000) { if (sc->allow_overclock || freq <= 12500000) { clkdiv = 0; freq = at91_master_clock / ((clkdiv+1) * 2); } } ios->clock = freq; } if (ios->bus_width == bus_width_4) WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) | MCI_SDCR_SDCBUS); else WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) & ~MCI_SDCR_SDCBUS); WR4(sc, MCI_MR, (RD4(sc, MCI_MR) & ~MCI_MR_CLKDIV) | clkdiv); /* Do we need a settle time here? 
*/ /* XXX We need to turn the device on/off here with a GPIO pin */ return (0); } static void at91_mci_start_cmd(struct at91_mci_softc *sc, struct mmc_command *cmd) { uint32_t cmdr, mr; struct mmc_data *data; sc->curcmd = cmd; data = cmd->data; /* XXX Upper layers don't always set this */ cmd->mrq = sc->req; /* Begin setting up command register. */ cmdr = cmd->opcode; if (sc->host.ios.bus_mode == opendrain) cmdr |= MCI_CMDR_OPDCMD; /* Set up response handling. Allow max timeout for responses. */ if (MMC_RSP(cmd->flags) == MMC_RSP_NONE) cmdr |= MCI_CMDR_RSPTYP_NO; else { cmdr |= MCI_CMDR_MAXLAT; if (cmd->flags & MMC_RSP_136) cmdr |= MCI_CMDR_RSPTYP_136; else cmdr |= MCI_CMDR_RSPTYP_48; } /* * If there is no data transfer, just set up the right interrupt mask * and start the command. * * The interrupt mask needs to be CMDRDY plus all non-data-transfer * errors. It's important to leave the transfer-related errors out, to * avoid spurious timeout or crc errors on a STOP command following a * multiblock read. When a multiblock read is in progress, sending a * STOP in the middle of a block occasionally triggers such errors, but * we're totally disinterested in them because we've already gotten all * the data we wanted without error before sending the STOP command. */ if (data == NULL) { uint32_t ier = MCI_SR_CMDRDY | MCI_SR_RTOE | MCI_SR_RENDE | MCI_SR_RCRCE | MCI_SR_RDIRE | MCI_SR_RINDE; at91_mci_pdc_disable(sc); if (cmd->opcode == MMC_STOP_TRANSMISSION) cmdr |= MCI_CMDR_TRCMD_STOP; /* Ignore response CRC on CMD2 and ACMD41, per standard. */ if (cmd->opcode == MMC_SEND_OP_COND || cmd->opcode == ACMD_SD_SEND_OP_COND) ier &= ~MCI_SR_RCRCE; if (mci_debug) printf("CMDR %x (opcode %d) ARGR %x no data\n", cmdr, cmd->opcode, cmd->arg); WR4(sc, MCI_ARGR, cmd->arg); WR4(sc, MCI_CMDR, cmdr); WR4(sc, MCI_IDR, 0xffffffff); WR4(sc, MCI_IER, ier); return; } /* There is data, set up the transfer-related parts of the command. */ if (data->flags & MMC_DATA_READ) cmdr |= MCI_CMDR_TRDIR; if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) cmdr |= MCI_CMDR_TRCMD_START; if (data->flags & MMC_DATA_STREAM) cmdr |= MCI_CMDR_TRTYP_STREAM; else if (data->flags & MMC_DATA_MULTI) { cmdr |= MCI_CMDR_TRTYP_MULTIPLE; sc->flags |= (data->flags & MMC_DATA_READ) ? CMD_MULTIREAD : CMD_MULTIWRITE; } /* * Disable PDC until we're ready. * * Set block size and turn on PDC mode for dma xfer. * Note that the block size is the smaller of the amount of data to be * transferred, or 512 bytes. The 512 size is fixed by the standard; * smaller blocks are possible, but never larger. */ WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS); mr = RD4(sc,MCI_MR) & ~MCI_MR_BLKLEN; mr |= min(data->len, 512) << 16; WR4(sc, MCI_MR, mr | MCI_MR_PDCMODE|MCI_MR_PDCPADV); /* * Set up DMA. * * Use bounce buffers even if we don't need to byteswap, because doing * multi-block IO with large DMA buffers is way fast (compared to * single-block IO), even after incurring the overhead of also copying * from/to the caller's buffers (which may be in non-contiguous physical * pages). * * In an ideal non-byteswap world we could create a dma tag that allows * for discontiguous segments and do the IO directly from/to the * caller's buffer(s), using ENDRX/ENDTX interrupts to chain the * discontiguous buffers through the PDC. Someday. * * If a read is bigger than 2k, split it in half so that we can start * byte-swapping the first half while the second half is on the wire. 
* It would be best if we could split it into 8k chunks, but we can't * always keep up with the byte-swapping due to other system activity, * and if an RXBUFF interrupt happens while we're still handling the * byte-swap from the prior buffer (IE, we haven't returned from * handling the prior interrupt yet), then data will get dropped on the * floor and we can't easily recover from that. The right fix for that * would be to have the interrupt handling only keep the DMA flowing and * enqueue filled buffers to be byte-swapped in a non-interrupt context. * Even that won't work on the write side of things though; in that * context we have to have all the data ready to go before starting the * dma. * * XXX what about stream transfers? */ sc->xfer_offset = 0; sc->bbuf_curidx = 0; if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) { uint32_t len; uint32_t remaining = data->len; bus_addr_t paddr; int err; if (remaining > (BBCOUNT*BBSIZE)) panic("IO read size exceeds MAXDATA\n"); if (data->flags & MMC_DATA_READ) { if (remaining > 2048) // XXX len = remaining / 2; else len = remaining; err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0], sc->bbuf_vaddr[0], len, at91_mci_getaddr, &paddr, BUS_DMA_NOWAIT); if (err != 0) panic("IO read dmamap_load failed\n"); bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0], BUS_DMASYNC_PREREAD); WR4(sc, PDC_RPR, paddr); WR4(sc, PDC_RCR, len / 4); sc->bbuf_len[0] = len; remaining -= len; if (remaining == 0) { sc->bbuf_len[1] = 0; } else { len = remaining; err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1], sc->bbuf_vaddr[1], len, at91_mci_getaddr, &paddr, BUS_DMA_NOWAIT); if (err != 0) panic("IO read dmamap_load failed\n"); bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1], BUS_DMASYNC_PREREAD); WR4(sc, PDC_RNPR, paddr); WR4(sc, PDC_RNCR, len / 4); sc->bbuf_len[1] = len; remaining -= len; } WR4(sc, PDC_PTCR, PDC_PTCR_RXTEN); } else { len = min(BBSIZE, remaining); at91_bswap_buf(sc, sc->bbuf_vaddr[0], data->data, len); err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0], sc->bbuf_vaddr[0], len, at91_mci_getaddr, &paddr, BUS_DMA_NOWAIT); if (err != 0) panic("IO write dmamap_load failed\n"); bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0], BUS_DMASYNC_PREWRITE); /* * Erratum workaround: PDC transfer length on a write * must not be smaller than 12 bytes (3 words); only * blklen bytes (set above) are actually transferred. */ WR4(sc, PDC_TPR,paddr); WR4(sc, PDC_TCR, (len < 12) ? 3 : len / 4); sc->bbuf_len[0] = len; remaining -= len; if (remaining == 0) { sc->bbuf_len[1] = 0; } else { len = remaining; at91_bswap_buf(sc, sc->bbuf_vaddr[1], ((char *)data->data)+BBSIZE, len); err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1], sc->bbuf_vaddr[1], len, at91_mci_getaddr, &paddr, BUS_DMA_NOWAIT); if (err != 0) panic("IO write dmamap_load failed\n"); bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1], BUS_DMASYNC_PREWRITE); WR4(sc, PDC_TNPR, paddr); WR4(sc, PDC_TNCR, (len < 12) ? 3 : len / 4); sc->bbuf_len[1] = len; remaining -= len; } /* do not enable PDC xfer until CMDRDY asserted */ } data->xfer_len = 0; /* XXX what's this? appears to be unused. 
*/ } if (mci_debug) printf("CMDR %x (opcode %d) ARGR %x with data len %d\n", cmdr, cmd->opcode, cmd->arg, cmd->data->len); WR4(sc, MCI_ARGR, cmd->arg); WR4(sc, MCI_CMDR, cmdr); WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_CMDRDY); } static void at91_mci_next_operation(struct at91_mci_softc *sc) { struct mmc_request *req; req = sc->req; if (req == NULL) return; if (sc->flags & PENDING_CMD) { sc->flags &= ~PENDING_CMD; at91_mci_start_cmd(sc, req->cmd); return; } else if (sc->flags & PENDING_STOP) { sc->flags &= ~PENDING_STOP; at91_mci_start_cmd(sc, req->stop); return; } WR4(sc, MCI_IDR, 0xffffffff); sc->req = NULL; sc->curcmd = NULL; //printf("req done\n"); req->done(req); } static int at91_mci_request(device_t brdev, device_t reqdev, struct mmc_request *req) { struct at91_mci_softc *sc = device_get_softc(brdev); AT91_MCI_LOCK(sc); if (sc->req != NULL) { AT91_MCI_UNLOCK(sc); return (EBUSY); } //printf("new req\n"); sc->req = req; sc->flags = PENDING_CMD; if (sc->req->stop) sc->flags |= PENDING_STOP; at91_mci_next_operation(sc); AT91_MCI_UNLOCK(sc); return (0); } static int at91_mci_get_ro(device_t brdev, device_t reqdev) { return (0); } static int at91_mci_acquire_host(device_t brdev, device_t reqdev) { struct at91_mci_softc *sc = device_get_softc(brdev); int err = 0; AT91_MCI_LOCK(sc); while (sc->bus_busy) msleep(sc, &sc->sc_mtx, PZERO, "mciah", hz / 5); sc->bus_busy++; AT91_MCI_UNLOCK(sc); return (err); } static int at91_mci_release_host(device_t brdev, device_t reqdev) { struct at91_mci_softc *sc = device_get_softc(brdev); AT91_MCI_LOCK(sc); sc->bus_busy--; wakeup(sc); AT91_MCI_UNLOCK(sc); return (0); } static void at91_mci_read_done(struct at91_mci_softc *sc, uint32_t sr) { struct mmc_command *cmd = sc->curcmd; char * dataptr = (char *)cmd->data->data; uint32_t curidx = sc->bbuf_curidx; uint32_t len = sc->bbuf_len[curidx]; /* * We arrive here when a DMA transfer for a read is done, whether it's * a single or multi-block read. * * We byte-swap the buffer that just completed, and if that is the * last buffer that's part of this read then we move on to the next * operation, otherwise we wait for another ENDRX for the next bufer. */ bus_dmamap_sync(sc->dmatag, sc->bbuf_map[curidx], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->dmatag, sc->bbuf_map[curidx]); at91_bswap_buf(sc, dataptr + sc->xfer_offset, sc->bbuf_vaddr[curidx], len); if (mci_debug) { printf("read done sr %x curidx %d len %d xfer_offset %d\n", sr, curidx, len, sc->xfer_offset); } sc->xfer_offset += len; sc->bbuf_curidx = !curidx; /* swap buffers */ /* * If we've transferred all the data, move on to the next operation. * * If we're still transferring the last buffer, RNCR is already zero but * we have to write a zero anyway to clear the ENDRX status so we don't * re-interrupt until the last buffer is done. */ if (sc->xfer_offset == cmd->data->len) { WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS); cmd->error = MMC_ERR_NONE; at91_mci_next_operation(sc); } else { WR4(sc, PDC_RNCR, 0); WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_ENDRX); } } static void at91_mci_write_done(struct at91_mci_softc *sc, uint32_t sr) { struct mmc_command *cmd = sc->curcmd; /* * We arrive here when the entire DMA transfer for a write is done, * whether it's a single or multi-block write. If it's multi-block we * have to immediately move on to the next operation which is to send * the stop command. 
If it's a single-block transfer we need to wait * for NOTBUSY, but if that's already asserted we can avoid another * interrupt and just move on to completing the request right away. */ WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS); bus_dmamap_sync(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx]); if ((cmd->data->flags & MMC_DATA_MULTI) || (sr & MCI_SR_NOTBUSY)) { cmd->error = MMC_ERR_NONE; at91_mci_next_operation(sc); } else { WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY); } } static void at91_mci_notbusy(struct at91_mci_softc *sc) { struct mmc_command *cmd = sc->curcmd; /* * We arrive here by either completion of a single-block write, or * completion of the stop command that ended a multi-block write (and, * I suppose, after a card-select or erase, but I haven't tested * those). Anyway, we're done and it's time to move on to the next * command. */ cmd->error = MMC_ERR_NONE; at91_mci_next_operation(sc); } static void at91_mci_stop_done(struct at91_mci_softc *sc, uint32_t sr) { struct mmc_command *cmd = sc->curcmd; /* * We arrive here after receiving CMDRDY for a MMC_STOP_TRANSMISSION * command. Depending on the operation being stopped, we may have to * do some unusual things to work around hardware bugs. */ /* * This is known to be true of at91rm9200 hardware; it may or may not * apply to more recent chips: * * After stopping a multi-block write, the NOTBUSY bit in MCI_SR does * not properly reflect the actual busy state of the card as signaled * on the DAT0 line; it always claims the card is not-busy. If we * believe that and let operations continue, following commands will * fail with response timeouts (except of course MMC_SEND_STATUS -- it * indicates the card is busy in the PRG state, which was the smoking * gun that showed MCI_SR NOTBUSY was not tracking DAT0 correctly). * * The atmel docs are emphatic: "This flag [NOTBUSY] must be used only * for Write Operations." I guess technically since we sent a stop * it's not a write operation anymore. But then just what did they * think it meant for the stop command to have "...an optional busy * signal transmitted on the data line" according to the SD spec? * * I tried a variety of things to un-wedge the MCI and get the status * register to reflect NOTBUSY correctly again, but the only thing * that worked was a full device reset. It feels like an awfully big * hammer, but doing a full reset after every multiblock write is * still faster than doing single-block IO (by almost two orders of * magnitude: 20KB/sec improves to about 1.8MB/sec best case). * * After doing the reset, wait for a NOTBUSY interrupt before * continuing with the next operation. * * This workaround breaks multiwrite on the rev2xx parts, but some other * workaround is needed. */ if ((sc->flags & CMD_MULTIWRITE) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) { at91_mci_reset(sc); WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY); return; } /* * This is known to be true of at91rm9200 hardware; it may or may not * apply to more recent chips: * * After stopping a multi-block read, loop to read and discard any * data that coasts in after we sent the stop command. The docs don't * say anything about it, but empirical testing shows that 1-3 * additional words of data get buffered up in some unmentioned * internal fifo and if we don't read and discard them here they end * up on the front of the next read DMA transfer we do. * * This appears to be unnecessary for rev2xx parts. 
*/ if ((sc->flags & CMD_MULTIREAD) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) { uint32_t sr; int count = 0; do { sr = RD4(sc, MCI_SR); if (sr & MCI_SR_RXRDY) { RD4(sc, MCI_RDR); ++count; } } while (sr & MCI_SR_RXRDY); at91_mci_reset(sc); } cmd->error = MMC_ERR_NONE; at91_mci_next_operation(sc); } static void at91_mci_cmdrdy(struct at91_mci_softc *sc, uint32_t sr) { struct mmc_command *cmd = sc->curcmd; int i; if (cmd == NULL) return; /* * We get here at the end of EVERY command. We retrieve the command * response (if any) then decide what to do next based on the command. */ if (cmd->flags & MMC_RSP_PRESENT) { for (i = 0; i < ((cmd->flags & MMC_RSP_136) ? 4 : 1); i++) { cmd->resp[i] = RD4(sc, MCI_RSPR + i * 4); if (mci_debug) printf("RSPR[%d] = %x sr=%x\n", i, cmd->resp[i], sr); } } /* * If this was a stop command, go handle the various special * conditions (read: bugs) that have to be dealt with following a stop. */ if (cmd->opcode == MMC_STOP_TRANSMISSION) { at91_mci_stop_done(sc, sr); return; } /* * If this command can continue to assert BUSY beyond the response then * we need to wait for NOTBUSY before the command is really done. * * Note that this may not work properly on the at91rm9200. It certainly * doesn't work for the STOP command that follows a multi-block write, * so post-stop CMDRDY is handled separately; see the special handling * in at91_mci_stop_done(). * * Beside STOP, there are other R1B-type commands that use the busy * signal after CMDRDY: CMD7 (card select), CMD28-29 (write protect), * CMD38 (erase). I haven't tested any of them, but I rather expect * them all to have the same sort of problem with MCI_SR not actually * reflecting the state of the DAT0-line busy indicator. So this code * may need to grow some sort of special handling for them too. (This * just in: CMD7 isn't a problem right now because dev/mmc.c incorrectly * sets the response flags to R1 rather than R1B.) XXX */ if ((cmd->flags & MMC_RSP_BUSY)) { WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY); return; } /* * If there is a data transfer with this command, then... * - If it's a read, we need to wait for ENDRX. * - If it's a write, now is the time to enable the PDC, and we need * to wait for a BLKE that follows a TXBUFE, because if we're doing * a split transfer we get a BLKE after the first half (when TPR/TCR * get loaded from TNPR/TNCR). So first we wait for the TXBUFE, and * the handling for that interrupt will then invoke the wait for the * subsequent BLKE which indicates actual completion. */ if (cmd->data) { uint32_t ier; if (cmd->data->flags & MMC_DATA_READ) { ier = MCI_SR_ENDRX; } else { ier = MCI_SR_TXBUFE; WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN); } WR4(sc, MCI_IER, MCI_SR_ERROR | ier); return; } /* * If we made it to here, we don't need to wait for anything more for * the current command, move on to the next command (will complete the * request if there is no next command). */ cmd->error = MMC_ERR_NONE; at91_mci_next_operation(sc); } static void at91_mci_intr(void *arg) { struct at91_mci_softc *sc = (struct at91_mci_softc*)arg; struct mmc_command *cmd = sc->curcmd; uint32_t sr, isr; AT91_MCI_LOCK(sc); sr = RD4(sc, MCI_SR); isr = sr & RD4(sc, MCI_IMR); if (mci_debug) printf("i 0x%x sr 0x%x\n", isr, sr); /* * All interrupts are one-shot; disable it now. * The next operation will re-enable whatever interrupts it wants. 
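 * * Note that isr was computed above as (sr & the current MCI_IMR), so the MCI_IDR write below disables exactly the enabled sources that fired; sources that were never enabled are left alone.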
*/ WR4(sc, MCI_IDR, isr); if (isr & MCI_SR_ERROR) { if (isr & (MCI_SR_RTOE | MCI_SR_DTOE)) cmd->error = MMC_ERR_TIMEOUT; else if (isr & (MCI_SR_RCRCE | MCI_SR_DCRCE)) cmd->error = MMC_ERR_BADCRC; else if (isr & (MCI_SR_OVRE | MCI_SR_UNRE)) cmd->error = MMC_ERR_FIFO; else cmd->error = MMC_ERR_FAILED; /* * CMD8 is used to probe for SDHC cards, a standard SD card * will get a response timeout; don't report it because it's a * normal and expected condition. One might argue that all * error reporting should be left to higher levels, but when * they report at all it's always EIO, which isn't very * helpful. XXX bootverbose? */ if (cmd->opcode != 8) { device_printf(sc->dev, "IO error; status MCI_SR = 0x%b cmd opcode = %d%s\n", sr, MCI_SR_BITSTRING, cmd->opcode, (cmd->opcode != 12) ? "" : (sc->flags & CMD_MULTIREAD) ? " after read" : " after write"); /* XXX not sure RTOE needs a full reset, just a retry */ at91_mci_reset(sc); } at91_mci_next_operation(sc); } else { if (isr & MCI_SR_TXBUFE) { // printf("TXBUFE\n"); /* * We need to wait for a BLKE that follows TXBUFE * (intermediate BLKEs might happen after ENDTXes if * we're chaining multiple buffers). If BLKE is also * asserted at the time we get TXBUFE, we can avoid * another interrupt and process it right away, below. */ if (sr & MCI_SR_BLKE) isr |= MCI_SR_BLKE; else WR4(sc, MCI_IER, MCI_SR_BLKE); } if (isr & MCI_SR_RXBUFF) { // printf("RXBUFF\n"); } if (isr & MCI_SR_ENDTX) { // printf("ENDTX\n"); } if (isr & MCI_SR_ENDRX) { // printf("ENDRX\n"); at91_mci_read_done(sc, sr); } if (isr & MCI_SR_NOTBUSY) { // printf("NOTBUSY\n"); at91_mci_notbusy(sc); } if (isr & MCI_SR_DTIP) { // printf("Data transfer in progress\n"); } if (isr & MCI_SR_BLKE) { // printf("Block transfer end\n"); at91_mci_write_done(sc, sr); } if (isr & MCI_SR_TXRDY) { // printf("Ready to transmit\n"); } if (isr & MCI_SR_RXRDY) { // printf("Ready to receive\n"); } if (isr & MCI_SR_CMDRDY) { // printf("Command ready\n"); at91_mci_cmdrdy(sc, sr); } } AT91_MCI_UNLOCK(sc); } static int at91_mci_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct at91_mci_softc *sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->host.ios.vdd; break; case MMCBR_IVAR_CAPS: if (sc->has_4wire) { sc->sc_cap |= CAP_HAS_4WIRE; sc->host.caps |= MMC_CAP_4_BIT_DATA; } else { sc->sc_cap &= ~CAP_HAS_4WIRE; sc->host.caps &= ~MMC_CAP_4_BIT_DATA; } *(int *)result = sc->host.caps; break; case MMCBR_IVAR_MAX_DATA: /* * Something is wrong with the 2x parts and multiblock, so * just do 1 block at a time for now, which really kills * performance. 
*/ if (sc->sc_cap & CAP_MCI1_REV2XX) *(int *)result = 1; else *(int *)result = MAX_BLOCKS; break; } return (0); } static int at91_mci_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct at91_mci_softc *sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->host.mode = value; break; case MMCBR_IVAR_OCR: sc->host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->host.ios.vdd = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static device_method_t at91_mci_methods[] = { /* device_if */ DEVMETHOD(device_probe, at91_mci_probe), DEVMETHOD(device_attach, at91_mci_attach), DEVMETHOD(device_detach, at91_mci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, at91_mci_read_ivar), DEVMETHOD(bus_write_ivar, at91_mci_write_ivar), /* mmcbr_if */ DEVMETHOD(mmcbr_update_ios, at91_mci_update_ios), DEVMETHOD(mmcbr_request, at91_mci_request), DEVMETHOD(mmcbr_get_ro, at91_mci_get_ro), DEVMETHOD(mmcbr_acquire_host, at91_mci_acquire_host), DEVMETHOD(mmcbr_release_host, at91_mci_release_host), DEVMETHOD_END }; static driver_t at91_mci_driver = { "at91_mci", at91_mci_methods, sizeof(struct at91_mci_softc), }; static devclass_t at91_mci_devclass; #ifdef FDT DRIVER_MODULE(at91_mci, simplebus, at91_mci_driver, at91_mci_devclass, NULL, NULL); #else DRIVER_MODULE(at91_mci, atmelarm, at91_mci_driver, at91_mci_devclass, NULL, NULL); #endif DRIVER_MODULE(mmc, at91_mci, mmc_driver, mmc_devclass, NULL, NULL); MODULE_DEPEND(at91_mci, mmc, 1, 1, 1); Index: head/sys/arm/at91/at91_pit.c =================================================================== --- head/sys/arm/at91/at91_pit.c (revision 306901) +++ head/sys/arm/at91/at91_pit.c (revision 306902) @@ -1,222 +1,221 @@ /*- * Copyright (c) 2009 Gallon Sylvestre. All rights reserved. * Copyright (c) 2010 Greg Ansley. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #ifdef FDT #include #include #include #endif #ifndef PIT_PRESCALE #define PIT_PRESCALE (16) #endif static struct pit_softc { struct resource *mem_res; /* Memory resource */ void *intrhand; /* Interrupt handle */ device_t sc_dev; } *sc; static uint32_t timecount = 0; static unsigned at91_pit_get_timecount(struct timecounter *tc); static int pit_intr(void *arg); static inline uint32_t RD4(struct pit_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static inline void WR4(struct pit_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off, val); } void at91_pit_delay(int us) { int32_t cnt, last, piv; uint64_t pit_freq; const uint64_t mhz = 1E6; if (sc == NULL) return; last = PIT_PIV(RD4(sc, PIT_PIIR)); /* Max delay ~= 260s. @ 133Mhz */ pit_freq = at91_master_clock / PIT_PRESCALE; cnt = howmany(pit_freq * us, mhz); cnt = (cnt <= 0) ? 1 : cnt; while (cnt > 0) { piv = PIT_PIV(RD4(sc, PIT_PIIR)); cnt -= piv - last ; if (piv < last) cnt -= PIT_PIV(~0u) - last; last = piv; } } static struct timecounter at91_pit_timecounter = { at91_pit_get_timecount, /* get_timecount */ NULL, /* no poll_pps */ 0xffffffff, /* counter mask */ 0 / PIT_PRESCALE, /* frequency */ "AT91SAM9 timer", /* name */ 1000 /* quality */ }; static int at91_pit_probe(device_t dev) { #ifdef FDT if (!ofw_bus_is_compatible(dev, "atmel,at91sam9260-pit")) return (ENXIO); #endif device_set_desc(dev, "AT91SAM9 PIT"); return (0); } static int at91_pit_attach(device_t dev) { void *ih; int rid, err = 0; struct resource *irq; sc = device_get_softc(dev); sc->sc_dev = dev; rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) panic("couldn't allocate register resources"); rid = 0; irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 1, 1, 1, RF_ACTIVE | RF_SHAREABLE); if (!irq) { device_printf(dev, "could not allocate interrupt resources.\n"); err = ENOMEM; goto out; } /* Activate the interrupt. */ err = bus_setup_intr(dev, irq, INTR_TYPE_CLK, pit_intr, NULL, NULL, &ih); at91_pit_timecounter.tc_frequency = at91_master_clock / PIT_PRESCALE; tc_init(&at91_pit_timecounter); /* Enable the PIT here. 
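 * For example (illustrative numbers only): with a 133MHz master clock and the default PIT_PRESCALE of 16, the prescaled timer clock is about 8.31MHz, so with hz = 100 the PIV value written below works out to 133000000 / 16 / 100 = 83125 counts, i.e. a 10ms period per tick.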
*/ WR4(sc, PIT_MR, PIT_PIV(at91_master_clock / PIT_PRESCALE / hz) | PIT_EN | PIT_IEN); out: return (err); } static int pit_intr(void *arg) { struct trapframe *fp = arg; uint32_t icnt; if (RD4(sc, PIT_SR) & PIT_PITS_DONE) { icnt = RD4(sc, PIT_PIVR) >> 20; /* Just add in the overflows we just read */ timecount += PIT_PIV(RD4(sc, PIT_MR)) * icnt; hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp)); return (FILTER_HANDLED); } return (FILTER_STRAY); } static unsigned at91_pit_get_timecount(struct timecounter *tc) { uint32_t piir, icnt; piir = RD4(sc, PIT_PIIR); /* Current count | over flows */ icnt = piir >> 20; /* Overflows */ return (timecount + PIT_PIV(piir) + PIT_PIV(RD4(sc, PIT_MR)) * icnt); } static device_method_t at91_pit_methods[] = { DEVMETHOD(device_probe, at91_pit_probe), DEVMETHOD(device_attach, at91_pit_attach), DEVMETHOD_END }; static driver_t at91_pit_driver = { "at91_pit", at91_pit_methods, sizeof(struct pit_softc), }; static devclass_t at91_pit_devclass; #ifdef FDT EARLY_DRIVER_MODULE(at91_pit, simplebus, at91_pit_driver, at91_pit_devclass, NULL, NULL, BUS_PASS_TIMER); #else EARLY_DRIVER_MODULE(at91_pit, atmelarm, at91_pit_driver, at91_pit_devclass, NULL, NULL, BUS_PASS_TIMER); #endif Index: head/sys/arm/at91/at91_pmc.c =================================================================== --- head/sys/arm/at91/at91_pmc.c (revision 306901) +++ head/sys/arm/at91/at91_pmc.c (revision 306902) @@ -1,720 +1,718 @@ /*- * Copyright (c) 2006 M. Warner Losh. All rights reserved. * Copyright (c) 2010 Greg Ansley. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif static struct at91_pmc_softc { bus_space_tag_t sc_st; bus_space_handle_t sc_sh; struct resource *mem_res; /* Memory resource */ device_t dev; } *pmc_softc; static uint32_t pllb_init; MALLOC_DECLARE(M_PMC); MALLOC_DEFINE(M_PMC, "at91_pmc_clocks", "AT91 PMC Clock descriptors"); #define AT91_PMC_BASE 0xffffc00 static void at91_pmc_set_pllb_mode(struct at91_pmc_clock *, int); static void at91_pmc_set_upll_mode(struct at91_pmc_clock *, int); static void at91_pmc_set_sys_mode(struct at91_pmc_clock *, int); static void at91_pmc_set_periph_mode(struct at91_pmc_clock *, int); static void at91_pmc_clock_alias(const char *name, const char *alias); static struct at91_pmc_clock slck = { .name = "slck", /* 32,768 Hz slow clock */ .hz = 32768, .refcnt = 1, .id = 0, .primary = 1, }; /* * NOTE: Clocks for "ordinary peripheral" devices e.g. spi0, udp0, uhp0 etc. * are now created automatically. Only "system" clocks need be defined here. */ static struct at91_pmc_clock main_ck = { .name = "main", /* Main clock */ .refcnt = 0, .id = 1, .primary = 1, .pmc_mask = PMC_IER_MOSCS, }; static struct at91_pmc_clock plla = { .name = "plla", /* PLLA Clock, used for CPU clocking */ .parent = &main_ck, .refcnt = 1, .id = 0, .primary = 1, .pll = 1, .pmc_mask = PMC_IER_LOCKA, }; static struct at91_pmc_clock pllb = { .name = "pllb", /* PLLB Clock, used for USB functions */ .parent = &main_ck, .refcnt = 0, .id = 0, .primary = 1, .pll = 1, .pmc_mask = PMC_IER_LOCKB, .set_mode = &at91_pmc_set_pllb_mode, }; /* Used by USB on at91sam9g45 */ static struct at91_pmc_clock upll = { .name = "upll", /* UTMI PLL, used for USB functions on 9G45 family */ .parent = &main_ck, .refcnt = 0, .id = 0, .primary = 1, .pll = 1, .pmc_mask = (1 << 6), .set_mode = &at91_pmc_set_upll_mode, }; static struct at91_pmc_clock udpck = { .name = "udpck", .parent = &pllb, .pmc_mask = PMC_SCER_UDP, .set_mode = at91_pmc_set_sys_mode }; static struct at91_pmc_clock uhpck = { .name = "uhpck", .parent = &pllb, .pmc_mask = PMC_SCER_UHP, .set_mode = at91_pmc_set_sys_mode }; static struct at91_pmc_clock mck = { .name = "mck", /* Master (Peripheral) Clock */ .pmc_mask = PMC_IER_MCKRDY, .refcnt = 0, }; static struct at91_pmc_clock cpu = { .name = "cpu", /* CPU Clock */ .parent = &plla, .pmc_mask = PMC_SCER_PCK, .refcnt = 0, }; /* "+32" or the automatic peripheral clocks */ static struct at91_pmc_clock *clock_list[16+32] = { &slck, &main_ck, &plla, &pllb, &upll, &udpck, &uhpck, &mck, &cpu }; static inline uint32_t RD4(struct at91_pmc_softc *sc, bus_size_t off) { if (sc == NULL) { uint32_t *p = (uint32_t *)(AT91_BASE + AT91_PMC_BASE + off); return *p; } return (bus_read_4(sc->mem_res, off)); } static inline void WR4(struct at91_pmc_softc *sc, bus_size_t off, uint32_t val) { if (sc == NULL) { uint32_t *p = (uint32_t *)(AT91_BASE + AT91_PMC_BASE + off); *p = val; } else bus_write_4(sc->mem_res, off, val); } /* * The following is unused currently since we don't ever set the PLLA * frequency of the device. If we did, we'd have to also pay attention * to the ICPLLA bit in the PMC_PLLICPR register for frequencies lower * than ~600MHz, which the PMC code doesn't do right now. */ uint32_t at91_pmc_800mhz_plla_outb(int freq) { uint32_t outa; /* * Set OUTA, per the data sheet. 
See Table 46-16 titled * PLLA Frequency Regarding ICPLLA and OUTA in the SAM9X25 doc, * Table 46-17 in the SAM9G20 doc, or Table 46-16 in the SAM9G45 doc. * Note: the frequencies overlap by 5MHz, so we add 3 here to * center shoot the transition. */ freq /= 1000000; /* MHz */ if (freq >= 800) freq = 800; freq += 3; /* Allow for overlap. */ outa = 3 - ((freq / 50) & 3); /* 750 / 50 = 7, see table */ return (1 << 29)| (outa << 14); } uint32_t at91_pmc_800mhz_pllb_outb(int freq) { return (0); } void at91_pmc_set_pllb_mode(struct at91_pmc_clock *clk, int on) { struct at91_pmc_softc *sc = pmc_softc; uint32_t value; value = on ? pllb_init : 0; /* * Only write to the register if the value is changing. Besides being * good common sense, this works around RM9200 Errata #26 (CKGR_PLL[AB]R * must not be written with the same value currently in the register). */ if (RD4(sc, CKGR_PLLBR) != value) { WR4(sc, CKGR_PLLBR, value); while (on && (RD4(sc, PMC_SR) & PMC_IER_LOCKB) != PMC_IER_LOCKB) continue; } } static void at91_pmc_set_upll_mode(struct at91_pmc_clock *clk, int on) { struct at91_pmc_softc *sc = pmc_softc; uint32_t value; if (on) { on = PMC_IER_LOCKU; value = CKGR_UCKR_UPLLEN | CKGR_UCKR_BIASEN; } else value = 0; WR4(sc, CKGR_UCKR, RD4(sc, CKGR_UCKR) | value); while ((RD4(sc, PMC_SR) & PMC_IER_LOCKU) != on) continue; WR4(sc, PMC_USB, PMC_USB_USBDIV(9) | PMC_USB_USBS); WR4(sc, PMC_SCER, PMC_SCER_UHP_SAM9); } static void at91_pmc_set_sys_mode(struct at91_pmc_clock *clk, int on) { struct at91_pmc_softc *sc = pmc_softc; WR4(sc, on ? PMC_SCER : PMC_SCDR, clk->pmc_mask); if (on) while ((RD4(sc, PMC_SCSR) & clk->pmc_mask) != clk->pmc_mask) continue; else while ((RD4(sc, PMC_SCSR) & clk->pmc_mask) == clk->pmc_mask) continue; } static void at91_pmc_set_periph_mode(struct at91_pmc_clock *clk, int on) { struct at91_pmc_softc *sc = pmc_softc; WR4(sc, on ? 
PMC_PCER : PMC_PCDR, clk->pmc_mask); if (on) while ((RD4(sc, PMC_PCSR) & clk->pmc_mask) != clk->pmc_mask) continue; else while ((RD4(sc, PMC_PCSR) & clk->pmc_mask) == clk->pmc_mask) continue; } struct at91_pmc_clock * at91_pmc_clock_add(const char *name, uint32_t irq, struct at91_pmc_clock *parent) { struct at91_pmc_clock *clk; int i, buflen; clk = malloc(sizeof(*clk), M_PMC, M_NOWAIT | M_ZERO); if (clk == NULL) goto err; buflen = strlen(name) + 1; clk->name = malloc(buflen, M_PMC, M_NOWAIT); if (clk->name == NULL) goto err; strlcpy(clk->name, name, buflen); clk->pmc_mask = 1 << irq; clk->set_mode = &at91_pmc_set_periph_mode; if (parent == NULL) clk->parent = &mck; else clk->parent = parent; for (i = 0; i < nitems(clock_list); i++) { if (clock_list[i] == NULL) { clock_list[i] = clk; return (clk); } } err: if (clk != NULL) { if (clk->name != NULL) free(clk->name, M_PMC); free(clk, M_PMC); } panic("could not allocate pmc clock '%s'", name); return (NULL); } static void at91_pmc_clock_alias(const char *name, const char *alias) { struct at91_pmc_clock *clk, *alias_clk; clk = at91_pmc_clock_ref(name); if (clk) alias_clk = at91_pmc_clock_add(alias, 0, clk->parent); if (clk && alias_clk) { alias_clk->hz = clk->hz; alias_clk->pmc_mask = clk->pmc_mask; alias_clk->set_mode = clk->set_mode; } } struct at91_pmc_clock * at91_pmc_clock_ref(const char *name) { int i; for (i = 0; i < nitems(clock_list); i++) { if (clock_list[i] == NULL) break; if (strcmp(name, clock_list[i]->name) == 0) return (clock_list[i]); } return (NULL); } void at91_pmc_clock_deref(struct at91_pmc_clock *clk) { if (clk == NULL) return; } void at91_pmc_clock_enable(struct at91_pmc_clock *clk) { if (clk == NULL) return; /* XXX LOCKING? XXX */ if (clk->parent) at91_pmc_clock_enable(clk->parent); if (clk->refcnt++ == 0 && clk->set_mode) clk->set_mode(clk, 1); } void at91_pmc_clock_disable(struct at91_pmc_clock *clk) { if (clk == NULL) return; /* XXX LOCKING? 
XXX */ if (--clk->refcnt == 0 && clk->set_mode) clk->set_mode(clk, 0); if (clk->parent) at91_pmc_clock_disable(clk->parent); } static int at91_pmc_pll_rate(struct at91_pmc_clock *clk, uint32_t reg) { uint32_t mul, div, freq; freq = clk->parent->hz; div = (reg >> clk->pll_div_shift) & clk->pll_div_mask; mul = (reg >> clk->pll_mul_shift) & clk->pll_mul_mask; #if 0 printf("pll = (%d / %d) * %d = %d\n", freq, div, mul + 1, (freq/div) * (mul+1)); #endif if (div != 0 && mul != 0) { freq /= div; freq *= mul + 1; } else freq = 0; clk->hz = freq; return (freq); } static uint32_t at91_pmc_pll_calc(struct at91_pmc_clock *clk, uint32_t out_freq) { uint32_t i, div = 0, mul = 0, diff = 1 << 30; unsigned ret = 0x3e00; if (out_freq > clk->pll_max_out) goto fail; for (i = 1; i < 256; i++) { int32_t diff1; uint32_t input, mul1; input = clk->parent->hz / i; if (input < clk->pll_min_in) break; if (input > clk->pll_max_in) continue; mul1 = out_freq / input; if (mul1 > (clk->pll_mul_mask + 1)) continue; if (mul1 == 0) break; diff1 = out_freq - input * mul1; if (diff1 < 0) diff1 = -diff1; if (diff > diff1) { diff = diff1; div = i; mul = mul1; if (diff == 0) break; } } if (diff > (out_freq >> PMC_PLL_SHIFT_TOL)) goto fail; if (clk->set_outb != NULL) ret |= clk->set_outb(out_freq); return (ret | ((mul - 1) << clk->pll_mul_shift) | (div << clk->pll_div_shift)); fail: return (0); } #if !defined(AT91C_MAIN_CLOCK) static const unsigned int at91_main_clock_tbl[] = { 3000000, 3276800, 3686400, 3840000, 4000000, 4433619, 4915200, 5000000, 5242880, 6000000, 6144000, 6400000, 6553600, 7159090, 7372800, 7864320, 8000000, 9830400, 10000000, 11059200, 12000000, 12288000, 13560000, 14318180, 14745600, 16000000, 17344700, 18432000, 20000000 }; #define MAIN_CLOCK_TBL_LEN nitems(at91_main_clock_tbl) #endif static unsigned int at91_pmc_sense_main_clock(void) { #if !defined(AT91C_MAIN_CLOCK) unsigned int ckgr_val; unsigned int diff, matchdiff, freq; int i; ckgr_val = (RD4(NULL, CKGR_MCFR) & CKGR_MCFR_MAINF_MASK) << 11; /* * Clocks up to 50MHz can be connected to some models. If * the frequency is >= 21MHz, assume that the slow clock can * measure it correctly, and that any error can be adequately * compensated for by roudning to the nearest 500Hz. Users * with fast, or odd-ball clocks will need to set * AT91C_MAIN_CLOCK in the kernel config file. */ if (ckgr_val >= 21000000) return (rounddown(ckgr_val + 250, 500)); /* * Try to find the standard frequency that match best. */ freq = at91_main_clock_tbl[0]; matchdiff = abs(ckgr_val - at91_main_clock_tbl[0]); for (i = 1; i < MAIN_CLOCK_TBL_LEN; i++) { diff = abs(ckgr_val - at91_main_clock_tbl[i]); if (diff < matchdiff) { freq = at91_main_clock_tbl[i]; matchdiff = diff; } } return (freq); #else return (AT91C_MAIN_CLOCK); #endif } void at91_pmc_init_clock(void) { struct at91_pmc_softc *sc = NULL; unsigned int main_clock; uint32_t mckr; uint32_t mdiv; soc_info.soc_data->soc_clock_init(); main_clock = at91_pmc_sense_main_clock(); if (at91_is_sam9() || at91_is_sam9xe()) { uhpck.pmc_mask = PMC_SCER_UHP_SAM9; udpck.pmc_mask = PMC_SCER_UDP_SAM9; } /* There is no pllb on AT91SAM9G45 */ if (at91_cpu_is(AT91_T_SAM9G45)) { uhpck.parent = &upll; uhpck.pmc_mask = PMC_SCER_UHP_SAM9; } mckr = RD4(sc, PMC_MCKR); main_ck.hz = main_clock; /* * Note: this means outa calc code for plla never used since * we never change it. If we did, we'd also have to mind * ICPLLA to get the charge pump current right. 
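 * * As a sketch of the PLL rate math in at91_pmc_pll_rate() above, with purely hypothetical register values: freq = (parent / DIV) * (MUL + 1), so an 18.432MHz main clock with DIV = 9 and MUL = 97 (MUL + 1 = 98) yields (18432000 / 9) * 98 = 200704000, i.e. roughly a 200.7MHz PLLA.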
*/ at91_pmc_pll_rate(&plla, RD4(sc, CKGR_PLLAR)); if (at91_cpu_is(AT91_T_SAM9G45) && (mckr & PMC_MCKR_PLLADIV2)) plla.hz /= 2; /* * Initialize the usb clock. This sets up pllb, but disables the * actual clock. XXX except for the if 0 :( */ if (!at91_cpu_is(AT91_T_SAM9G45)) { pllb_init = at91_pmc_pll_calc(&pllb, 48000000 * 2) | 0x10000000; at91_pmc_pll_rate(&pllb, pllb_init); #if 0 /* Turn off USB clocks */ at91_pmc_set_periph_mode(&ohci_clk, 0); at91_pmc_set_periph_mode(&udc_clk, 0); #endif } if (at91_is_rm92()) { WR4(sc, PMC_SCDR, PMC_SCER_UHP | PMC_SCER_UDP); WR4(sc, PMC_SCER, PMC_SCER_MCKUDP); } else WR4(sc, PMC_SCDR, PMC_SCER_UHP_SAM9 | PMC_SCER_UDP_SAM9); /* * MCK and PCU derive from one of the primary clocks. Initialize * this relationship. */ mck.parent = clock_list[mckr & 0x3]; mck.parent->refcnt++; cpu.hz = mck.hz = mck.parent->hz / (1 << ((mckr & PMC_MCKR_PRES_MASK) >> 2)); mdiv = (mckr & PMC_MCKR_MDIV_MASK) >> 8; if (at91_is_sam9() || at91_is_sam9xe()) { /* * On AT91SAM9G45 when mdiv == 3 we need to divide * MCK by 3 but not, for example, on 9g20. */ if (!at91_cpu_is(AT91_T_SAM9G45) || mdiv <= 2) mdiv *= 2; if (mdiv > 0) mck.hz /= mdiv; } else mck.hz /= (1 + mdiv); /* Only found on SAM9G20 */ if (at91_cpu_is(AT91_T_SAM9G20)) cpu.hz /= (mckr & PMC_MCKR_PDIV) ? 2 : 1; at91_master_clock = mck.hz; /* These clocks refrenced by "special" names */ at91_pmc_clock_alias("ohci0", "ohci_clk"); at91_pmc_clock_alias("udp0", "udp_clk"); /* Turn off "Progamable" clocks */ WR4(sc, PMC_SCDR, PMC_SCER_PCK0 | PMC_SCER_PCK1 | PMC_SCER_PCK2 | PMC_SCER_PCK3); /* XXX kludge, turn on all peripherals */ WR4(sc, PMC_PCER, 0xffffffff); /* Disable all interrupts for PMC */ WR4(sc, PMC_IDR, 0xffffffff); } static void at91_pmc_deactivate(device_t dev) { struct at91_pmc_softc *sc; sc = device_get_softc(dev); bus_generic_detach(sc->dev); if (sc->mem_res) bus_release_resource(dev, SYS_RES_IOPORT, rman_get_rid(sc->mem_res), sc->mem_res); sc->mem_res = NULL; } static int at91_pmc_activate(device_t dev) { struct at91_pmc_softc *sc; int rid; sc = device_get_softc(dev); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) goto errout; return (0); errout: at91_pmc_deactivate(dev); return (ENOMEM); } static int at91_pmc_probe(device_t dev) { #ifdef FDT if (!ofw_bus_is_compatible(dev, "atmel,at91rm9200-pmc") && !ofw_bus_is_compatible(dev, "atmel,at91sam9260-pmc") && !ofw_bus_is_compatible(dev, "atmel,at91sam9g45-pmc") && !ofw_bus_is_compatible(dev, "atmel,at91sam9x5-pmc")) return (ENXIO); #endif device_set_desc(dev, "PMC"); return (0); } static int at91_pmc_attach(device_t dev) { int err; pmc_softc = device_get_softc(dev); pmc_softc->dev = dev; if ((err = at91_pmc_activate(dev)) != 0) return (err); /* * Configure main clock frequency. 
*/ at91_pmc_init_clock(); /* * Display info about clocks previously computed */ device_printf(dev, "Primary: %d Hz PLLA: %d MHz CPU: %d MHz MCK: %d MHz\n", main_ck.hz, plla.hz / 1000000, cpu.hz / 1000000, mck.hz / 1000000); return (0); } static device_method_t at91_pmc_methods[] = { DEVMETHOD(device_probe, at91_pmc_probe), DEVMETHOD(device_attach, at91_pmc_attach), DEVMETHOD_END }; static driver_t at91_pmc_driver = { "at91_pmc", at91_pmc_methods, sizeof(struct at91_pmc_softc), }; static devclass_t at91_pmc_devclass; #ifdef FDT EARLY_DRIVER_MODULE(at91_pmc, simplebus, at91_pmc_driver, at91_pmc_devclass, NULL, NULL, BUS_PASS_CPU); #else EARLY_DRIVER_MODULE(at91_pmc, atmelarm, at91_pmc_driver, at91_pmc_devclass, NULL, NULL, BUS_PASS_CPU); #endif Index: head/sys/arm/at91/at91_st.c =================================================================== --- head/sys/arm/at91/at91_st.c (revision 306901) +++ head/sys/arm/at91/at91_st.c (revision 306902) @@ -1,313 +1,312 @@ /*- * Copyright (c) 2005 Olivier Houchard. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include static struct at91_st_softc { struct resource * sc_irq_res; struct resource * sc_mem_res; void * sc_intrhand; eventhandler_tag sc_wet; /* watchdog event handler tag */ } *timer_softc; static inline uint32_t RD4(bus_size_t off) { if (timer_softc == NULL) { uint32_t *p = (uint32_t *)(AT91_BASE + AT91RM92_ST_BASE + off); return *p; } return (bus_read_4(timer_softc->sc_mem_res, off)); } static inline void WR4(bus_size_t off, uint32_t val) { if (timer_softc == NULL) { uint32_t *p = (uint32_t *)(AT91_BASE + AT91RM92_ST_BASE + off); *p = val; } else bus_write_4(timer_softc->sc_mem_res, off, val); } static void at91_st_watchdog(void *, u_int, int *); static void at91_st_initclocks(device_t , struct at91_st_softc *); static inline int st_crtr(void) { int cur1, cur2; do { cur1 = RD4(ST_CRTR); cur2 = RD4(ST_CRTR); } while (cur1 != cur2); return (cur1); } static unsigned at91_st_get_timecount(struct timecounter *tc); static struct timecounter at91_st_timecounter = { at91_st_get_timecount, /* get_timecount */ NULL, /* no poll_pps */ 0xfffffu, /* counter_mask */ 32768, /* frequency */ "AT91RM9200 timer", /* name */ 1000 /* quality */ }; static int clock_intr(void *arg) { struct trapframe *fp = arg; /* The interrupt is shared, so we have to make sure it's for us. */ if (RD4(ST_SR) & ST_SR_PITS) { hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp)); return (FILTER_HANDLED); } return (FILTER_STRAY); } void at91_st_delay(int n) { uint32_t start, end, cur; start = st_crtr(); n = (n * 1000) / 32768; if (n <= 0) n = 1; end = (start + n) & ST_CRTR_MASK; cur = start; if (start > end) { while (cur >= start || cur < end) cur = st_crtr(); } else { while (cur < end) cur = st_crtr(); } } void at91_st_cpu_reset(void) { /* * Reset the CPU by programmig the watchdog timer to reset the * CPU after 128 'slow' clocks, or about ~4ms. Loop until * the reset happens for safety. 
*/ WR4(ST_WDMR, ST_WDMR_RSTEN | 2); WR4(ST_CR, ST_CR_WDRST); while (1) continue; } static int at91_st_probe(device_t dev) { device_set_desc(dev, "ST"); return (0); } static void at91_st_deactivate(device_t dev) { struct at91_st_softc *sc = timer_softc; if (sc->sc_intrhand) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); sc->sc_intrhand = NULL; if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->sc_irq_res), sc->sc_irq_res); sc->sc_irq_res = NULL; if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_mem_res), sc->sc_mem_res); sc->sc_mem_res = NULL; } static int at91_st_activate(device_t dev) { int rid; int err; struct at91_st_softc *sc = timer_softc; rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); err = ENOMEM; if (sc->sc_mem_res == NULL) goto out; /* Disable all interrupts */ WR4(ST_IDR, 0xffffffff); /* The system timer shares the system irq (1) */ rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_irq_res == NULL) { printf("Unable to allocate irq for the system timer"); goto out; } err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_CLK, clock_intr, NULL, NULL, &sc->sc_intrhand); out: if (err != 0) at91_st_deactivate(dev); return (err); } static int at91_st_attach(device_t dev) { int err; timer_softc = device_get_softc(dev); err = at91_st_activate(dev); if (err) return err; timer_softc->sc_wet = EVENTHANDLER_REGISTER(watchdog_list, at91_st_watchdog, dev, 0); device_printf(dev, "watchdog registered, timeout intervall max. 64 sec\n"); at91_st_initclocks(dev, timer_softc); return (0); } static device_method_t at91_st_methods[] = { DEVMETHOD(device_probe, at91_st_probe), DEVMETHOD(device_attach, at91_st_attach), {0, 0}, }; static driver_t at91_st_driver = { "at91_st", at91_st_methods, sizeof(struct at91_st_softc), }; static devclass_t at91_st_devclass; DRIVER_MODULE(at91_st, atmelarm, at91_st_driver, at91_st_devclass, 0, 0); static unsigned at91_st_get_timecount(struct timecounter *tc) { return (st_crtr()); } /* * t below is in a weird unit. The watchdog is set to 2^t * nanoseconds. Since our watchdog timer can't really do that too * well, we approximate it by assuming that the timeout interval for * the lsb is 2^22 ns, which is 4.194ms. This is an overestimation of * the actual time (3.906ms), but close enough for watchdogging. * These approximations, though a violation of the spec, improve the * performance of the application which typically specifies things as * WD_TO_32SEC. In that last case, we'd wait 32s before the wdog * reset. The spec says we should wait closer to 34s, but given how * it is likely to be used, and the extremely coarse nature time * interval, I think this is the best solution. */ static void at91_st_watchdog(void *argp, u_int cmd, int *error) { uint32_t wdog; int t; t = cmd & WD_INTERVAL; if (t >= 22 && t <= 37) { wdog = (1 << (t - 22)) | ST_WDMR_RSTEN; *error = 0; } else { wdog = 0; } WR4(ST_WDMR, wdog); WR4(ST_CR, ST_CR_WDRST); } static void at91_st_initclocks(device_t dev, struct at91_st_softc *sc) { int rel_value; /* * Real time counter increments every clock cycle, need to set before * initializing clocks so that DELAY works. 
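 * * Worked example: WD_TO_32SEC requests 2^35 ns. With t = 35 the code below programs (1 << (35 - 22)) = 8192 lsbs; at the real ~3.906ms per lsb that comes to about 32.0 seconds, versus the ~34.4 seconds implied by the 2^22 ns approximation, which is the 32s-vs-34s trade-off described above.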
*/ WR4(ST_RTMR, 1); /* disable watchdog timer */ WR4(ST_WDMR, 0); rel_value = 32768 / hz; if (rel_value < 1) rel_value = 1; if (32768 % hz) { device_printf(dev, "Cannot get %d Hz clock; using %dHz\n", hz, 32768 / rel_value); hz = 32768 / rel_value; tick = 1000000 / hz; } WR4(ST_PIMR, rel_value); /* Enable PITS interrupts. */ WR4(ST_IER, ST_SR_PITS); tc_init(&at91_st_timecounter); } Index: head/sys/arm/broadcom/bcm2835/bcm2835_dma.c =================================================================== --- head/sys/arm/broadcom/bcm2835/bcm2835_dma.c (revision 306901) +++ head/sys/arm/broadcom/bcm2835/bcm2835_dma.c (revision 306902) @@ -1,766 +1,764 @@ /* * Copyright (c) 2013 Daisuke Aoyama * Copyright (c) 2013 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include "bcm2835_dma.h" #include "bcm2835_vcbus.h" #define MAX_REG 9 /* private flags */ #define BCM_DMA_CH_USED 0x00000001 #define BCM_DMA_CH_FREE 0x40000000 #define BCM_DMA_CH_UNMAP 0x80000000 /* Register Map (4.2.1.2) */ #define BCM_DMA_CS(n) (0x100*(n) + 0x00) #define CS_ACTIVE (1 << 0) #define CS_END (1 << 1) #define CS_INT (1 << 2) #define CS_DREQ (1 << 3) #define CS_ISPAUSED (1 << 4) #define CS_ISHELD (1 << 5) #define CS_ISWAIT (1 << 6) #define CS_ERR (1 << 8) #define CS_WAITWRT (1 << 28) #define CS_DISDBG (1 << 29) #define CS_ABORT (1 << 30) #define CS_RESET (1U << 31) #define BCM_DMA_CBADDR(n) (0x100*(n) + 0x04) #define BCM_DMA_INFO(n) (0x100*(n) + 0x08) #define INFO_INT_EN (1 << 0) #define INFO_TDMODE (1 << 1) #define INFO_WAIT_RESP (1 << 3) #define INFO_D_INC (1 << 4) #define INFO_D_WIDTH (1 << 5) #define INFO_D_DREQ (1 << 6) #define INFO_S_INC (1 << 8) #define INFO_S_WIDTH (1 << 9) #define INFO_S_DREQ (1 << 10) #define INFO_WAITS_SHIFT (21) #define INFO_PERMAP_SHIFT (16) #define INFO_PERMAP_MASK (0x1f << INFO_PERMAP_SHIFT) #define BCM_DMA_SRC(n) (0x100*(n) + 0x0C) #define BCM_DMA_DST(n) (0x100*(n) + 0x10) #define BCM_DMA_LEN(n) (0x100*(n) + 0x14) #define BCM_DMA_STRIDE(n) (0x100*(n) + 0x18) #define BCM_DMA_CBNEXT(n) (0x100*(n) + 0x1C) #define BCM_DMA_DEBUG(n) (0x100*(n) + 0x20) #define DEBUG_ERROR_MASK (7) #define BCM_DMA_INT_STATUS 0xfe0 #define BCM_DMA_ENABLE 0xff0 /* relative offset from BCM_VC_DMA0_BASE (p.39) */ #define BCM_DMA_CH(n) (0x100*(n)) /* channels used by GPU */ #define BCM_DMA_CH_BULK 0 #define BCM_DMA_CH_FAST1 2 #define BCM_DMA_CH_FAST2 3 #define BCM_DMA_CH_GPU_MASK ((1 << BCM_DMA_CH_BULK) | \ (1 << BCM_DMA_CH_FAST1) | \ (1 << BCM_DMA_CH_FAST2)) /* DMA Control Block - 256bit aligned (p.40) */ struct bcm_dma_cb { uint32_t info; /* Transfer Information */ uint32_t src; /* Source Address */ uint32_t dst; /* Destination Address */ uint32_t len; /* Transfer Length */ uint32_t stride; /* 2D Mode Stride */ uint32_t next; /* Next Control Block Address */ uint32_t rsvd1; /* Reserved */ uint32_t rsvd2; /* Reserved */ }; #ifdef DEBUG static void bcm_dma_cb_dump(struct bcm_dma_cb *cb); static void bcm_dma_reg_dump(int ch); #endif /* DMA channel private info */ struct bcm_dma_ch { int ch; uint32_t flags; struct bcm_dma_cb * cb; uint32_t vc_cb; bus_dmamap_t dma_map; void (*intr_func)(int, void *); void * intr_arg; }; struct bcm_dma_softc { device_t sc_dev; struct mtx sc_mtx; struct resource * sc_mem; struct resource * sc_irq[BCM_DMA_CH_MAX]; void * sc_intrhand[BCM_DMA_CH_MAX]; struct bcm_dma_ch sc_dma_ch[BCM_DMA_CH_MAX]; bus_dma_tag_t sc_dma_tag; }; static struct bcm_dma_softc *bcm_dma_sc = NULL; static uint32_t bcm_dma_channel_mask; static void bcm_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { bus_addr_t *addr; if (err) return; addr = (bus_addr_t*)arg; *addr = PHYS_TO_VCBUS(segs[0].ds_addr); } static void bcm_dma_reset(device_t dev, int ch) { struct bcm_dma_softc *sc = device_get_softc(dev); struct bcm_dma_cb *cb; uint32_t cs; int count; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return; cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch)); if (cs & CS_ACTIVE) { /* pause current task */ bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), 0); count = 1000; do { cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch)); } while (!(cs & CS_ISPAUSED) && (count-- > 0)); if (!(cs & 
CS_ISPAUSED)) { device_printf(dev, "Can't abort DMA transfer at channel %d\n", ch); } bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0); /* Complete everything, clear interrupt */ bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ABORT | CS_INT | CS_END | CS_ACTIVE); } /* clear control blocks */ bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), 0); bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0); /* Reset control block */ cb = sc->sc_dma_ch[ch].cb; bzero(cb, sizeof(*cb)); cb->info = INFO_WAIT_RESP; } static int bcm_dma_init(device_t dev) { struct bcm_dma_softc *sc = device_get_softc(dev); uint32_t reg; struct bcm_dma_ch *ch; void *cb_virt; vm_paddr_t cb_phys; int err; int i; /* * Only channels set in bcm_dma_channel_mask can be controlled by us. * The others are out of our control as well as the corresponding bits * in both BCM_DMA_ENABLE and BCM_DMA_INT_STATUS global registers. As * these registers are RW ones, there is no safe way to write only * the bits which can be controlled by us. * * Fortunately, after reset, all channels are enabled in BCM_DMA_ENABLE * register and all statuses are cleared in BCM_DMA_INT_STATUS one. * Not touching these registers is a trade-off between correct * initialization which does not count on anything and not messing up * something we have no control over. */ reg = bus_read_4(sc->sc_mem, BCM_DMA_ENABLE); if ((reg & bcm_dma_channel_mask) != bcm_dma_channel_mask) device_printf(dev, "channels are not enabled\n"); reg = bus_read_4(sc->sc_mem, BCM_DMA_INT_STATUS); if ((reg & bcm_dma_channel_mask) != 0) device_printf(dev, "statuses are not cleared\n"); /* Allocate DMA channel control blocks */ /* p.40 of spec - control block should be 256-bit (32-byte) aligned */ err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct bcm_dma_cb), 1, sizeof(struct bcm_dma_cb), BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_dma_tag); if (err) { device_printf(dev, "failed to allocate DMA tag\n"); return (err); } /* setup initial settings */ for (i = 0; i < BCM_DMA_CH_MAX; i++) { ch = &sc->sc_dma_ch[i]; bzero(ch, sizeof(struct bcm_dma_ch)); ch->ch = i; ch->flags = BCM_DMA_CH_UNMAP; if ((bcm_dma_channel_mask & (1 << i)) == 0) continue; err = bus_dmamem_alloc(sc->sc_dma_tag, &cb_virt, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ch->dma_map); if (err) { device_printf(dev, "cannot allocate DMA memory\n"); break; } /* * Least alignment for busdma-allocated stuff is cache * line size, so just make sure nothing stupid happened * and we got a properly aligned address */ if ((uintptr_t)cb_virt & 0x1f) { device_printf(dev, "DMA address is not 32-byte aligned: %p\n", (void*)cb_virt); break; } err = bus_dmamap_load(sc->sc_dma_tag, ch->dma_map, cb_virt, sizeof(struct bcm_dma_cb), bcm_dmamap_cb, &cb_phys, BUS_DMA_WAITOK); if (err) { device_printf(dev, "cannot load DMA memory\n"); break; } ch->cb = cb_virt; ch->vc_cb = cb_phys; ch->flags = BCM_DMA_CH_FREE; ch->cb->info = INFO_WAIT_RESP; /* reset DMA engine */ bus_write_4(sc->sc_mem, BCM_DMA_CS(i), CS_RESET); } return (0); } /* * Allocate DMA channel for further use, returns channel # or * BCM_DMA_CH_INVALID */ int bcm_dma_allocate(int req_ch) { struct bcm_dma_softc *sc = bcm_dma_sc; int ch = BCM_DMA_CH_INVALID; int i; if (req_ch >= BCM_DMA_CH_MAX) return (BCM_DMA_CH_INVALID); /* Auto (req_ch < 0) or CH specified */ mtx_lock(&sc->sc_mtx); if (req_ch < 0) { for (i = 0; i < BCM_DMA_CH_MAX; i++) { if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE) { ch = i; sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED; break; } } } else { if (sc->sc_dma_ch[req_ch].flags & BCM_DMA_CH_FREE) { ch = req_ch; sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE; sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED; } } mtx_unlock(&sc->sc_mtx); return (ch); } /* * Frees allocated channel. Returns 0 on success, -1 otherwise */ int bcm_dma_free(int ch) { struct bcm_dma_softc *sc = bcm_dma_sc; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); mtx_lock(&sc->sc_mtx); if (sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED) { sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_FREE; sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_USED; sc->sc_dma_ch[ch].intr_func = NULL; sc->sc_dma_ch[ch].intr_arg = NULL; /* reset DMA engine */ bcm_dma_reset(sc->sc_dev, ch); } mtx_unlock(&sc->sc_mtx); return (0); } /* * Assign handler function for channel interrupt * Returns 0 on success, -1 otherwise */ int bcm_dma_setup_intr(int ch, void (*func)(int, void *), void *arg) { struct bcm_dma_softc *sc = bcm_dma_sc; struct bcm_dma_cb *cb; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (-1); sc->sc_dma_ch[ch].intr_func = func; sc->sc_dma_ch[ch].intr_arg = arg; cb = sc->sc_dma_ch[ch].cb; cb->info |= INFO_INT_EN; return (0); } /* * Setup DMA source parameters * ch - channel number * dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if * source is physical memory * inc_addr - BCM_DMA_INC_ADDR if source address * should be increased after each access or * BCM_DMA_SAME_ADDR if address should remain * the same * width - size of read operation, BCM_DMA_32BIT * for 32bit bursts, BCM_DMA_128BIT for 128 bits * * Returns 0 on success, -1 otherwise */ int bcm_dma_setup_src(int ch, int dreq, int inc_addr, int width) { struct bcm_dma_softc *sc = bcm_dma_sc; uint32_t info; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (-1); info = sc->sc_dma_ch[ch].cb->info; info &= ~INFO_PERMAP_MASK; info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK; if (dreq) info |= INFO_S_DREQ; else info &= ~INFO_S_DREQ; if (width == BCM_DMA_128BIT) info |= INFO_S_WIDTH; else info &= ~INFO_S_WIDTH; if (inc_addr == BCM_DMA_INC_ADDR) info |= INFO_S_INC; else info &= ~INFO_S_INC; sc->sc_dma_ch[ch].cb->info = info; return (0); } /* * Setup DMA destination parameters * ch - channel number * dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if * destination is physical memory * inc_addr - BCM_DMA_INC_ADDR if source address * should be increased after each access or * BCM_DMA_SAME_ADDR if address should remain * the same * width - size of write operation, BCM_DMA_32BIT * for 32bit bursts, BCM_DMA_128BIT for 128 bits * * Returns 0 on success, -1 otherwise */ int bcm_dma_setup_dst(int ch, int dreq, int inc_addr, int width) { struct bcm_dma_softc *sc = bcm_dma_sc; uint32_t info; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (-1); info = sc->sc_dma_ch[ch].cb->info; info &= ~INFO_PERMAP_MASK; info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK; if (dreq) info |= INFO_D_DREQ; else info &= ~INFO_D_DREQ; if (width == BCM_DMA_128BIT) info |= INFO_D_WIDTH; else info &= ~INFO_D_WIDTH; if (inc_addr == BCM_DMA_INC_ADDR) info |= INFO_D_INC; else info &= ~INFO_D_INC; sc->sc_dma_ch[ch].cb->info = info; return (0); } #ifdef DEBUG void bcm_dma_cb_dump(struct bcm_dma_cb *cb) { printf("DMA CB "); printf("INFO: %8.8x ", cb->info); printf("SRC: %8.8x ", cb->src); printf("DST: %8.8x ", cb->dst); printf("LEN: %8.8x ", cb->len); printf("\n"); 
printf("STRIDE: %8.8x ", cb->stride); printf("NEXT: %8.8x ", cb->next); printf("RSVD1: %8.8x ", cb->rsvd1); printf("RSVD2: %8.8x ", cb->rsvd2); printf("\n"); } void bcm_dma_reg_dump(int ch) { struct bcm_dma_softc *sc = bcm_dma_sc; int i; uint32_t reg; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return; printf("DMA%d: ", ch); for (i = 0; i < MAX_REG; i++) { reg = bus_read_4(sc->sc_mem, BCM_DMA_CH(ch) + i*4); printf("%8.8x ", reg); } printf("\n"); } #endif /* * Start DMA transaction * ch - channel number * src, dst - source and destination address in * ARM physical memory address space. * len - amount of bytes to be transferred * * Returns 0 on success, -1 otherwise */ int bcm_dma_start(int ch, vm_paddr_t src, vm_paddr_t dst, int len) { struct bcm_dma_softc *sc = bcm_dma_sc; struct bcm_dma_cb *cb; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (-1); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (-1); cb = sc->sc_dma_ch[ch].cb; if (BCM2835_ARM_IS_IO(src)) cb->src = IO_TO_VCBUS(src); else cb->src = PHYS_TO_VCBUS(src); if (BCM2835_ARM_IS_IO(dst)) cb->dst = IO_TO_VCBUS(dst); else cb->dst = PHYS_TO_VCBUS(dst); cb->len = len; bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_ch[ch].dma_map, BUS_DMASYNC_PREWRITE); bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), sc->sc_dma_ch[ch].vc_cb); bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ACTIVE); #ifdef DEBUG bcm_dma_cb_dump(sc->sc_dma_ch[ch].cb); bcm_dma_reg_dump(ch); #endif return (0); } /* * Get length requested for DMA transaction * ch - channel number * * Returns size of transaction, 0 if channel is invalid */ uint32_t bcm_dma_length(int ch) { struct bcm_dma_softc *sc = bcm_dma_sc; struct bcm_dma_cb *cb; if (ch < 0 || ch >= BCM_DMA_CH_MAX) return (0); if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) return (0); cb = sc->sc_dma_ch[ch].cb; return (cb->len); } static void bcm_dma_intr(void *arg) { struct bcm_dma_softc *sc = bcm_dma_sc; struct bcm_dma_ch *ch = (struct bcm_dma_ch *)arg; uint32_t cs, debug; /* my interrupt? */ cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch->ch)); if (!(cs & (CS_INT | CS_ERR))) { device_printf(sc->sc_dev, "unexpected DMA intr CH=%d, CS=%x\n", ch->ch, cs); return; } /* running? */ if (!(ch->flags & BCM_DMA_CH_USED)) { device_printf(sc->sc_dev, "unused DMA intr CH=%d, CS=%x\n", ch->ch, cs); return; } if (cs & CS_ERR) { debug = bus_read_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch)); device_printf(sc->sc_dev, "DMA error %d on CH%d\n", debug & DEBUG_ERROR_MASK, ch->ch); bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch), debug & DEBUG_ERROR_MASK); bcm_dma_reset(sc->sc_dev, ch->ch); } if (cs & CS_INT) { /* acknowledge interrupt */ bus_write_4(sc->sc_mem, BCM_DMA_CS(ch->ch), CS_INT | CS_END); /* Prepare for possible access to len field */ bus_dmamap_sync(sc->sc_dma_tag, ch->dma_map, BUS_DMASYNC_POSTWRITE); /* save callback function and argument */ if (ch->intr_func) ch->intr_func(ch->ch, ch->intr_arg); } } static int bcm_dma_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "broadcom,bcm2835-dma")) return (ENXIO); device_set_desc(dev, "BCM2835 DMA Controller"); return (BUS_PROBE_DEFAULT); } static int bcm_dma_attach(device_t dev) { struct bcm_dma_softc *sc = device_get_softc(dev); phandle_t node; int rid, err = 0; int i; sc->sc_dev = dev; if (bcm_dma_sc) return (ENXIO); for (i = 0; i < BCM_DMA_CH_MAX; i++) { sc->sc_irq[i] = NULL; sc->sc_intrhand[i] = NULL; } /* Get DMA channel mask. 
*/ node = ofw_bus_get_node(sc->sc_dev); if (OF_getencprop(node, "brcm,dma-channel-mask", &bcm_dma_channel_mask, sizeof(bcm_dma_channel_mask)) == -1 && OF_getencprop(node, "broadcom,channels", &bcm_dma_channel_mask, sizeof(bcm_dma_channel_mask)) == -1) { device_printf(dev, "could not get channel mask property\n"); return (ENXIO); } /* Mask out channels used by GPU. */ bcm_dma_channel_mask &= ~BCM_DMA_CH_GPU_MASK; /* DMA0 - DMA14 */ rid = 0; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } /* IRQ DMA0 - DMA11 XXX NOT USE DMA12(spurious?) */ for (rid = 0; rid < BCM_DMA_CH_MAX; rid++) { if ((bcm_dma_channel_mask & (1 << rid)) == 0) continue; sc->sc_irq[rid] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->sc_irq[rid] == NULL) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->sc_irq[rid], INTR_TYPE_MISC | INTR_MPSAFE, NULL, bcm_dma_intr, &sc->sc_dma_ch[rid], &sc->sc_intrhand[rid])) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } } mtx_init(&sc->sc_mtx, "bcmdma", "bcmdma", MTX_DEF); bcm_dma_sc = sc; err = bcm_dma_init(dev); if (err) goto fail; return (err); fail: if (sc->sc_mem) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem); for (i = 0; i < BCM_DMA_CH_MAX; i++) { if (sc->sc_intrhand[i]) bus_teardown_intr(dev, sc->sc_irq[i], sc->sc_intrhand[i]); if (sc->sc_irq[i]) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq[i]); } return (err); } static device_method_t bcm_dma_methods[] = { DEVMETHOD(device_probe, bcm_dma_probe), DEVMETHOD(device_attach, bcm_dma_attach), { 0, 0 } }; static driver_t bcm_dma_driver = { "bcm_dma", bcm_dma_methods, sizeof(struct bcm_dma_softc), }; static devclass_t bcm_dma_devclass; DRIVER_MODULE(bcm_dma, simplebus, bcm_dma_driver, bcm_dma_devclass, 0, 0); MODULE_VERSION(bcm_dma, 1); Index: head/sys/arm/broadcom/bcm2835/bcm2835_spi.c =================================================================== --- head/sys/arm/broadcom/bcm2835/bcm2835_spi.c (revision 306901) +++ head/sys/arm/broadcom/bcm2835/bcm2835_spi.c (revision 306902) @@ -1,521 +1,519 @@ /*- * Copyright (c) 2012 Oleksandr Tymoshenko * Copyright (c) 2013 Luiz Otavio O Souza * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" static void bcm_spi_intr(void *); #ifdef BCM_SPI_DEBUG static void bcm_spi_printr(device_t dev) { struct bcm_spi_softc *sc; uint32_t reg; sc = device_get_softc(dev); reg = BCM_SPI_READ(sc, SPI_CS); device_printf(dev, "CS=%b\n", reg, "\20\1CS0\2CS1\3CPHA\4CPOL\7CSPOL" "\10TA\11DMAEN\12INTD\13INTR\14ADCS\15REN\16LEN" "\21DONE\22RXD\23TXD\24RXR\25RXF\26CSPOL0\27CSPOL1" "\30CSPOL2\31DMA_LEN\32LEN_LONG"); reg = BCM_SPI_READ(sc, SPI_CLK) & SPI_CLK_MASK; if (reg % 2) reg--; if (reg == 0) reg = 65536; device_printf(dev, "CLK=%uMhz/%d=%luhz\n", SPI_CORE_CLK / 1000000, reg, SPI_CORE_CLK / reg); reg = BCM_SPI_READ(sc, SPI_DLEN) & SPI_DLEN_MASK; device_printf(dev, "DLEN=%d\n", reg); reg = BCM_SPI_READ(sc, SPI_LTOH) & SPI_LTOH_MASK; device_printf(dev, "LTOH=%d\n", reg); reg = BCM_SPI_READ(sc, SPI_DC); device_printf(dev, "DC=RPANIC=%#x RDREQ=%#x TPANIC=%#x TDREQ=%#x\n", (reg & SPI_DC_RPANIC_MASK) >> SPI_DC_RPANIC_SHIFT, (reg & SPI_DC_RDREQ_MASK) >> SPI_DC_RDREQ_SHIFT, (reg & SPI_DC_TPANIC_MASK) >> SPI_DC_TPANIC_SHIFT, (reg & SPI_DC_TDREQ_MASK) >> SPI_DC_TDREQ_SHIFT); } #endif static void bcm_spi_modifyreg(struct bcm_spi_softc *sc, uint32_t off, uint32_t mask, uint32_t value) { uint32_t reg; mtx_assert(&sc->sc_mtx, MA_OWNED); reg = BCM_SPI_READ(sc, off); reg &= ~mask; reg |= value; BCM_SPI_WRITE(sc, off, reg); } static int bcm_spi_clock_proc(SYSCTL_HANDLER_ARGS) { struct bcm_spi_softc *sc; uint32_t clk; int error; sc = (struct bcm_spi_softc *)arg1; BCM_SPI_LOCK(sc); clk = BCM_SPI_READ(sc, SPI_CLK); BCM_SPI_UNLOCK(sc); clk &= 0xffff; if (clk == 0) clk = 65536; clk = SPI_CORE_CLK / clk; error = sysctl_handle_int(oidp, &clk, sizeof(clk), req); if (error != 0 || req->newptr == NULL) return (error); clk = SPI_CORE_CLK / clk; if (clk <= 1) clk = 2; else if (clk % 2) clk--; if (clk > 0xffff) clk = 0; BCM_SPI_LOCK(sc); BCM_SPI_WRITE(sc, SPI_CLK, clk); BCM_SPI_UNLOCK(sc); return (0); } static int bcm_spi_cs_bit_proc(SYSCTL_HANDLER_ARGS, uint32_t bit) { struct bcm_spi_softc *sc; uint32_t reg; int error; sc = (struct bcm_spi_softc *)arg1; BCM_SPI_LOCK(sc); reg = BCM_SPI_READ(sc, SPI_CS); BCM_SPI_UNLOCK(sc); reg = (reg & bit) ? 
1 : 0; error = sysctl_handle_int(oidp, ®, sizeof(reg), req); if (error != 0 || req->newptr == NULL) return (error); if (reg) reg = bit; BCM_SPI_LOCK(sc); bcm_spi_modifyreg(sc, SPI_CS, bit, reg); BCM_SPI_UNLOCK(sc); return (0); } static int bcm_spi_cpol_proc(SYSCTL_HANDLER_ARGS) { return (bcm_spi_cs_bit_proc(oidp, arg1, arg2, req, SPI_CS_CPOL)); } static int bcm_spi_cpha_proc(SYSCTL_HANDLER_ARGS) { return (bcm_spi_cs_bit_proc(oidp, arg1, arg2, req, SPI_CS_CPHA)); } static int bcm_spi_cspol0_proc(SYSCTL_HANDLER_ARGS) { return (bcm_spi_cs_bit_proc(oidp, arg1, arg2, req, SPI_CS_CSPOL0)); } static int bcm_spi_cspol1_proc(SYSCTL_HANDLER_ARGS) { return (bcm_spi_cs_bit_proc(oidp, arg1, arg2, req, SPI_CS_CSPOL1)); } static void bcm_spi_sysctl_init(struct bcm_spi_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree_node; struct sysctl_oid_list *tree; /* * Add system sysctl tree/handlers. */ ctx = device_get_sysctl_ctx(sc->sc_dev); tree_node = device_get_sysctl_tree(sc->sc_dev); tree = SYSCTL_CHILDREN(tree_node); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "clock", CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc), bcm_spi_clock_proc, "IU", "SPI BUS clock frequency"); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "cpol", CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc), bcm_spi_cpol_proc, "IU", "SPI BUS clock polarity"); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "cpha", CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc), bcm_spi_cpha_proc, "IU", "SPI BUS clock phase"); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "cspol0", CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc), bcm_spi_cspol0_proc, "IU", "SPI BUS chip select 0 polarity"); SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "cspol1", CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc), bcm_spi_cspol1_proc, "IU", "SPI BUS chip select 1 polarity"); } static int bcm_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "broadcom,bcm2835-spi")) return (ENXIO); device_set_desc(dev, "BCM2708/2835 SPI controller"); return (BUS_PROBE_DEFAULT); } static int bcm_spi_attach(device_t dev) { struct bcm_spi_softc *sc; device_t gpio; int i, rid; if (device_get_unit(dev) != 0) { device_printf(dev, "only one SPI controller supported\n"); return (ENXIO); } sc = device_get_softc(dev); sc->sc_dev = dev; /* Configure the GPIO pins to ALT0 function to enable SPI the pins. */ gpio = devclass_get_device(devclass_find("gpio"), 0); if (!gpio) { device_printf(dev, "cannot find gpio0\n"); return (ENXIO); } for (i = 0; i < nitems(bcm_spi_pins); i++) bcm_gpio_set_alternate(gpio, bcm_spi_pins[i], BCM_GPIO_ALT0); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } /* Hook up our interrupt handler. */ if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, bcm_spi_intr, sc, &sc->sc_intrhand)) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot setup the interrupt handler\n"); return (ENXIO); } mtx_init(&sc->sc_mtx, "bcm_spi", NULL, MTX_DEF); /* Add sysctl nodes. 
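 * (bcm_spi_sysctl_init() below publishes "clock", "cpol", "cpha", "cspol0"
 * and "cspol1" under this device's sysctl tree -- presumably dev.spi.0.*
 * for unit 0 -- so, for example, "sysctl dev.spi.0.clock=500000" would
 * reprogram SPI_CLK at run time.)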
*/ bcm_spi_sysctl_init(sc); #ifdef BCM_SPI_DEBUG bcm_spi_printr(dev); #endif /* * Enable the SPI controller. Clear the rx and tx FIFO. * Defaults to SPI mode 0. */ BCM_SPI_WRITE(sc, SPI_CS, SPI_CS_CLEAR_RXFIFO | SPI_CS_CLEAR_TXFIFO); /* Set the SPI clock to 500Khz. */ BCM_SPI_WRITE(sc, SPI_CLK, SPI_CORE_CLK / 500000); #ifdef BCM_SPI_DEBUG bcm_spi_printr(dev); #endif device_add_child(dev, "spibus", -1); return (bus_generic_attach(dev)); } static int bcm_spi_detach(device_t dev) { struct bcm_spi_softc *sc; bus_generic_detach(dev); sc = device_get_softc(dev); mtx_destroy(&sc->sc_mtx); if (sc->sc_intrhand) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); return (0); } static void bcm_spi_fill_fifo(struct bcm_spi_softc *sc) { struct spi_command *cmd; uint32_t cs, written; uint8_t *data; cmd = sc->sc_cmd; cs = BCM_SPI_READ(sc, SPI_CS) & (SPI_CS_TA | SPI_CS_TXD); while (sc->sc_written < sc->sc_len && cs == (SPI_CS_TA | SPI_CS_TXD)) { data = (uint8_t *)cmd->tx_cmd; written = sc->sc_written++; if (written >= cmd->tx_cmd_sz) { data = (uint8_t *)cmd->tx_data; written -= cmd->tx_cmd_sz; } BCM_SPI_WRITE(sc, SPI_FIFO, data[written]); cs = BCM_SPI_READ(sc, SPI_CS) & (SPI_CS_TA | SPI_CS_TXD); } } static void bcm_spi_drain_fifo(struct bcm_spi_softc *sc) { struct spi_command *cmd; uint32_t cs, read; uint8_t *data; cmd = sc->sc_cmd; cs = BCM_SPI_READ(sc, SPI_CS) & SPI_CS_RXD; while (sc->sc_read < sc->sc_len && cs == SPI_CS_RXD) { data = (uint8_t *)cmd->rx_cmd; read = sc->sc_read++; if (read >= cmd->rx_cmd_sz) { data = (uint8_t *)cmd->rx_data; read -= cmd->rx_cmd_sz; } data[read] = BCM_SPI_READ(sc, SPI_FIFO) & 0xff; cs = BCM_SPI_READ(sc, SPI_CS) & SPI_CS_RXD; } } static void bcm_spi_intr(void *arg) { struct bcm_spi_softc *sc; sc = (struct bcm_spi_softc *)arg; BCM_SPI_LOCK(sc); /* Filter stray interrupts. */ if ((sc->sc_flags & BCM_SPI_BUSY) == 0) { BCM_SPI_UNLOCK(sc); return; } /* TX - Fill up the FIFO. */ bcm_spi_fill_fifo(sc); /* RX - Drain the FIFO. */ bcm_spi_drain_fifo(sc); /* Check for end of transfer. */ if (sc->sc_written == sc->sc_len && sc->sc_read == sc->sc_len) { /* Disable interrupts and the SPI engine. */ bcm_spi_modifyreg(sc, SPI_CS, SPI_CS_TA | SPI_CS_INTR | SPI_CS_INTD, 0); wakeup(sc->sc_dev); } BCM_SPI_UNLOCK(sc); } static int bcm_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct bcm_spi_softc *sc; int cs, err; sc = device_get_softc(dev); KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz, ("TX/RX command sizes should be equal")); KASSERT(cmd->tx_data_sz == cmd->rx_data_sz, ("TX/RX data sizes should be equal")); /* Get the proper chip select for this child. */ spibus_get_cs(child, &cs); if (cs < 0 || cs > 2) { device_printf(dev, "Invalid chip select %d requested by %s\n", cs, device_get_nameunit(child)); return (EINVAL); } BCM_SPI_LOCK(sc); /* If the controller is in use wait until it is available. */ while (sc->sc_flags & BCM_SPI_BUSY) mtx_sleep(dev, &sc->sc_mtx, 0, "bcm_spi", 0); /* Now we have control over SPI controller. */ sc->sc_flags = BCM_SPI_BUSY; /* Clear the FIFO. */ bcm_spi_modifyreg(sc, SPI_CS, SPI_CS_CLEAR_RXFIFO | SPI_CS_CLEAR_TXFIFO, SPI_CS_CLEAR_RXFIFO | SPI_CS_CLEAR_TXFIFO); /* Save a pointer to the SPI command. 
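 * The transfer length set just below is the command size plus the data
 * size, e.g. a 1-byte command with a 4-byte payload gives sc_len = 5; the
 * interrupt handler keeps filling and draining the FIFO until both
 * sc_written and sc_read reach that count.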
*/ sc->sc_cmd = cmd; sc->sc_read = 0; sc->sc_written = 0; sc->sc_len = cmd->tx_cmd_sz + cmd->tx_data_sz; /* * Set the CS for this transaction, enable interrupts and announce * we're ready to tx. This will kick off the first interrupt. */ bcm_spi_modifyreg(sc, SPI_CS, SPI_CS_MASK | SPI_CS_TA | SPI_CS_INTR | SPI_CS_INTD, cs | SPI_CS_TA | SPI_CS_INTR | SPI_CS_INTD); /* Wait for the transaction to complete. */ err = mtx_sleep(dev, &sc->sc_mtx, 0, "bcm_spi", hz * 2); /* Make sure the SPI engine and interrupts are disabled. */ bcm_spi_modifyreg(sc, SPI_CS, SPI_CS_TA | SPI_CS_INTR | SPI_CS_INTD, 0); /* Release the controller and wakeup the next thread waiting for it. */ sc->sc_flags = 0; wakeup_one(dev); BCM_SPI_UNLOCK(sc); /* * Check for transfer timeout. The SPI controller doesn't * return errors. */ if (err == EWOULDBLOCK) { device_printf(sc->sc_dev, "SPI error\n"); err = EIO; } return (err); } static phandle_t bcm_spi_get_node(device_t bus, device_t dev) { /* We only have one child, the SPI bus, which needs our own node. */ return (ofw_bus_get_node(bus)); } static device_method_t bcm_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bcm_spi_probe), DEVMETHOD(device_attach, bcm_spi_attach), DEVMETHOD(device_detach, bcm_spi_detach), /* SPI interface */ DEVMETHOD(spibus_transfer, bcm_spi_transfer), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, bcm_spi_get_node), DEVMETHOD_END }; static devclass_t bcm_spi_devclass; static driver_t bcm_spi_driver = { "spi", bcm_spi_methods, sizeof(struct bcm_spi_softc), }; DRIVER_MODULE(bcm2835_spi, simplebus, bcm_spi_driver, bcm_spi_devclass, 0, 0); Index: head/sys/arm/broadcom/bcm2835/bcm2835_wdog.c =================================================================== --- head/sys/arm/broadcom/bcm2835/bcm2835_wdog.c (revision 306901) +++ head/sys/arm/broadcom/bcm2835/bcm2835_wdog.c (revision 306902) @@ -1,208 +1,207 @@ /*- * Copyright (c) 2012 Alexander Rybalko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #define BCM2835_PASWORD 0x5a #define BCM2835_WDOG_RESET 0 #define BCM2835_PASSWORD_MASK 0xff000000 #define BCM2835_PASSWORD_SHIFT 24 #define BCM2835_WDOG_TIME_MASK 0x000fffff #define BCM2835_WDOG_TIME_SHIFT 0 #define READ(_sc, _r) bus_space_read_4((_sc)->bst, (_sc)->bsh, (_r)) #define WRITE(_sc, _r, _v) bus_space_write_4((_sc)->bst, (_sc)->bsh, (_r), (_v)) #define BCM2835_RSTC_WRCFG_CLR 0xffffffcf #define BCM2835_RSTC_WRCFG_SET 0x00000030 #define BCM2835_RSTC_WRCFG_FULL_RESET 0x00000020 #define BCM2835_RSTC_RESET 0x00000102 #define BCM2835_RSTC_REG 0x00 #define BCM2835_RSTS_REG 0x04 #define BCM2835_WDOG_REG 0x08 static struct bcmwd_softc *bcmwd_lsc = NULL; struct bcmwd_softc { device_t dev; struct resource * res; bus_space_tag_t bst; bus_space_handle_t bsh; int wdog_armed; int wdog_period; char wdog_passwd; struct mtx mtx; }; static void bcmwd_watchdog_fn(void *private, u_int cmd, int *error); static int bcmwd_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "broadcom,bcm2835-wdt")) { device_set_desc(dev, "BCM2708/2835 Watchdog"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int bcmwd_attach(device_t dev) { struct bcmwd_softc *sc; int rid; if (bcmwd_lsc != NULL) return (ENXIO); sc = device_get_softc(dev); sc->wdog_period = 7; sc->wdog_passwd = BCM2835_PASWORD; sc->wdog_armed = 0; sc->dev = dev; rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->res); sc->bsh = rman_get_bushandle(sc->res); bcmwd_lsc = sc; mtx_init(&sc->mtx, "BCM2835 Watchdog", "bcmwd", MTX_DEF); EVENTHANDLER_REGISTER(watchdog_list, bcmwd_watchdog_fn, sc, 0); return (0); } static void bcmwd_watchdog_fn(void *private, u_int cmd, int *error) { struct bcmwd_softc *sc; uint64_t sec; uint32_t ticks, reg; sc = private; mtx_lock(&sc->mtx); cmd &= WD_INTERVAL; if (cmd > 0) { sec = ((uint64_t)1 << (cmd & WD_INTERVAL)) / 1000000000; if (sec == 0 || sec > 15) { /* * Can't arm * disable watchdog as watchdog(9) requires */ device_printf(sc->dev, "Can't arm, timeout must be between 1-15 seconds\n"); WRITE(sc, BCM2835_RSTC_REG, (BCM2835_PASWORD << BCM2835_PASSWORD_SHIFT) | BCM2835_RSTC_RESET); mtx_unlock(&sc->mtx); return; } ticks = (sec << 16) & BCM2835_WDOG_TIME_MASK; reg = (BCM2835_PASWORD << BCM2835_PASSWORD_SHIFT) | ticks; WRITE(sc, BCM2835_WDOG_REG, reg); reg = READ(sc, BCM2835_RSTC_REG); reg &= BCM2835_RSTC_WRCFG_CLR; reg |= BCM2835_RSTC_WRCFG_FULL_RESET; reg |= (BCM2835_PASWORD << BCM2835_PASSWORD_SHIFT); WRITE(sc, BCM2835_RSTC_REG, reg); *error = 0; } else WRITE(sc, BCM2835_RSTC_REG, (BCM2835_PASWORD << BCM2835_PASSWORD_SHIFT) | BCM2835_RSTC_RESET); mtx_unlock(&sc->mtx); } void bcmwd_watchdog_reset() { if (bcmwd_lsc == NULL) return; WRITE(bcmwd_lsc, BCM2835_WDOG_REG, (BCM2835_PASWORD << BCM2835_PASSWORD_SHIFT) | 10); WRITE(bcmwd_lsc, BCM2835_RSTC_REG, (READ(bcmwd_lsc, BCM2835_RSTC_REG) & BCM2835_RSTC_WRCFG_CLR) | (BCM2835_PASWORD << BCM2835_PASSWORD_SHIFT) | BCM2835_RSTC_WRCFG_FULL_RESET); } static device_method_t bcmwd_methods[] = { DEVMETHOD(device_probe, bcmwd_probe), DEVMETHOD(device_attach, bcmwd_attach), DEVMETHOD_END }; static driver_t bcmwd_driver = { "bcmwd", bcmwd_methods, sizeof(struct bcmwd_softc), }; static devclass_t 
bcmwd_devclass; DRIVER_MODULE(bcmwd, simplebus, bcmwd_driver, bcmwd_devclass, 0, 0); Index: head/sys/arm/freescale/imx/imx51_ipuv3.c =================================================================== --- head/sys/arm/freescale/imx/imx51_ipuv3.c (revision 306901) +++ head/sys/arm/freescale/imx/imx51_ipuv3.c (revision 306902) @@ -1,900 +1,898 @@ /*- * Copyright (c) 2012 Oleksandr Tymoshenko * Copyright (c) 2012, 2013 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Oleksandr Rybalko * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include #include #define IMX51_IPU_HSP_CLOCK 665000000 #define IPU3FB_FONT_HEIGHT 16 struct ipu3sc_softc { device_t dev; bus_addr_t pbase; bus_addr_t vbase; bus_space_tag_t iot; bus_space_handle_t ioh; bus_space_handle_t cm_ioh; bus_space_handle_t dp_ioh; bus_space_handle_t di0_ioh; bus_space_handle_t di1_ioh; bus_space_handle_t dctmpl_ioh; bus_space_handle_t dc_ioh; bus_space_handle_t dmfc_ioh; bus_space_handle_t idmac_ioh; bus_space_handle_t cpmem_ioh; }; struct video_adapter_softc { /* Videoadpater part */ video_adapter_t va; intptr_t fb_addr; intptr_t fb_paddr; unsigned int fb_size; int bpp; int depth; unsigned int height; unsigned int width; unsigned int stride; unsigned int xmargin; unsigned int ymargin; unsigned char *font; int initialized; }; static struct ipu3sc_softc *ipu3sc_softc; static struct video_adapter_softc va_softc; /* FIXME: not only 2 bytes color supported */ static uint16_t colors[16] = { 0x0000, /* black */ 0x001f, /* blue */ 0x07e0, /* green */ 0x07ff, /* cyan */ 0xf800, /* red */ 0xf81f, /* magenta */ 0x3800, /* brown */ 0xc618, /* light grey */ 0xc618, /* XXX: dark grey */ 0x001f, /* XXX: light blue */ 0x07e0, /* XXX: light green */ 0x07ff, /* XXX: light cyan */ 0xf800, /* XXX: light red */ 0xf81f, /* XXX: light magenta */ 0xffe0, /* yellow */ 0xffff, /* white */ }; static uint32_t colors_24[16] = { 0x000000,/* Black */ 0x000080,/* Blue */ 0x008000,/* Green */ 0x008080,/* Cyan */ 0x800000,/* Red */ 0x800080,/* Magenta */ 0xcc6600,/* brown */ 0xC0C0C0,/* Silver */ 0x808080,/* Gray */ 0x0000FF,/* Light Blue */ 0x00FF00,/* Light Green */ 0x00FFFF,/* Light Cyan */ 0xFF0000,/* Light Red */ 0xFF00FF,/* Light Magenta */ 0xFFFF00,/* Yellow */ 0xFFFFFF,/* White */ }; #define IPUV3_READ(ipuv3, module, reg) \ bus_space_read_4((ipuv3)->iot, (ipuv3)->module##_ioh, (reg)) #define IPUV3_WRITE(ipuv3, module, reg, val) \ bus_space_write_4((ipuv3)->iot, (ipuv3)->module##_ioh, (reg), (val)) #define CPMEM_CHANNEL_OFFSET(_c) ((_c) * 0x40) #define CPMEM_WORD_OFFSET(_w) ((_w) * 0x20) #define CPMEM_DP_OFFSET(_d) ((_d) * 0x10000) #define IMX_IPU_DP0 0 #define IMX_IPU_DP1 1 #define CPMEM_CHANNEL(_dp, _ch, _w) \ (CPMEM_DP_OFFSET(_dp) + CPMEM_CHANNEL_OFFSET(_ch) + \ CPMEM_WORD_OFFSET(_w)) #define CPMEM_OFFSET(_dp, _ch, _w, _o) \ (CPMEM_CHANNEL((_dp), (_ch), (_w)) + (_o)) #define IPUV3_DEBUG 100 #ifdef IPUV3_DEBUG #define SUBMOD_DUMP_REG(_sc, _m, _l) \ { \ int i; \ printf("*** " #_m " ***\n"); \ for (i = 0; i <= (_l); i += 4) { \ if ((i % 32) == 0) \ printf("%04x: ", i & 0xffff); \ printf("0x%08x%c", IPUV3_READ((_sc), _m, i), \ ((i + 4) % 32)?' 
':'\n'); \ } \ printf("\n"); \ } #endif #ifdef IPUV3_DEBUG int ipuv3_debug = IPUV3_DEBUG; #define DPRINTFN(n,x) if (ipuv3_debug>(n)) printf x; else #else #define DPRINTFN(n,x) #endif static int ipu3_fb_probe(device_t); static int ipu3_fb_attach(device_t); static int ipu3_fb_malloc(struct ipu3sc_softc *sc, size_t size) { sc->vbase = (uint32_t)contigmalloc(size, M_DEVBUF, M_ZERO, 0, ~0, PAGE_SIZE, 0); sc->pbase = vtophys(sc->vbase); return (0); } static void ipu3_fb_init(void *arg) { struct ipu3sc_softc *sc = arg; struct video_adapter_softc *va_sc = &va_softc; uint64_t w0sh96; uint32_t w1sh96; /* FW W0[137:125] - 96 = [41:29] */ /* FH W0[149:138] - 96 = [53:42] */ w0sh96 = IPUV3_READ(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 0, 16)); w0sh96 <<= 32; w0sh96 |= IPUV3_READ(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 0, 12)); va_sc->width = ((w0sh96 >> 29) & 0x1fff) + 1; va_sc->height = ((w0sh96 >> 42) & 0x0fff) + 1; /* SLY W1[115:102] - 96 = [19:6] */ w1sh96 = IPUV3_READ(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 1, 12)); va_sc->stride = ((w1sh96 >> 6) & 0x3fff) + 1; printf("%dx%d [%d]\n", va_sc->width, va_sc->height, va_sc->stride); va_sc->fb_size = va_sc->height * va_sc->stride; ipu3_fb_malloc(sc, va_sc->fb_size); /* DP1 + config_ch_23 + word_2 */ IPUV3_WRITE(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 1, 0), ((sc->pbase >> 3) | ((sc->pbase >> 3) << 29)) & 0xffffffff); IPUV3_WRITE(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 1, 4), ((sc->pbase >> 3) >> 3) & 0xffffffff); va_sc->fb_addr = (intptr_t)sc->vbase; va_sc->fb_paddr = (intptr_t)sc->pbase; va_sc->bpp = va_sc->stride / va_sc->width; va_sc->depth = va_sc->bpp * 8; } static int ipu3_fb_probe(device_t dev) { int error; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "fsl,ipu3")) return (ENXIO); device_set_desc(dev, "i.MX5x Image Processing Unit v3 (FB)"); error = sc_probe_unit(device_get_unit(dev), device_get_flags(dev) | SC_AUTODETECT_KBD); if (error != 0) return (error); return (BUS_PROBE_DEFAULT); } static int ipu3_fb_attach(device_t dev) { struct ipu3sc_softc *sc = device_get_softc(dev); bus_space_tag_t iot; bus_space_handle_t ioh; phandle_t node; pcell_t reg; int err; uintptr_t base; if (ipu3sc_softc) return (ENXIO); ipu3sc_softc = sc; if (bootverbose) device_printf(dev, "clock gate status is %d\n", imx51_get_clk_gating(IMX51CLK_IPU_HSP_CLK_ROOT)); sc->dev = dev; err = (sc_attach_unit(device_get_unit(dev), device_get_flags(dev) | SC_AUTODETECT_KBD)); if (err) { device_printf(dev, "failed to attach syscons\n"); goto fail; } sc = device_get_softc(dev); sc->iot = iot = fdtbus_bs_tag; /* * Retrieve the device address based on the start address in the * DTS. The DTS for i.MX51 specifies 0x5e000000 as the first register * address, so we just subtract IPU_CM_BASE to get the offset at which * the IPU device was memory mapped. * On i.MX53, the offset is 0. 
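 * Concretely, base is computed below as fdt32_to_cpu(reg) - IPU_CM_BASE(0)
 * and fed back into the IPU_*_BASE(base) macros, which should then resolve
 * to the absolute addresses the DTS describes; a missing "reg" property
 * falls back to base = 0.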
*/ node = ofw_bus_get_node(dev); if ((OF_getprop(node, "reg", ®, sizeof(reg))) <= 0) base = 0; else base = fdt32_to_cpu(reg) - IPU_CM_BASE(0); /* map controller registers */ err = bus_space_map(iot, IPU_CM_BASE(base), IPU_CM_SIZE, 0, &ioh); if (err) goto fail_retarn_cm; sc->cm_ioh = ioh; /* map Display Multi FIFO Controller registers */ err = bus_space_map(iot, IPU_DMFC_BASE(base), IPU_DMFC_SIZE, 0, &ioh); if (err) goto fail_retarn_dmfc; sc->dmfc_ioh = ioh; /* map Display Interface 0 registers */ err = bus_space_map(iot, IPU_DI0_BASE(base), IPU_DI0_SIZE, 0, &ioh); if (err) goto fail_retarn_di0; sc->di0_ioh = ioh; /* map Display Interface 1 registers */ err = bus_space_map(iot, IPU_DI1_BASE(base), IPU_DI0_SIZE, 0, &ioh); if (err) goto fail_retarn_di1; sc->di1_ioh = ioh; /* map Display Processor registers */ err = bus_space_map(iot, IPU_DP_BASE(base), IPU_DP_SIZE, 0, &ioh); if (err) goto fail_retarn_dp; sc->dp_ioh = ioh; /* map Display Controller registers */ err = bus_space_map(iot, IPU_DC_BASE(base), IPU_DC_SIZE, 0, &ioh); if (err) goto fail_retarn_dc; sc->dc_ioh = ioh; /* map Image DMA Controller registers */ err = bus_space_map(iot, IPU_IDMAC_BASE(base), IPU_IDMAC_SIZE, 0, &ioh); if (err) goto fail_retarn_idmac; sc->idmac_ioh = ioh; /* map CPMEM registers */ err = bus_space_map(iot, IPU_CPMEM_BASE(base), IPU_CPMEM_SIZE, 0, &ioh); if (err) goto fail_retarn_cpmem; sc->cpmem_ioh = ioh; /* map DCTEMPL registers */ err = bus_space_map(iot, IPU_DCTMPL_BASE(base), IPU_DCTMPL_SIZE, 0, &ioh); if (err) goto fail_retarn_dctmpl; sc->dctmpl_ioh = ioh; #ifdef notyet sc->ih = imx51_ipuv3_intr_establish(IMX51_INT_IPUV3, IPL_BIO, ipuv3intr, sc); if (sc->ih == NULL) { device_printf(sc->dev, "unable to establish interrupt at irq %d\n", IMX51_INT_IPUV3); return (ENXIO); } #endif /* * We have to wait until interrupts are enabled. * Mailbox relies on it to get data from VideoCore */ ipu3_fb_init(sc); return (0); fail: return (ENXIO); fail_retarn_dctmpl: bus_space_unmap(sc->iot, sc->cpmem_ioh, IPU_CPMEM_SIZE); fail_retarn_cpmem: bus_space_unmap(sc->iot, sc->idmac_ioh, IPU_IDMAC_SIZE); fail_retarn_idmac: bus_space_unmap(sc->iot, sc->dc_ioh, IPU_DC_SIZE); fail_retarn_dp: bus_space_unmap(sc->iot, sc->dp_ioh, IPU_DP_SIZE); fail_retarn_dc: bus_space_unmap(sc->iot, sc->di1_ioh, IPU_DI1_SIZE); fail_retarn_di1: bus_space_unmap(sc->iot, sc->di0_ioh, IPU_DI0_SIZE); fail_retarn_di0: bus_space_unmap(sc->iot, sc->dmfc_ioh, IPU_DMFC_SIZE); fail_retarn_dmfc: bus_space_unmap(sc->iot, sc->dc_ioh, IPU_CM_SIZE); fail_retarn_cm: device_printf(sc->dev, "failed to map registers (errno=%d)\n", err); return (err); } static device_method_t ipu3_fb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ipu3_fb_probe), DEVMETHOD(device_attach, ipu3_fb_attach), { 0, 0 } }; static devclass_t ipu3_fb_devclass; static driver_t ipu3_fb_driver = { "fb", ipu3_fb_methods, sizeof(struct ipu3sc_softc), }; DRIVER_MODULE(ipu3fb, simplebus, ipu3_fb_driver, ipu3_fb_devclass, 0, 0); /* * Video driver routines and glue. 
*/ static int ipu3fb_configure(int); static vi_probe_t ipu3fb_probe; static vi_init_t ipu3fb_init; static vi_get_info_t ipu3fb_get_info; static vi_query_mode_t ipu3fb_query_mode; static vi_set_mode_t ipu3fb_set_mode; static vi_save_font_t ipu3fb_save_font; static vi_load_font_t ipu3fb_load_font; static vi_show_font_t ipu3fb_show_font; static vi_save_palette_t ipu3fb_save_palette; static vi_load_palette_t ipu3fb_load_palette; static vi_set_border_t ipu3fb_set_border; static vi_save_state_t ipu3fb_save_state; static vi_load_state_t ipu3fb_load_state; static vi_set_win_org_t ipu3fb_set_win_org; static vi_read_hw_cursor_t ipu3fb_read_hw_cursor; static vi_set_hw_cursor_t ipu3fb_set_hw_cursor; static vi_set_hw_cursor_shape_t ipu3fb_set_hw_cursor_shape; static vi_blank_display_t ipu3fb_blank_display; static vi_mmap_t ipu3fb_mmap; static vi_ioctl_t ipu3fb_ioctl; static vi_clear_t ipu3fb_clear; static vi_fill_rect_t ipu3fb_fill_rect; static vi_bitblt_t ipu3fb_bitblt; static vi_diag_t ipu3fb_diag; static vi_save_cursor_palette_t ipu3fb_save_cursor_palette; static vi_load_cursor_palette_t ipu3fb_load_cursor_palette; static vi_copy_t ipu3fb_copy; static vi_putp_t ipu3fb_putp; static vi_putc_t ipu3fb_putc; static vi_puts_t ipu3fb_puts; static vi_putm_t ipu3fb_putm; static video_switch_t ipu3fbvidsw = { .probe = ipu3fb_probe, .init = ipu3fb_init, .get_info = ipu3fb_get_info, .query_mode = ipu3fb_query_mode, .set_mode = ipu3fb_set_mode, .save_font = ipu3fb_save_font, .load_font = ipu3fb_load_font, .show_font = ipu3fb_show_font, .save_palette = ipu3fb_save_palette, .load_palette = ipu3fb_load_palette, .set_border = ipu3fb_set_border, .save_state = ipu3fb_save_state, .load_state = ipu3fb_load_state, .set_win_org = ipu3fb_set_win_org, .read_hw_cursor = ipu3fb_read_hw_cursor, .set_hw_cursor = ipu3fb_set_hw_cursor, .set_hw_cursor_shape = ipu3fb_set_hw_cursor_shape, .blank_display = ipu3fb_blank_display, .mmap = ipu3fb_mmap, .ioctl = ipu3fb_ioctl, .clear = ipu3fb_clear, .fill_rect = ipu3fb_fill_rect, .bitblt = ipu3fb_bitblt, .diag = ipu3fb_diag, .save_cursor_palette = ipu3fb_save_cursor_palette, .load_cursor_palette = ipu3fb_load_cursor_palette, .copy = ipu3fb_copy, .putp = ipu3fb_putp, .putc = ipu3fb_putc, .puts = ipu3fb_puts, .putm = ipu3fb_putm, }; VIDEO_DRIVER(ipu3fb, ipu3fbvidsw, ipu3fb_configure); extern sc_rndr_sw_t txtrndrsw; RENDERER(ipu3fb, 0, txtrndrsw, gfb_set); RENDERER_MODULE(ipu3fb, gfb_set); static uint16_t ipu3fb_static_window[ROW*COL]; extern u_char dflt_font_16[]; static int ipu3fb_configure(int flags) { struct video_adapter_softc *sc; sc = &va_softc; if (sc->initialized) return 0; sc->width = 640; sc->height = 480; sc->bpp = 2; sc->stride = sc->width * sc->bpp; ipu3fb_init(0, &sc->va, 0); sc->initialized = 1; return (0); } static int ipu3fb_probe(int unit, video_adapter_t **adp, void *arg, int flags) { return (0); } static int ipu3fb_init(int unit, video_adapter_t *adp, int flags) { struct video_adapter_softc *sc; video_info_t *vi; sc = (struct video_adapter_softc *)adp; vi = &adp->va_info; vid_init_struct(adp, "ipu3fb", -1, unit); sc->font = dflt_font_16; vi->vi_cheight = IPU3FB_FONT_HEIGHT; vi->vi_cwidth = 8; vi->vi_width = sc->width/8; vi->vi_height = sc->height/vi->vi_cheight; /* * Clamp width/height to syscons maximums */ if (vi->vi_width > COL) vi->vi_width = COL; if (vi->vi_height > ROW) vi->vi_height = ROW; sc->xmargin = (sc->width - (vi->vi_width * vi->vi_cwidth)) / 2; sc->ymargin = (sc->height - (vi->vi_height * vi->vi_cheight))/2; adp->va_window = (vm_offset_t) 
ipu3fb_static_window; adp->va_flags |= V_ADP_FONT /* | V_ADP_COLOR | V_ADP_MODECHANGE */; adp->va_line_width = sc->stride; adp->va_buffer_size = sc->fb_size; vid_register(&sc->va); return (0); } static int ipu3fb_get_info(video_adapter_t *adp, int mode, video_info_t *info) { bcopy(&adp->va_info, info, sizeof(*info)); return (0); } static int ipu3fb_query_mode(video_adapter_t *adp, video_info_t *info) { return (0); } static int ipu3fb_set_mode(video_adapter_t *adp, int mode) { return (0); } static int ipu3fb_save_font(video_adapter_t *adp, int page, int size, int width, u_char *data, int c, int count) { return (0); } static int ipu3fb_load_font(video_adapter_t *adp, int page, int size, int width, u_char *data, int c, int count) { struct video_adapter_softc *sc; sc = (struct video_adapter_softc *)adp; sc->font = data; return (0); } static int ipu3fb_show_font(video_adapter_t *adp, int page) { return (0); } static int ipu3fb_save_palette(video_adapter_t *adp, u_char *palette) { return (0); } static int ipu3fb_load_palette(video_adapter_t *adp, u_char *palette) { return (0); } static int ipu3fb_set_border(video_adapter_t *adp, int border) { return (ipu3fb_blank_display(adp, border)); } static int ipu3fb_save_state(video_adapter_t *adp, void *p, size_t size) { return (0); } static int ipu3fb_load_state(video_adapter_t *adp, void *p) { return (0); } static int ipu3fb_set_win_org(video_adapter_t *adp, off_t offset) { return (0); } static int ipu3fb_read_hw_cursor(video_adapter_t *adp, int *col, int *row) { *col = *row = 0; return (0); } static int ipu3fb_set_hw_cursor(video_adapter_t *adp, int col, int row) { return (0); } static int ipu3fb_set_hw_cursor_shape(video_adapter_t *adp, int base, int height, int celsize, int blink) { return (0); } static int ipu3fb_blank_display(video_adapter_t *adp, int mode) { return (0); } static int ipu3fb_mmap(video_adapter_t *adp, vm_ooffset_t offset, vm_paddr_t *paddr, int prot, vm_memattr_t *memattr) { struct video_adapter_softc *sc; sc = (struct video_adapter_softc *)adp; /* * This might be a legacy VGA mem request: if so, just point it at the * framebuffer, since it shouldn't be touched */ if (offset < sc->stride * sc->height) { *paddr = sc->fb_paddr + offset; return (0); } return (EINVAL); } static int ipu3fb_ioctl(video_adapter_t *adp, u_long cmd, caddr_t data) { struct video_adapter_softc *sc; struct fbtype *fb; sc = (struct video_adapter_softc *)adp; switch (cmd) { case FBIOGTYPE: fb = (struct fbtype *)data; fb->fb_type = FBTYPE_PCIMISC; fb->fb_height = sc->height; fb->fb_width = sc->width; fb->fb_depth = sc->depth; if (sc->depth <= 1 || sc->depth > 8) fb->fb_cmsize = 0; else fb->fb_cmsize = 1 << sc->depth; fb->fb_size = sc->fb_size; break; case FBIOSCURSOR: return (ENODEV); default: return (fb_commonioctl(adp, cmd, data)); } return (0); } static int ipu3fb_clear(video_adapter_t *adp) { return (ipu3fb_blank_display(adp, 0)); } static int ipu3fb_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { return (0); } static int ipu3fb_bitblt(video_adapter_t *adp, ...) 
{ return (0); } static int ipu3fb_diag(video_adapter_t *adp, int level) { return (0); } static int ipu3fb_save_cursor_palette(video_adapter_t *adp, u_char *palette) { return (0); } static int ipu3fb_load_cursor_palette(video_adapter_t *adp, u_char *palette) { return (0); } static int ipu3fb_copy(video_adapter_t *adp, vm_offset_t src, vm_offset_t dst, int n) { return (0); } static int ipu3fb_putp(video_adapter_t *adp, vm_offset_t off, uint32_t p, uint32_t a, int size, int bpp, int bit_ltor, int byte_ltor) { return (0); } static int ipu3fb_putc(video_adapter_t *adp, vm_offset_t off, uint8_t c, uint8_t a) { struct video_adapter_softc *sc; int col, row, bpp; int b, i, j, k; uint8_t *addr; u_char *p; uint32_t fg, bg, color; sc = (struct video_adapter_softc *)adp; bpp = sc->bpp; if (sc->fb_addr == 0) return (0); row = (off / adp->va_info.vi_width) * adp->va_info.vi_cheight; col = (off % adp->va_info.vi_width) * adp->va_info.vi_cwidth; p = sc->font + c * IPU3FB_FONT_HEIGHT; addr = (uint8_t *)sc->fb_addr + (row + sc->ymargin) * (sc->stride) + bpp * (col + sc->xmargin); if (bpp == 2) { bg = colors[(a >> 4) & 0x0f]; fg = colors[a & 0x0f]; } else if (bpp == 3) { bg = colors_24[(a >> 4) & 0x0f]; fg = colors_24[a & 0x0f]; } else { return (ENXIO); } for (i = 0; i < IPU3FB_FONT_HEIGHT; i++) { for (j = 0, k = 7; j < 8; j++, k--) { if ((p[i] & (1 << k)) == 0) color = bg; else color = fg; /* FIXME: BPP maybe different */ for (b = 0; b < bpp; b ++) addr[bpp * j + b] = (color >> (b << 3)) & 0xff; } addr += (sc->stride); } return (0); } static int ipu3fb_puts(video_adapter_t *adp, vm_offset_t off, u_int16_t *s, int len) { int i; for (i = 0; i < len; i++) ipu3fb_putc(adp, off + i, s[i] & 0xff, (s[i] & 0xff00) >> 8); return (0); } static int ipu3fb_putm(video_adapter_t *adp, int x, int y, uint8_t *pixel_image, uint32_t pixel_mask, int size, int width) { return (0); } /* * Define a stub keyboard driver in case one hasn't been * compiled into the kernel */ #include #include static int dummy_kbd_configure(int flags); keyboard_switch_t ipu3dummysw; static int dummy_kbd_configure(int flags) { return (0); } KEYBOARD_DRIVER(ipu3dummy, ipu3dummysw, dummy_kbd_configure); Index: head/sys/arm/freescale/imx/imx51_ipuv3_fbd.c =================================================================== --- head/sys/arm/freescale/imx/imx51_ipuv3_fbd.c (revision 306901) +++ head/sys/arm/freescale/imx/imx51_ipuv3_fbd.c (revision 306902) @@ -1,365 +1,363 @@ /*- * Copyright (c) 2012 Oleksandr Tymoshenko * Copyright (c) 2012, 2013 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Oleksandr Rybalko * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include #include #include "fb_if.h" #define IMX51_IPU_HSP_CLOCK 665000000 struct ipu3sc_softc { device_t dev; device_t sc_fbd; /* fbd child */ struct fb_info sc_info; bus_space_tag_t iot; bus_space_handle_t ioh; bus_space_handle_t cm_ioh; bus_space_handle_t dp_ioh; bus_space_handle_t di0_ioh; bus_space_handle_t di1_ioh; bus_space_handle_t dctmpl_ioh; bus_space_handle_t dc_ioh; bus_space_handle_t dmfc_ioh; bus_space_handle_t idmac_ioh; bus_space_handle_t cpmem_ioh; }; static struct ipu3sc_softc *ipu3sc_softc; #define IPUV3_READ(ipuv3, module, reg) \ bus_space_read_4((ipuv3)->iot, (ipuv3)->module##_ioh, (reg)) #define IPUV3_WRITE(ipuv3, module, reg, val) \ bus_space_write_4((ipuv3)->iot, (ipuv3)->module##_ioh, (reg), (val)) #define CPMEM_CHANNEL_OFFSET(_c) ((_c) * 0x40) #define CPMEM_WORD_OFFSET(_w) ((_w) * 0x20) #define CPMEM_DP_OFFSET(_d) ((_d) * 0x10000) #define IMX_IPU_DP0 0 #define IMX_IPU_DP1 1 #define CPMEM_CHANNEL(_dp, _ch, _w) \ (CPMEM_DP_OFFSET(_dp) + CPMEM_CHANNEL_OFFSET(_ch) + \ CPMEM_WORD_OFFSET(_w)) #define CPMEM_OFFSET(_dp, _ch, _w, _o) \ (CPMEM_CHANNEL((_dp), (_ch), (_w)) + (_o)) static int ipu3_fb_probe(device_t); static int ipu3_fb_attach(device_t); static void ipu3_fb_init(struct ipu3sc_softc *sc) { uint64_t w0sh96; uint32_t w1sh96; /* FW W0[137:125] - 96 = [41:29] */ /* FH W0[149:138] - 96 = [53:42] */ w0sh96 = IPUV3_READ(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 0, 16)); w0sh96 <<= 32; w0sh96 |= IPUV3_READ(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 0, 12)); sc->sc_info.fb_width = ((w0sh96 >> 29) & 0x1fff) + 1; sc->sc_info.fb_height = ((w0sh96 >> 42) & 0x0fff) + 1; /* SLY W1[115:102] - 96 = [19:6] */ w1sh96 = IPUV3_READ(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 1, 12)); sc->sc_info.fb_stride = ((w1sh96 >> 6) & 0x3fff) + 1; printf("%dx%d [%d]\n", sc->sc_info.fb_width, sc->sc_info.fb_height, sc->sc_info.fb_stride); sc->sc_info.fb_size = sc->sc_info.fb_height * sc->sc_info.fb_stride; sc->sc_info.fb_vbase = (intptr_t)contigmalloc(sc->sc_info.fb_size, M_DEVBUF, M_ZERO, 0, ~0, PAGE_SIZE, 0); sc->sc_info.fb_pbase = (intptr_t)vtophys(sc->sc_info.fb_vbase); /* DP1 + config_ch_23 + word_2 */ IPUV3_WRITE(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 1, 0), (((uint32_t)sc->sc_info.fb_pbase >> 3) | (((uint32_t)sc->sc_info.fb_pbase >> 3) << 29)) & 0xffffffff); IPUV3_WRITE(sc, cpmem, CPMEM_OFFSET(IMX_IPU_DP1, 23, 1, 4), (((uint32_t)sc->sc_info.fb_pbase >> 3) >> 3) & 0xffffffff); /* XXX: fetch or set it from/to IPU. */ sc->sc_info.fb_bpp = sc->sc_info.fb_depth = sc->sc_info.fb_stride / sc->sc_info.fb_width * 8; } /* Use own color map, because of different RGB offset. 
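 * For example, at 16 bits per pixel the palette below is generated for an
 * RGB565-style layout (5-bit red at shift 11, 6-bit green at shift 5,
 * 5-bit blue at shift 0), while 24/32 bpp use 8-bit components at shifts
 * 0/8/16 and ignore any alpha byte.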
*/ static int ipu3_fb_init_cmap(uint32_t *cmap, int bytespp) { switch (bytespp) { case 8: return (vt_generate_cons_palette(cmap, COLOR_FORMAT_RGB, 0x7, 5, 0x7, 2, 0x3, 0)); case 15: return (vt_generate_cons_palette(cmap, COLOR_FORMAT_RGB, 0x1f, 10, 0x1f, 5, 0x1f, 0)); case 16: return (vt_generate_cons_palette(cmap, COLOR_FORMAT_RGB, 0x1f, 11, 0x3f, 5, 0x1f, 0)); case 24: case 32: /* Ignore alpha. */ return (vt_generate_cons_palette(cmap, COLOR_FORMAT_RGB, 0xff, 0, 0xff, 8, 0xff, 16)); default: return (1); } } static int ipu3_fb_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "fsl,ipu3")) return (ENXIO); device_set_desc(dev, "i.MX5x Image Processing Unit v3 (FB)"); return (BUS_PROBE_DEFAULT); } static int ipu3_fb_attach(device_t dev) { struct ipu3sc_softc *sc = device_get_softc(dev); bus_space_tag_t iot; bus_space_handle_t ioh; phandle_t node; pcell_t reg; int err; uintptr_t base; ipu3sc_softc = sc; if (bootverbose) device_printf(dev, "clock gate status is %d\n", imx51_get_clk_gating(IMX51CLK_IPU_HSP_CLK_ROOT)); sc->dev = dev; sc = device_get_softc(dev); sc->iot = iot = fdtbus_bs_tag; /* * Retrieve the device address based on the start address in the * DTS. The DTS for i.MX51 specifies 0x5e000000 as the first register * address, so we just subtract IPU_CM_BASE to get the offset at which * the IPU device was memory mapped. * On i.MX53, the offset is 0. */ node = ofw_bus_get_node(dev); if ((OF_getprop(node, "reg", ®, sizeof(reg))) <= 0) base = 0; else base = fdt32_to_cpu(reg) - IPU_CM_BASE(0); /* map controller registers */ err = bus_space_map(iot, IPU_CM_BASE(base), IPU_CM_SIZE, 0, &ioh); if (err) goto fail_retarn_cm; sc->cm_ioh = ioh; /* map Display Multi FIFO Controller registers */ err = bus_space_map(iot, IPU_DMFC_BASE(base), IPU_DMFC_SIZE, 0, &ioh); if (err) goto fail_retarn_dmfc; sc->dmfc_ioh = ioh; /* map Display Interface 0 registers */ err = bus_space_map(iot, IPU_DI0_BASE(base), IPU_DI0_SIZE, 0, &ioh); if (err) goto fail_retarn_di0; sc->di0_ioh = ioh; /* map Display Interface 1 registers */ err = bus_space_map(iot, IPU_DI1_BASE(base), IPU_DI0_SIZE, 0, &ioh); if (err) goto fail_retarn_di1; sc->di1_ioh = ioh; /* map Display Processor registers */ err = bus_space_map(iot, IPU_DP_BASE(base), IPU_DP_SIZE, 0, &ioh); if (err) goto fail_retarn_dp; sc->dp_ioh = ioh; /* map Display Controller registers */ err = bus_space_map(iot, IPU_DC_BASE(base), IPU_DC_SIZE, 0, &ioh); if (err) goto fail_retarn_dc; sc->dc_ioh = ioh; /* map Image DMA Controller registers */ err = bus_space_map(iot, IPU_IDMAC_BASE(base), IPU_IDMAC_SIZE, 0, &ioh); if (err) goto fail_retarn_idmac; sc->idmac_ioh = ioh; /* map CPMEM registers */ err = bus_space_map(iot, IPU_CPMEM_BASE(base), IPU_CPMEM_SIZE, 0, &ioh); if (err) goto fail_retarn_cpmem; sc->cpmem_ioh = ioh; /* map DCTEMPL registers */ err = bus_space_map(iot, IPU_DCTMPL_BASE(base), IPU_DCTMPL_SIZE, 0, &ioh); if (err) goto fail_retarn_dctmpl; sc->dctmpl_ioh = ioh; #ifdef notyet sc->ih = imx51_ipuv3_intr_establish(IMX51_INT_IPUV3, IPL_BIO, ipuv3intr, sc); if (sc->ih == NULL) { device_printf(sc->dev, "unable to establish interrupt at irq %d\n", IMX51_INT_IPUV3); return (ENXIO); } #endif /* * We have to wait until interrupts are enabled. * Mailbox relies on it to get data from VideoCore */ ipu3_fb_init(sc); sc->sc_info.fb_name = device_get_nameunit(dev); ipu3_fb_init_cmap(sc->sc_info.fb_cmap, sc->sc_info.fb_depth); sc->sc_info.fb_cmsize = 16; /* Ask newbus to attach framebuffer device to me. 
*/ sc->sc_fbd = device_add_child(dev, "fbd", device_get_unit(dev)); if (sc->sc_fbd == NULL) device_printf(dev, "Can't attach fbd device\n"); return (bus_generic_attach(dev)); fail_retarn_dctmpl: bus_space_unmap(sc->iot, sc->cpmem_ioh, IPU_CPMEM_SIZE); fail_retarn_cpmem: bus_space_unmap(sc->iot, sc->idmac_ioh, IPU_IDMAC_SIZE); fail_retarn_idmac: bus_space_unmap(sc->iot, sc->dc_ioh, IPU_DC_SIZE); fail_retarn_dp: bus_space_unmap(sc->iot, sc->dp_ioh, IPU_DP_SIZE); fail_retarn_dc: bus_space_unmap(sc->iot, sc->di1_ioh, IPU_DI1_SIZE); fail_retarn_di1: bus_space_unmap(sc->iot, sc->di0_ioh, IPU_DI0_SIZE); fail_retarn_di0: bus_space_unmap(sc->iot, sc->dmfc_ioh, IPU_DMFC_SIZE); fail_retarn_dmfc: bus_space_unmap(sc->iot, sc->dc_ioh, IPU_CM_SIZE); fail_retarn_cm: device_printf(sc->dev, "failed to map registers (errno=%d)\n", err); return (err); } static struct fb_info * ipu3_fb_getinfo(device_t dev) { struct ipu3sc_softc *sc = device_get_softc(dev); return (&sc->sc_info); } static device_method_t ipu3_fb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ipu3_fb_probe), DEVMETHOD(device_attach, ipu3_fb_attach), /* Framebuffer service methods */ DEVMETHOD(fb_getinfo, ipu3_fb_getinfo), { 0, 0 } }; static devclass_t ipu3_fb_devclass; static driver_t ipu3_fb_driver = { "fb", ipu3_fb_methods, sizeof(struct ipu3sc_softc), }; DRIVER_MODULE(fb, simplebus, ipu3_fb_driver, ipu3_fb_devclass, 0, 0); Index: head/sys/arm/lpc/lpc_fb.c =================================================================== --- head/sys/arm/lpc/lpc_fb.c (revision 306901) +++ head/sys/arm/lpc/lpc_fb.c (revision 306902) @@ -1,469 +1,467 @@ /*- * Copyright (c) 2011 Jakub Wojciech Klama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
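A small userland model of the CPMEM decoding performed by ipu3_fb_init() above: word 0 carries the frame width and height as "value - 1" bit fields at [41:29] and [53:42], and word 1 carries the stride at [19:6]. The packed values below are synthesized from an example 1024x768 geometry rather than read from hardware.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t w0;
	uint32_t w1;
	unsigned width, height, stride;

	/* Pack an example 1024x768 frame with a 4096-byte stride. */
	w0 = ((uint64_t)(1024 - 1) << 29) | ((uint64_t)(768 - 1) << 42);
	w1 = (4096 - 1) << 6;

	/* Decode exactly as the driver does. */
	width  = ((w0 >> 29) & 0x1fff) + 1;
	height = ((w0 >> 42) & 0x0fff) + 1;
	stride = ((w1 >>  6) & 0x3fff) + 1;

	printf("%ux%u [%u]\n", width, height, stride);
	return (0);
}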
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include struct lpc_fb_dmamap_arg { bus_addr_t lf_dma_busaddr; }; struct lpc_lcd_config { int lc_xres; int lc_yres; int lc_bpp; uint32_t lc_pixelclock; int lc_left_margin; int lc_right_margin; int lc_upper_margin; int lc_lower_margin; int lc_hsync_len; int lc_vsync_len; }; struct lpc_fb_softc { device_t lf_dev; struct cdev * lf_cdev; struct mtx lf_mtx; struct resource * lf_mem_res; struct resource * lf_irq_res; bus_space_tag_t lf_bst; bus_space_handle_t lf_bsh; void * lf_intrhand; bus_dma_tag_t lf_dma_tag; bus_dmamap_t lf_dma_map; void * lf_buffer; bus_addr_t lf_buffer_phys; bus_size_t lf_buffer_size; struct lpc_lcd_config lf_lcd_config; int lf_initialized; int lf_opened; }; extern void ssd1289_configure(void); #define lpc_fb_lock(_sc) mtx_lock(&(_sc)->lf_mtx) #define lpc_fb_unlock(_sc) mtx_unlock(&(_sc)->lf_mtx) #define lpc_fb_lock_assert(sc) mtx_assert(&(_sc)->lf_mtx, MA_OWNED) #define lpc_fb_read_4(_sc, _reg) \ bus_space_read_4((_sc)->lf_bst, (_sc)->lf_bsh, (_reg)) #define lpc_fb_write_4(_sc, _reg, _val) \ bus_space_write_4((_sc)->lf_bst, (_sc)->lf_bsh, (_reg), (_val)) static int lpc_fb_probe(device_t); static int lpc_fb_attach(device_t); static void lpc_fb_intr(void *); static void lpc_fb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err); static int lpc_fb_fdt_read(phandle_t, const char *, uint32_t *); static int lpc_fb_read_lcd_config(phandle_t, struct lpc_lcd_config *); static int lpc_fb_open(struct cdev *, int, int, struct thread *); static int lpc_fb_close(struct cdev *, int, int, struct thread *); static int lpc_fb_ioctl(struct cdev *, u_long, caddr_t, int, struct thread *); static int lpc_fb_mmap(struct cdev *, vm_ooffset_t, vm_paddr_t *, int, vm_memattr_t *); static void lpc_fb_blank(struct lpc_fb_softc *); static struct cdevsw lpc_fb_cdevsw = { .d_open = lpc_fb_open, .d_close = lpc_fb_close, .d_ioctl = lpc_fb_ioctl, .d_mmap = lpc_fb_mmap, .d_name = "lpcfb", .d_version = D_VERSION, }; static int lpc_fb_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "lpc,fb")) return (ENXIO); device_set_desc(dev, "LPC32x0 framebuffer device"); return (BUS_PROBE_DEFAULT); } static int lpc_fb_attach(device_t dev) { struct lpc_fb_softc *sc = device_get_softc(dev); struct lpc_fb_dmamap_arg ctx; phandle_t node; int mode, rid, err = 0; sc->lf_dev = dev; mtx_init(&sc->lf_mtx, "lpcfb", "fb", MTX_DEF); rid = 0; sc->lf_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->lf_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->lf_bst = rman_get_bustag(sc->lf_mem_res); sc->lf_bsh = rman_get_bushandle(sc->lf_mem_res); rid = 0; sc->lf_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->lf_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lf_mem_res); return (ENXIO); } if (bus_setup_intr(dev, sc->lf_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, lpc_fb_intr, sc, &sc->lf_intrhand)) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lf_mem_res); bus_release_resource(dev, SYS_RES_IRQ, 1, sc->lf_irq_res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } node = ofw_bus_get_node(dev); err = 
lpc_fb_read_lcd_config(node, &sc->lf_lcd_config); if (err) { device_printf(dev, "cannot read LCD configuration\n"); goto fail; } sc->lf_buffer_size = sc->lf_lcd_config.lc_xres * sc->lf_lcd_config.lc_yres * (sc->lf_lcd_config.lc_bpp == 24 ? 3 : 2); device_printf(dev, "%dx%d LCD, %d bits per pixel, %dkHz pixel clock\n", sc->lf_lcd_config.lc_xres, sc->lf_lcd_config.lc_yres, sc->lf_lcd_config.lc_bpp, sc->lf_lcd_config.lc_pixelclock / 1000); err = bus_dma_tag_create( bus_get_dma_tag(sc->lf_dev), 4, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->lf_buffer_size, 1, /* maxsize, nsegments */ sc->lf_buffer_size, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->lf_dma_tag); err = bus_dmamem_alloc(sc->lf_dma_tag, (void **)&sc->lf_buffer, 0, &sc->lf_dma_map); if (err) { device_printf(dev, "cannot allocate framebuffer\n"); goto fail; } err = bus_dmamap_load(sc->lf_dma_tag, sc->lf_dma_map, sc->lf_buffer, sc->lf_buffer_size, lpc_fb_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (err) { device_printf(dev, "cannot load DMA map\n"); goto fail; } switch (sc->lf_lcd_config.lc_bpp) { case 12: mode = LPC_CLKPWR_LCDCLK_CTRL_MODE_12; break; case 15: mode = LPC_CLKPWR_LCDCLK_CTRL_MODE_15; break; case 16: mode = LPC_CLKPWR_LCDCLK_CTRL_MODE_16; break; case 24: mode = LPC_CLKPWR_LCDCLK_CTRL_MODE_24; break; default: panic("unsupported bpp"); } lpc_pwr_write(sc->lf_dev, LPC_CLKPWR_LCDCLK_CTRL, LPC_CLKPWR_LCDCLK_CTRL_MODE(mode) | LPC_CLKPWR_LCDCLK_CTRL_HCLKEN); sc->lf_buffer_phys = ctx.lf_dma_busaddr; sc->lf_cdev = make_dev(&lpc_fb_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "lpcfb"); sc->lf_cdev->si_drv1 = sc; return (0); fail: return (ENXIO); } static void lpc_fb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { struct lpc_fb_dmamap_arg *ctx; if (err) return; ctx = (struct lpc_fb_dmamap_arg *)arg; ctx->lf_dma_busaddr = segs[0].ds_addr; } static void lpc_fb_intr(void *arg) { } static int lpc_fb_fdt_read(phandle_t node, const char *name, uint32_t *ret) { if (OF_getprop(node, name, ret, sizeof(uint32_t)) <= 0) return (ENOENT); *ret = fdt32_to_cpu(*ret); return (0); } static int lpc_fb_read_lcd_config(phandle_t node, struct lpc_lcd_config *cfg) { if (lpc_fb_fdt_read(node, "horizontal-resolution", &cfg->lc_xres)) return (ENXIO); if (lpc_fb_fdt_read(node, "vertical-resolution", &cfg->lc_yres)) return (ENXIO); if (lpc_fb_fdt_read(node, "bits-per-pixel", &cfg->lc_bpp)) return (ENXIO); if (lpc_fb_fdt_read(node, "pixel-clock", &cfg->lc_pixelclock)) return (ENXIO); if (lpc_fb_fdt_read(node, "left-margin", &cfg->lc_left_margin)) return (ENXIO); if (lpc_fb_fdt_read(node, "right-margin", &cfg->lc_right_margin)) return (ENXIO); if (lpc_fb_fdt_read(node, "upper-margin", &cfg->lc_upper_margin)) return (ENXIO); if (lpc_fb_fdt_read(node, "lower-margin", &cfg->lc_lower_margin)) return (ENXIO); if (lpc_fb_fdt_read(node, "hsync-len", &cfg->lc_hsync_len)) return (ENXIO); if (lpc_fb_fdt_read(node, "vsync-len", &cfg->lc_vsync_len)) return (ENXIO); return (0); } static void lpc_fb_setup(struct lpc_fb_softc *sc) { struct lpc_lcd_config *cfg = &sc->lf_lcd_config; uint32_t bpp; lpc_fb_write_4(sc, LPC_LCD_TIMH, LPC_LCD_TIMH_PPL(cfg->lc_xres) | LPC_LCD_TIMH_HSW(cfg->lc_hsync_len - 1) | LPC_LCD_TIMH_HFP(cfg->lc_right_margin - 1) | LPC_LCD_TIMH_HBP(cfg->lc_left_margin - 1)); lpc_fb_write_4(sc, LPC_LCD_TIMV, LPC_LCD_TIMV_LPP(cfg->lc_yres - 1) | LPC_LCD_TIMV_VSW(cfg->lc_vsync_len - 1) | LPC_LCD_TIMV_VFP(cfg->lc_lower_margin) | 
LPC_LCD_TIMV_VBP(cfg->lc_upper_margin)); /* XXX LPC_LCD_POL_PCD_LO */ lpc_fb_write_4(sc, LPC_LCD_POL, LPC_LCD_POL_IHS | LPC_LCD_POL_IVS | LPC_LCD_POL_CPL(cfg->lc_xres - 1) | LPC_LCD_POL_PCD_LO(4)); lpc_fb_write_4(sc, LPC_LCD_UPBASE, sc->lf_buffer_phys); switch (cfg->lc_bpp) { case 1: bpp = LPC_LCD_CTRL_BPP1; break; case 2: bpp = LPC_LCD_CTRL_BPP2; break; case 4: bpp = LPC_LCD_CTRL_BPP4; break; case 8: bpp = LPC_LCD_CTRL_BPP8; break; case 12: bpp = LPC_LCD_CTRL_BPP12_444; break; case 15: bpp = LPC_LCD_CTRL_BPP16; break; case 16: bpp = LPC_LCD_CTRL_BPP16_565; break; case 24: bpp = LPC_LCD_CTRL_BPP24; break; default: panic("LCD unknown bpp: %d", cfg->lc_bpp); } lpc_fb_write_4(sc, LPC_LCD_CTRL, LPC_LCD_CTRL_LCDVCOMP(1) | LPC_LCD_CTRL_LCDPWR | LPC_LCD_CTRL_BGR | LPC_LCD_CTRL_LCDTFT | LPC_LCD_CTRL_LCDBPP(bpp) | LPC_LCD_CTRL_LCDEN); } static int lpc_fb_open(struct cdev *cdev, int oflags, int devtype, struct thread *td) { struct lpc_fb_softc *sc = cdev->si_drv1; lpc_fb_lock(sc); if (sc->lf_opened) return (EBUSY); sc->lf_opened = 1; lpc_fb_unlock(sc); if (!sc->lf_initialized) { ssd1289_configure(); lpc_fb_setup(sc); lpc_fb_blank(sc); sc->lf_initialized = 1; } return (0); } static int lpc_fb_close(struct cdev *cdev, int fflag, int devtype, struct thread *td) { struct lpc_fb_softc *sc = cdev->si_drv1; lpc_fb_lock(sc); sc->lf_opened = 0; lpc_fb_unlock(sc); return (0); } static int lpc_fb_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int x, struct thread *td) { return (EINVAL); } static int lpc_fb_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct lpc_fb_softc *sc = cdev->si_drv1; *paddr = (vm_paddr_t)(sc->lf_buffer_phys + offset); return (0); } static void lpc_fb_blank(struct lpc_fb_softc *sc) { memset(sc->lf_buffer, 0xffff, sc->lf_buffer_size); } static device_method_t lpc_fb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lpc_fb_probe), DEVMETHOD(device_attach, lpc_fb_attach), { 0, 0 } }; static devclass_t lpc_fb_devclass; static driver_t lpc_fb_driver = { "lpcfb", lpc_fb_methods, sizeof(struct lpc_fb_softc), }; DRIVER_MODULE(lpcfb, simplebus, lpc_fb_driver, lpc_fb_devclass, 0, 0); Index: head/sys/arm/lpc/lpc_gpio.c =================================================================== --- head/sys/arm/lpc/lpc_gpio.c (revision 306901) +++ head/sys/arm/lpc/lpc_gpio.c (revision 306902) @@ -1,573 +1,571 @@ /*- * Copyright (c) 2011 Jakub Wojciech Klama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
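The lpc_fb_fdt_read() helper above is the usual OF_getprop() plus fdt32_to_cpu() pattern: device-tree cells are big-endian 32-bit values and must be byte-swapped on a little-endian host. A self-contained sketch of that conversion, using an invented property blob in place of a real device tree:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t
fdt32_to_host(uint32_t be)
{
	const uint8_t *p = (const uint8_t *)&be;

	/* Assemble the value MSB-first regardless of host endianness. */
	return ((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	    (uint32_t)p[2] << 8 | (uint32_t)p[3]);
}

int
main(void)
{
	/* "horizontal-resolution" cell for 800 pixels, as stored in the blob. */
	const uint8_t prop[4] = { 0x00, 0x00, 0x03, 0x20 };
	uint32_t cell;

	memcpy(&cell, prop, sizeof(cell));
	printf("horizontal-resolution = %u\n", fdt32_to_host(cell));
	return (0);
}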
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * GPIO on LPC32x0 consist of 4 ports: * - Port0 with 8 input/output pins * - Port1 with 24 input/output pins * - Port2 with 13 input/output pins * - Port3 with: * - 26 input pins (GPI_00..GPI_09 + GPI_15..GPI_23 + GPI_25 + GPI_27..GPI_28) * - 24 output pins (GPO_00..GPO_23) * - 6 input/output pins (GPIO_00..GPIO_05) * * Pins are mapped to logical pin number as follows: * [0..9] -> GPI_00..GPI_09 (port 3) * [10..18] -> GPI_15..GPI_23 (port 3) * [19] -> GPI_25 (port 3) * [20..21] -> GPI_27..GPI_28 (port 3) * [22..45] -> GPO_00..GPO_23 (port 3) * [46..51] -> GPIO_00..GPIO_05 (port 3) * [52..64] -> P2.0..P2.12 (port 2) * [65..88] -> P1.0..P1.23 (port 1) * [89..96] -> P0.0..P0.7 (port 0) * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include "gpio_if.h" struct lpc_gpio_softc { device_t lg_dev; device_t lg_busdev; struct resource * lg_res; bus_space_tag_t lg_bst; bus_space_handle_t lg_bsh; }; struct lpc_gpio_pinmap { int lp_start_idx; int lp_pin_count; int lp_port; int lp_start_bit; int lp_flags; }; static const struct lpc_gpio_pinmap lpc_gpio_pins[] = { { 0, 10, 3, 0, GPIO_PIN_INPUT }, { 10, 9, 3, 15, GPIO_PIN_INPUT }, { 19, 1, 3, 25, GPIO_PIN_INPUT }, { 20, 2, 3, 27, GPIO_PIN_INPUT }, { 22, 24, 3, 0, GPIO_PIN_OUTPUT }, /* * -1 below is to mark special case for Port3 GPIO pins, as they * have other bits in Port 3 registers as inputs and as outputs */ { 46, 6, 3, -1, GPIO_PIN_INPUT | GPIO_PIN_OUTPUT }, { 52, 13, 2, 0, GPIO_PIN_INPUT | GPIO_PIN_OUTPUT }, { 65, 24, 1, 0, GPIO_PIN_INPUT | GPIO_PIN_OUTPUT }, { 89, 8, 0, 0, GPIO_PIN_INPUT | GPIO_PIN_OUTPUT }, { -1, -1, -1, -1, -1 }, }; #define LPC_GPIO_NPINS \ (LPC_GPIO_P0_COUNT + LPC_GPIO_P1_COUNT + \ LPC_GPIO_P2_COUNT + LPC_GPIO_P3_COUNT) #define LPC_GPIO_PIN_IDX(_map, _idx) \ (_idx - _map->lp_start_idx) #define LPC_GPIO_PIN_BIT(_map, _idx) \ (_map->lp_start_bit + LPC_GPIO_PIN_IDX(_map, _idx)) static int lpc_gpio_probe(device_t); static int lpc_gpio_attach(device_t); static int lpc_gpio_detach(device_t); static device_t lpc_gpio_get_bus(device_t); static int lpc_gpio_pin_max(device_t, int *); static int lpc_gpio_pin_getcaps(device_t, uint32_t, uint32_t *); static int lpc_gpio_pin_getflags(device_t, uint32_t, uint32_t *); static int lpc_gpio_pin_setflags(device_t, uint32_t, uint32_t); static int lpc_gpio_pin_getname(device_t, uint32_t, char *); static int lpc_gpio_pin_get(device_t, uint32_t, uint32_t *); static int lpc_gpio_pin_set(device_t, uint32_t, uint32_t); static int lpc_gpio_pin_toggle(device_t, uint32_t); static const struct lpc_gpio_pinmap *lpc_gpio_get_pinmap(int); static struct lpc_gpio_softc *lpc_gpio_sc = NULL; #define lpc_gpio_read_4(_sc, _reg) \ bus_space_read_4(_sc->lg_bst, _sc->lg_bsh, _reg) #define lpc_gpio_write_4(_sc, _reg, _val) \ 
bus_space_write_4(_sc->lg_bst, _sc->lg_bsh, _reg, _val) #define lpc_gpio_get_4(_sc, _test, _reg1, _reg2) \ lpc_gpio_read_4(_sc, ((_test) ? _reg1 : _reg2)) #define lpc_gpio_set_4(_sc, _test, _reg1, _reg2, _val) \ lpc_gpio_write_4(_sc, ((_test) ? _reg1 : _reg2), _val) static int lpc_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "lpc,gpio")) return (ENXIO); device_set_desc(dev, "LPC32x0 GPIO"); return (BUS_PROBE_DEFAULT); } static int lpc_gpio_attach(device_t dev) { struct lpc_gpio_softc *sc = device_get_softc(dev); int rid; sc->lg_dev = dev; rid = 0; sc->lg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->lg_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->lg_bst = rman_get_bustag(sc->lg_res); sc->lg_bsh = rman_get_bushandle(sc->lg_res); lpc_gpio_sc = sc; sc->lg_busdev = gpiobus_attach_bus(dev); if (sc->lg_busdev == NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->lg_res); return (ENXIO); } return (0); } static int lpc_gpio_detach(device_t dev) { return (EBUSY); } static device_t lpc_gpio_get_bus(device_t dev) { struct lpc_gpio_softc *sc; sc = device_get_softc(dev); return (sc->lg_busdev); } static int lpc_gpio_pin_max(device_t dev, int *npins) { *npins = LPC_GPIO_NPINS - 1; return (0); } static int lpc_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { const struct lpc_gpio_pinmap *map; if (pin > LPC_GPIO_NPINS) return (ENODEV); map = lpc_gpio_get_pinmap(pin); *caps = map->lp_flags; return (0); } static int lpc_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct lpc_gpio_softc *sc = device_get_softc(dev); const struct lpc_gpio_pinmap *map; uint32_t state; int dir; if (pin > LPC_GPIO_NPINS) return (ENODEV); map = lpc_gpio_get_pinmap(pin); /* Check whether it's bidirectional pin */ if ((map->lp_flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) != (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) { *flags = map->lp_flags; return (0); } switch (map->lp_port) { case 0: state = lpc_gpio_read_4(sc, LPC_GPIO_P0_DIR_STATE); dir = (state & (1 << LPC_GPIO_PIN_BIT(map, pin))); break; case 1: state = lpc_gpio_read_4(sc, LPC_GPIO_P1_DIR_STATE); dir = (state & (1 << LPC_GPIO_PIN_BIT(map, pin))); break; case 2: state = lpc_gpio_read_4(sc, LPC_GPIO_P2_DIR_STATE); dir = (state & (1 << LPC_GPIO_PIN_BIT(map, pin))); break; case 3: state = lpc_gpio_read_4(sc, LPC_GPIO_P2_DIR_STATE); dir = (state & (1 << (25 + LPC_GPIO_PIN_IDX(map, pin)))); break; default: panic("unknown GPIO port"); } *flags = dir ? 
GPIO_PIN_OUTPUT : GPIO_PIN_INPUT; return (0); } static int lpc_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct lpc_gpio_softc *sc = device_get_softc(dev); const struct lpc_gpio_pinmap *map; uint32_t dir, state; if (pin > LPC_GPIO_NPINS) return (ENODEV); map = lpc_gpio_get_pinmap(pin); /* Check whether it's bidirectional pin */ if ((map->lp_flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) != (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) return (ENOTSUP); if (flags & GPIO_PIN_INPUT) dir = 0; if (flags & GPIO_PIN_OUTPUT) dir = 1; switch (map->lp_port) { case 0: state = (1 << LPC_GPIO_PIN_IDX(map, pin)); lpc_gpio_set_4(sc, dir, LPC_GPIO_P0_DIR_SET, LPC_GPIO_P0_DIR_CLR, state); break; case 1: state = (1 << LPC_GPIO_PIN_IDX(map, pin)); lpc_gpio_set_4(sc, dir, LPC_GPIO_P1_DIR_SET, LPC_GPIO_P0_DIR_CLR, state); break; case 2: state = (1 << LPC_GPIO_PIN_IDX(map, pin)); lpc_gpio_set_4(sc, dir, LPC_GPIO_P2_DIR_SET, LPC_GPIO_P0_DIR_CLR, state); break; case 3: state = (1 << (25 + (pin - map->lp_start_idx))); lpc_gpio_set_4(sc, dir, LPC_GPIO_P2_DIR_SET, LPC_GPIO_P0_DIR_CLR, state); break; } return (0); } static int lpc_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { const struct lpc_gpio_pinmap *map; int idx; map = lpc_gpio_get_pinmap(pin); idx = LPC_GPIO_PIN_IDX(map, pin); switch (map->lp_port) { case 0: case 1: case 2: snprintf(name, GPIOMAXNAME - 1, "P%d.%d", map->lp_port, map->lp_start_bit + LPC_GPIO_PIN_IDX(map, pin)); break; case 3: if (map->lp_start_bit == -1) { snprintf(name, GPIOMAXNAME - 1, "GPIO_%02d", idx); break; } snprintf(name, GPIOMAXNAME - 1, "GP%c_%02d", (map->lp_flags & GPIO_PIN_INPUT) ? 'I' : 'O', map->lp_start_bit + idx); break; } return (0); } static int lpc_gpio_pin_get(device_t dev, uint32_t pin, uint32_t *value) { struct lpc_gpio_softc *sc = device_get_softc(dev); const struct lpc_gpio_pinmap *map; uint32_t state, flags; int dir; map = lpc_gpio_get_pinmap(pin); if (lpc_gpio_pin_getflags(dev, pin, &flags)) return (ENXIO); if (flags & GPIO_PIN_OUTPUT) dir = 1; if (flags & GPIO_PIN_INPUT) dir = 0; switch (map->lp_port) { case 0: state = lpc_gpio_get_4(sc, dir, LPC_GPIO_P0_OUTP_STATE, LPC_GPIO_P0_INP_STATE); *value = !!(state & (1 << LPC_GPIO_PIN_BIT(map, pin))); case 1: state = lpc_gpio_get_4(sc, dir, LPC_GPIO_P1_OUTP_STATE, LPC_GPIO_P1_INP_STATE); *value = !!(state & (1 << LPC_GPIO_PIN_BIT(map, pin))); case 2: state = lpc_gpio_read_4(sc, LPC_GPIO_P2_INP_STATE); *value = !!(state & (1 << LPC_GPIO_PIN_BIT(map, pin))); case 3: state = lpc_gpio_get_4(sc, dir, LPC_GPIO_P3_OUTP_STATE, LPC_GPIO_P3_INP_STATE); if (map->lp_start_bit == -1) { if (dir) *value = !!(state & (1 << (25 + LPC_GPIO_PIN_IDX(map, pin)))); else *value = !!(state & (1 << (10 + LPC_GPIO_PIN_IDX(map, pin)))); } *value = !!(state & (1 << LPC_GPIO_PIN_BIT(map, pin))); } return (0); } static int lpc_gpio_pin_set(device_t dev, uint32_t pin, uint32_t value) { struct lpc_gpio_softc *sc = device_get_softc(dev); const struct lpc_gpio_pinmap *map; uint32_t state, flags; map = lpc_gpio_get_pinmap(pin); if (lpc_gpio_pin_getflags(dev, pin, &flags)) return (ENXIO); if ((flags & GPIO_PIN_OUTPUT) == 0) return (EINVAL); state = (1 << LPC_GPIO_PIN_BIT(map, pin)); switch (map->lp_port) { case 0: lpc_gpio_set_4(sc, value, LPC_GPIO_P0_OUTP_SET, LPC_GPIO_P0_OUTP_CLR, state); break; case 1: lpc_gpio_set_4(sc, value, LPC_GPIO_P1_OUTP_SET, LPC_GPIO_P1_OUTP_CLR, state); break; case 2: lpc_gpio_set_4(sc, value, LPC_GPIO_P2_OUTP_SET, LPC_GPIO_P2_OUTP_CLR, state); break; case 3: if (map->lp_start_bit == -1) state = (1 << (25 + 
LPC_GPIO_PIN_IDX(map, pin))); lpc_gpio_set_4(sc, value, LPC_GPIO_P3_OUTP_SET, LPC_GPIO_P3_OUTP_CLR, state); break; } return (0); } static int lpc_gpio_pin_toggle(device_t dev, uint32_t pin) { const struct lpc_gpio_pinmap *map; uint32_t flags; map = lpc_gpio_get_pinmap(pin); if (lpc_gpio_pin_getflags(dev, pin, &flags)) return (ENXIO); if ((flags & GPIO_PIN_OUTPUT) == 0) return (EINVAL); panic("not implemented yet"); return (0); } static const struct lpc_gpio_pinmap * lpc_gpio_get_pinmap(int pin) { const struct lpc_gpio_pinmap *map; for (map = &lpc_gpio_pins[0]; map->lp_start_idx != -1; map++) { if (pin >= map->lp_start_idx && pin < map->lp_start_idx + map->lp_pin_count) return map; } panic("pin number %d out of range", pin); } int lpc_gpio_set_flags(device_t dev, int pin, int flags) { if (lpc_gpio_sc == NULL) return (ENXIO); return lpc_gpio_pin_setflags(lpc_gpio_sc->lg_dev, pin, flags); } int lpc_gpio_set_state(device_t dev, int pin, int state) { if (lpc_gpio_sc == NULL) return (ENXIO); return lpc_gpio_pin_set(lpc_gpio_sc->lg_dev, pin, state); } int lpc_gpio_get_state(device_t dev, int pin, int *state) { if (lpc_gpio_sc == NULL) return (ENXIO); return lpc_gpio_pin_get(lpc_gpio_sc->lg_dev, pin, state); } void lpc_gpio_init() { bus_space_tag_t bst; bus_space_handle_t bsh; bst = fdtbus_bs_tag; /* Preset SPI devices CS pins to one */ bus_space_map(bst, LPC_GPIO_PHYS_BASE, LPC_GPIO_SIZE, 0, &bsh); bus_space_write_4(bst, bsh, LPC_GPIO_P3_OUTP_SET, 1 << (SSD1289_CS_PIN - LPC_GPIO_GPO_00(0)) | 1 << (SSD1289_DC_PIN - LPC_GPIO_GPO_00(0)) | 1 << (ADS7846_CS_PIN - LPC_GPIO_GPO_00(0))); bus_space_unmap(bst, bsh, LPC_GPIO_SIZE); } static device_method_t lpc_gpio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lpc_gpio_probe), DEVMETHOD(device_attach, lpc_gpio_attach), DEVMETHOD(device_detach, lpc_gpio_detach), /* GPIO interface */ DEVMETHOD(gpio_get_bus, lpc_gpio_get_bus), DEVMETHOD(gpio_pin_max, lpc_gpio_pin_max), DEVMETHOD(gpio_pin_getcaps, lpc_gpio_pin_getcaps), DEVMETHOD(gpio_pin_getflags, lpc_gpio_pin_getflags), DEVMETHOD(gpio_pin_setflags, lpc_gpio_pin_setflags), DEVMETHOD(gpio_pin_getname, lpc_gpio_pin_getname), DEVMETHOD(gpio_pin_set, lpc_gpio_pin_set), DEVMETHOD(gpio_pin_get, lpc_gpio_pin_get), DEVMETHOD(gpio_pin_toggle, lpc_gpio_pin_toggle), { 0, 0 } }; static devclass_t lpc_gpio_devclass; static driver_t lpc_gpio_driver = { "lpcgpio", lpc_gpio_methods, sizeof(struct lpc_gpio_softc), }; extern devclass_t gpiobus_devclass, gpioc_devclass; extern driver_t gpiobus_driver, gpioc_driver; DRIVER_MODULE(lpcgpio, simplebus, lpc_gpio_driver, lpc_gpio_devclass, 0, 0); DRIVER_MODULE(gpiobus, lpcgpio, gpiobus_driver, gpiobus_devclass, 0, 0); DRIVER_MODULE(gpioc, lpcgpio, gpioc_driver, gpioc_devclass, 0, 0); MODULE_VERSION(lpcgpio, 1); Index: head/sys/arm/lpc/lpc_mmc.c =================================================================== --- head/sys/arm/lpc/lpc_mmc.c (revision 306901) +++ head/sys/arm/lpc/lpc_mmc.c (revision 306902) @@ -1,779 +1,777 @@ /*- * Copyright (c) 2011 Jakub Wojciech Klama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
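The pin map table in lpc_gpio.c above turns a flat logical pin number into a (port, bit) pair by finding the table entry whose range covers the pin. A userland sketch of that lookup, with an abbreviated copy of lpc_gpio_pins[] and the Port 3 GPIO_00..GPIO_05 special case (start bit -1) left out:

#include <stdio.h>

struct pinmap {
	int start_idx;
	int pin_count;
	int port;
	int start_bit;
};

static const struct pinmap pins[] = {
	{  0, 10, 3,  0 },	/* GPI_00..GPI_09 */
	{ 10,  9, 3, 15 },	/* GPI_15..GPI_23 */
	{ 19,  1, 3, 25 },	/* GPI_25 */
	{ 20,  2, 3, 27 },	/* GPI_27..GPI_28 */
	{ 22, 24, 3,  0 },	/* GPO_00..GPO_23 */
	{ 52, 13, 2,  0 },	/* P2.0..P2.12 */
	{ 65, 24, 1,  0 },	/* P1.0..P1.23 */
	{ 89,  8, 0,  0 },	/* P0.0..P0.7 */
	{ -1, -1, -1, -1 },
};

static int
resolve(int pin, int *port, int *bit)
{
	const struct pinmap *m;

	for (m = pins; m->start_idx != -1; m++) {
		if (pin >= m->start_idx && pin < m->start_idx + m->pin_count) {
			*port = m->port;
			*bit = m->start_bit + (pin - m->start_idx);
			return (0);
		}
	}
	return (-1);
}

int
main(void)
{
	int port, bit;

	if (resolve(70, &port, &bit) == 0)	/* logical pin 70 -> P1.5 */
		printf("pin 70 -> P%d.%d\n", port, bit);
	return (0);
}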
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif struct lpc_mmc_dmamap_arg { bus_addr_t lm_dma_busaddr; }; struct lpc_mmc_softc { device_t lm_dev; struct mtx lm_mtx; struct resource * lm_mem_res; struct resource * lm_irq_res; bus_space_tag_t lm_bst; bus_space_handle_t lm_bsh; void * lm_intrhand; struct mmc_host lm_host; struct mmc_request * lm_req; struct mmc_data * lm_data; uint32_t lm_flags; #define LPC_SD_FLAGS_IGNORECRC (1 << 0) int lm_xfer_direction; #define DIRECTION_READ 0 #define DIRECTION_WRITE 1 int lm_xfer_done; int lm_bus_busy; bus_dma_tag_t lm_dma_tag; bus_dmamap_t lm_dma_map; bus_addr_t lm_buffer_phys; void * lm_buffer; }; #define LPC_SD_MAX_BLOCKSIZE 1024 /* XXX */ #define LPC_MMC_DMACH_READ 1 #define LPC_MMC_DMACH_WRITE 0 static int lpc_mmc_probe(device_t); static int lpc_mmc_attach(device_t); static int lpc_mmc_detach(device_t); static void lpc_mmc_intr(void *); static void lpc_mmc_cmd(struct lpc_mmc_softc *, struct mmc_command *); static void lpc_mmc_setup_xfer(struct lpc_mmc_softc *, struct mmc_data *); static int lpc_mmc_update_ios(device_t, device_t); static int lpc_mmc_request(device_t, device_t, struct mmc_request *); static int lpc_mmc_get_ro(device_t, device_t); static int lpc_mmc_acquire_host(device_t, device_t); static int lpc_mmc_release_host(device_t, device_t); static void lpc_mmc_dma_rxfinish(void *); static void lpc_mmc_dma_rxerror(void *); static void lpc_mmc_dma_txfinish(void *); static void lpc_mmc_dma_txerror(void *); static void lpc_mmc_dmamap_cb(void *, bus_dma_segment_t *, int, int); #define lpc_mmc_lock(_sc) \ mtx_lock(&_sc->lm_mtx); #define lpc_mmc_unlock(_sc) \ mtx_unlock(&_sc->lm_mtx); #define lpc_mmc_read_4(_sc, _reg) \ bus_space_read_4(_sc->lm_bst, _sc->lm_bsh, _reg) #define lpc_mmc_write_4(_sc, _reg, _value) \ bus_space_write_4(_sc->lm_bst, _sc->lm_bsh, _reg, _value) static struct lpc_dmac_channel_config lpc_mmc_dma_rxconf = { .ldc_fcntl = LPC_DMAC_FLOW_D_P2M, .ldc_src_periph = LPC_DMAC_SD_ID, .ldc_src_width = LPC_DMAC_CH_CONTROL_WIDTH_4, .ldc_src_incr = 0, .ldc_src_burst = LPC_DMAC_CH_CONTROL_BURST_8, .ldc_dst_periph = LPC_DMAC_SD_ID, .ldc_dst_width = LPC_DMAC_CH_CONTROL_WIDTH_4, 
.ldc_dst_incr = 1, .ldc_dst_burst = LPC_DMAC_CH_CONTROL_BURST_8, .ldc_success_handler = lpc_mmc_dma_rxfinish, .ldc_error_handler = lpc_mmc_dma_rxerror, }; static struct lpc_dmac_channel_config lpc_mmc_dma_txconf = { .ldc_fcntl = LPC_DMAC_FLOW_P_M2P, .ldc_src_periph = LPC_DMAC_SD_ID, .ldc_src_width = LPC_DMAC_CH_CONTROL_WIDTH_4, .ldc_src_incr = 1, .ldc_src_burst = LPC_DMAC_CH_CONTROL_BURST_8, .ldc_dst_periph = LPC_DMAC_SD_ID, .ldc_dst_width = LPC_DMAC_CH_CONTROL_WIDTH_4, .ldc_dst_incr = 0, .ldc_dst_burst = LPC_DMAC_CH_CONTROL_BURST_8, .ldc_success_handler = lpc_mmc_dma_txfinish, .ldc_error_handler = lpc_mmc_dma_txerror, }; static int lpc_mmc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "lpc,mmc")) return (ENXIO); device_set_desc(dev, "LPC32x0 MMC/SD controller"); return (BUS_PROBE_DEFAULT); } static int lpc_mmc_attach(device_t dev) { struct lpc_mmc_softc *sc = device_get_softc(dev); struct lpc_mmc_dmamap_arg ctx; device_t child; int rid, err; sc->lm_dev = dev; sc->lm_req = NULL; mtx_init(&sc->lm_mtx, "lpcmmc", "mmc", MTX_DEF); rid = 0; sc->lm_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->lm_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->lm_bst = rman_get_bustag(sc->lm_mem_res); sc->lm_bsh = rman_get_bushandle(sc->lm_mem_res); debugf("virtual register space: 0x%08lx\n", sc->lm_bsh); rid = 0; sc->lm_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->lm_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lm_mem_res); return (ENXIO); } if (bus_setup_intr(dev, sc->lm_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, lpc_mmc_intr, sc, &sc->lm_intrhand)) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lm_mem_res); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lm_irq_res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } sc->lm_host.f_min = 312500; sc->lm_host.f_max = 2500000; sc->lm_host.host_ocr = MMC_OCR_300_310 | MMC_OCR_310_320 | MMC_OCR_320_330 | MMC_OCR_330_340; #if 0 sc->lm_host.caps = MMC_CAP_4_BIT_DATA; #endif lpc_pwr_write(dev, LPC_CLKPWR_MS_CTRL, LPC_CLKPWR_MS_CTRL_CLOCK_EN | LPC_CLKPWR_MS_CTRL_SD_CLOCK | 1); lpc_mmc_write_4(sc, LPC_SD_POWER, LPC_SD_POWER_CTRL_ON); device_set_ivars(dev, &sc->lm_host); child = device_add_child(dev, "mmc", -1); if (!child) { device_printf(dev, "attaching MMC bus failed!\n"); bus_teardown_intr(dev, sc->lm_irq_res, sc->lm_intrhand); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lm_mem_res); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lm_irq_res); return (ENXIO); } /* Alloc DMA memory */ err = bus_dma_tag_create( bus_get_dma_tag(sc->lm_dev), 4, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ LPC_SD_MAX_BLOCKSIZE, 1, /* maxsize, nsegments */ LPC_SD_MAX_BLOCKSIZE, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->lm_dma_tag); err = bus_dmamem_alloc(sc->lm_dma_tag, (void **)&sc->lm_buffer, 0, &sc->lm_dma_map); if (err) { device_printf(dev, "cannot allocate framebuffer\n"); goto fail; } err = bus_dmamap_load(sc->lm_dma_tag, sc->lm_dma_map, sc->lm_buffer, LPC_SD_MAX_BLOCKSIZE, lpc_mmc_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (err) { device_printf(dev, "cannot load DMA map\n"); goto fail; } sc->lm_buffer_phys = ctx.lm_dma_busaddr; lpc_mmc_dma_rxconf.ldc_handler_arg = (void *)sc; err = lpc_dmac_config_channel(dev, 
LPC_MMC_DMACH_READ, &lpc_mmc_dma_rxconf); if (err) { device_printf(dev, "cannot allocate RX DMA channel\n"); goto fail; } lpc_mmc_dma_txconf.ldc_handler_arg = (void *)sc; err = lpc_dmac_config_channel(dev, LPC_MMC_DMACH_WRITE, &lpc_mmc_dma_txconf); if (err) { device_printf(dev, "cannot allocate TX DMA channel\n"); goto fail; } bus_generic_probe(dev); bus_generic_attach(dev); return (0); fail: if (sc->lm_intrhand) bus_teardown_intr(dev, sc->lm_irq_res, sc->lm_intrhand); if (sc->lm_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lm_irq_res); if (sc->lm_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lm_mem_res); return (err); } static int lpc_mmc_detach(device_t dev) { return (EBUSY); } static void lpc_mmc_intr(void *arg) { struct lpc_mmc_softc *sc = (struct lpc_mmc_softc *)arg; struct mmc_command *cmd; uint32_t status; status = lpc_mmc_read_4(sc, LPC_SD_STATUS); debugf("interrupt: 0x%08x\n", status); if (status & LPC_SD_STATUS_CMDCRCFAIL) { cmd = sc->lm_req->cmd; cmd->error = sc->lm_flags & LPC_SD_FLAGS_IGNORECRC ? MMC_ERR_NONE : MMC_ERR_BADCRC; cmd->resp[0] = lpc_mmc_read_4(sc, LPC_SD_RESP0); sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_CMDCRCFAIL); } if (status & LPC_SD_STATUS_CMDACTIVE) { debugf("command active\n"); cmd = sc->lm_req->cmd; cmd->resp[0] = lpc_mmc_read_4(sc, LPC_SD_RESP0); sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; } if (status & LPC_SD_STATUS_DATATIMEOUT) { device_printf(sc->lm_dev, "data timeout\n"); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_DATATIMEOUT); } if (status & LPC_SD_STATUS_TXUNDERRUN) { device_printf(sc->lm_dev, "TX underrun\n"); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_TXUNDERRUN); } if (status & LPC_SD_STATUS_CMDRESPEND) { debugf("command response\n"); cmd = sc->lm_req->cmd; if (cmd->flags & MMC_RSP_136) { cmd->resp[3] = lpc_mmc_read_4(sc, LPC_SD_RESP3); cmd->resp[2] = lpc_mmc_read_4(sc, LPC_SD_RESP2); cmd->resp[1] = lpc_mmc_read_4(sc, LPC_SD_RESP1); } cmd->resp[0] = lpc_mmc_read_4(sc, LPC_SD_RESP0); cmd->error = MMC_ERR_NONE; if (cmd->data && (cmd->data->flags & MMC_DATA_WRITE)) lpc_mmc_setup_xfer(sc, sc->lm_req->cmd->data); if (!cmd->data) { sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; } lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_CMDRESPEND); } if (status & LPC_SD_STATUS_CMDSENT) { debugf("command sent\n"); cmd = sc->lm_req->cmd; cmd->error = MMC_ERR_NONE; sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_CMDSENT); } if (status & LPC_SD_STATUS_DATAEND) { if (sc->lm_xfer_direction == DIRECTION_READ) lpc_dmac_start_burst(sc->lm_dev, LPC_DMAC_SD_ID); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_DATAEND); } if (status & LPC_SD_STATUS_CMDTIMEOUT) { device_printf(sc->lm_dev, "command response timeout\n"); cmd = sc->lm_req->cmd; cmd->error = MMC_ERR_TIMEOUT; sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_CMDTIMEOUT); return; } if (status & LPC_SD_STATUS_STARTBITERR) { device_printf(sc->lm_dev, "start bit error\n"); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_STARTBITERR); } if (status & LPC_SD_STATUS_DATACRCFAIL) { device_printf(sc->lm_dev, "data CRC error\n"); debugf("data buffer: %p\n", sc->lm_buffer); cmd = sc->lm_req->cmd; cmd->error = MMC_ERR_BADCRC; sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; if (sc->lm_xfer_direction == DIRECTION_READ) lpc_dmac_start_burst(sc->lm_dev, LPC_DMAC_SD_ID); lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_DATACRCFAIL); } if 
(status & LPC_SD_STATUS_DATABLOCKEND) { debugf("data block end\n"); if (sc->lm_xfer_direction == DIRECTION_READ) memcpy(sc->lm_data->data, sc->lm_buffer, sc->lm_data->len); if (sc->lm_xfer_direction == DIRECTION_WRITE) { lpc_dmac_disable_channel(sc->lm_dev, LPC_MMC_DMACH_WRITE); lpc_mmc_write_4(sc, LPC_SD_DATACTRL, 0); } sc->lm_req->done(sc->lm_req); sc->lm_req = NULL; lpc_mmc_write_4(sc, LPC_SD_CLEAR, LPC_SD_STATUS_DATABLOCKEND); } debugf("done\n"); } static int lpc_mmc_request(device_t bus, device_t child, struct mmc_request *req) { struct lpc_mmc_softc *sc = device_get_softc(bus); debugf("request: %p\n", req); lpc_mmc_lock(sc); if (sc->lm_req) return (EBUSY); sc->lm_req = req; if (req->cmd->data && req->cmd->data->flags & MMC_DATA_WRITE) { memcpy(sc->lm_buffer, req->cmd->data->data, req->cmd->data->len); lpc_mmc_cmd(sc, req->cmd); lpc_mmc_unlock(sc); return (0); } if (req->cmd->data) lpc_mmc_setup_xfer(sc, req->cmd->data); lpc_mmc_cmd(sc, req->cmd); lpc_mmc_unlock(sc); return (0); } static void lpc_mmc_cmd(struct lpc_mmc_softc *sc, struct mmc_command *cmd) { uint32_t cmdreg = 0; debugf("cmd: %d arg: 0x%08x\n", cmd->opcode, cmd->arg); if (lpc_mmc_read_4(sc, LPC_SD_COMMAND) & LPC_SD_COMMAND_ENABLE) { lpc_mmc_write_4(sc, LPC_SD_COMMAND, 0); DELAY(1000); } sc->lm_flags &= ~LPC_SD_FLAGS_IGNORECRC; if (cmd->flags & MMC_RSP_PRESENT) cmdreg |= LPC_SD_COMMAND_RESPONSE; if (MMC_RSP(cmd->flags) == MMC_RSP_R2) cmdreg |= LPC_SD_COMMAND_LONGRSP; if (MMC_RSP(cmd->flags) == MMC_RSP_R3) sc->lm_flags |= LPC_SD_FLAGS_IGNORECRC; cmdreg |= LPC_SD_COMMAND_ENABLE; cmdreg |= (cmd->opcode & LPC_SD_COMMAND_CMDINDEXMASK); lpc_mmc_write_4(sc, LPC_SD_MASK0, 0xffffffff); lpc_mmc_write_4(sc, LPC_SD_MASK1, 0xffffffff); lpc_mmc_write_4(sc, LPC_SD_ARGUMENT, cmd->arg); lpc_mmc_write_4(sc, LPC_SD_COMMAND, cmdreg); } static void lpc_mmc_setup_xfer(struct lpc_mmc_softc *sc, struct mmc_data *data) { uint32_t datactrl = 0; int data_words = data->len / 4; sc->lm_data = data; sc->lm_xfer_done = 0; debugf("data: %p, len: %d, %s\n", data, data->len, (data->flags & MMC_DATA_READ) ? "read" : "write"); if (data->flags & MMC_DATA_READ) { sc->lm_xfer_direction = DIRECTION_READ; lpc_dmac_setup_transfer(sc->lm_dev, LPC_MMC_DMACH_READ, LPC_SD_PHYS_BASE + LPC_SD_FIFO, sc->lm_buffer_phys, data_words, 0); } if (data->flags & MMC_DATA_WRITE) { sc->lm_xfer_direction = DIRECTION_WRITE; lpc_dmac_setup_transfer(sc->lm_dev, LPC_MMC_DMACH_WRITE, sc->lm_buffer_phys, LPC_SD_PHYS_BASE + LPC_SD_FIFO, data_words, 0); } datactrl |= (sc->lm_xfer_direction ? 
LPC_SD_DATACTRL_WRITE : LPC_SD_DATACTRL_READ); datactrl |= LPC_SD_DATACTRL_DMAENABLE | LPC_SD_DATACTRL_ENABLE; datactrl |= (ffs(data->len) - 1) << 4; debugf("datactrl: 0x%08x\n", datactrl); lpc_mmc_write_4(sc, LPC_SD_DATATIMER, 0xFFFF0000); lpc_mmc_write_4(sc, LPC_SD_DATALENGTH, data->len); lpc_mmc_write_4(sc, LPC_SD_DATACTRL, datactrl); } static int lpc_mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct lpc_mmc_softc *sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->lm_host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->lm_host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->lm_host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->lm_host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->lm_host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->lm_host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->lm_host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->lm_host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->lm_host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->lm_host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->lm_host.ios.vdd; break; case MMCBR_IVAR_CAPS: *(int *)result = sc->lm_host.caps; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = 1; break; } return (0); } static int lpc_mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct lpc_mmc_softc *sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->lm_host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->lm_host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->lm_host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->lm_host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->lm_host.mode = value; break; case MMCBR_IVAR_OCR: sc->lm_host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->lm_host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->lm_host.ios.vdd = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static int lpc_mmc_update_ios(device_t bus, device_t child) { struct lpc_mmc_softc *sc = device_get_softc(bus); struct mmc_ios *ios = &sc->lm_host.ios; uint32_t clkdiv = 0, pwr = 0; if (ios->bus_width == bus_width_4) clkdiv |= LPC_SD_CLOCK_WIDEBUS; /* Calculate clock divider */ clkdiv = (LPC_SD_CLK / (2 * ios->clock)) - 1; /* Clock rate should not exceed rate requested in ios */ if ((LPC_SD_CLK / (2 * (clkdiv + 1))) > ios->clock) clkdiv++; debugf("clock: %dHz, clkdiv: %d\n", ios->clock, clkdiv); if (ios->bus_width == bus_width_4) { debugf("using wide bus mode\n"); clkdiv |= LPC_SD_CLOCK_WIDEBUS; } lpc_mmc_write_4(sc, LPC_SD_CLOCK, clkdiv | LPC_SD_CLOCK_ENABLE); switch (ios->power_mode) { case power_off: pwr |= LPC_SD_POWER_CTRL_OFF; break; case power_up: pwr |= LPC_SD_POWER_CTRL_UP; break; case power_on: pwr |= LPC_SD_POWER_CTRL_ON; break; } if (ios->bus_mode == opendrain) pwr |= LPC_SD_POWER_OPENDRAIN; lpc_mmc_write_4(sc, LPC_SD_POWER, pwr); return (0); } static int lpc_mmc_get_ro(device_t bus, device_t child) { return (0); } static int lpc_mmc_acquire_host(device_t bus, device_t child) { struct lpc_mmc_softc *sc = device_get_softc(bus); int error = 0; lpc_mmc_lock(sc); while (sc->lm_bus_busy) error = mtx_sleep(sc, 
&sc->lm_mtx, PZERO, "mmcah", 0); sc->lm_bus_busy++; lpc_mmc_unlock(sc); return (error); } static int lpc_mmc_release_host(device_t bus, device_t child) { struct lpc_mmc_softc *sc = device_get_softc(bus); lpc_mmc_lock(sc); sc->lm_bus_busy--; wakeup(sc); lpc_mmc_unlock(sc); return (0); } static void lpc_mmc_dma_rxfinish(void *arg) { } static void lpc_mmc_dma_rxerror(void *arg) { struct lpc_mmc_softc *sc = (struct lpc_mmc_softc *)arg; device_printf(sc->lm_dev, "DMA RX error\n"); } static void lpc_mmc_dma_txfinish(void *arg) { } static void lpc_mmc_dma_txerror(void *arg) { struct lpc_mmc_softc *sc = (struct lpc_mmc_softc *)arg; device_printf(sc->lm_dev, "DMA TX error\n"); } static void lpc_mmc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { struct lpc_mmc_dmamap_arg *ctx; if (err) return; ctx = (struct lpc_mmc_dmamap_arg *)arg; ctx->lm_dma_busaddr = segs[0].ds_addr; } static device_method_t lpc_mmc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lpc_mmc_probe), DEVMETHOD(device_attach, lpc_mmc_attach), DEVMETHOD(device_detach, lpc_mmc_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, lpc_mmc_read_ivar), DEVMETHOD(bus_write_ivar, lpc_mmc_write_ivar), DEVMETHOD(bus_print_child, bus_generic_print_child), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, lpc_mmc_update_ios), DEVMETHOD(mmcbr_request, lpc_mmc_request), DEVMETHOD(mmcbr_get_ro, lpc_mmc_get_ro), DEVMETHOD(mmcbr_acquire_host, lpc_mmc_acquire_host), DEVMETHOD(mmcbr_release_host, lpc_mmc_release_host), { 0, 0 } }; static devclass_t lpc_mmc_devclass; static driver_t lpc_mmc_driver = { "lpcmmc", lpc_mmc_methods, sizeof(struct lpc_mmc_softc), }; DRIVER_MODULE(lpcmmc, simplebus, lpc_mmc_driver, lpc_mmc_devclass, 0, 0); DRIVER_MODULE(mmc, lpcmmc, mmc_driver, mmc_devclass, NULL, NULL); MODULE_DEPEND(lpcmmc, mmc, 1, 1, 1); Index: head/sys/arm/lpc/lpc_spi.c =================================================================== --- head/sys/arm/lpc/lpc_spi.c (revision 306901) +++ head/sys/arm/lpc/lpc_spi.c (revision 306902) @@ -1,198 +1,196 @@ /*- * Copyright (c) 2011 Jakub Wojciech Klama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
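The divider math in lpc_mmc_update_ios() above derives clkdiv from the controller input clock so that the SD bus never runs faster than the rate requested in ios. A userland sketch of that calculation; the 13 MHz input clock is only an assumed example, the real value is LPC_SD_CLK.

#include <stdio.h>

static unsigned
sd_clkdiv(unsigned input_hz, unsigned target_hz)
{
	unsigned clkdiv;

	/* Bus clock is input / (2 * (clkdiv + 1)). */
	clkdiv = input_hz / (2 * target_hz) - 1;
	/* Bump the divider if the result would still exceed the request. */
	if (input_hz / (2 * (clkdiv + 1)) > target_hz)
		clkdiv++;
	return (clkdiv);
}

int
main(void)
{
	const unsigned input = 13000000;		/* assumed input clock */
	const unsigned targets[2] = { 400000, 2500000 };
	unsigned div;
	int i;

	for (i = 0; i < 2; i++) {
		div = sd_clkdiv(input, targets[i]);
		printf("target %7u Hz -> clkdiv %u -> actual %u Hz\n",
		    targets[i], div, input / (2 * (div + 1)));
	}
	return (0);
}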
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include "spibus_if.h" struct lpc_spi_softc { device_t ls_dev; struct resource * ls_mem_res; struct resource * ls_irq_res; bus_space_tag_t ls_bst; bus_space_handle_t ls_bsh; }; static int lpc_spi_probe(device_t); static int lpc_spi_attach(device_t); static int lpc_spi_detach(device_t); static int lpc_spi_transfer(device_t, device_t, struct spi_command *); #define lpc_spi_read_4(_sc, _reg) \ bus_space_read_4(_sc->ls_bst, _sc->ls_bsh, _reg) #define lpc_spi_write_4(_sc, _reg, _val) \ bus_space_write_4(_sc->ls_bst, _sc->ls_bsh, _reg, _val) static int lpc_spi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "lpc,spi")) return (ENXIO); device_set_desc(dev, "LPC32x0 PL022 SPI/SSP controller"); return (BUS_PROBE_DEFAULT); } static int lpc_spi_attach(device_t dev) { struct lpc_spi_softc *sc = device_get_softc(dev); int rid; sc->ls_dev = dev; rid = 0; sc->ls_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->ls_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->ls_bst = rman_get_bustag(sc->ls_mem_res); sc->ls_bsh = rman_get_bushandle(sc->ls_mem_res); rid = 0; sc->ls_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->ls_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } bus_space_write_4(sc->ls_bst, 0xd0028100, 0, (1<<12)|(1<<10)|(1<<9)|(1<<8)|(1<<6)|(1<<5)); lpc_pwr_write(dev, LPC_CLKPWR_SSP_CTRL, LPC_CLKPWR_SSP_CTRL_SSP0EN); lpc_spi_write_4(sc, LPC_SSP_CR0, LPC_SSP_CR0_DSS(8)); lpc_spi_write_4(sc, LPC_SSP_CR1, LPC_SSP_CR1_SSE); lpc_spi_write_4(sc, LPC_SSP_CPSR, 128); device_add_child(dev, "spibus", 0); return (bus_generic_attach(dev)); } static int lpc_spi_detach(device_t dev) { return (EBUSY); } static int lpc_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct lpc_spi_softc *sc = device_get_softc(dev); struct spibus_ivar *devi = SPIBUS_IVAR(child); uint8_t *in_buf, *out_buf; int i; /* Set CS active */ lpc_gpio_set_state(child, devi->cs, 0); /* Wait for FIFO to be ready */ while ((lpc_spi_read_4(sc, LPC_SSP_SR) & LPC_SSP_SR_TNF) == 0); /* Command */ in_buf = cmd->rx_cmd; out_buf = cmd->tx_cmd; for (i = 0; i < cmd->tx_cmd_sz; i++) { lpc_spi_write_4(sc, LPC_SSP_DR, out_buf[i]); in_buf[i] = lpc_spi_read_4(sc, LPC_SSP_DR); } /* Data */ in_buf = cmd->rx_data; out_buf = cmd->tx_data; for (i = 0; i < cmd->tx_data_sz; i++) { lpc_spi_write_4(sc, LPC_SSP_DR, out_buf[i]); in_buf[i] = lpc_spi_read_4(sc, LPC_SSP_DR); } /* Set CS inactive */ lpc_gpio_set_state(child, devi->cs, 1); return (0); } static device_method_t lpc_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lpc_spi_probe), DEVMETHOD(device_attach, lpc_spi_attach), DEVMETHOD(device_detach, lpc_spi_detach), /* SPI interface */ DEVMETHOD(spibus_transfer, lpc_spi_transfer), { 0, 0 } }; static devclass_t lpc_spi_devclass; static driver_t lpc_spi_driver = { "spi", lpc_spi_methods, sizeof(struct lpc_spi_softc), }; DRIVER_MODULE(lpcspi, simplebus, lpc_spi_driver, lpc_spi_devclass, 0, 0); Index: head/sys/arm/mv/mpic.c =================================================================== --- head/sys/arm/mv/mpic.c (revision 306901) +++ 
head/sys/arm/mv/mpic.c (revision 306902) @@ -1,641 +1,640 @@ /*- * Copyright (c) 2006 Benno Rice. * Copyright (C) 2007-2011 MARVELL INTERNATIONAL LTD. * Copyright (c) 2012 Semihalf. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: FreeBSD: //depot/projects/arm/src/sys/arm/xscale/pxa2x0/pxa2x0_icu.c, rev 1 * from: FreeBSD: src/sys/arm/mv/ic.c,v 1.5 2011/02/08 01:49:30 */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #ifdef INTRNG #include "pic_if.h" #endif #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) 
#endif #define MPIC_INT_ERR 4 #define MPIC_INT_MSI 96 #define MPIC_IRQ_MASK 0x3ff #define MPIC_CTRL 0x0 #define MPIC_SOFT_INT 0x4 #define MPIC_SOFT_INT_DRBL1 (1 << 5) #define MPIC_ERR_CAUSE 0x20 #define MPIC_ISE 0x30 #define MPIC_ICE 0x34 #define MPIC_INT_CTL(irq) (0x100 + (irq)*4) #define MPIC_INT_IRQ_FIQ_MASK(cpuid) (0x101 << (cpuid)) #define MPIC_CTRL_NIRQS(ctrl) (((ctrl) >> 2) & 0x3ff) #define MPIC_IN_DRBL 0x08 #define MPIC_IN_DRBL_MASK 0x0c #define MPIC_PPI_CAUSE 0x10 #define MPIC_CTP 0x40 #define MPIC_IIACK 0x44 #define MPIC_ISM 0x48 #define MPIC_ICM 0x4c #define MPIC_ERR_MASK 0xe50 #define MPIC_PPI 32 #ifdef INTRNG struct mv_mpic_irqsrc { struct intr_irqsrc mmi_isrc; u_int mmi_irq; }; #endif struct mv_mpic_softc { device_t sc_dev; struct resource * mpic_res[4]; bus_space_tag_t mpic_bst; bus_space_handle_t mpic_bsh; bus_space_tag_t cpu_bst; bus_space_handle_t cpu_bsh; bus_space_tag_t drbl_bst; bus_space_handle_t drbl_bsh; struct mtx mtx; #ifdef INTRNG struct mv_mpic_irqsrc * mpic_isrcs; #endif int nirqs; void * intr_hand; }; static struct resource_spec mv_mpic_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_MEMORY, 2, RF_ACTIVE | RF_OPTIONAL }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, { -1, 0 } }; static struct ofw_compat_data compat_data[] = { {"mrvl,mpic", true}, {"marvell,mpic", true}, {NULL, false} }; static struct mv_mpic_softc *mv_mpic_sc = NULL; void mpic_send_ipi(int cpus, u_int ipi); static int mv_mpic_probe(device_t); static int mv_mpic_attach(device_t); uint32_t mv_mpic_get_cause(void); uint32_t mv_mpic_get_cause_err(void); uint32_t mv_mpic_get_msi(void); static void mpic_unmask_irq(uintptr_t nb); static void mpic_mask_irq(uintptr_t nb); static void mpic_mask_irq_err(uintptr_t nb); static void mpic_unmask_irq_err(uintptr_t nb); static int mpic_intr(void *arg); static void mpic_unmask_msi(void); #ifndef INTRNG static void arm_mask_irq_err(uintptr_t); static void arm_unmask_irq_err(uintptr_t); #endif #define MPIC_WRITE(softc, reg, val) \ bus_space_write_4((softc)->mpic_bst, (softc)->mpic_bsh, (reg), (val)) #define MPIC_READ(softc, reg) \ bus_space_read_4((softc)->mpic_bst, (softc)->mpic_bsh, (reg)) #define MPIC_CPU_WRITE(softc, reg, val) \ bus_space_write_4((softc)->cpu_bst, (softc)->cpu_bsh, (reg), (val)) #define MPIC_CPU_READ(softc, reg) \ bus_space_read_4((softc)->cpu_bst, (softc)->cpu_bsh, (reg)) #define MPIC_DRBL_WRITE(softc, reg, val) \ bus_space_write_4((softc)->drbl_bst, (softc)->drbl_bsh, (reg), (val)) #define MPIC_DRBL_READ(softc, reg) \ bus_space_read_4((softc)->drbl_bst, (softc)->drbl_bsh, (reg)) static int mv_mpic_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Marvell Integrated Interrupt Controller"); return (0); } #ifdef INTRNG static int mv_mpic_register_isrcs(struct mv_mpic_softc *sc) { int error; uint32_t irq; struct intr_irqsrc *isrc; const char *name; sc->mpic_isrcs = malloc(sc->nirqs * sizeof (*sc->mpic_isrcs), M_DEVBUF, M_WAITOK | M_ZERO); name = device_get_nameunit(sc->sc_dev); for (irq = 0; irq < sc->nirqs; irq++) { sc->mpic_isrcs[irq].mmi_irq = irq; isrc = &sc->mpic_isrcs[irq].mmi_isrc; if (irq < MPIC_PPI) { error = intr_isrc_register(isrc, sc->sc_dev, INTR_ISRCF_PPI, "%s", name); } else { error = intr_isrc_register(isrc, sc->sc_dev, 0, "%s", name); } if (error != 0) { /* XXX call intr_isrc_deregister() */ device_printf(sc->sc_dev, "%s failed", __func__); return (error); } } return (0); } 
#endif static int mv_mpic_attach(device_t dev) { struct mv_mpic_softc *sc; int error; uint32_t val; sc = (struct mv_mpic_softc *)device_get_softc(dev); if (mv_mpic_sc != NULL) return (ENXIO); mv_mpic_sc = sc; sc->sc_dev = dev; mtx_init(&sc->mtx, "MPIC lock", NULL, MTX_SPIN); error = bus_alloc_resources(dev, mv_mpic_spec, sc->mpic_res); if (error) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } #ifdef INTRNG if (sc->mpic_res[3] == NULL) device_printf(dev, "No interrupt to use.\n"); else bus_setup_intr(dev, sc->mpic_res[3], INTR_TYPE_CLK, mpic_intr, NULL, sc, &sc->intr_hand); #endif sc->mpic_bst = rman_get_bustag(sc->mpic_res[0]); sc->mpic_bsh = rman_get_bushandle(sc->mpic_res[0]); sc->cpu_bst = rman_get_bustag(sc->mpic_res[1]); sc->cpu_bsh = rman_get_bushandle(sc->mpic_res[1]); if (sc->mpic_res[2] != NULL) { /* This is required only if MSIs are used. */ sc->drbl_bst = rman_get_bustag(sc->mpic_res[2]); sc->drbl_bsh = rman_get_bushandle(sc->mpic_res[2]); } bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_CTRL, 1); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_CTP, 0); val = MPIC_READ(mv_mpic_sc, MPIC_CTRL); sc->nirqs = MPIC_CTRL_NIRQS(val); #ifdef INTRNG if (mv_mpic_register_isrcs(sc) != 0) { device_printf(dev, "could not register PIC ISRCs\n"); bus_release_resources(dev, mv_mpic_spec, sc->mpic_res); return (ENXIO); } if (intr_pic_register(dev, OF_xref_from_device(dev)) == NULL) { device_printf(dev, "could not register PIC\n"); bus_release_resources(dev, mv_mpic_spec, sc->mpic_res); return (ENXIO); } #endif mpic_unmask_msi(); return (0); } #ifdef INTRNG static int mpic_intr(void *arg) { struct mv_mpic_softc *sc; uint32_t cause, irqsrc; unsigned int irq; u_int cpuid; sc = arg; cpuid = PCPU_GET(cpuid); irq = 0; for (cause = MPIC_CPU_READ(sc, MPIC_PPI_CAUSE); cause > 0; cause >>= 1, irq++) { if (cause & 1) { irqsrc = MPIC_READ(sc, MPIC_INT_CTL(irq)); if ((irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid)) == 0) continue; if (intr_isrc_dispatch(&sc->mpic_isrcs[irq].mmi_isrc, curthread->td_intr_frame) != 0) { mpic_mask_irq(irq); device_printf(sc->sc_dev, "Stray irq %u " "disabled\n", irq); } } } return (FILTER_HANDLED); } static void mpic_disable_intr(device_t dev, struct intr_irqsrc *isrc) { u_int irq; irq = ((struct mv_mpic_irqsrc *)isrc)->mmi_irq; mpic_mask_irq(irq); } static void mpic_enable_intr(device_t dev, struct intr_irqsrc *isrc) { u_int irq; irq = ((struct mv_mpic_irqsrc *)isrc)->mmi_irq; mpic_unmask_irq(irq); } static int mpic_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct intr_map_data_fdt *daf; struct mv_mpic_softc *sc; if (data->type != INTR_MAP_DATA_FDT) return (ENOTSUP); sc = device_get_softc(dev); daf = (struct intr_map_data_fdt *)data; if (daf->ncells !=1 || daf->cells[0] >= sc->nirqs) return (EINVAL); *isrcp = &sc->mpic_isrcs[daf->cells[0]].mmi_isrc; return (0); } static void mpic_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { mpic_disable_intr(dev, isrc); } static void mpic_post_ithread(device_t dev, struct intr_irqsrc *isrc) { mpic_enable_intr(dev, isrc); } static void mpic_post_filter(device_t dev, struct intr_irqsrc *isrc) { } #endif static device_method_t mv_mpic_methods[] = { DEVMETHOD(device_probe, mv_mpic_probe), DEVMETHOD(device_attach, mv_mpic_attach), #ifdef INTRNG DEVMETHOD(pic_disable_intr, mpic_disable_intr), DEVMETHOD(pic_enable_intr, mpic_enable_intr), DEVMETHOD(pic_map_intr, mpic_map_intr), DEVMETHOD(pic_post_filter, mpic_post_filter), DEVMETHOD(pic_post_ithread, mpic_post_ithread), 
DEVMETHOD(pic_pre_ithread, mpic_pre_ithread), #endif { 0, 0 } }; static driver_t mv_mpic_driver = { "mpic", mv_mpic_methods, sizeof(struct mv_mpic_softc), }; static devclass_t mv_mpic_devclass; EARLY_DRIVER_MODULE(mpic, simplebus, mv_mpic_driver, mv_mpic_devclass, 0, 0, BUS_PASS_INTERRUPT); #ifndef INTRNG int arm_get_next_irq(int last) { u_int irq, next = -1; irq = mv_mpic_get_cause() & MPIC_IRQ_MASK; CTR2(KTR_INTR, "%s: irq:%#x", __func__, irq); if (irq != MPIC_IRQ_MASK) { if (irq == MPIC_INT_ERR) irq = mv_mpic_get_cause_err(); if (irq == MPIC_INT_MSI) irq = mv_mpic_get_msi(); next = irq; } CTR3(KTR_INTR, "%s: last=%d, next=%d", __func__, last, next); return (next); } /* * XXX We can make arm_enable_irq to operate on ICE and then mask/unmask only * by ISM/ICM and remove access to ICE in masking operation */ void arm_mask_irq(uintptr_t nb) { mpic_mask_irq(nb); } static void arm_mask_irq_err(uintptr_t nb) { mpic_mask_irq_err(nb); } void arm_unmask_irq(uintptr_t nb) { mpic_unmask_irq(nb); } void arm_unmask_irq_err(uintptr_t nb) { mpic_unmask_irq_err(nb); } #endif static void mpic_unmask_msi(void) { mpic_unmask_irq(MPIC_INT_MSI); } static void mpic_unmask_irq_err(uintptr_t nb) { uint32_t mask; uint8_t bit_off; bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_ISE, MPIC_INT_ERR); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ICM, MPIC_INT_ERR); bit_off = nb - ERR_IRQ; mask = MPIC_CPU_READ(mv_mpic_sc, MPIC_ERR_MASK); mask |= (1 << bit_off); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ERR_MASK, mask); } static void mpic_mask_irq_err(uintptr_t nb) { uint32_t mask; uint8_t bit_off; bit_off = nb - ERR_IRQ; mask = MPIC_CPU_READ(mv_mpic_sc, MPIC_ERR_MASK); mask &= ~(1 << bit_off); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ERR_MASK, mask); } static void mpic_unmask_irq(uintptr_t nb) { if (nb < ERR_IRQ) { bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_ISE, nb); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ICM, nb); } else if (nb < MSI_IRQ) mpic_unmask_irq_err(nb); if (nb == 0) MPIC_CPU_WRITE(mv_mpic_sc, MPIC_IN_DRBL_MASK, 0xffffffff); } static void mpic_mask_irq(uintptr_t nb) { if (nb < ERR_IRQ) { bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_ICE, nb); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ISM, nb); } else if (nb < MSI_IRQ) mpic_mask_irq_err(nb); } uint32_t mv_mpic_get_cause(void) { return (MPIC_CPU_READ(mv_mpic_sc, MPIC_IIACK)); } uint32_t mv_mpic_get_cause_err(void) { uint32_t err_cause; uint8_t bit_off; err_cause = bus_space_read_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_ERR_CAUSE); if (err_cause) bit_off = ffs(err_cause) - 1; else return (-1); debugf("%s: irq:%x cause:%x\n", __func__, bit_off, err_cause); return (ERR_IRQ + bit_off); } uint32_t mv_mpic_get_msi(void) { uint32_t cause; uint8_t bit_off; KASSERT(mv_mpic_sc->drbl_bst != NULL, ("No doorbell in mv_mpic_get_msi")); cause = MPIC_DRBL_READ(mv_mpic_sc, 0); if (cause) bit_off = ffs(cause) - 1; else return (-1); debugf("%s: irq:%x cause:%x\n", __func__, bit_off, cause); cause &= ~(1 << bit_off); MPIC_DRBL_WRITE(mv_mpic_sc, 0, cause); return (MSI_IRQ + bit_off); } int mv_msi_data(int irq, uint64_t *addr, uint32_t *data) { u_long phys, base, size; phandle_t node; int error; node = ofw_bus_get_node(mv_mpic_sc->sc_dev); /* Get physical address of register space */ error = fdt_get_range(OF_parent(node), 0, &phys, &size); if (error) { printf("%s: Cannot get register physical address, err:%d", __func__, error); return (error); } /* Get offset of MPIC register space */ error = fdt_regsize(node, &base, &size); if (error) { printf("%s: Cannot 
get MPIC register offset, err:%d", __func__, error); return (error); } *addr = phys + base + MPIC_SOFT_INT; *data = MPIC_SOFT_INT_DRBL1 | irq; return (0); } #if defined(SMP) && defined(SOC_MV_ARMADAXP) void intr_pic_init_secondary(void) { } void pic_ipi_send(cpuset_t cpus, u_int ipi) { uint32_t val, i; val = 0x00000000; for (i = 0; i < MAXCPU; i++) if (CPU_ISSET(i, &cpus)) val |= (1 << (8 + i)); val |= ipi; bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_SOFT_INT, val); } int pic_ipi_read(int i __unused) { uint32_t val; int ipi; val = MPIC_CPU_READ(mv_mpic_sc, MPIC_IN_DRBL); if (val) { ipi = ffs(val) - 1; MPIC_CPU_WRITE(mv_mpic_sc, MPIC_IN_DRBL, ~(1 << ipi)); return (ipi); } return (0x3ff); } void pic_ipi_clear(int ipi) { } #endif Index: head/sys/arm/rockchip/rk30xx_gpio.c =================================================================== --- head/sys/arm/rockchip/rk30xx_gpio.c (revision 306901) +++ head/sys/arm/rockchip/rk30xx_gpio.c (revision 306902) @@ -1,634 +1,632 @@ /*- * Copyright (c) 2013 Ganbold Tsagaankhuu * Copyright (c) 2012 Oleksandr Tymoshenko * Copyright (c) 2012 Luiz Otavio O Souza. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include "gpio_if.h" #include "rk30xx_grf.h" #include "rk30xx_pmu.h" /* * RK3188 has 4 banks of gpio. * 32 pins per bank * PA0 - PA7 | PB0 - PB7 * PC0 - PC7 | PD0 - PD7 */ #define RK30_GPIO_PINS 32 #define RK30_GPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \ GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN) #define RK30_GPIO_NONE 0 #define RK30_GPIO_PULLUP 1 #define RK30_GPIO_PULLDOWN 2 struct rk30_gpio_softc { device_t sc_dev; device_t sc_busdev; struct mtx sc_mtx; struct resource * sc_mem_res; struct resource * sc_irq_res; bus_space_tag_t sc_bst; bus_space_handle_t sc_bsh; void * sc_intrhand; int sc_bank; int sc_gpio_npins; struct gpio_pin sc_gpio_pins[RK30_GPIO_PINS]; }; /* We use our base address to find out our bank number. 
*/ static unsigned long rk30_gpio_base_addr[4] = { 0x2000a000, 0x2003c000, 0x2003e000, 0x20080000 }; static struct rk30_gpio_softc *rk30_gpio_sc = NULL; typedef int (*gpios_phandler_t)(phandle_t, pcell_t *, int); struct gpio_ctrl_entry { const char *compat; gpios_phandler_t handler; }; int rk30_gpios_prop_handle(phandle_t ctrl, pcell_t *gpios, int len); static int rk30_gpio_init(void); struct gpio_ctrl_entry gpio_controllers[] = { { "rockchip,rk30xx-gpio", &rk30_gpios_prop_handle }, { "rockchip,rk30xx-gpio", &rk30_gpios_prop_handle }, { "rockchip,rk30xx-gpio", &rk30_gpios_prop_handle }, { "rockchip,rk30xx-gpio", &rk30_gpios_prop_handle }, { NULL, NULL } }; #define RK30_GPIO_LOCK(_sc) mtx_lock(&_sc->sc_mtx) #define RK30_GPIO_UNLOCK(_sc) mtx_unlock(&_sc->sc_mtx) #define RK30_GPIO_LOCK_ASSERT(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED) #define RK30_GPIO_SWPORT_DR 0x00 #define RK30_GPIO_SWPORT_DDR 0x04 #define RK30_GPIO_INTEN 0x30 #define RK30_GPIO_INTMASK 0x34 #define RK30_GPIO_INTTYPE_LEVEL 0x38 #define RK30_GPIO_INT_POLARITY 0x3c #define RK30_GPIO_INT_STATUS 0x40 #define RK30_GPIO_INT_RAWSTATUS 0x44 #define RK30_GPIO_DEBOUNCE 0x48 #define RK30_GPIO_PORT_EOI 0x4c #define RK30_GPIO_EXT_PORT 0x50 #define RK30_GPIO_LS_SYNC 0x60 #define RK30_GPIO_WRITE(_sc, _off, _val) \ bus_space_write_4(_sc->sc_bst, _sc->sc_bsh, _off, _val) #define RK30_GPIO_READ(_sc, _off) \ bus_space_read_4(_sc->sc_bst, _sc->sc_bsh, _off) static uint32_t rk30_gpio_get_function(struct rk30_gpio_softc *sc, uint32_t pin) { if (RK30_GPIO_READ(sc, RK30_GPIO_SWPORT_DDR) & (1U << pin)) return (GPIO_PIN_OUTPUT); else return (GPIO_PIN_INPUT); } static void rk30_gpio_set_function(struct rk30_gpio_softc *sc, uint32_t pin, uint32_t func) { uint32_t data; /* Must be called with lock held. */ RK30_GPIO_LOCK_ASSERT(sc); data = RK30_GPIO_READ(sc, RK30_GPIO_SWPORT_DDR); if (func == GPIO_PIN_OUTPUT) data |= (1U << pin); else data &= ~(1U << pin); RK30_GPIO_WRITE(sc, RK30_GPIO_SWPORT_DDR, data); } static void rk30_gpio_set_pud(struct rk30_gpio_softc *sc, uint32_t pin, uint32_t state) { uint32_t pud; /* Must be called with lock held. */ RK30_GPIO_LOCK_ASSERT(sc); switch (state) { case GPIO_PIN_PULLUP: pud = RK30_GPIO_PULLUP; break; case GPIO_PIN_PULLDOWN: pud = RK30_GPIO_PULLDOWN; break; default: pud = RK30_GPIO_NONE; } /* * The pull up/down registers for GPIO0A and half of GPIO0B * (the first 12 pins on bank 0) are at a different location. */ if (sc->sc_bank == 0 && pin < 12) rk30_pmu_gpio_pud(pin, pud); else rk30_grf_gpio_pud(sc->sc_bank, pin, pud); } static void rk30_gpio_pin_configure(struct rk30_gpio_softc *sc, struct gpio_pin *pin, unsigned int flags) { RK30_GPIO_LOCK(sc); /* * Manage input/output. */ if (flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) { pin->gp_flags &= ~(GPIO_PIN_INPUT | GPIO_PIN_OUTPUT); if (flags & GPIO_PIN_OUTPUT) pin->gp_flags |= GPIO_PIN_OUTPUT; else pin->gp_flags |= GPIO_PIN_INPUT; rk30_gpio_set_function(sc, pin->gp_pin, pin->gp_flags); } /* Manage Pull-up/pull-down. 
*/ pin->gp_flags &= ~(GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN); if (flags & (GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN)) { if (flags & GPIO_PIN_PULLUP) pin->gp_flags |= GPIO_PIN_PULLUP; else pin->gp_flags |= GPIO_PIN_PULLDOWN; } rk30_gpio_set_pud(sc, pin->gp_pin, pin->gp_flags); RK30_GPIO_UNLOCK(sc); } static device_t rk30_gpio_get_bus(device_t dev) { struct rk30_gpio_softc *sc; sc = device_get_softc(dev); return (sc->sc_busdev); } static int rk30_gpio_pin_max(device_t dev, int *maxpin) { *maxpin = RK30_GPIO_PINS - 1; return (0); } static int rk30_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { struct rk30_gpio_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->sc_gpio_npins; i++) { if (sc->sc_gpio_pins[i].gp_pin == pin) break; } if (i >= sc->sc_gpio_npins) return (EINVAL); RK30_GPIO_LOCK(sc); *caps = sc->sc_gpio_pins[i].gp_caps; RK30_GPIO_UNLOCK(sc); return (0); } static int rk30_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct rk30_gpio_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->sc_gpio_npins; i++) { if (sc->sc_gpio_pins[i].gp_pin == pin) break; } if (i >= sc->sc_gpio_npins) return (EINVAL); RK30_GPIO_LOCK(sc); *flags = sc->sc_gpio_pins[i].gp_flags; RK30_GPIO_UNLOCK(sc); return (0); } static int rk30_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct rk30_gpio_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->sc_gpio_npins; i++) { if (sc->sc_gpio_pins[i].gp_pin == pin) break; } if (i >= sc->sc_gpio_npins) return (EINVAL); RK30_GPIO_LOCK(sc); memcpy(name, sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME); RK30_GPIO_UNLOCK(sc); return (0); } static int rk30_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct rk30_gpio_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->sc_gpio_npins; i++) { if (sc->sc_gpio_pins[i].gp_pin == pin) break; } if (i >= sc->sc_gpio_npins) return (EINVAL); rk30_gpio_pin_configure(sc, &sc->sc_gpio_pins[i], flags); return (0); } static int rk30_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { int i; struct rk30_gpio_softc *sc; uint32_t data; sc = device_get_softc(dev); for (i = 0; i < sc->sc_gpio_npins; i++) { if (sc->sc_gpio_pins[i].gp_pin == pin) break; } if (i >= sc->sc_gpio_npins) return (EINVAL); RK30_GPIO_LOCK(sc); data = RK30_GPIO_READ(sc, RK30_GPIO_SWPORT_DR); if (value) data |= (1U << pin); else data &= ~(1U << pin); RK30_GPIO_WRITE(sc, RK30_GPIO_SWPORT_DR, data); RK30_GPIO_UNLOCK(sc); return (0); } static int rk30_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val) { int i; struct rk30_gpio_softc *sc; uint32_t data; sc = device_get_softc(dev); for (i = 0; i < sc->sc_gpio_npins; i++) { if (sc->sc_gpio_pins[i].gp_pin == pin) break; } if (i >= sc->sc_gpio_npins) return (EINVAL); RK30_GPIO_LOCK(sc); data = RK30_GPIO_READ(sc, RK30_GPIO_EXT_PORT); RK30_GPIO_UNLOCK(sc); *val = (data & (1U << pin)) ? 
1 : 0; return (0); } static int rk30_gpio_pin_toggle(device_t dev, uint32_t pin) { int i; struct rk30_gpio_softc *sc; uint32_t data; sc = device_get_softc(dev); for (i = 0; i < sc->sc_gpio_npins; i++) { if (sc->sc_gpio_pins[i].gp_pin == pin) break; } if (i >= sc->sc_gpio_npins) return (EINVAL); RK30_GPIO_LOCK(sc); data = RK30_GPIO_READ(sc, RK30_GPIO_SWPORT_DR); if (data & (1U << pin)) data &= ~(1U << pin); else data |= (1U << pin); RK30_GPIO_WRITE(sc, RK30_GPIO_SWPORT_DR, data); RK30_GPIO_UNLOCK(sc); return (0); } static int rk30_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "rockchip,rk30xx-gpio")) return (ENXIO); device_set_desc(dev, "Rockchip RK30XX GPIO controller"); return (BUS_PROBE_DEFAULT); } static int rk30_gpio_attach(device_t dev) { struct rk30_gpio_softc *sc = device_get_softc(dev); int i, rid; phandle_t gpio; unsigned long start; if (rk30_gpio_sc) return (ENXIO); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "rk30 gpio", "gpio", MTX_DEF); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); goto fail; } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); /* Check the unit we are attaching by our base address. */ sc->sc_bank = -1; start = rman_get_start(sc->sc_mem_res); for (i = 0; i < nitems(rk30_gpio_base_addr); i++) { if (rk30_gpio_base_addr[i] == start) { sc->sc_bank = i; break; } } if (sc->sc_bank == -1) { device_printf(dev, "unsupported device unit (only GPIO0..3 are supported)\n"); goto fail; } rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); goto fail; } /* Find our node. */ gpio = ofw_bus_get_node(sc->sc_dev); if (!OF_hasprop(gpio, "gpio-controller")) /* Node is not a GPIO controller. */ goto fail; /* Initialize the software controlled pins. 
*/ for (i = 0; i < RK30_GPIO_PINS; i++) { snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME, "pin %d", i); sc->sc_gpio_pins[i].gp_pin = i; sc->sc_gpio_pins[i].gp_caps = RK30_GPIO_DEFAULT_CAPS; sc->sc_gpio_pins[i].gp_flags = rk30_gpio_get_function(sc, i); } sc->sc_gpio_npins = i; rk30_gpio_sc = sc; rk30_gpio_init(); sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) goto fail; return (0); fail: if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); mtx_destroy(&sc->sc_mtx); return (ENXIO); } static int rk30_gpio_detach(device_t dev) { return (EBUSY); } static device_method_t rk30_gpio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk30_gpio_probe), DEVMETHOD(device_attach, rk30_gpio_attach), DEVMETHOD(device_detach, rk30_gpio_detach), /* GPIO protocol */ DEVMETHOD(gpio_get_bus, rk30_gpio_get_bus), DEVMETHOD(gpio_pin_max, rk30_gpio_pin_max), DEVMETHOD(gpio_pin_getname, rk30_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, rk30_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, rk30_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, rk30_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, rk30_gpio_pin_get), DEVMETHOD(gpio_pin_set, rk30_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, rk30_gpio_pin_toggle), DEVMETHOD_END }; static devclass_t rk30_gpio_devclass; static driver_t rk30_gpio_driver = { "gpio", rk30_gpio_methods, sizeof(struct rk30_gpio_softc), }; DRIVER_MODULE(rk30_gpio, simplebus, rk30_gpio_driver, rk30_gpio_devclass, 0, 0); int rk30_gpios_prop_handle(phandle_t ctrl, pcell_t *gpios, int len) { struct rk30_gpio_softc *sc; pcell_t gpio_cells; int inc, t, tuples, tuple_size; int dir, flags, pin, i; u_long gpio_ctrl, size; sc = rk30_gpio_sc; if (sc == NULL) return ENXIO; if (OF_getprop(ctrl, "#gpio-cells", &gpio_cells, sizeof(pcell_t)) < 0) return (ENXIO); gpio_cells = fdt32_to_cpu(gpio_cells); if (gpio_cells != 2) return (ENXIO); tuple_size = gpio_cells * sizeof(pcell_t) + sizeof(phandle_t); tuples = len / tuple_size; if (fdt_regsize(ctrl, &gpio_ctrl, &size)) return (ENXIO); /* * Skip controller reference, since controller's phandle is given * explicitly (in a function argument). */ inc = sizeof(ihandle_t) / sizeof(pcell_t); gpios += inc; for (t = 0; t < tuples; t++) { pin = fdt32_to_cpu(gpios[0]); dir = fdt32_to_cpu(gpios[1]); flags = fdt32_to_cpu(gpios[2]); for (i = 0; i < sc->sc_gpio_npins; i++) { if (sc->sc_gpio_pins[i].gp_pin == pin) break; } if (i >= sc->sc_gpio_npins) return (EINVAL); rk30_gpio_pin_configure(sc, &sc->sc_gpio_pins[i], flags); if (dir == 1) { /* Input. */ rk30_gpio_pin_set(sc->sc_dev, pin, GPIO_PIN_INPUT); } else { /* Output. */ rk30_gpio_pin_set(sc->sc_dev, pin, GPIO_PIN_OUTPUT); } gpios += gpio_cells + inc; } return (0); } #define MAX_PINS_PER_NODE 5 #define GPIOS_PROP_CELLS 4 static int rk30_gpio_init(void) { phandle_t child, parent, root, ctrl; pcell_t gpios[MAX_PINS_PER_NODE * GPIOS_PROP_CELLS]; struct gpio_ctrl_entry *e; int len, rv; root = OF_finddevice("/"); len = 0; parent = root; /* Traverse through entire tree to find nodes with 'gpios' prop */ for (child = OF_child(parent); child != 0; child = OF_peer(child)) { /* Find a 'leaf'. Start the search from this node. */ while (OF_child(child)) { parent = child; child = OF_child(child); } if ((len = OF_getproplen(child, "gpios")) > 0) { if (len > sizeof(gpios)) return (ENXIO); /* Get 'gpios' property. 
*/ OF_getprop(child, "gpios", &gpios, len); e = (struct gpio_ctrl_entry *)&gpio_controllers; /* Find and call a handler. */ for (; e->compat; e++) { /* * First cell of 'gpios' property should * contain a ref. to a node defining GPIO * controller. */ ctrl = OF_node_from_xref(fdt32_to_cpu(gpios[0])); if (fdt_is_compatible(ctrl, e->compat)) /* Call a handler. */ if ((rv = e->handler(ctrl, (pcell_t *)&gpios, len))) return (rv); } } if (OF_peer(child) == 0) { /* No more siblings. */ child = parent; parent = OF_parent(child); } } return (0); } Index: head/sys/arm/rockchip/rk30xx_wdog.c =================================================================== --- head/sys/arm/rockchip/rk30xx_wdog.c (revision 306901) +++ head/sys/arm/rockchip/rk30xx_wdog.c (revision 306902) @@ -1,201 +1,200 @@ /*- * Copyright (c) 2013 Ganbold Tsagaankhuu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #ifndef RK30_WDT_BASE #define RK30_WDT_BASE 0x2004c000 #define RK30_WDT_PSIZE 0x100 #endif #define RK30_WDT_READ(_sc, _r) bus_read_4((_sc)->res, (_r)) #define RK30_WDT_WRITE(_sc, _r, _v) bus_write_4((_sc)->res, (_r), (_v)) #define WDOG_CTRL 0x00 #define WDOG_CTRL_EN (1 << 0) #define WDOG_CTRL_RSP_MODE (1 << 1) #define WDOG_CTRL_RST_PULSE (4 << 2) #define WDOG_CTRL_RST 0xa #define WDOG_TORR 0x04 #define WDOG_TORR_INTVL_SHIFT 0 #define WDOG_CCVR 0x08 #define WDOG_CRR 0x0c #define WDOG_CRR_PWD 0x76 #define WDOG_STAT 0x10 #define WDOG_EOI 0x14 static struct rk30_wd_softc *rk30_wd_sc = NULL; struct rk30_wd_softc { device_t dev; struct resource *res; struct mtx mtx; int freq; }; static void rk30_wd_watchdog_fn(void *private, u_int cmd, int *error); static int rk30_wd_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "rockchip,rk30xx-wdt")) { device_set_desc(dev, "Rockchip RK30XX Watchdog"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int rk30_wd_attach(device_t dev) { struct rk30_wd_softc *sc; int rid; phandle_t node; pcell_t cell; if (rk30_wd_sc != NULL) return (ENXIO); sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(sc->dev); if (OF_getencprop(node, "clock-frequency", &cell, sizeof(cell)) > 0) sc->freq = cell / 1000000; else return (ENXIO); rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } rk30_wd_sc = sc; mtx_init(&sc->mtx, "RK30XX Watchdog", "rk30_wd", MTX_DEF); EVENTHANDLER_REGISTER(watchdog_list, rk30_wd_watchdog_fn, sc, 0); return (0); } static void rk30_wd_watchdog_fn(void *private, u_int cmd, int *error) { struct rk30_wd_softc *sc; uint64_t ms, m, max; int i; sc = private; mtx_lock(&sc->mtx); cmd &= WD_INTERVAL; if (cmd > 0) { ms = ((uint64_t)1 << (cmd & WD_INTERVAL)) / 1000000; m = 0xffff / sc->freq; max = 0x7fffffff / sc->freq + 1; i = 0; while (m < max && m < ms) { m <<= 1; i++; } if (m < max) { RK30_WDT_WRITE(sc, WDOG_TORR, i << WDOG_TORR_INTVL_SHIFT); RK30_WDT_WRITE(sc, WDOG_CTRL, WDOG_CTRL_EN | WDOG_CTRL_RSP_MODE | WDOG_CTRL_RST_PULSE); RK30_WDT_WRITE(sc, WDOG_CRR, WDOG_CRR_PWD); *error = 0; } else { device_printf(sc->dev, "Can not be disabled\n"); mtx_unlock(&sc->mtx); RK30_WDT_WRITE(sc, WDOG_CTRL, WDOG_CTRL_RST); return; } } else RK30_WDT_WRITE(sc, WDOG_CTRL, WDOG_CTRL_RST); mtx_unlock(&sc->mtx); } void rk30_wd_watchdog_reset() { bus_space_handle_t bsh; bus_space_map(fdtbus_bs_tag, RK30_WDT_BASE, RK30_WDT_PSIZE, 0, &bsh); bus_space_write_4(fdtbus_bs_tag, bsh, WDOG_TORR, 0); bus_space_write_4(fdtbus_bs_tag, bsh, WDOG_CTRL, WDOG_CTRL_EN | WDOG_CTRL_RSP_MODE | WDOG_CTRL_RST_PULSE); while (1); } static device_method_t rk30_wd_methods[] = { DEVMETHOD(device_probe, rk30_wd_probe), DEVMETHOD(device_attach, rk30_wd_attach), DEVMETHOD_END }; static driver_t rk30_wd_driver = { "rk30_wd", rk30_wd_methods, sizeof(struct rk30_wd_softc), }; static devclass_t rk30_wd_devclass; DRIVER_MODULE(rk30_wd, simplebus, rk30_wd_driver, rk30_wd_devclass, 0, 0); Index: head/sys/arm/ti/am335x/am335x_gpio.c =================================================================== --- head/sys/arm/ti/am335x/am335x_gpio.c (revision 306901) +++ head/sys/arm/ti/am335x/am335x_gpio.c (revision 306902) @@ -1,159 
+1,157 @@ /*- * Copyright (c) 2012 Damjan Marion * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include #include #include "ti_gpio_if.h" static struct ofw_compat_data compat_data[] = { {"ti,am335x-gpio", 1}, /* Linux uses ti,omap4-gpio on am335x so we need to support it */ {"ti,omap4-gpio", 1}, {"ti,gpio", 1}, {NULL, 0}, }; static int am335x_gpio_probe(device_t dev) { if (ti_chip() != CHIP_AM335X) return (ENXIO); if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "TI AM335x General Purpose I/O (GPIO)"); return (0); } static int am335x_gpio_set_flags(device_t dev, uint32_t gpio, uint32_t flags) { unsigned int state = 0; struct ti_gpio_softc *sc = device_get_softc(dev); if (flags & GPIO_PIN_OUTPUT) { if (flags & GPIO_PIN_PULLUP) state = PADCONF_OUTPUT_PULLUP; else state = PADCONF_OUTPUT; } else if (flags & GPIO_PIN_INPUT) { if (flags & GPIO_PIN_PULLUP) state = PADCONF_INPUT_PULLUP; else if (flags & GPIO_PIN_PULLDOWN) state = PADCONF_INPUT_PULLDOWN; else state = PADCONF_INPUT; } return ti_pinmux_padconf_set_gpiomode(sc->sc_bank*32 + gpio, state); } static int am335x_gpio_get_flags(device_t dev, uint32_t gpio, uint32_t *flags) { unsigned int state; struct ti_gpio_softc *sc = device_get_softc(dev); if (ti_pinmux_padconf_get_gpiomode(sc->sc_bank*32 + gpio, &state) != 0) { *flags = 0; return (EINVAL); } else { switch (state) { case PADCONF_OUTPUT: *flags = GPIO_PIN_OUTPUT; break; case PADCONF_OUTPUT_PULLUP: *flags = GPIO_PIN_OUTPUT | GPIO_PIN_PULLUP; break; case PADCONF_INPUT: *flags = GPIO_PIN_INPUT; break; case PADCONF_INPUT_PULLUP: *flags = GPIO_PIN_INPUT | GPIO_PIN_PULLUP; break; case PADCONF_INPUT_PULLDOWN: *flags = GPIO_PIN_INPUT | GPIO_PIN_PULLDOWN; break; default: *flags = 0; break; } } return (0); } static device_method_t am335x_gpio_methods[] = { /* bus interface */ DEVMETHOD(device_probe, am335x_gpio_probe), /* ti_gpio interface */ DEVMETHOD(ti_gpio_set_flags, am335x_gpio_set_flags), DEVMETHOD(ti_gpio_get_flags, 
am335x_gpio_get_flags), DEVMETHOD_END }; extern driver_t ti_gpio_driver; static devclass_t am335x_gpio_devclass; DEFINE_CLASS_1(gpio, am335x_gpio_driver, am335x_gpio_methods, sizeof(struct ti_gpio_softc), ti_gpio_driver); DRIVER_MODULE(am335x_gpio, simplebus, am335x_gpio_driver, am335x_gpio_devclass, 0, 0); Index: head/sys/arm/ti/am335x/am335x_lcd_syscons.c =================================================================== --- head/sys/arm/ti/am335x/am335x_lcd_syscons.c (revision 306901) +++ head/sys/arm/ti/am335x/am335x_lcd_syscons.c (revision 306902) @@ -1,792 +1,790 @@ /*- * Copyright (c) 2013 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include "am335x_lcd.h" struct video_adapter_softc { /* Videoadpater part */ video_adapter_t va; int console; intptr_t fb_addr; intptr_t fb_paddr; unsigned int fb_size; unsigned int height; unsigned int width; unsigned int depth; unsigned int stride; unsigned int xmargin; unsigned int ymargin; unsigned char *font; int initialized; }; struct argb { uint8_t a; uint8_t r; uint8_t g; uint8_t b; }; static struct argb am335x_syscons_palette[16] = { {0x00, 0x00, 0x00, 0x00}, {0x00, 0x00, 0x00, 0xaa}, {0x00, 0x00, 0xaa, 0x00}, {0x00, 0x00, 0xaa, 0xaa}, {0x00, 0xaa, 0x00, 0x00}, {0x00, 0xaa, 0x00, 0xaa}, {0x00, 0xaa, 0x55, 0x00}, {0x00, 0xaa, 0xaa, 0xaa}, {0x00, 0x55, 0x55, 0x55}, {0x00, 0x55, 0x55, 0xff}, {0x00, 0x55, 0xff, 0x55}, {0x00, 0x55, 0xff, 0xff}, {0x00, 0xff, 0x55, 0x55}, {0x00, 0xff, 0x55, 0xff}, {0x00, 0xff, 0xff, 0x55}, {0x00, 0xff, 0xff, 0xff} }; /* mouse pointer from dev/syscons/scgfbrndr.c */ static u_char mouse_pointer[16] = { 0x00, 0x40, 0x60, 0x70, 0x78, 0x7c, 0x7e, 0x68, 0x0c, 0x0c, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00 }; #define AM335X_FONT_HEIGHT 16 #define FB_WIDTH 640 #define FB_HEIGHT 480 #define FB_DEPTH 24 static struct video_adapter_softc va_softc; static int am335x_syscons_configure(int flags); /* * Video driver routines and glue. 
*/ static vi_probe_t am335x_syscons_probe; static vi_init_t am335x_syscons_init; static vi_get_info_t am335x_syscons_get_info; static vi_query_mode_t am335x_syscons_query_mode; static vi_set_mode_t am335x_syscons_set_mode; static vi_save_font_t am335x_syscons_save_font; static vi_load_font_t am335x_syscons_load_font; static vi_show_font_t am335x_syscons_show_font; static vi_save_palette_t am335x_syscons_save_palette; static vi_load_palette_t am335x_syscons_load_palette; static vi_set_border_t am335x_syscons_set_border; static vi_save_state_t am335x_syscons_save_state; static vi_load_state_t am335x_syscons_load_state; static vi_set_win_org_t am335x_syscons_set_win_org; static vi_read_hw_cursor_t am335x_syscons_read_hw_cursor; static vi_set_hw_cursor_t am335x_syscons_set_hw_cursor; static vi_set_hw_cursor_shape_t am335x_syscons_set_hw_cursor_shape; static vi_blank_display_t am335x_syscons_blank_display; static vi_mmap_t am335x_syscons_mmap; static vi_ioctl_t am335x_syscons_ioctl; static vi_clear_t am335x_syscons_clear; static vi_fill_rect_t am335x_syscons_fill_rect; static vi_bitblt_t am335x_syscons_bitblt; static vi_diag_t am335x_syscons_diag; static vi_save_cursor_palette_t am335x_syscons_save_cursor_palette; static vi_load_cursor_palette_t am335x_syscons_load_cursor_palette; static vi_copy_t am335x_syscons_copy; static vi_putp_t am335x_syscons_putp; static vi_putc_t am335x_syscons_putc; static vi_puts_t am335x_syscons_puts; static vi_putm_t am335x_syscons_putm; static video_switch_t am335x_sysconsvidsw = { .probe = am335x_syscons_probe, .init = am335x_syscons_init, .get_info = am335x_syscons_get_info, .query_mode = am335x_syscons_query_mode, .set_mode = am335x_syscons_set_mode, .save_font = am335x_syscons_save_font, .load_font = am335x_syscons_load_font, .show_font = am335x_syscons_show_font, .save_palette = am335x_syscons_save_palette, .load_palette = am335x_syscons_load_palette, .set_border = am335x_syscons_set_border, .save_state = am335x_syscons_save_state, .load_state = am335x_syscons_load_state, .set_win_org = am335x_syscons_set_win_org, .read_hw_cursor = am335x_syscons_read_hw_cursor, .set_hw_cursor = am335x_syscons_set_hw_cursor, .set_hw_cursor_shape = am335x_syscons_set_hw_cursor_shape, .blank_display = am335x_syscons_blank_display, .mmap = am335x_syscons_mmap, .ioctl = am335x_syscons_ioctl, .clear = am335x_syscons_clear, .fill_rect = am335x_syscons_fill_rect, .bitblt = am335x_syscons_bitblt, .diag = am335x_syscons_diag, .save_cursor_palette = am335x_syscons_save_cursor_palette, .load_cursor_palette = am335x_syscons_load_cursor_palette, .copy = am335x_syscons_copy, .putp = am335x_syscons_putp, .putc = am335x_syscons_putc, .puts = am335x_syscons_puts, .putm = am335x_syscons_putm, }; VIDEO_DRIVER(am335x_syscons, am335x_sysconsvidsw, am335x_syscons_configure); static vr_init_t am335x_rend_init; static vr_clear_t am335x_rend_clear; static vr_draw_border_t am335x_rend_draw_border; static vr_draw_t am335x_rend_draw; static vr_set_cursor_t am335x_rend_set_cursor; static vr_draw_cursor_t am335x_rend_draw_cursor; static vr_blink_cursor_t am335x_rend_blink_cursor; static vr_set_mouse_t am335x_rend_set_mouse; static vr_draw_mouse_t am335x_rend_draw_mouse; /* * We use our own renderer; this is because we must emulate a hardware * cursor. 
*/ static sc_rndr_sw_t am335x_rend = { am335x_rend_init, am335x_rend_clear, am335x_rend_draw_border, am335x_rend_draw, am335x_rend_set_cursor, am335x_rend_draw_cursor, am335x_rend_blink_cursor, am335x_rend_set_mouse, am335x_rend_draw_mouse }; RENDERER(am335x_syscons, 0, am335x_rend, gfb_set); RENDERER_MODULE(am335x_syscons, gfb_set); static void am335x_rend_init(scr_stat* scp) { } static void am335x_rend_clear(scr_stat* scp, int c, int attr) { } static void am335x_rend_draw_border(scr_stat* scp, int color) { } static void am335x_rend_draw(scr_stat* scp, int from, int count, int flip) { video_adapter_t* adp = scp->sc->adp; int i, c, a; if (!flip) { /* Normal printing */ vidd_puts(adp, from, (uint16_t*)sc_vtb_pointer(&scp->vtb, from), count); } else { /* This is for selections and such: invert the color attribute */ for (i = count; i-- > 0; ++from) { c = sc_vtb_getc(&scp->vtb, from); a = sc_vtb_geta(&scp->vtb, from) >> 8; vidd_putc(adp, from, c, (a >> 4) | ((a & 0xf) << 4)); } } } static void am335x_rend_set_cursor(scr_stat* scp, int base, int height, int blink) { } static void am335x_rend_draw_cursor(scr_stat* scp, int off, int blink, int on, int flip) { video_adapter_t* adp = scp->sc->adp; struct video_adapter_softc *sc; int row, col; uint8_t *addr; int i, j, bytes; sc = (struct video_adapter_softc *)adp; if (scp->curs_attr.height <= 0) return; if (sc->fb_addr == 0) return; if (off >= adp->va_info.vi_width * adp->va_info.vi_height) return; /* calculate the coordinates in the video buffer */ row = (off / adp->va_info.vi_width) * adp->va_info.vi_cheight; col = (off % adp->va_info.vi_width) * adp->va_info.vi_cwidth; addr = (uint8_t *)sc->fb_addr + (row + sc->ymargin)*(sc->stride) + (sc->depth/8) * (col + sc->xmargin); bytes = sc->depth/8; /* our cursor consists of simply inverting the char under it */ for (i = 0; i < adp->va_info.vi_cheight; i++) { for (j = 0; j < adp->va_info.vi_cwidth; j++) { switch (sc->depth) { case 32: case 24: addr[bytes*j + 2] ^= 0xff; /* FALLTHROUGH */ case 16: addr[bytes*j + 1] ^= 0xff; addr[bytes*j] ^= 0xff; break; default: break; } } addr += sc->stride; } } static void am335x_rend_blink_cursor(scr_stat* scp, int at, int flip) { } static void am335x_rend_set_mouse(scr_stat* scp) { } static void am335x_rend_draw_mouse(scr_stat* scp, int x, int y, int on) { vidd_putm(scp->sc->adp, x, y, mouse_pointer, 0xffffffff, 16, 8); } static uint16_t am335x_syscons_static_window[ROW*COL]; extern u_char dflt_font_16[]; /* * Update videoadapter settings after changing resolution */ static void am335x_syscons_update_margins(video_adapter_t *adp) { struct video_adapter_softc *sc; video_info_t *vi; sc = (struct video_adapter_softc *)adp; vi = &adp->va_info; sc->xmargin = (sc->width - (vi->vi_width * vi->vi_cwidth)) / 2; sc->ymargin = (sc->height - (vi->vi_height * vi->vi_cheight))/2; } static phandle_t am335x_syscons_find_panel_node(phandle_t start) { phandle_t child; phandle_t result; for (child = OF_child(start); child != 0; child = OF_peer(child)) { if (fdt_is_compatible(child, "ti,am335x-lcd")) return (child); if ((result = am335x_syscons_find_panel_node(child))) return (result); } return (0); } static int am335x_syscons_configure(int flags) { struct video_adapter_softc *va_sc; va_sc = &va_softc; phandle_t display, root; pcell_t cell; if (va_sc->initialized) return (0); va_sc->width = 0; va_sc->height = 0; /* * It seems there is no way to let syscons framework know * that framebuffer resolution has changed. 
So just try * to fetch data from FDT and go with defaults if failed */ root = OF_finddevice("/"); if ((root != 0) && (display = am335x_syscons_find_panel_node(root))) { if ((OF_getprop(display, "panel_width", &cell, sizeof(cell))) > 0) va_sc->width = (int)fdt32_to_cpu(cell); if ((OF_getprop(display, "panel_height", &cell, sizeof(cell))) > 0) va_sc->height = (int)fdt32_to_cpu(cell); } if (va_sc->width == 0) va_sc->width = FB_WIDTH; if (va_sc->height == 0) va_sc->height = FB_HEIGHT; am335x_syscons_init(0, &va_sc->va, 0); va_sc->initialized = 1; return (0); } static int am335x_syscons_probe(int unit, video_adapter_t **adp, void *arg, int flags) { return (0); } static int am335x_syscons_init(int unit, video_adapter_t *adp, int flags) { struct video_adapter_softc *sc; video_info_t *vi; sc = (struct video_adapter_softc *)adp; vi = &adp->va_info; vid_init_struct(adp, "am335x_syscons", -1, unit); sc->font = dflt_font_16; vi->vi_cheight = AM335X_FONT_HEIGHT; vi->vi_cwidth = 8; vi->vi_width = sc->width/8; vi->vi_height = sc->height/vi->vi_cheight; /* * Clamp width/height to syscons maximums */ if (vi->vi_width > COL) vi->vi_width = COL; if (vi->vi_height > ROW) vi->vi_height = ROW; sc->xmargin = (sc->width - (vi->vi_width * vi->vi_cwidth)) / 2; sc->ymargin = (sc->height - (vi->vi_height * vi->vi_cheight))/2; adp->va_window = (vm_offset_t) am335x_syscons_static_window; adp->va_flags |= V_ADP_FONT /* | V_ADP_COLOR | V_ADP_MODECHANGE */; vid_register(&sc->va); return (0); } static int am335x_syscons_get_info(video_adapter_t *adp, int mode, video_info_t *info) { bcopy(&adp->va_info, info, sizeof(*info)); return (0); } static int am335x_syscons_query_mode(video_adapter_t *adp, video_info_t *info) { return (0); } static int am335x_syscons_set_mode(video_adapter_t *adp, int mode) { return (0); } static int am335x_syscons_save_font(video_adapter_t *adp, int page, int size, int width, u_char *data, int c, int count) { return (0); } static int am335x_syscons_load_font(video_adapter_t *adp, int page, int size, int width, u_char *data, int c, int count) { struct video_adapter_softc *sc = (struct video_adapter_softc *)adp; sc->font = data; return (0); } static int am335x_syscons_show_font(video_adapter_t *adp, int page) { return (0); } static int am335x_syscons_save_palette(video_adapter_t *adp, u_char *palette) { return (0); } static int am335x_syscons_load_palette(video_adapter_t *adp, u_char *palette) { return (0); } static int am335x_syscons_set_border(video_adapter_t *adp, int border) { return (am335x_syscons_blank_display(adp, border)); } static int am335x_syscons_save_state(video_adapter_t *adp, void *p, size_t size) { return (0); } static int am335x_syscons_load_state(video_adapter_t *adp, void *p) { return (0); } static int am335x_syscons_set_win_org(video_adapter_t *adp, off_t offset) { return (0); } static int am335x_syscons_read_hw_cursor(video_adapter_t *adp, int *col, int *row) { *col = *row = 0; return (0); } static int am335x_syscons_set_hw_cursor(video_adapter_t *adp, int col, int row) { return (0); } static int am335x_syscons_set_hw_cursor_shape(video_adapter_t *adp, int base, int height, int celsize, int blink) { return (0); } static int am335x_syscons_blank_display(video_adapter_t *adp, int mode) { struct video_adapter_softc *sc; sc = (struct video_adapter_softc *)adp; if (sc && sc->fb_addr) memset((void*)sc->fb_addr, 0, sc->fb_size); return (0); } static int am335x_syscons_mmap(video_adapter_t *adp, vm_ooffset_t offset, vm_paddr_t *paddr, int prot, vm_memattr_t *memattr) { struct 
video_adapter_softc *sc; sc = (struct video_adapter_softc *)adp; /* * This might be a legacy VGA mem request: if so, just point it at the * framebuffer, since it shouldn't be touched */ if (offset < sc->stride*sc->height) { *paddr = sc->fb_paddr + offset; return (0); } return (EINVAL); } static int am335x_syscons_ioctl(video_adapter_t *adp, u_long cmd, caddr_t data) { struct video_adapter_softc *sc; struct fbtype *fb; sc = (struct video_adapter_softc *)adp; switch (cmd) { case FBIOGTYPE: fb = (struct fbtype *)data; fb->fb_type = FBTYPE_PCIMISC; fb->fb_height = sc->height; fb->fb_width = sc->width; fb->fb_depth = sc->depth; if (sc->depth <= 1 || sc->depth > 8) fb->fb_cmsize = 0; else fb->fb_cmsize = 1 << sc->depth; fb->fb_size = sc->fb_size; break; default: return (fb_commonioctl(adp, cmd, data)); } return (0); } static int am335x_syscons_clear(video_adapter_t *adp) { return (am335x_syscons_blank_display(adp, 0)); } static int am335x_syscons_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { return (0); } static int am335x_syscons_bitblt(video_adapter_t *adp, ...) { return (0); } static int am335x_syscons_diag(video_adapter_t *adp, int level) { return (0); } static int am335x_syscons_save_cursor_palette(video_adapter_t *adp, u_char *palette) { return (0); } static int am335x_syscons_load_cursor_palette(video_adapter_t *adp, u_char *palette) { return (0); } static int am335x_syscons_copy(video_adapter_t *adp, vm_offset_t src, vm_offset_t dst, int n) { return (0); } static int am335x_syscons_putp(video_adapter_t *adp, vm_offset_t off, uint32_t p, uint32_t a, int size, int bpp, int bit_ltor, int byte_ltor) { return (0); } static int am335x_syscons_putc(video_adapter_t *adp, vm_offset_t off, uint8_t c, uint8_t a) { struct video_adapter_softc *sc; int row; int col; int i, j, k; uint8_t *addr; u_char *p; uint8_t fg, bg, color; uint16_t rgb; sc = (struct video_adapter_softc *)adp; if (sc->fb_addr == 0) return (0); row = (off / adp->va_info.vi_width) * adp->va_info.vi_cheight; col = (off % adp->va_info.vi_width) * adp->va_info.vi_cwidth; p = sc->font + c*AM335X_FONT_HEIGHT; addr = (uint8_t *)sc->fb_addr + (row + sc->ymargin)*(sc->stride) + (sc->depth/8) * (col + sc->xmargin); fg = a & 0xf ; bg = (a >> 4) & 0xf; for (i = 0; i < AM335X_FONT_HEIGHT; i++) { for (j = 0, k = 7; j < 8; j++, k--) { if ((p[i] & (1 << k)) == 0) color = bg; else color = fg; switch (sc->depth) { case 32: addr[4*j+0] = am335x_syscons_palette[color].r; addr[4*j+1] = am335x_syscons_palette[color].g; addr[4*j+2] = am335x_syscons_palette[color].b; addr[4*j+3] = am335x_syscons_palette[color].a; break; case 24: addr[3*j] = am335x_syscons_palette[color].r; addr[3*j+1] = am335x_syscons_palette[color].g; addr[3*j+2] = am335x_syscons_palette[color].b; break; case 16: rgb = (am335x_syscons_palette[color].r >> 3) << 11; rgb |= (am335x_syscons_palette[color].g >> 2) << 5; rgb |= (am335x_syscons_palette[color].b >> 3); addr[2*j] = rgb & 0xff; addr[2*j + 1] = (rgb >> 8) & 0xff; default: /* Not supported yet */ break; } } addr += (sc->stride); } return (0); } static int am335x_syscons_puts(video_adapter_t *adp, vm_offset_t off, u_int16_t *s, int len) { int i; for (i = 0; i < len; i++) am335x_syscons_putc(adp, off + i, s[i] & 0xff, (s[i] & 0xff00) >> 8); return (0); } static int am335x_syscons_putm(video_adapter_t *adp, int x, int y, uint8_t *pixel_image, uint32_t pixel_mask, int size, int width) { return (0); } /* Initialization function */ int am335x_lcd_syscons_setup(vm_offset_t vaddr, vm_paddr_t paddr, struct 
panel_info *panel) { struct video_adapter_softc *va_sc = &va_softc; va_sc->fb_addr = vaddr; va_sc->fb_paddr = paddr; va_sc->depth = panel->bpp; va_sc->stride = panel->bpp*panel->panel_width/8; va_sc->width = panel->panel_width; va_sc->height = panel->panel_height; va_sc->fb_size = va_sc->width * va_sc->height * va_sc->depth/8; am335x_syscons_update_margins(&va_sc->va); return (0); } /* * Define a stub keyboard driver in case one hasn't been * compiled into the kernel */ #include #include static int dummy_kbd_configure(int flags); keyboard_switch_t am335x_dummysw; static int dummy_kbd_configure(int flags) { return (0); } KEYBOARD_DRIVER(am335x_dummy, am335x_dummysw, dummy_kbd_configure); Index: head/sys/arm/ti/am335x/am335x_scm_padconf.c =================================================================== --- head/sys/arm/ti/am335x/am335x_scm_padconf.c (revision 306901) +++ head/sys/arm/ti/am335x/am335x_scm_padconf.c (revision 306902) @@ -1,303 +1,301 @@ /*- * Copyright (c) 2012 Damjan Marion * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #define _PIN(r, b, gp, gm, m0, m1, m2, m3, m4, m5, m6, m7) \ { .reg_off = r, \ .gpio_pin = gp, \ .gpio_mode = gm, \ .ballname = b, \ .muxmodes[0] = m0, \ .muxmodes[1] = m1, \ .muxmodes[2] = m2, \ .muxmodes[3] = m3, \ .muxmodes[4] = m4, \ .muxmodes[5] = m5, \ .muxmodes[6] = m6, \ .muxmodes[7] = m7, \ } const static struct ti_pinmux_padstate ti_padstate_devmap[] = { {"output", PADCONF_OUTPUT }, {"output_pullup", PADCONF_OUTPUT_PULLUP }, {"input", PADCONF_INPUT }, {"input_pulldown", PADCONF_INPUT_PULLDOWN }, {"input_pullup", PADCONF_INPUT_PULLUP }, {"i2c", PADCONF_INPUT_PULLUP_SLOW }, { .state = NULL } }; const static struct ti_pinmux_padconf ti_padconf_devmap[] = { _PIN(0x000, "GPMC_AD0", 32, 7,"gpmc_ad0", "mmc1_dat0", NULL, NULL, NULL, NULL, NULL, "gpio1_0"), _PIN(0x004, "GPMC_AD1", 33, 7,"gpmc_ad1", "mmc1_dat1", NULL, NULL, NULL, NULL, NULL, "gpio1_1"), _PIN(0x008, "GPMC_AD2", 34, 7,"gpmc_ad2", "mmc1_dat2", NULL, NULL, NULL, NULL, NULL, "gpio1_2"), _PIN(0x00C, "GPMC_AD3", 35, 7,"gpmc_ad3", "mmc1_dat3", NULL, NULL, NULL, NULL, NULL, "gpio1_3"), _PIN(0x010, "GPMC_AD4", 36, 7,"gpmc_ad4", "mmc1_dat4", NULL, NULL, NULL, NULL, NULL, "gpio1_4"), _PIN(0x014, "GPMC_AD5", 37, 7,"gpmc_ad5", "mmc1_dat5", NULL, NULL, NULL, NULL, NULL, "gpio1_5"), _PIN(0x018, "GPMC_AD6", 38, 7,"gpmc_ad6", "mmc1_dat6", NULL, NULL, NULL, NULL, NULL, "gpio1_6"), _PIN(0x01C, "GPMC_AD7", 39, 7,"gpmc_ad7", "mmc1_dat7", NULL, NULL, NULL, NULL, NULL, "gpio1_7"), _PIN(0x020, "GPMC_AD8", 22, 7, "gpmc_ad8", "lcd_data23", "mmc1_dat0", "mmc2_dat4", "ehrpwm2A", NULL, NULL, "gpio0_22"), _PIN(0x024, "GPMC_AD9", 23, 7, "gpmc_ad9", "lcd_data22", "mmc1_dat1", "mmc2_dat5", "ehrpwm2B", NULL, NULL, "gpio0_23"), _PIN(0x028, "GPMC_AD10", 26, 7, "gpmc_ad10", "lcd_data21", "mmc1_dat2", "mmc2_dat6", "ehrpwm2_tripzone_in", NULL, NULL, "gpio0_26"), _PIN(0x02C, "GPMC_AD11", 27, 7, "gpmc_ad11", "lcd_data20", "mmc1_dat3", "mmc2_dat7", "ehrpwm0_synco", NULL, NULL, "gpio0_27"), _PIN(0x030, "GPMC_AD12", 44, 7, "gpmc_ad12", "lcd_data19", "mmc1_dat4", "mmc2_dat0", "eQEP2A_in", "pr1_mii0_txd2", "pr1_pru0_pru_r30_14", "gpio1_12"), _PIN(0x034, "GPMC_AD13", 45, 7, "gpmc_ad13", "lcd_data18", "mmc1_dat5", "mmc2_dat1", "eQEP2B_in", "pr1_mii0_txd1", "pr1_pru0_pru_r30_15", "gpio1_13"), _PIN(0x038, "GPMC_AD14", 46, 7, "gpmc_ad14", "lcd_data17", "mmc1_dat6", "mmc2_dat2", "eQEP2_index", "pr1_mii0_txd0", "pr1_pru0_pru_r31_14", "gpio1_14"), _PIN(0x03C, "GPMC_AD15", 47, 7, "gpmc_ad15", "lcd_data16", "mmc1_dat7", "mmc2_dat3", "eQEP2_strobe", "pr1_ecap0_ecap_capin_apwm_o", "pr1_pru0_pru_r31_15", "gpio1_15"), _PIN(0x040, "GPMC_A0", 48, 7, "gpmc_a0", "gmii2_txen", "rgmii2_tctl", "rmii2_txen", "gpmc_a16", "pr1_mii_mt1_clk", "ehrpwm1_tripzone_input", "gpio1_16"), _PIN(0x044, "GPMC_A1", 49, 7, "gpmc_a1", "gmii2_rxdv", "rgmii2_rctl", "mmc2_dat0", "gpmc_a17", "pr1_mii1_txd3", "ehrpwm0_synco", "gpio1_17"), _PIN(0x048, "GPMC_A2", 50, 7, "gpmc_a2", "gmii2_txd3", "rgmii2_td3", "mmc2_dat1", "gpmc_a18", "pr1_mii1_txd2", "ehrpwm1A", "gpio1_18"), _PIN(0x04C, "GPMC_A3", 51, 7, "gpmc_a3", "gmii2_txd2", "rgmii2_td2", "mmc2_dat2", "gpmc_a19", "pr1_mii1_txd1", "ehrpwm1B", "gpio1_19"), _PIN(0x050, "GPMC_A4", 52, 7, "gpmc_a4", "gmii2_txd1", "rgmii2_td1", "rmii2_tdx1", "gpmc_a20", "pr1_mii1_txd0", "eQEP1A_in", "gpio1_20"), _PIN(0x054, "GPMC_A5", 53, 7, "gpmc_a5", "gmii2_txd0", "rgmii2_td0", 
"rmii2_txd0", "gpmc_a21", "pr1_mii1_rxd3", "eQEP1B_in", "gpio1_21"), _PIN(0x058, "GPMC_A6", 54, 7, "gpmc_a6", "gmii2_txclk", "rgmii2_tclk", "mmc2_dat4", "gpmc_a22", "pr1_mii1_rxd2", "eQEP1_index", "gpio1_22"), _PIN(0x05C, "GPMC_A7", 55, 7, "gpmc_a7", "gmii2_rxclk", "rgmii2_rclk", "mmc2_dat5", "gpmc_a23", "pr1_mii1_rxd1", "eQEP1_strobe", "gpio1_23"), _PIN(0x060, "GPMC_A8", 56, 7, "gpmc_a8", "gmii2_rxd3", "rgmii2_rd3", "mmc2_dat6", "gpmc_a24", "pr1_mii1_rxd0", "mcasp0_aclkx", "gpio1_24"), _PIN(0x064, "GPMC_A9", 57, 7, "gmpc_a9", "gmii2_rxd2", "rgmii2_rd2", "mmc2_dat7 / rmii2_crs_dv", "gpmc_a25", "pr1_mii_mr1_clk", "mcasp0_fsx", "gpio1_25"), _PIN(0x068, "GPMC_A10", 58, 7, "gmpc_a10", "gmii2_rxd1", "rgmii2_rd1", "rmii2_rxd1", "gpmc_a26", "pr1_mii1_rxdv", "mcasp0_arx0", "gpio1_26"), _PIN(0x06C, "GPMC_A11", 59, 7, "gmpc_a11", "gmii2_rxd0", "rgmii2_rd0", "rmii2_rxd0", "gpmc_a27", "pr1_mii1_rxer", "mcasp0_axr1", "gpio1_27"), _PIN(0x070, "GPMC_WAIT0", 30, 7, "gpmc_wait0", "gmii2_crs", "gpmc_csn4", "rmii2_crs_dv", "mmc1_sdcd", "pr1_mii1_col", "uart4_rxd", "gpio0_30"), _PIN(0x074, "GPMC_WPn", 31, 7, "gpmc_wpn", "gmii2_rxerr", "gpmc_csn5", "rmii2_rxerr", "mmc2_sdcd", "pr1_mii1_txen", "uart4_txd", "gpio0_31"), _PIN(0x078, "GPMC_BEn1", 60, 7, "gpmc_be1n", "gmii2_col", "gmpc_csn6","mmc2_dat3", "gpmc_dir", "pr1_mii1_rxlink", "mcasp0_aclkr", "gpio1_28"), _PIN(0x07c, "GPMC_CSn0", 61, 7, "gpmc_csn0", NULL, NULL, NULL, NULL, NULL, NULL, "gpio1_29"), _PIN(0x080, "GPMC_CSn1", 62, 7, "gpmc_csn1", "gpmc_clk", "mmc1_clk", "pr1_edio_data_in6", "pr1_edio_data_out6", "pr1_pru1_pru_r30_12", "pr1_pru1_pru_r31_12", "gpio1_30"), _PIN(0x084, "GPMC_CSn2", 63, 7, "gpmc_csn2", "gpmc_be1n", "mmc1_cmd", "pr1_edio_data_in7", "pr1_edio_data_out7", "pr1_pru1_pru_r30_13", "pr1_pru1_pru_r31_13", "gpio1_31"), _PIN(0x088, "GPMC_CSn3", 64, 7, "gpmc_csn3", "gpmc_a3", "rmii2_crs_dv", "mmc2_cmd", "pr1_mii0_crs", "pr1_mdio_data", "EMU4", "gpio2_0"), _PIN(0x08c, "GPMC_CLK", 65, 7, "gpmc_clk", "lcd_memory_clk", "gpmc_wait1", "mmc2_clk", "pr1_mii1_crs", "pr1_mdio_mdclk", "mcasp0_fsr", "gpio2_1"), _PIN(0x090, "GPMC_ADVn_ALE", 66, 7, "gpmc_advn_ale", NULL, "timer4", NULL, NULL, NULL, NULL, "gpio2_2"), _PIN(0x094, "GPMC_OEn_REn", 67, 7, "gpmc_oen_ren", NULL, "timer7", NULL, NULL, NULL, NULL, "gpio2_3"), _PIN(0x098, "GPMC_WEn", 68, 7, "gpmc_wen", NULL, "timer6", NULL, NULL, NULL, NULL, "gpio2_4"), _PIN(0x09c, "GPMC_BEn0_CLE", 67, 7, "gpmc_ben0_cle", NULL, "timer5", NULL, NULL, NULL, NULL, "gpio2_5"), _PIN(0x0a0, "LCD_DATA0", 68, 7, "lcd_data0", "gpmc_a0", "pr1_mii_mt0_clk", "ehrpwm2A", NULL, "pr1_pru1_pru_r30_0", "pr1_pru1_pru_r31_0", "gpio2_6"), _PIN(0x0a4, "LCD_DATA1", 69, 7, "lcd_data1", "gpmc_a1", "pr1_mii0_txen", "ehrpwm2B", NULL, "pr1_pru1_pru_r30_1", "pr1_pru1_pru_r31_1", "gpio2_7"), _PIN(0x0a8, "LCD_DATA2", 70, 7, "lcd_data2", "gpmc_a2", "pr1_mii0_txd3", "ehrpwm2_tripzone_input", NULL, "pr1_pru1_pru_r30_2", "pr1_pru1_pru_r31_2", "gpio2_8"), _PIN(0x0ac, "LCD_DATA3", 71, 7, "lcd_data3", "gpmc_a3", "pr1_mii0_txd2", "ehrpwm0_synco", NULL, "pr1_pru1_pru_r30_3", "pr1_pru1_pru_r31_3", "gpio2_9"), _PIN(0x0b0, "LCD_DATA4", 72, 7, "lcd_data4", "gpmc_a4", "pr1_mii0_txd1", "eQEP2A_in", NULL, "pr1_pru1_pru_r30_4", "pr1_pru1_pru_r31_4", "gpio2_10"), _PIN(0x0b4, "LCD_DATA5", 73, 7, "lcd_data5", "gpmc_a5", "pr1_mii0_txd0", "eQEP2B_in", NULL, "pr1_pru1_pru_r30_5", "pr1_pru1_pru_r31_5", "gpio2_11"), _PIN(0x0b8, "LCD_DATA6", 74, 7, "lcd_data6", "gpmc_a6", "pr1_edio_data_in6", "eQEP2_index", "pr1_edio_data_out6", "pr1_pru1_pru_r30_6", "pr1_pru1_pru_r31_6", 
"gpio2_12"), _PIN(0x0bc, "LCD_DATA7", 75, 7, "lcd_data7", "gpmc_a7", "pr1_edio_data_in7", "eQEP2_strobe", "pr1_edio_data_out7", "pr1_pru1_pru_r30_7", "pr1_pru1_pru_r31_7", "gpio2_13"), _PIN(0x0c0, "LCD_DATA8", 76, 7, "lcd_data8", "gpmc_a12", "ehrpwm1_tripzone_input", "mcasp0_aclkx", "uart5_txd", "pr1_mii0_rxd3", "uart2_ctsn", "gpio2_14"), _PIN(0x0c4, "LCD_DATA9", 76, 7, "lcd_data9", "gpmc_a13", "ehrpwm0_synco", "mcasp0_fsx", "uart5_rxd", "pr1_mii0_rxd2", "uart2_rtsn", "gpio2_15"), _PIN(0x0c8, "LCD_DATA10", 77, 7, "lcd_data10", "gpmc_a14", "ehrpwm1A", "mcasp0_axr0", NULL, "pr1_mii0_rxd1", "uart3_ctsn", "gpio2_16"), _PIN(0x0cc, "LCD_DATA11", 78, 7, "lcd_data11", "gpmc_a15", "ehrpwm1B", "mcasp0_ahclkr", "mcasp0_axr2", "pr1_mii0_rxd0", "uart3_rtsn", "gpio2_17"), _PIN(0x0d0, "LCD_DATA12", 8, 7, "lcd_data12", "gpmc_a16", "eQEP1A_in", "mcasp0_aclkr", "mcasp0_axr2", "pr1_mii0_rxlink", "uart4_ctsn", "gpio0_8"), _PIN(0x0d4, "LCD_DATA13", 9, 7, "lcd_data13", "gpmc_a17", "eQEP1B_in", "mcasp0_fsr", "mcasp0_axr3", "pr1_mii0_rxer", "uart4_rtsn", "gpio0_9"), _PIN(0x0d8, "LCD_DATA14", 10, 7, "lcd_data14", "gpmc_a18", "eQEP1_index", "mcasp0_axr1", "uart5_rxd", "pr1_mii_mr0_clk", "uart5_ctsn", "gpio0_10"), _PIN(0x0dc, "LCD_DATA15", 11, 7, "lcd_data15", "gpmc_a19", "eQEP1_strobe", "mcasp0_ahclkx", "mcasp0_axr3", "pr1_mii0_rxdv", "uart5_rtsn", "gpio0_11"), _PIN(0x0e0, "LCD_VSYNC", 86, 7, "lcd_vsync", "gpmc_a8", "gpmc_a1", "pr1_edio_data_in2", "pr1_edio_data_out2", "pr1_pru1_pru_r30_8", "pr1_pru1_pru_r31_8", "gpio2_22"), _PIN(0x0e4, "LCD_HSYNC", 87, 7, "lcd_hsync", "gmpc_a9", "gpmc_a2", "pr1_edio_data_in3", "pr1_edio_data_out3", "pr1_pru1_pru_r30_9", "pr1_pru1_pru_r31_9", "gpio2_23"), _PIN(0x0e8, "LCD_PCLK", 88, 7, "lcd_pclk", "gpmc_a10", "pr1_mii0_crs", "pr1_edio_data_in4", "pr1_edio_data_out4", "pr1_pru1_pru_r30_10", "pr1_pru1_pru_r31_10", "gpio2_24"), _PIN(0x0ec, "LCD_AC_BIAS_EN", 89, 7, "lcd_ac_bias_en", "gpmc_a11", "pr1_mii1_crs", "pr1_edio_data_in5", "pr1_edio_data_out5", "pr1_pru1_pru_r30_11", "pr1_pru1_pru_r31_11", "gpio2_25"), _PIN(0x0f0, "MMC0_DAT3", 90, 7, "mmc0_dat3", "gpmc_a20", "uart4_ctsn", "timer5", "uart1_dcdn", "pr1_pru0_pru_r30_8", "pr1_pru0_pru_r31_8", "gpio2_26"), _PIN(0x0f4, "MMC0_DAT2", 91, 7, "mmc0_dat2", "gpmc_a21", "uart4_rtsn", "timer6", "uart1_dsrn", "pr1_pru0_pru_r30_9", "pr1_pru0_pru_r31_9", "gpio2_27"), _PIN(0x0f8, "MMC0_DAT1", 92, 7, "mmc0_dat1", "gpmc_a22", "uart5_ctsn", "uart3_rxd", "uart1_dtrn", "pr1_pru0_pru_r30_10", "pr1_pru0_pru_r31_10", "gpio2_28"), _PIN(0x0fc, "MMC0_DAT0", 93, 7, "mmc0_dat0", "gpmc_a23", "uart5_rtsn", "uart3_txd", "uart1_rin", "pr1_pru0_pru_r30_11", "pr1_pru0_pru_r31_11", "gpio2_29"), _PIN(0x100, "MMC0_CLK", 94, 7, "mmc0_clk", "gpmc_a24", "uart3_ctsn", "uart2_rxd", "dcan1_tx", "pr1_pru0_pru_r30_12", "pr1_pru0_pru_r31_12", "gpio2_30"), _PIN(0x104, "MMC0_CMD", 95, 7, "mmc0_cmd", "gpmc_a25", "uart3_rtsn", "uart2_txd", "dcan1_rx", "pr1_pru0_pru_r30_13", "pr1_pru0_pru_r31_13", "gpio2_31"), _PIN(0x108, "MII1_COL", 96, 7, "gmii1_col", "rmii2_refclk", "spi1_sclk", "uart5_rxd", "mcasp1_axr2", "mmc2_dat3", "mcasp0_axr2", "gpio3_0"), _PIN(0x10c, "MII1_CRS", 97, 7, "gmii1_crs", "rmii1_crs_dv", "spi1_d0", "I2C1_SDA", "mcasp1_aclkx", "uart5_ctsn", "uart2_rxd", "gpio3_1"), _PIN(0x110, "MII1_RX_ER", 98, 7, "gmii1_rxerr", "rmii1_rxerr", "spi1_d1", "I2C1_SCL", "mcasp1_fsx", "uart5_rtsn", "uart2_txd", "gpio3_2"), _PIN(0x114, "MII1_TX_EN", 99, 7, "gmii1_txen", "rmii1_txen", "rgmii1_tctl", "timer4", "mcasp1_axr0", "eQEP0_index", "mmc2_cmd", "gpio3_3"), _PIN(0x118, 
"MII1_RX_DV", 100, 7, "gmii1_rxdv", "cd_memory_clk", "rgmii1_rctl", "uart5_txd", "mcasp1_aclkx", "mmc2_dat0", "mcasp0_aclkr", "gpio3_4"), _PIN(0x11c, "MII1_TXD3", 16, 7, "gmii1_txd3", "dcan0_tx", "rgmii1_td3", "uart4_rxd", "mcasp1_fsx", "mmc2_dat1", "mcasp0_fsr", "gpio0_16"), _PIN(0x120, "MII1_TXD2", 17, 7, "gmii1_txd2", "dcan0_rx", "rgmii1_td2", "uart4_txd", "mcasp1_axr0", "mmc2_dat2", "mcasp0_ahclkx", "gpio0_17"), _PIN(0x124, "MII1_TXD1", 21, 7, "gmii1_txd1", "rmii1_txd1", "rgmii1_td1", "mcasp1_fsr", "mcasp1_axr1", "eQEP0A_in", "mmc1_cmd", "gpio0_21"), _PIN(0x128, "MII1_TXD0", 28, 7, "gmii1_txd0", "rmii1_txd0", "rgmii1_td0", "mcasp1_axr2", "mcasp1_aclkr", "eQEP0B_in", "mmc1_clk", "gpio0_28"), _PIN(0x12c, "MII1_TX_CLK", 105, 7, "gmii1_txclk", "uart2_rxd", "rgmii1_tclk", "mmc0_dat7", "mmc1_dat0", "uart1_dcdn", "mcasp0_aclkx", "gpio3_9"), _PIN(0x130, "MII1_RX_CLK", 106, 7, "gmii1_rxclk", "uart2_txd", "rgmii1_rclk", "mmc0_dat6", "mmc1_dat1", "uart1_dsrn", "mcasp0_fsx", "gpio3_10"), _PIN(0x134, "MII1_RXD3", 82, 7, "gmii1_rxd3", "uart3_rxd", "rgmii1_rd3", "mmc0_dat5", "mmc1_dat2", "uart1_dtrn", "mcasp0_axr0", "gpio2_18"), _PIN(0x138, "MII1_RXD2", 83, 7, "gmii1_rxd2", "uart3_txd", "rgmii1_rd2", "mmc0_dat4", "mmc1_dat3", "uart1_rin", "mcasp0_axr1", "gpio2_19"), _PIN(0x13c, "MII1_RXD1", 84, 7, "gmii1_rxd1", "rmii1_rxd1", "rgmii1_rd1", "mcasp1_axr3", "mcasp1_fsr", "eQEP0_strobe", "mmc2_clk", "gpio2_20"), _PIN(0x140, "MII1_RXD0", 85, 7, "gmii1_rxd0", "rmii1_rxd0", "rgmii1_rd0", "mcasp1_ahclkx", "mcasp1_ahclkr", "mcasp1_aclkr", "mcasp0_axr3", "gpio2_21"), _PIN(0x144, "RMII1_REF_CLK", 29, 7, "rmii1_refclk", "xdma_event_intr2", "spi1_cs0", "uart5_txd", "mcasp1_axr3", "mmc0_pow", "mcasp1_ahclkx", "gpio0_29"), _PIN(0x148, "MDIO", 0, 7, "mdio_data", "timer6", "uart5_rxd", "uart3_ctsn", "mmc0_sdcd","mmc1_cmd", "mmc2_cmd","gpio0_0"), _PIN(0x14c, "MDC", 1, 7, "mdio_clk", "timer5", "uart5_txd", "uart3_rtsn", "mmc0_sdwp", "mmc1_clk", "mmc2_clk", "gpio0_1"), _PIN(0x150, "SPI0_SCLK", 2, 7, "spi0_sclk", "uart2_rxd", "I2C2_SDA", "ehrpwm0A", "pr1_uart0_cts_n", "pr1_edio_sof", "EMU2", "gpio0_2"), _PIN(0x154, "SPI0_D0", 3, 7, "spi0_d0", "uart2_txd", "I2C2_SCL", "ehrpwm0B", "pr1_uart0_rts_n", "pr1_edio_latch_in", "EMU3", "gpio0_3"), _PIN(0x158, "SPI0_D1", 4, 7, "spi0_d1", "mmc1_sdwp", "I2C1_SDA", "ehrpwm0_tripzone_input", "pr1_uart0_rxd", "pr1_edio_data_in0", "pr1_edio_data_out0", "gpio0_4"), _PIN(0x15c, "SPI0_CS0", 5, 7, "spi0_cs0", "mmc2_sdwp", "I2C1_SCL", "ehrpwm0_synci", "pr1_uart0_txd", "pr1_edio_data_in1", "pr1_edio_data_out1", "gpio0_5"), _PIN(0x160, "SPI0_CS1", 6, 7, "spi0_cs1", "uart3_rxd", "eCAP1_in_PWM1_out", "mcc0_pow", "xdm_event_intr2", "mmc0_sdcd", "EMU4", "gpio0_6"), _PIN(0x164, "ECAP0_IN_PWM0_OUT",7, 7, "eCAP0_in_PWM0_out", "uart3_txd", "spi1_cs1", "pr1_ecap0_ecap_capin_apwm_o", "spi1_sclk", "mmc0_sdwp", "xdma_event_intr2", "gpio0_7"), _PIN(0x168, "UART0_CTSn", 40, 7, "uart0_ctsn", "uart4_rxd", "dcan1_tx", "I2C1_SDA", "spi1_d0", "timer7", "pr1_edc_sync0_out", "gpio1_8"), _PIN(0x16c, "UART0_RTSn", 41, 7, "uart0_rtsn", "uart4_txd", "dcan1_rx", "I2C1_SCL", "spi1_d1", "spi1_cs0", "pr1_edc_sync1_out", "gpio1_9"), _PIN(0x170, "UART0_rxd", 42, 7, "uart0_rxd", "spi1_cs0", "dcan0_tx", "I2C2_SDA", "eCAP2_in_PWM2_out", "pr1_pru1_pru_r30_14", "pr1_pru1_pru_r31_14", "gpio1_10"), _PIN(0x174, "UART0_txd", 43, 7, "uart0_txd", "spi1_cs1", "dcan0_rx", "I2C2_SCL", "eCAP1_in_PWM1_out", "pr1_pru1_pru_r30_15", "pr1_pru1_pru_r31_15", "gpio1_11"), _PIN(0x178, "UART1_CTSn", 12, 7, "uart1_ctsn", "timer6_mux1", "dcan0_tx", 
"I2C2_SDA", "spi1_cs0", "pr1_uart0_cts_n", "pr1_edc_latch0_in", "gpio0_12"), _PIN(0x17c, "UART1_RTSn", 13, 7, "uart1_rtsn", "timer5_mux1", "dcan0_rx", "I2C2_SCL", "spi1_cs1", "pr1_uart0_rts_n", "pr1_edc_latch1_in", "gpio0_13"), _PIN(0x180, "UART1_RXD", 14, 7, "uart1_rxd", "mmc1_sdwp", "dcan1_tx", "I2C1_SDA", NULL, "pr1_uart0_rxd", "pr1_pru1_pru_r31_16", "gpio0_14"), _PIN(0x184, "UART1_TXD", 15, 7, "uart1_txd", "mmc2_sdwp", "dcan1_rx", "I2C1_SCL", NULL, "pr1_uart0_txd", "pr1_pru0_pru_r31_16", "gpio0_15"), _PIN(0x188, "I2C0_SDA", 101, 7, "I2C0_SDA", "timer4", "uart2_ctsn", "eCAP2_in_PWM2_out", NULL, NULL, NULL, "gpio3_5"), _PIN(0x18c, "I2C0_SCL", 102, 7, "I2C0_SCL", "timer7", "uart2_rtsn", "eCAP1_in_PWM1_out", NULL, NULL, NULL, "gpio3_6"), _PIN(0x190, "MCASP0_ACLKX", 110, 7, "mcasp0_aclkx", "ehrpwm0A", NULL, "spi1_sclk", "mmc0_sdcd", "pr1_pru0_pru_r30_0", "pr1_pru0_pru_r31_0", "gpio3_14"), _PIN(0x194, "MCASP0_FSX", 111, 7, "mcasp0_fsx", "ehrpwm0B", NULL, "spi1_d0", "mmc1_sdcd", "pr1_pru0_pru_r30_1", "pr1_pru0_pru_r31_1", "gpio3_15"), _PIN(0x198, "MCASP0_AXR0", 112, 7, "mcasp0_axr0", "ehrpwm0_tripzone_input", NULL, "spi1_d1", "mmc2_sdcd", "pr1_pru0_pru_r30_2", "pr1_pru0_pru_r31_2", "gpio3_16"), _PIN(0x19c, "MCASP0_AHCLKR", 113, 7, "mcasp0_ahclkr", "ehrpwm0_synci", "mcasp0_axr2", "spi1_cs0", "eCAP2_in_PWM2_out", "pr1_pru0_pru_r30_3", "pr1_pru0_pru_r31_3", "gpio3_17"), _PIN(0x1a0, "MCASP0_ACLKR", 114, 7, "mcasp0_aclkr", "eQEP0A_in", "mcasp0_axr2", "mcasp1_aclkx", "mmc0_sdwp", "pr1_pru0_pru_r30_4", "pr1_pru0_pru_r31_4", "gpio3_18"), _PIN(0x1a4, "MCASP0_FSR", 115, 7, "mcasp0_fsr", "eQEP0B_in", "mcasp0_axr3", "mcasp1_fsx", "EMU2", "pr1_pru0_pru_r30_5", "pr1_pru0_pru_r31_5", "gpio3_19"), _PIN(0x1a8, "MCASP0_AXR1", 116, 7, "mcasp0_axr1", "eQEP0_index", NULL, "mcasp1_axr0", "EMU3", "pr1_pru0_pru_r30_6", "pr1_pru0_pru_r31_6", "gpio3_20"), _PIN(0x1ac, "MCASP0_AHCLKX", 117, 7, "mcasp0_ahclkx", "eQEP0_strobe", "mcasp0_axr3", "mcasp1_axr1", "EMU4", "pr1_pru0_pru_r30_7", "pr1_pru0_pru_r31_7", "gpio3_21"), _PIN(0x1b0, "XDMA_EVENT_INTR0", 19, 7, "xdma_event_intr0", NULL, "timer4", "clkout1", "spi1_cs1", "pr1_pru1_pru_r31_16", "EMU2", "gpio0_19"), _PIN(0x1b4, "XDMA_EVENT_INTR1", 20, 7, "xdma_event_intr1", NULL, "tclkin", "clkout2", "timer7", "pr1_pru0_pru_r31_16", "EMU3", "gpio0_20"), #if 0 _PIN(0x1b8, "nresetin_out", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1bc, "porz", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1c0, "nnmi", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1c4, "osc0_in", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1c8, "osc0_out", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1cc, "osc0_vss", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1d0, "tms", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1d4, "tdi", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1d8, "tdo", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1dc, "tck", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1e0, "ntrst", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), #endif _PIN(0x1e4, "EMU0", 103, 7, "EMU0", NULL, NULL, NULL, NULL, NULL, NULL, "gpio3_7"), _PIN(0x1e8, "EMU1", 104, 0, "EMU1", NULL, NULL, NULL, NULL, NULL, NULL, "gpio3_8"), #if 0 _PIN(0x1ec, "osc1_in", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1f0, "osc1_out", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1f4, "osc1_vss", 0, 0, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL), _PIN(0x1f8, "rtc_porz", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x1fc, "pmic_power_en", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x200, "ext_wakeup", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x204, "enz_kaldo_1p8v", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), #endif _PIN(0x208, "USB0_DM", 0, 0, "USB0_DM", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x20c, "USB0_DP", 0, 0, "USB0_DP", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x210, "USB0_CE", 0, 0, "USB0_CE", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x214, "USB0_ID", 0, 0, "USB0_ID", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x218, "USB0_VBUS", 0, 0, "USB0_VBUS", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x21c, "USB0_DRVVBUS", 18, 7, "USB0_DRVVBUS", NULL, NULL, NULL, NULL, NULL, NULL, "gpio0_18"), _PIN(0x220, "USB1_DM", 0, 0, "USB1_DM", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x224, "USB1_DP", 0, 0, "USB1_DP", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x228, "USB1_CE", 0, 0, "USB1_CE", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x22c, "USB1_ID", 0, 0, "USB1_ID", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x230, "USB1_VBUS", 0, 0, "USB1_VBUS", NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x234, "USB1_DRVVBUS", 109, 7, "USB1_DRVVBUS", NULL, NULL, NULL, NULL, NULL, NULL, "gpio3_13"), #if 0 _PIN(0x238, "ddr_resetn", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x23c, "ddr_csn0", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x240, "ddr_cke", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x244, "ddr_ck", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x248, "ddr_nck", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x24c, "ddr_casn", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x250, "ddr_rasn", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x254, "ddr_wen", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x258, "ddr_ba0", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x25c, "ddr_ba1", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x260, "ddr_ba2", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x264, "ddr_a0", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x268, "ddr_a1", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x26c, "ddr_a2", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x270, "ddr_a3", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x274, "ddr_a4", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x278, "ddr_a5", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x27c, "ddr_a6", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x280, "ddr_a7", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x284, "ddr_a8", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x288, "ddr_a9", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x28c, "ddr_a10", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x290, "ddr_a11", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x294, "ddr_a12", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x298, "ddr_a13", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x29c, "ddr_a14", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2a0, "ddr_a15", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2a4, "ddr_odt", 0, 0, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL), _PIN(0x2a8, "ddr_d0", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2ac, "ddr_d1", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2b0, "ddr_d2", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2b4, "ddr_d3", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2b8, "ddr_d4", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2bc, "ddr_d5", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2c0, "ddr_d6", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2c4, "ddr_d7", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2c8, "ddr_d8", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2cc, "ddr_d9", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2d0, "ddr_d10", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2d4, "ddr_d11", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2d8, "ddr_d12", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2dc, "ddr_d13", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2e0, "ddr_d14", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2e4, "ddr_d15", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2e8, "ddr_dqm0", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2ec, "ddr_dqm1", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2f0, "ddr_dqs0", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2f4, "ddr_dqsn0", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2f8, "ddr_dqs1", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x2fc, "ddr_dqsn1", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x300, "ddr_vref", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x304, "ddr_vtp", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x308, "ddr_strben0", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x30c, "ddr_strben1", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x32c, "ain0", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x328, "ain1", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x324, "ain2", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x320, "ain3", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x31c, "ain4", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x318, "ain5", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x314, "ain6", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x310, "ain7", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x330, "vrefp", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x334, "vrefn", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x338, "avdd", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x33c, "avss", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x340, "iforce", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x344, "vsense", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), _PIN(0x348, "testout", 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL), #endif { .ballname = NULL }, }; const struct ti_pinmux_device ti_pinmux_dev = { .padconf_muxmode_mask = 0x7, .padconf_sate_mask = 0x78, .padstate = ti_padstate_devmap, .padconf = ti_padconf_devmap, }; Index: head/sys/arm/ti/omap4/omap4_prcm_clks.c =================================================================== --- head/sys/arm/ti/omap4/omap4_prcm_clks.c 
(revision 306901) +++ head/sys/arm/ti/omap4/omap4_prcm_clks.c (revision 306902) @@ -1,1496 +1,1494 @@ /*- * Copyright (c) 2011 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include #include /* * This file defines the clock configuration for the OMAP4xxx series of * devices. * * How This is Suppose to Work * =========================== * - There is a top level omap_prcm module that defines all OMAP SoC drivers * should use to enable/disable the system clocks regardless of the version * of OMAP device they are running on. This top level PRCM module is just * a thin shim to chip specific functions that perform the donkey work of * configuring the clock - this file is the 'donkey' for OMAP44xx devices. * * - The key bit in this file is the omap_clk_devmap array, it's * used by the omap_prcm driver to determine what clocks are valid and which * functions to call to manipulate them. * * - In essence you just need to define some callbacks for each of the * clocks and then you're done. * * - The other thing that is worth noting is that when the omap_prcm device * is registered you typically pass in some memory ranges which are the * SYS_MEMORY resources. These resources are in turn allocated using * bus_allocate_resources(...) and the resource handles are passed to all * individual clock callback handlers. * * * * OMAP4 devices are different from the previous OMAP3 devices in that there * is no longer a separate functional and interface clock for each module, * instead there is typically an interface clock that spans many modules. */ #define FREQ_96MHZ 96000000 #define FREQ_64MHZ 64000000 #define FREQ_48MHZ 48000000 #define FREQ_32KHZ 32000 #define PRM_INSTANCE 1 #define CM1_INSTANCE 2 #define CM2_INSTANCE 3 /** * Address offsets from the PRM memory region to the top level clock control * registers. 
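 *
 * These offsets are applied to the memory resource of the matching PRM
 * instance, e.g. (illustrative use, mirroring the code further down):
 *
 *   clksel = bus_read_4(prm_sc->sc_res, CM_SYS_CLKSEL_OFFSET);
 *
 * where prm_sc is the softc of the "ti,omap4-prm" device.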
*/ #define CKGEN_PRM_OFFSET 0x00000100UL #define MPU_PRM_OFFSET 0x00000300UL #define DSP_PRM_OFFSET 0x00000400UL #define ABE_PRM_OFFSET 0x00000500UL #define ALWAYS_ON_PRM_OFFSET 0x00000600UL #define CORE_PRM_OFFSET 0x00000700UL #define IVAHD_PRM_OFFSET 0x00000F00UL #define CAM_PRM_OFFSET 0x00001000UL #define DSS_PRM_OFFSET 0x00001100UL #define SGX_PRM_OFFSET 0x00001200UL #define L3INIT_PRM_OFFSET 0x00001300UL #define L4PER_PRM_OFFSET 0x00001400UL #define WKUP_PRM_OFFSET 0x00001700UL #define WKUP_CM_OFFSET 0x00001800UL #define EMU_PRM_OFFSET 0x00001900UL #define EMU_CM_OFFSET 0x00001A00UL #define DEVICE_PRM_OFFSET 0x00001B00UL #define INSTR_PRM_OFFSET 0x00001F00UL #define CM_ABE_DSS_SYS_CLKSEL_OFFSET (CKGEN_PRM_OFFSET + 0x0000UL) #define CM_L4_WKUP_CLKSELL_OFFSET (CKGEN_PRM_OFFSET + 0x0008UL) #define CM_ABE_PLL_REF_CLKSEL_OFFSET (CKGEN_PRM_OFFSET + 0x000CUL) #define CM_SYS_CLKSEL_OFFSET (CKGEN_PRM_OFFSET + 0x0010UL) /** * Address offsets from the CM1 memory region to the top level clock control * registers. */ #define CKGEN_CM1_OFFSET 0x00000100UL #define MPU_CM1_OFFSET 0x00000300UL #define DSP_CM1_OFFSET 0x00000400UL #define ABE_CM1_OFFSET 0x00000500UL #define RESTORE_CM1_OFFSET 0x00000E00UL #define INSTR_CM1_OFFSET 0x00000F00UL #define CM_CLKSEL_DPLL_MPU (CKGEN_CM1_OFFSET + 0x006CUL) /** * Address offsets from the CM2 memory region to the top level clock control * registers. */ #define INTRCONN_SOCKET_CM2_OFFSET 0x00000000UL #define CKGEN_CM2_OFFSET 0x00000100UL #define ALWAYS_ON_CM2_OFFSET 0x00000600UL #define CORE_CM2_OFFSET 0x00000700UL #define IVAHD_CM2_OFFSET 0x00000F00UL #define CAM_CM2_OFFSET 0x00001000UL #define DSS_CM2_OFFSET 0x00001100UL #define SGX_CM2_OFFSET 0x00001200UL #define L3INIT_CM2_OFFSET 0x00001300UL #define L4PER_CM2_OFFSET 0x00001400UL #define RESTORE_CM2_OFFSET 0x00001E00UL #define INSTR_CM2_OFFSET 0x00001F00UL #define CLKCTRL_MODULEMODE_MASK 0x00000003UL #define CLKCTRL_MODULEMODE_DISABLE 0x00000000UL #define CLKCTRL_MODULEMODE_AUTO 0x00000001UL #define CLKCTRL_MODULEMODE_ENABLE 0x00000001UL #define CLKCTRL_IDLEST_MASK 0x00030000UL #define CLKCTRL_IDLEST_ENABLED 0x00000000UL #define CLKCTRL_IDLEST_WAKING 0x00010000UL #define CLKCTRL_IDLEST_IDLE 0x00020000UL #define CLKCTRL_IDLEST_DISABLED 0x00030000UL static struct ofw_compat_data compat_data[] = { {"ti,omap4-cm1", (uintptr_t)CM1_INSTANCE}, {"ti,omap4-cm2", (uintptr_t)CM2_INSTANCE}, {"ti,omap4-prm", (uintptr_t)PRM_INSTANCE}, {NULL, (uintptr_t)0}, }; struct omap4_prcm_softc { struct resource *sc_res; int sc_rid; int sc_instance; }; static int omap4_clk_generic_activate(struct ti_clock_dev *clkdev); static int omap4_clk_generic_deactivate(struct ti_clock_dev *clkdev); static int omap4_clk_generic_accessible(struct ti_clock_dev *clkdev); static int omap4_clk_generic_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc); static int omap4_clk_generic_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int omap4_clk_gptimer_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc); static int omap4_clk_gptimer_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int omap4_clk_hsmmc_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc); static int omap4_clk_hsmmc_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int omap4_clk_hsusbhost_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc); static int omap4_clk_hsusbhost_activate(struct ti_clock_dev *clkdev); static int omap4_clk_hsusbhost_deactivate(struct ti_clock_dev *clkdev); static int 
omap4_clk_hsusbhost_accessible(struct ti_clock_dev *clkdev); static int omap4_clk_get_sysclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int omap4_clk_get_arm_fclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq); /** * omap_clk_devmap - Array of clock devices available on OMAP4xxx devices * * This map only defines which clocks are valid and the callback functions * for clock activate, deactivate, etc. It is used by the top level omap_prcm * driver. * * The actual details of the clocks (config registers, bit fields, sources, * etc) are in the private g_omap3_clk_details array below. * */ #define OMAP4_GENERIC_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = omap4_clk_generic_activate, \ .clk_deactivate = omap4_clk_generic_deactivate, \ .clk_set_source = omap4_clk_generic_set_source, \ .clk_accessible = omap4_clk_generic_accessible, \ .clk_get_source_freq = omap4_clk_generic_get_source_freq, \ .clk_set_source_freq = NULL \ } #define OMAP4_GPTIMER_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = omap4_clk_generic_activate, \ .clk_deactivate = omap4_clk_generic_deactivate, \ .clk_set_source = omap4_clk_gptimer_set_source, \ .clk_accessible = omap4_clk_generic_accessible, \ .clk_get_source_freq = omap4_clk_gptimer_get_source_freq, \ .clk_set_source_freq = NULL \ } #define OMAP4_HSMMC_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = omap4_clk_generic_activate, \ .clk_deactivate = omap4_clk_generic_deactivate, \ .clk_set_source = omap4_clk_hsmmc_set_source, \ .clk_accessible = omap4_clk_generic_accessible, \ .clk_get_source_freq = omap4_clk_hsmmc_get_source_freq, \ .clk_set_source_freq = NULL \ } #define OMAP4_HSUSBHOST_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = omap4_clk_hsusbhost_activate, \ .clk_deactivate = omap4_clk_hsusbhost_deactivate, \ .clk_set_source = omap4_clk_hsusbhost_set_source, \ .clk_accessible = omap4_clk_hsusbhost_accessible, \ .clk_get_source_freq = NULL, \ .clk_set_source_freq = NULL \ } struct ti_clock_dev ti_omap4_clk_devmap[] = { /* System clocks */ { .id = SYS_CLK, .clk_activate = NULL, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = omap4_clk_get_sysclk_freq, .clk_set_source_freq = NULL, }, /* MPU (ARM) core clocks */ { .id = MPU_CLK, .clk_activate = NULL, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = omap4_clk_get_arm_fclk_freq, .clk_set_source_freq = NULL, }, /* UART device clocks */ OMAP4_GENERIC_CLOCK_DEV(UART1_CLK), OMAP4_GENERIC_CLOCK_DEV(UART2_CLK), OMAP4_GENERIC_CLOCK_DEV(UART3_CLK), OMAP4_GENERIC_CLOCK_DEV(UART4_CLK), /* Timer device source clocks */ OMAP4_GPTIMER_CLOCK_DEV(TIMER1_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER2_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER3_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER4_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER5_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER6_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER7_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER8_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER9_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER10_CLK), OMAP4_GPTIMER_CLOCK_DEV(TIMER11_CLK), /* MMC device clocks (MMC1 and MMC2 can have different input clocks) */ OMAP4_HSMMC_CLOCK_DEV(MMC1_CLK), OMAP4_HSMMC_CLOCK_DEV(MMC2_CLK), OMAP4_GENERIC_CLOCK_DEV(MMC3_CLK), OMAP4_GENERIC_CLOCK_DEV(MMC4_CLK), OMAP4_GENERIC_CLOCK_DEV(MMC5_CLK), /* USB HS (high speed TLL, EHCI and OHCI) */ OMAP4_HSUSBHOST_CLOCK_DEV(USBTLL_CLK), OMAP4_HSUSBHOST_CLOCK_DEV(USBHSHOST_CLK), OMAP4_HSUSBHOST_CLOCK_DEV(USBFSHOST_CLK), OMAP4_HSUSBHOST_CLOCK_DEV(USBP1_PHY_CLK), OMAP4_HSUSBHOST_CLOCK_DEV(USBP2_PHY_CLK), 
OMAP4_HSUSBHOST_CLOCK_DEV(USBP1_UTMI_CLK), OMAP4_HSUSBHOST_CLOCK_DEV(USBP2_UTMI_CLK), OMAP4_HSUSBHOST_CLOCK_DEV(USBP1_HSIC_CLK), OMAP4_HSUSBHOST_CLOCK_DEV(USBP2_HSIC_CLK), /* GPIO */ OMAP4_GENERIC_CLOCK_DEV(GPIO1_CLK), OMAP4_GENERIC_CLOCK_DEV(GPIO2_CLK), OMAP4_GENERIC_CLOCK_DEV(GPIO3_CLK), OMAP4_GENERIC_CLOCK_DEV(GPIO4_CLK), OMAP4_GENERIC_CLOCK_DEV(GPIO5_CLK), OMAP4_GENERIC_CLOCK_DEV(GPIO6_CLK), /* sDMA */ OMAP4_GENERIC_CLOCK_DEV(SDMA_CLK), /* I2C */ OMAP4_GENERIC_CLOCK_DEV(I2C1_CLK), OMAP4_GENERIC_CLOCK_DEV(I2C2_CLK), OMAP4_GENERIC_CLOCK_DEV(I2C3_CLK), OMAP4_GENERIC_CLOCK_DEV(I2C4_CLK), { INVALID_CLK_IDENT, NULL, NULL, NULL, NULL } }; /** * omap4_clk_details - Stores details for all the different clocks supported * * Whenever an operation on a clock is being performed (activated, deactivated, * etc) this array is looked up to find the correct register and bit(s) we * should be modifying. * */ struct omap4_clk_details { clk_ident_t id; uint32_t instance; uint32_t clksel_reg; int32_t src_freq; uint32_t enable_mode; }; #define OMAP4_GENERIC_CLOCK_DETAILS(i, f, di, r, e) \ { .id = (i), \ .instance = (di), \ .clksel_reg = (r), \ .src_freq = (f), \ .enable_mode = (e), \ } static struct omap4_clk_details g_omap4_clk_details[] = { /* UART */ OMAP4_GENERIC_CLOCK_DETAILS(UART1_CLK, FREQ_48MHZ, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x0140), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(UART2_CLK, FREQ_48MHZ, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x0148), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(UART3_CLK, FREQ_48MHZ, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x0150), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(UART4_CLK, FREQ_48MHZ, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x0158), CLKCTRL_MODULEMODE_ENABLE), /* General purpose timers */ OMAP4_GENERIC_CLOCK_DETAILS(TIMER1_CLK, -1, PRM_INSTANCE, (WKUP_CM_OFFSET + 0x040), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER2_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x038), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER3_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x040), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER4_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x048), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER5_CLK, -1, CM1_INSTANCE, (ABE_CM1_OFFSET + 0x068), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER6_CLK, -1, CM1_INSTANCE, (ABE_CM1_OFFSET + 0x070), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER7_CLK, -1, CM1_INSTANCE, (ABE_CM1_OFFSET + 0x078), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER8_CLK, -1, CM1_INSTANCE, (ABE_CM1_OFFSET + 0x080), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER9_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x050), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER10_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x028), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(TIMER11_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x030), CLKCTRL_MODULEMODE_ENABLE), /* HSMMC (MMC1 and MMC2 can have different input clocks) */ OMAP4_GENERIC_CLOCK_DETAILS(MMC1_CLK, -1, CM2_INSTANCE, (L3INIT_CM2_OFFSET + 0x028), /*CLKCTRL_MODULEMODE_ENABLE*/2), OMAP4_GENERIC_CLOCK_DETAILS(MMC2_CLK, -1, CM2_INSTANCE, (L3INIT_CM2_OFFSET + 0x030), /*CLKCTRL_MODULEMODE_ENABLE*/2), OMAP4_GENERIC_CLOCK_DETAILS(MMC3_CLK, FREQ_48MHZ, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x120), /*CLKCTRL_MODULEMODE_ENABLE*/2), OMAP4_GENERIC_CLOCK_DETAILS(MMC4_CLK, FREQ_48MHZ, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x128), 
/*CLKCTRL_MODULEMODE_ENABLE*/2), OMAP4_GENERIC_CLOCK_DETAILS(MMC5_CLK, FREQ_48MHZ, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x160), /*CLKCTRL_MODULEMODE_ENABLE*/1), /* GPIO modules */ OMAP4_GENERIC_CLOCK_DETAILS(GPIO1_CLK, -1, PRM_INSTANCE, (WKUP_CM_OFFSET + 0x038), CLKCTRL_MODULEMODE_AUTO), OMAP4_GENERIC_CLOCK_DETAILS(GPIO2_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x060), CLKCTRL_MODULEMODE_AUTO), OMAP4_GENERIC_CLOCK_DETAILS(GPIO3_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x068), CLKCTRL_MODULEMODE_AUTO), OMAP4_GENERIC_CLOCK_DETAILS(GPIO4_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x070), CLKCTRL_MODULEMODE_AUTO), OMAP4_GENERIC_CLOCK_DETAILS(GPIO5_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x078), CLKCTRL_MODULEMODE_AUTO), OMAP4_GENERIC_CLOCK_DETAILS(GPIO6_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x080), CLKCTRL_MODULEMODE_AUTO), /* sDMA block */ OMAP4_GENERIC_CLOCK_DETAILS(SDMA_CLK, -1, CM2_INSTANCE, (CORE_CM2_OFFSET + 0x300), CLKCTRL_MODULEMODE_AUTO), /* I2C modules */ OMAP4_GENERIC_CLOCK_DETAILS(I2C1_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x0A0), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(I2C2_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x0A8), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(I2C3_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x0B0), CLKCTRL_MODULEMODE_ENABLE), OMAP4_GENERIC_CLOCK_DETAILS(I2C4_CLK, -1, CM2_INSTANCE, (L4PER_CM2_OFFSET + 0x0B8), CLKCTRL_MODULEMODE_ENABLE), { INVALID_CLK_IDENT, 0, 0, 0, 0 }, }; /** * MAX_MODULE_ENABLE_WAIT - the number of loops to wait for the module to come * alive. * */ #define MAX_MODULE_ENABLE_WAIT 100 /** * ARRAY_SIZE - Macro to return the number of elements in a static const array. * */ #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) /** * omap4_clk_details - writes a 32-bit value to one of the timer registers * @timer: Timer device context * @off: The offset of a register from the timer register address range * @val: The value to write into the register * * * RETURNS: * nothing */ static struct omap4_clk_details* omap4_clk_details(clk_ident_t id) { struct omap4_clk_details *walker; for (walker = g_omap4_clk_details; walker->id != INVALID_CLK_IDENT; walker++) { if (id == walker->id) return (walker); } return NULL; } static struct omap4_prcm_softc * omap4_prcm_get_instance_softc(int module_instance) { int i, maxunit; devclass_t prcm_devclass; device_t dev; struct omap4_prcm_softc *sc; prcm_devclass = devclass_find("omap4_prcm"); maxunit = devclass_get_maxunit(prcm_devclass); for (i = 0; i < maxunit; i++) { dev = devclass_get_device(prcm_devclass, i); sc = device_get_softc(dev); if (sc->sc_instance == module_instance) return (sc); } return (NULL); } /** * omap4_clk_generic_activate - checks if a module is accessible * @module: identifier for the module to check, see omap3_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1 * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a positive error code on failure. 
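 *
 * The enable handshake performed below is, in sketch form (names are
 * shorthand for fields of the matching omap4_clk_details entry):
 *
 *   clksel = bus_read_4(res, clksel_reg);
 *   clksel = (clksel & ~CLKCTRL_MODULEMODE_MASK) | enable_mode;
 *   bus_write_4(res, clksel_reg, clksel);
 *
 * followed by polling until (clksel & CLKCTRL_IDLEST_MASK) equals
 * CLKCTRL_IDLEST_ENABLED, giving up with ETIMEDOUT after
 * MAX_MODULE_ENABLE_WAIT reads.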
*/ static int omap4_clk_generic_activate(struct ti_clock_dev *clkdev) { struct omap4_prcm_softc *sc; struct omap4_clk_details* clk_details; struct resource* clk_mem_res; uint32_t clksel; unsigned int i; clk_details = omap4_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); sc = omap4_prcm_get_instance_softc(clk_details->instance); if (sc == NULL) return ENXIO; clk_mem_res = sc->sc_res; if (clk_mem_res == NULL) return (EINVAL); /* All the 'generic' clocks have a CLKCTRL register which is more or less * generic - the have at least two fielda called MODULEMODE and IDLEST. */ clksel = bus_read_4(clk_mem_res, clk_details->clksel_reg); clksel &= ~CLKCTRL_MODULEMODE_MASK; clksel |= clk_details->enable_mode; bus_write_4(clk_mem_res, clk_details->clksel_reg, clksel); /* Now poll on the IDLEST register to tell us if the module has come up. * TODO: We need to take into account the parent clocks. */ /* Try MAX_MODULE_ENABLE_WAIT number of times to check if enabled */ for (i = 0; i < MAX_MODULE_ENABLE_WAIT; i++) { clksel = bus_read_4(clk_mem_res, clk_details->clksel_reg); if ((clksel & CLKCTRL_IDLEST_MASK) == CLKCTRL_IDLEST_ENABLED) break; DELAY(10); } /* Check the enabled state */ if ((clksel & CLKCTRL_IDLEST_MASK) != CLKCTRL_IDLEST_ENABLED) { printf("Error: failed to enable module with clock %d\n", clkdev->id); printf("Error: 0x%08x => 0x%08x\n", clk_details->clksel_reg, clksel); return (ETIMEDOUT); } return (0); } /** * omap4_clk_generic_deactivate - checks if a module is accessible * @module: identifier for the module to check, see omap3_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1 * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a positive error code on failure. */ static int omap4_clk_generic_deactivate(struct ti_clock_dev *clkdev) { struct omap4_prcm_softc *sc; struct omap4_clk_details* clk_details; struct resource* clk_mem_res; uint32_t clksel; clk_details = omap4_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); sc = omap4_prcm_get_instance_softc(clk_details->instance); if (sc == NULL) return ENXIO; clk_mem_res = sc->sc_res; if (clk_mem_res == NULL) return (EINVAL); /* All the 'generic' clocks have a CLKCTRL register which is more or less * generic - the have at least two fielda called MODULEMODE and IDLEST. */ clksel = bus_read_4(clk_mem_res, clk_details->clksel_reg); clksel &= ~CLKCTRL_MODULEMODE_MASK; clksel |= CLKCTRL_MODULEMODE_DISABLE; bus_write_4(clk_mem_res, clk_details->clksel_reg, clksel); return (0); } /** * omap4_clk_generic_set_source - checks if a module is accessible * @module: identifier for the module to check, see omap3_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1 * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a positive error code on failure. */ static int omap4_clk_generic_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc) { return (0); } /** * omap4_clk_generic_accessible - checks if a module is accessible * @module: identifier for the module to check, see omap3_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1 * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a negative error code on failure. 
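 *
 * In practice the check reduces to a single IDLEST test, roughly:
 *
 *   return ((clksel & CLKCTRL_IDLEST_MASK) == CLKCTRL_IDLEST_ENABLED);
 *
 * so 1 is returned when the module reports itself fully functional and
 * 0 otherwise.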
*/ static int omap4_clk_generic_accessible(struct ti_clock_dev *clkdev) { struct omap4_prcm_softc *sc; struct omap4_clk_details* clk_details; struct resource* clk_mem_res; uint32_t clksel; clk_details = omap4_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); sc = omap4_prcm_get_instance_softc(clk_details->instance); if (sc == NULL) return ENXIO; clk_mem_res = sc->sc_res; if (clk_mem_res == NULL) return (EINVAL); clksel = bus_read_4(clk_mem_res, clk_details->clksel_reg); /* Check the enabled state */ if ((clksel & CLKCTRL_IDLEST_MASK) != CLKCTRL_IDLEST_ENABLED) return (0); return (1); } /** * omap4_clk_generic_get_source_freq - checks if a module is accessible * @module: identifier for the module to check, see omap3_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1 * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a negative error code on failure. */ static int omap4_clk_generic_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq ) { struct omap4_clk_details* clk_details = omap4_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); /* Simply return the stored frequency */ if (freq) *freq = (unsigned int)clk_details->src_freq; return (0); } /** * omap4_clk_gptimer_set_source - checks if a module is accessible * @module: identifier for the module to check, see omap3_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1 * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a negative error code on failure. */ static int omap4_clk_gptimer_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc) { struct omap4_prcm_softc *sc; struct omap4_clk_details* clk_details; struct resource* clk_mem_res; clk_details = omap4_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); sc = omap4_prcm_get_instance_softc(clk_details->instance); if (sc == NULL) return ENXIO; clk_mem_res = sc->sc_res; if (clk_mem_res == NULL) return (EINVAL); /* TODO: Implement */ return (0); } /** * omap4_clk_gptimer_get_source_freq - checks if a module is accessible * @module: identifier for the module to check, see omap3_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1 * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a negative error code on failure. */ static int omap4_clk_gptimer_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq ) { struct omap4_prcm_softc *sc; struct omap4_clk_details* clk_details; struct resource* clk_mem_res; uint32_t clksel; unsigned int src_freq; clk_details = omap4_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); sc = omap4_prcm_get_instance_softc(clk_details->instance); if (sc == NULL) return ENXIO; clk_mem_res = sc->sc_res; if (clk_mem_res == NULL) return (EINVAL); /* Need to read the CLKSEL field to determine the clock source */ clksel = bus_read_4(clk_mem_res, clk_details->clksel_reg); if (clksel & (0x1UL << 24)) src_freq = FREQ_32KHZ; else omap4_clk_get_sysclk_freq(NULL, &src_freq); /* Return the frequency */ if (freq) *freq = src_freq; return (0); } /** * omap4_clk_hsmmc_set_source - sets the source clock (freq) * @clkdev: pointer to the clockdev structure (id field will contain clock id) * * The MMC 1 and 2 clocks can be source from either a 64MHz or 96MHz clock. * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. 
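 *
 * Bit 24 of the MMC1/MMC2 CLKCTRL register selects the source: set for
 * the 96 MHz functional clock, clear for 64 MHz.  In sketch form the
 * update done below is:
 *
 *   clksel |= (0x1UL << 24);     F96MHZ_CLK
 *   clksel &= ~(0x1UL << 24);    F64MHZ_CLK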
* * RETURNS: * Returns 0 on success or a negative error code on failure. */ static int omap4_clk_hsmmc_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc) { struct omap4_prcm_softc *sc; struct omap4_clk_details* clk_details; struct resource* clk_mem_res; uint32_t clksel; clk_details = omap4_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); sc = omap4_prcm_get_instance_softc(clk_details->instance); if (sc == NULL) return ENXIO; clk_mem_res = sc->sc_res; if (clk_mem_res == NULL) return (EINVAL); /* For MMC modules 3, 4 & 5 you can't change the freq, it's always 48MHz */ if ((clkdev->id == MMC3_CLK) || (clkdev->id == MMC4_CLK) || (clkdev->id == MMC5_CLK)) { if (clksrc != F48MHZ_CLK) return (EINVAL); return 0; } clksel = bus_read_4(clk_mem_res, clk_details->clksel_reg); /* Bit 24 is set if 96MHz clock or cleared for 64MHz clock */ if (clksrc == F64MHZ_CLK) clksel &= ~(0x1UL << 24); else if (clksrc == F96MHZ_CLK) clksel |= (0x1UL << 24); else return (EINVAL); bus_write_4(clk_mem_res, clk_details->clksel_reg, clksel); return (0); } /** * omap4_clk_hsmmc_get_source_freq - checks if a module is accessible * @clkdev: pointer to the clockdev structure (id field will contain clock id) * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a negative error code on failure. */ static int omap4_clk_hsmmc_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq ) { struct omap4_prcm_softc *sc; struct omap4_clk_details* clk_details; struct resource* clk_mem_res; uint32_t clksel; unsigned int src_freq; clk_details = omap4_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); sc = omap4_prcm_get_instance_softc(clk_details->instance); if (sc == NULL) return ENXIO; clk_mem_res = sc->sc_res; if (clk_mem_res == NULL) return (EINVAL); switch (clkdev->id) { case MMC1_CLK: case MMC2_CLK: /* Need to read the CLKSEL field to determine the clock source */ clksel = bus_read_4(clk_mem_res, clk_details->clksel_reg); if (clksel & (0x1UL << 24)) src_freq = FREQ_96MHZ; else src_freq = FREQ_64MHZ; break; case MMC3_CLK: case MMC4_CLK: case MMC5_CLK: src_freq = FREQ_48MHZ; break; default: return (EINVAL); } /* Return the frequency */ if (freq) *freq = src_freq; return (0); } /** * omap4_clk_get_sysclk_freq - gets the sysclk frequency * @sc: pointer to the clk module/device context * * Read the clocking information from the power-control/boot-strap registers, * and stored in two global variables. 
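 *
 * The low three bits of CM_SYS_CLKSEL encode the board's input clock;
 * for example a value of 0x7 selects a 38.4 MHz sys_clk and 0x3 selects
 * 16.8 MHz (see the switch statement below for the full mapping).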
* * RETURNS: * nothing, values are saved in global variables */ static int omap4_clk_get_sysclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq) { uint32_t clksel; uint32_t sysclk; struct omap4_prcm_softc *sc; sc = omap4_prcm_get_instance_softc(PRM_INSTANCE); if (sc == NULL) return ENXIO; /* Read the input clock freq from the configuration register (CM_SYS_CLKSEL) */ clksel = bus_read_4(sc->sc_res, CM_SYS_CLKSEL_OFFSET); switch (clksel & 0x7) { case 0x1: /* 12Mhz */ sysclk = 12000000; break; case 0x3: /* 16.8Mhz */ sysclk = 16800000; break; case 0x4: /* 19.2Mhz */ sysclk = 19200000; break; case 0x5: /* 26Mhz */ sysclk = 26000000; break; case 0x7: /* 38.4Mhz */ sysclk = 38400000; break; default: panic("%s: Invalid clock freq", __func__); } /* Return the value */ if (freq) *freq = sysclk; return (0); } /** * omap4_clk_get_arm_fclk_freq - gets the MPU clock frequency * @clkdev: ignored * @freq: pointer which upon return will contain the freq in hz * @mem_res: array of allocated memory resources * * Reads the frequency setting information registers and returns the value * in the freq variable. * * RETURNS: * returns 0 on success, a positive error code on failure. */ static int omap4_clk_get_arm_fclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq) { uint32_t clksel; uint32_t pll_mult, pll_div; uint32_t mpuclk, sysclk; struct omap4_prcm_softc *sc; sc = omap4_prcm_get_instance_softc(CM1_INSTANCE); if (sc == NULL) return ENXIO; /* Read the clksel register which contains the DPLL multiple and divide * values. These are applied to the sysclk. */ clksel = bus_read_4(sc->sc_res, CM_CLKSEL_DPLL_MPU); pll_mult = ((clksel >> 8) & 0x7ff); pll_div = (clksel & 0x7f) + 1; /* Get the system clock freq */ omap4_clk_get_sysclk_freq(NULL, &sysclk); /* Calculate the MPU freq */ mpuclk = ((uint64_t)sysclk * pll_mult) / pll_div; /* Return the value */ if (freq) *freq = mpuclk; return (0); } /** * omap4_clk_hsusbhost_activate - activates the USB clocks for the given module * @clkdev: pointer to the clock device structure. * @mem_res: array of memory resources allocated by the top level PRCM driver. * * The USB clocking setup seems to be a bit more tricky than the other modules, * to start with the clocking diagram for the HS host module shows 13 different * clocks. So to try and make it easier to follow the clocking activation * and deactivation is handled in it's own set of callbacks. * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a positive error code on failure. 
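 *
 * In outline the routine picks the CLKCTRL register for the requested
 * clock (offset 0x68 in CM2 for USBTLL, 0x58 for the HS host block),
 * sets MODULEMODE together with the matching optional functional clock
 * bits, and then polls IDLEST just as the generic activate path does.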
*/ struct dpll_param { unsigned int m; unsigned int n; unsigned int m2; unsigned int m3; unsigned int m4; unsigned int m5; unsigned int m6; unsigned int m7; }; /* USB parameters */ struct dpll_param usb_dpll_param[7] = { /* 12M values */ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, /* 13M values */ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, /* 16.8M values */ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, /* 19.2M values */ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, /* 26M values */ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, /* 27M values */ {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, /* 38.4M values */ #ifdef CONFIG_OMAP4_SDC {0x32, 0x1, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0}, #else {0x32, 0x1, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0}, #endif }; static int omap4_clk_hsusbhost_activate(struct ti_clock_dev *clkdev) { struct omap4_prcm_softc *sc; struct resource* clk_mem_res; uint32_t clksel_reg_off; uint32_t clksel; unsigned int i; sc = omap4_prcm_get_instance_softc(CM2_INSTANCE); if (sc == NULL) return ENXIO; switch (clkdev->id) { case USBTLL_CLK: /* For the USBTLL module we need to enable the following clocks: * - INIT_L4_ICLK (will be enabled by bootloader) * - TLL_CH0_FCLK * - TLL_CH1_FCLK */ /* We need the CM_L3INIT_HSUSBTLL_CLKCTRL register in CM2 register set */ clk_mem_res = sc->sc_res; clksel_reg_off = L3INIT_CM2_OFFSET + 0x68; /* Enable the module and also enable the optional func clocks for * channels 0 & 1 (is this needed ?) */ clksel = bus_read_4(clk_mem_res, clksel_reg_off); clksel &= ~CLKCTRL_MODULEMODE_MASK; clksel |= CLKCTRL_MODULEMODE_ENABLE; clksel |= (0x1 << 8); /* USB-HOST optional clock: USB_CH0_CLK */ clksel |= (0x1 << 9); /* USB-HOST optional clock: USB_CH1_CLK */ break; case USBHSHOST_CLK: case USBP1_PHY_CLK: case USBP2_PHY_CLK: case USBP1_UTMI_CLK: case USBP2_UTMI_CLK: case USBP1_HSIC_CLK: case USBP2_HSIC_CLK: /* For the USB HS HOST module we need to enable the following clocks: * - INIT_L4_ICLK (will be enabled by bootloader) * - INIT_L3_ICLK (will be enabled by bootloader) * - INIT_48MC_FCLK * - UTMI_ROOT_GFCLK (UTMI only, create a new clock for that ?) * - UTMI_P1_FCLK (UTMI only, create a new clock for that ?) * - UTMI_P2_FCLK (UTMI only, create a new clock for that ?) * - HSIC_P1_60 (HSIC only, create a new clock for that ?) * - HSIC_P1_480 (HSIC only, create a new clock for that ?) * - HSIC_P2_60 (HSIC only, create a new clock for that ?) * - HSIC_P2_480 (HSIC only, create a new clock for that ?) 
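 *
 * Note that the optional clock writes below pack two bits at once:
 * (0x5 << 11) sets bits 11 and 13 (HSIC60M_P1 + HSIC480M_P1) and
 * (0x5 << 12) sets bits 12 and 14 (HSIC60M_P2 + HSIC480M_P2).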
*/ /* We need the CM_L3INIT_HSUSBHOST_CLKCTRL register in CM2 register set */ clk_mem_res = sc->sc_res; clksel_reg_off = L3INIT_CM2_OFFSET + 0x58; clksel = bus_read_4(clk_mem_res, clksel_reg_off); /* Enable the module and also enable the optional func clocks */ if (clkdev->id == USBHSHOST_CLK) { clksel &= ~CLKCTRL_MODULEMODE_MASK; clksel |= /*CLKCTRL_MODULEMODE_ENABLE*/2; clksel |= (0x1 << 15); /* USB-HOST clock control: FUNC48MCLK */ } else if (clkdev->id == USBP1_UTMI_CLK) clksel |= (0x1 << 8); /* UTMI_P1_CLK */ else if (clkdev->id == USBP2_UTMI_CLK) clksel |= (0x1 << 9); /* UTMI_P2_CLK */ else if (clkdev->id == USBP1_HSIC_CLK) clksel |= (0x5 << 11); /* HSIC60M_P1_CLK + HSIC480M_P1_CLK */ else if (clkdev->id == USBP2_HSIC_CLK) clksel |= (0x5 << 12); /* HSIC60M_P2_CLK + HSIC480M_P2_CLK */ break; default: return (EINVAL); } bus_write_4(clk_mem_res, clksel_reg_off, clksel); /* Try MAX_MODULE_ENABLE_WAIT number of times to check if enabled */ for (i = 0; i < MAX_MODULE_ENABLE_WAIT; i++) { clksel = bus_read_4(clk_mem_res, clksel_reg_off); if ((clksel & CLKCTRL_IDLEST_MASK) == CLKCTRL_IDLEST_ENABLED) break; } /* Check the enabled state */ if ((clksel & CLKCTRL_IDLEST_MASK) != CLKCTRL_IDLEST_ENABLED) { printf("Error: HERE failed to enable module with clock %d\n", clkdev->id); printf("Error: 0x%08x => 0x%08x\n", clksel_reg_off, clksel); return (ETIMEDOUT); } return (0); } /** * omap4_clk_generic_deactivate - checks if a module is accessible * @clkdev: pointer to the clock device structure. * @mem_res: array of memory resources allocated by the top level PRCM driver. * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 on success or a positive error code on failure. */ static int omap4_clk_hsusbhost_deactivate(struct ti_clock_dev *clkdev) { struct omap4_prcm_softc *sc; struct resource* clk_mem_res; uint32_t clksel_reg_off; uint32_t clksel; sc = omap4_prcm_get_instance_softc(CM2_INSTANCE); if (sc == NULL) return ENXIO; switch (clkdev->id) { case USBTLL_CLK: /* We need the CM_L3INIT_HSUSBTLL_CLKCTRL register in CM2 register set */ clk_mem_res = sc->sc_res; clksel_reg_off = L3INIT_CM2_OFFSET + 0x68; clksel = bus_read_4(clk_mem_res, clksel_reg_off); clksel &= ~CLKCTRL_MODULEMODE_MASK; clksel |= CLKCTRL_MODULEMODE_DISABLE; break; case USBHSHOST_CLK: case USBP1_PHY_CLK: case USBP2_PHY_CLK: case USBP1_UTMI_CLK: case USBP2_UTMI_CLK: case USBP1_HSIC_CLK: case USBP2_HSIC_CLK: /* For the USB HS HOST module we need to enable the following clocks: * - INIT_L4_ICLK (will be enabled by bootloader) * - INIT_L3_ICLK (will be enabled by bootloader) * - INIT_48MC_FCLK * - UTMI_ROOT_GFCLK (UTMI only, create a new clock for that ?) * - UTMI_P1_FCLK (UTMI only, create a new clock for that ?) * - UTMI_P2_FCLK (UTMI only, create a new clock for that ?) * - HSIC_P1_60 (HSIC only, create a new clock for that ?) * - HSIC_P1_480 (HSIC only, create a new clock for that ?) * - HSIC_P2_60 (HSIC only, create a new clock for that ?) * - HSIC_P2_480 (HSIC only, create a new clock for that ?) 
*/ /* We need the CM_L3INIT_HSUSBHOST_CLKCTRL register in CM2 register set */ clk_mem_res = sc->sc_res; clksel_reg_off = L3INIT_CM2_OFFSET + 0x58; clksel = bus_read_4(clk_mem_res, clksel_reg_off); /* Enable the module and also enable the optional func clocks */ if (clkdev->id == USBHSHOST_CLK) { clksel &= ~CLKCTRL_MODULEMODE_MASK; clksel |= CLKCTRL_MODULEMODE_DISABLE; clksel &= ~(0x1 << 15); /* USB-HOST clock control: FUNC48MCLK */ } else if (clkdev->id == USBP1_UTMI_CLK) clksel &= ~(0x1 << 8); /* UTMI_P1_CLK */ else if (clkdev->id == USBP2_UTMI_CLK) clksel &= ~(0x1 << 9); /* UTMI_P2_CLK */ else if (clkdev->id == USBP1_HSIC_CLK) clksel &= ~(0x5 << 11); /* HSIC60M_P1_CLK + HSIC480M_P1_CLK */ else if (clkdev->id == USBP2_HSIC_CLK) clksel &= ~(0x5 << 12); /* HSIC60M_P2_CLK + HSIC480M_P2_CLK */ break; default: return (EINVAL); } bus_write_4(clk_mem_res, clksel_reg_off, clksel); return (0); } /** * omap4_clk_hsusbhost_accessible - checks if a module is accessible * @clkdev: pointer to the clock device structure. * @mem_res: array of memory resources allocated by the top level PRCM driver. * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 if module is not enable, 1 if module is enabled or a negative * error code on failure. */ static int omap4_clk_hsusbhost_accessible(struct ti_clock_dev *clkdev) { struct omap4_prcm_softc *sc; struct resource* clk_mem_res; uint32_t clksel_reg_off; uint32_t clksel; sc = omap4_prcm_get_instance_softc(CM2_INSTANCE); if (sc == NULL) return ENXIO; if (clkdev->id == USBTLL_CLK) { /* We need the CM_L3INIT_HSUSBTLL_CLKCTRL register in CM2 register set */ clk_mem_res = sc->sc_res; clksel_reg_off = L3INIT_CM2_OFFSET + 0x68; } else if (clkdev->id == USBHSHOST_CLK) { /* We need the CM_L3INIT_HSUSBHOST_CLKCTRL register in CM2 register set */ clk_mem_res = sc->sc_res; clksel_reg_off = L3INIT_CM2_OFFSET + 0x58; } else { return (EINVAL); } clksel = bus_read_4(clk_mem_res, clksel_reg_off); /* Check the enabled state */ if ((clksel & CLKCTRL_IDLEST_MASK) != CLKCTRL_IDLEST_ENABLED) return (0); return (1); } /** * omap4_clk_hsusbhost_set_source - sets the source clocks * @clkdev: pointer to the clock device structure. * @clksrc: the clock source ID for the given clock. * @mem_res: array of memory resources allocated by the top level PRCM driver. * * * * LOCKING: * Inherits the locks from the omap_prcm driver, no internal locking. * * RETURNS: * Returns 0 if successful otherwise a negative error code on failure. 
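 *
 * The source select lives in CM_L3INIT_HSUSBHOST_CLKCTRL: bit 24 is
 * intended for PHY port 1 and bit 25 for port 2.  EXT_CLK sets the bit,
 * any other source clears it, i.e. roughly:
 *
 *   clksel |= (0x1 << bit);     external reference
 *   clksel &= ~(0x1 << bit);    internal reference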
*/ static int omap4_clk_hsusbhost_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc) { struct omap4_prcm_softc *sc; struct resource* clk_mem_res; uint32_t clksel_reg_off; uint32_t clksel; unsigned int bit; sc = omap4_prcm_get_instance_softc(CM2_INSTANCE); if (sc == NULL) return ENXIO; if (clkdev->id == USBP1_PHY_CLK) bit = 24; else if (clkdev->id != USBP2_PHY_CLK) bit = 25; else return (EINVAL); /* We need the CM_L3INIT_HSUSBHOST_CLKCTRL register in CM2 register set */ clk_mem_res = sc->sc_res; clksel_reg_off = L3INIT_CM2_OFFSET + 0x58; clksel = bus_read_4(clk_mem_res, clksel_reg_off); /* Set the clock source to either external or internal */ if (clksrc == EXT_CLK) clksel |= (0x1 << bit); else clksel &= ~(0x1 << bit); bus_write_4(clk_mem_res, clksel_reg_off, clksel); return (0); } #define PRM_RSTCTRL 0x1b00 #define PRM_RSTCTRL_RESET 0x2 static void omap4_prcm_reset(void) { struct omap4_prcm_softc *sc; sc = omap4_prcm_get_instance_softc(PRM_INSTANCE); if (sc == NULL) return; bus_write_4(sc->sc_res, PRM_RSTCTRL, bus_read_4(sc->sc_res, PRM_RSTCTRL) | PRM_RSTCTRL_RESET); bus_read_4(sc->sc_res, PRM_RSTCTRL); } /** * omap4_prcm_probe - probe function for the driver * @dev: prcm device handle * * Simply sets the name of the driver module. * * LOCKING: * None * * RETURNS: * Always returns 0 */ static int omap4_prcm_probe(device_t dev) { const struct ofw_compat_data *ocd; if (!ofw_bus_status_okay(dev)) return (ENXIO); ocd = ofw_bus_search_compatible(dev, compat_data); if ((int)ocd->ocd_data == 0) return (ENXIO); switch ((int)ocd->ocd_data) { case PRM_INSTANCE: device_set_desc(dev, "TI OMAP Power, Reset and Clock Management (PRM)"); break; case CM1_INSTANCE: device_set_desc(dev, "TI OMAP Power, Reset and Clock Management (C1)"); break; case CM2_INSTANCE: device_set_desc(dev, "TI OMAP Power, Reset and Clock Management (C2)"); break; default: device_printf(dev, "unknown instance type: %d\n", (int)ocd->ocd_data); return (ENXIO); } return (BUS_PROBE_DEFAULT); } /** * omap_prcm_attach - attach function for the driver * @dev: prcm device handle * * Allocates and sets up the driver context, this simply entails creating a * bus mappings for the PRCM register set. * * LOCKING: * None * * RETURNS: * Always returns 0 */ extern uint32_t platform_arm_tmr_freq; static int omap4_prcm_attach(device_t dev) { struct omap4_prcm_softc *sc; unsigned int freq; const struct ofw_compat_data *ocd; sc = device_get_softc(dev); ocd = ofw_bus_search_compatible(dev, compat_data); sc->sc_instance = (int)ocd->ocd_data; sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } ti_cpu_reset = omap4_prcm_reset; /* * In order to determine ARM frequency we need both RPM and CM1 * instances up and running. So wait until all CRM devices are * initialized. 
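 *
 * (The Cortex-A9 private timers run at half the MPU clock, which is why
 * freq / 2 is handed to arm_tmr_change_frequency() below.)
 *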
Should be replaced with proper clock framework */ if (device_get_unit(dev) == 2) { omap4_clk_get_arm_fclk_freq(NULL, &freq); arm_tmr_change_frequency(freq / 2); } return (0); } static device_method_t omap4_prcm_methods[] = { DEVMETHOD(device_probe, omap4_prcm_probe), DEVMETHOD(device_attach, omap4_prcm_attach), {0, 0}, }; static driver_t omap4_prcm_driver = { "omap4_prcm", omap4_prcm_methods, sizeof(struct omap4_prcm_softc), }; static devclass_t omap4_prcm_devclass; EARLY_DRIVER_MODULE(omap4_prcm, simplebus, omap4_prcm_driver, omap4_prcm_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_EARLY); MODULE_VERSION(omap4_prcm, 1); Index: head/sys/arm/ti/ti_cpuid.c =================================================================== --- head/sys/arm/ti/ti_cpuid.c (revision 306901) +++ head/sys/arm/ti/ti_cpuid.c (revision 306902) @@ -1,285 +1,283 @@ /*- * Copyright (c) 2011 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #define OMAP4_STD_FUSE_DIE_ID_0 0x2200 #define OMAP4_ID_CODE 0x2204 #define OMAP4_STD_FUSE_DIE_ID_1 0x2208 #define OMAP4_STD_FUSE_DIE_ID_2 0x220C #define OMAP4_STD_FUSE_DIE_ID_3 0x2210 #define OMAP4_STD_FUSE_PROD_ID_0 0x2214 #define OMAP4_STD_FUSE_PROD_ID_1 0x2218 #define OMAP3_ID_CODE 0xA204 static uint32_t chip_revision = 0xffffffff; /** * ti_revision - Returns the revision number of the device * * Simply returns an identifier for the revision of the chip we are running * on. * * RETURNS * A 32-bit identifier for the current chip */ uint32_t ti_revision(void) { return chip_revision; } /** * omap4_get_revision - determines omap4 revision * * Reads the registers to determine the revision of the chip we are currently * running on. Stores the information in global variables. * * */ static void omap4_get_revision(void) { uint32_t id_code; uint32_t revision; uint32_t hawkeye; bus_space_handle_t bsh; /* The chip revsion is read from the device identification registers and * the JTAG (?) tap registers, which are located in address 0x4A00_2200 to * 0x4A00_2218. This is part of the L4_CORE memory range and should have * been mapped in by the machdep.c code. 
* * STD_FUSE_DIE_ID_0 0x4A00 2200 * ID_CODE 0x4A00 2204 (this is the only one we need) * STD_FUSE_DIE_ID_1 0x4A00 2208 * STD_FUSE_DIE_ID_2 0x4A00 220C * STD_FUSE_DIE_ID_3 0x4A00 2210 * STD_FUSE_PROD_ID_0 0x4A00 2214 * STD_FUSE_PROD_ID_1 0x4A00 2218 */ /* FIXME Should we map somewhere else? */ bus_space_map(fdtbus_bs_tag,OMAP44XX_L4_CORE_HWBASE, 0x4000, 0, &bsh); id_code = bus_space_read_4(fdtbus_bs_tag, bsh, OMAP4_ID_CODE); bus_space_unmap(fdtbus_bs_tag, bsh, 0x4000); hawkeye = ((id_code >> 12) & 0xffff); revision = ((id_code >> 28) & 0xf); /* Apparently according to the linux code there were some ES2.0 samples that * have the wrong id code and report themselves as ES1.0 silicon. So used * the ARM cpuid to get the correct revision. */ if (revision == 0) { id_code = cpu_ident(); revision = (id_code & 0xf) - 1; } switch (hawkeye) { case 0xB852: switch (revision) { case 0: chip_revision = OMAP4430_REV_ES1_0; break; case 1: chip_revision = OMAP4430_REV_ES2_1; break; default: chip_revision = OMAP4430_REV_UNKNOWN; break; } break; case 0xB95C: switch (revision) { case 3: chip_revision = OMAP4430_REV_ES2_1; break; case 4: chip_revision = OMAP4430_REV_ES2_2; break; case 6: chip_revision = OMAP4430_REV_ES2_3; break; default: chip_revision = OMAP4430_REV_UNKNOWN; break; } break; case 0xB94E: switch (revision) { case 0: chip_revision = OMAP4460_REV_ES1_0; break; case 2: chip_revision = OMAP4460_REV_ES1_1; break; default: chip_revision = OMAP4460_REV_UNKNOWN; break; } break; case 0xB975: switch (revision) { case 0: chip_revision = OMAP4470_REV_ES1_0; break; default: chip_revision = OMAP4470_REV_UNKNOWN; break; } break; default: /* Default to the latest revision if we can't determine type */ chip_revision = OMAP_UNKNOWN_DEV; break; } if (chip_revision != OMAP_UNKNOWN_DEV) { printf("Texas Instruments OMAP%04x Processor, Revision ES%u.%u\n", OMAP_REV_DEVICE(chip_revision), OMAP_REV_MAJOR(chip_revision), OMAP_REV_MINOR(chip_revision)); } else { printf("Texas Instruments unknown OMAP chip: %04x, rev %d\n", hawkeye, revision); } } static void am335x_get_revision(void) { uint32_t dev_feature; char cpu_last_char; bus_space_handle_t bsh; int major; int minor; bus_space_map(fdtbus_bs_tag, AM335X_CONTROL_BASE, AM335X_CONTROL_SIZE, 0, &bsh); chip_revision = bus_space_read_4(fdtbus_bs_tag, bsh, AM335X_CONTROL_DEVICE_ID); dev_feature = bus_space_read_4(fdtbus_bs_tag, bsh, AM335X_CONTROL_DEV_FEATURE); bus_space_unmap(fdtbus_bs_tag, bsh, AM335X_CONTROL_SIZE); switch (dev_feature) { case 0x00FF0382: cpu_last_char='2'; break; case 0x20FF0382: cpu_last_char='4'; break; case 0x00FF0383: cpu_last_char='6'; break; case 0x00FE0383: cpu_last_char='7'; break; case 0x20FF0383: cpu_last_char='8'; break; case 0x20FE0383: cpu_last_char='9'; break; default: cpu_last_char='x'; } switch(AM335X_DEVREV(chip_revision)) { case 0: major = 1; minor = 0; break; case 1: major = 2; minor = 0; break; case 2: major = 2; minor = 1; break; default: major = 0; minor = AM335X_DEVREV(chip_revision); break; } printf("Texas Instruments AM335%c Processor, Revision ES%u.%u\n", cpu_last_char, major, minor); } /** * ti_cpu_ident - attempts to identify the chip we are running on * @dummy: ignored * * This function is called before any of the driver are initialised, however * the basic virt to phys maps have been setup in machdep.c so we can still * access the required registers, we just have to use direct register reads * and writes rather than going through the bus stuff. 
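 *
 * For example, on OMAP4 this path ends up in omap4_get_revision() above,
 * which reads ID_CODE at physical 0x4A002204 and decodes it as
 * hawkeye = (id_code >> 12) & 0xffff and revision = (id_code >> 28) & 0xf.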
* * */ static void ti_cpu_ident(void *dummy) { switch(ti_chip()) { case CHIP_OMAP_4: omap4_get_revision(); break; case CHIP_AM335X: am335x_get_revision(); break; default: panic("Unknown chip type, fixme!\n"); } } SYSINIT(ti_cpu_ident, SI_SUB_CPU, SI_ORDER_SECOND, ti_cpu_ident, NULL); Index: head/sys/arm/ti/ti_pinmux.c =================================================================== --- head/sys/arm/ti/ti_pinmux.c (revision 306901) +++ head/sys/arm/ti/ti_pinmux.c (revision 306902) @@ -1,445 +1,443 @@ /* * Copyright (c) 2010 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Ben Gray. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * Exposes pinmux module to pinctrl-compatible interface */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include "ti_pinmux.h" struct pincfg { uint32_t reg; uint32_t conf; }; static struct resource_spec ti_pinmux_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Control memory window */ { -1, 0 } }; static struct ti_pinmux_softc *ti_pinmux_sc; #define ti_pinmux_read_2(sc, reg) \ bus_space_read_2((sc)->sc_bst, (sc)->sc_bsh, (reg)) #define ti_pinmux_write_2(sc, reg, val) \ bus_space_write_2((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) #define ti_pinmux_read_4(sc, reg) \ bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) #define ti_pinmux_write_4(sc, reg, val) \ bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) /** * ti_padconf_devmap - Array of pins, should be defined one per SoC * * This array is typically defined in one of the targeted *_scm_pinumx.c * files and is specific to the given SoC platform. Each entry in the array * corresponds to an individual pin. */ extern const struct ti_pinmux_device ti_pinmux_dev; /** * ti_pinmux_padconf_from_name - searches the list of pads and returns entry * with matching ball name. 
* @ballname: the name of the ball * * RETURNS: * A pointer to the matching padconf or NULL if the ball wasn't found. */ static const struct ti_pinmux_padconf* ti_pinmux_padconf_from_name(const char *ballname) { const struct ti_pinmux_padconf *padconf; padconf = ti_pinmux_dev.padconf; while (padconf->ballname != NULL) { if (strcmp(ballname, padconf->ballname) == 0) return(padconf); padconf++; } return (NULL); } /** * ti_pinmux_padconf_set_internal - sets the muxmode and state for a pad/pin * @padconf: pointer to the pad structure * @muxmode: the name of the mode to use for the pin, i.e. "uart1_rx" * @state: the state to put the pad/pin in, i.e. PADCONF_PIN_??? * * * LOCKING: * Internally locks it's own context. * * RETURNS: * 0 on success. * EINVAL if pin requested is outside valid range or already in use. */ static int ti_pinmux_padconf_set_internal(struct ti_pinmux_softc *sc, const struct ti_pinmux_padconf *padconf, const char *muxmode, unsigned int state) { unsigned int mode; uint16_t reg_val; /* populate the new value for the PADCONF register */ reg_val = (uint16_t)(state & ti_pinmux_dev.padconf_sate_mask); /* find the new mode requested */ for (mode = 0; mode < 8; mode++) { if ((padconf->muxmodes[mode] != NULL) && (strcmp(padconf->muxmodes[mode], muxmode) == 0)) { break; } } /* couldn't find the mux mode */ if (mode >= 8) { printf("Invalid mode \"%s\"\n", muxmode); return (EINVAL); } /* set the mux mode */ reg_val |= (uint16_t)(mode & ti_pinmux_dev.padconf_muxmode_mask); if (bootverbose) device_printf(sc->sc_dev, "setting internal %x for %s\n", reg_val, muxmode); /* write the register value (16-bit writes) */ ti_pinmux_write_2(sc, padconf->reg_off, reg_val); return (0); } /** * ti_pinmux_padconf_set - sets the muxmode and state for a pad/pin * @padname: the name of the pad, i.e. "c12" * @muxmode: the name of the mode to use for the pin, i.e. "uart1_rx" * @state: the state to put the pad/pin in, i.e. PADCONF_PIN_??? * * * LOCKING: * Internally locks it's own context. * * RETURNS: * 0 on success. * EINVAL if pin requested is outside valid range or already in use. */ int ti_pinmux_padconf_set(const char *padname, const char *muxmode, unsigned int state) { const struct ti_pinmux_padconf *padconf; if (!ti_pinmux_sc) return (ENXIO); /* find the pin in the devmap */ padconf = ti_pinmux_padconf_from_name(padname); if (padconf == NULL) return (EINVAL); return (ti_pinmux_padconf_set_internal(ti_pinmux_sc, padconf, muxmode, state)); } /** * ti_pinmux_padconf_get - gets the muxmode and state for a pad/pin * @padname: the name of the pad, i.e. "c12" * @muxmode: upon return will contain the name of the muxmode of the pin * @state: upon return will contain the state of the pad/pin * * * LOCKING: * Internally locks it's own context. * * RETURNS: * 0 on success. * EINVAL if pin requested is outside valid range or already in use. 
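 *
 * Illustrative usage (the pad name "c12" is just the example used in this
 * comment, not a verified ball name for any particular board):
 *
 *	const char *mode;
 *	unsigned int state;
 *	if (ti_pinmux_padconf_get("c12", &mode, &state) == 0)
 *		printf("c12 muxed as %s (state 0x%x)\n", mode, state);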
*/ int ti_pinmux_padconf_get(const char *padname, const char **muxmode, unsigned int *state) { const struct ti_pinmux_padconf *padconf; uint16_t reg_val; if (!ti_pinmux_sc) return (ENXIO); /* find the pin in the devmap */ padconf = ti_pinmux_padconf_from_name(padname); if (padconf == NULL) return (EINVAL); /* read the register value (16-bit reads) */ reg_val = ti_pinmux_read_2(ti_pinmux_sc, padconf->reg_off); /* save the state */ if (state) *state = (reg_val & ti_pinmux_dev.padconf_sate_mask); /* save the mode */ if (muxmode) *muxmode = padconf->muxmodes[(reg_val & ti_pinmux_dev.padconf_muxmode_mask)]; return (0); } /** * ti_pinmux_padconf_set_gpiomode - converts a pad to GPIO mode. * @gpio: the GPIO pin number (0-195) * @state: the state to put the pad/pin in, i.e. PADCONF_PIN_??? * * * * LOCKING: * Internally locks it's own context. * * RETURNS: * 0 on success. * EINVAL if pin requested is outside valid range or already in use. */ int ti_pinmux_padconf_set_gpiomode(uint32_t gpio, unsigned int state) { const struct ti_pinmux_padconf *padconf; uint16_t reg_val; if (!ti_pinmux_sc) return (ENXIO); /* find the gpio pin in the padconf array */ padconf = ti_pinmux_dev.padconf; while (padconf->ballname != NULL) { if (padconf->gpio_pin == gpio) break; padconf++; } if (padconf->ballname == NULL) return (EINVAL); /* populate the new value for the PADCONF register */ reg_val = (uint16_t)(state & ti_pinmux_dev.padconf_sate_mask); /* set the mux mode */ reg_val |= (uint16_t)(padconf->gpio_mode & ti_pinmux_dev.padconf_muxmode_mask); /* write the register value (16-bit writes) */ ti_pinmux_write_2(ti_pinmux_sc, padconf->reg_off, reg_val); return (0); } /** * ti_pinmux_padconf_get_gpiomode - gets the current GPIO mode of the pin * @gpio: the GPIO pin number (0-195) * @state: upon return will contain the state * * * * LOCKING: * Internally locks it's own context. * * RETURNS: * 0 on success. * EINVAL if pin requested is outside valid range or not configured as GPIO. */ int ti_pinmux_padconf_get_gpiomode(uint32_t gpio, unsigned int *state) { const struct ti_pinmux_padconf *padconf; uint16_t reg_val; if (!ti_pinmux_sc) return (ENXIO); /* find the gpio pin in the padconf array */ padconf = ti_pinmux_dev.padconf; while (padconf->ballname != NULL) { if (padconf->gpio_pin == gpio) break; padconf++; } if (padconf->ballname == NULL) return (EINVAL); /* read the current register settings */ reg_val = ti_pinmux_read_2(ti_pinmux_sc, padconf->reg_off); /* check to make sure the pins is configured as GPIO in the first state */ if ((reg_val & ti_pinmux_dev.padconf_muxmode_mask) != padconf->gpio_mode) return (EINVAL); /* read and store the reset of the state, i.e. pull-up, pull-down, etc */ if (state) *state = (reg_val & ti_pinmux_dev.padconf_sate_mask); return (0); } static int ti_pinmux_configure_pins(device_t dev, phandle_t cfgxref) { struct pincfg *cfgtuples, *cfg; phandle_t cfgnode; int i, ntuples; static struct ti_pinmux_softc *sc; sc = device_get_softc(dev); cfgnode = OF_node_from_xref(cfgxref); ntuples = OF_getencprop_alloc(cfgnode, "pinctrl-single,pins", sizeof(*cfgtuples), (void **)&cfgtuples); if (ntuples < 0) return (ENOENT); if (ntuples == 0) return (0); /* Empty property is not an error. 
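 * A non-empty "pinctrl-single,pins" property is a flat list of
 * (padconf register offset, mux value) pairs matching struct pincfg
 * above, e.g. (offsets and values illustrative only):
 *
 *	pinctrl-single,pins = < 0x54 0x27  0x58 0x37 >;
 *
 * Each tuple is written out below with a 16-bit register write.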
*/ for (i = 0, cfg = cfgtuples; i < ntuples; i++, cfg++) { if (bootverbose) { char name[32]; OF_getprop(cfgnode, "name", &name, sizeof(name)); printf("%16s: muxreg 0x%04x muxval 0x%02x\n", name, cfg->reg, cfg->conf); } /* write the register value (16-bit writes) */ ti_pinmux_write_2(sc, cfg->reg, cfg->conf); } OF_prop_free(cfgtuples); return (0); } /* * Device part of OMAP SCM driver */ static int ti_pinmux_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "pinctrl-single")) return (ENXIO); if (ti_pinmux_sc) { printf("%s: multiple pinctrl modules in device tree data, ignoring\n", __func__); return (EEXIST); } device_set_desc(dev, "TI Pinmux Module"); return (BUS_PROBE_DEFAULT); } /** * ti_pinmux_attach - attaches the pinmux to the simplebus * @dev: new device * * RETURNS * Zero on success or ENXIO if an error occuried. */ static int ti_pinmux_attach(device_t dev) { struct ti_pinmux_softc *sc = device_get_softc(dev); #if 0 if (ti_pinmux_sc) return (ENXIO); #endif sc->sc_dev = dev; if (bus_alloc_resources(dev, ti_pinmux_res_spec, sc->sc_res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_res[0]); sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]); if (ti_pinmux_sc == NULL) ti_pinmux_sc = sc; fdt_pinctrl_register(dev, "pinctrl-single,pins"); fdt_pinctrl_configure_tree(dev); return (0); } static device_method_t ti_pinmux_methods[] = { DEVMETHOD(device_probe, ti_pinmux_probe), DEVMETHOD(device_attach, ti_pinmux_attach), /* fdt_pinctrl interface */ DEVMETHOD(fdt_pinctrl_configure, ti_pinmux_configure_pins), { 0, 0 } }; static driver_t ti_pinmux_driver = { "ti_pinmux", ti_pinmux_methods, sizeof(struct ti_pinmux_softc), }; static devclass_t ti_pinmux_devclass; DRIVER_MODULE(ti_pinmux, simplebus, ti_pinmux_driver, ti_pinmux_devclass, 0, 0); Index: head/sys/arm/ti/ti_prcm.c =================================================================== --- head/sys/arm/ti/ti_prcm.c (revision 306901) +++ head/sys/arm/ti/ti_prcm.c (revision 306902) @@ -1,357 +1,355 @@ /* * Copyright (c) 2010 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Ben Gray. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * Power, Reset and Clock Management Module * * This is a very simple driver wrapper around the PRCM set of registers in * the OMAP3 chip. It allows you to turn on and off things like the functional * and interface clocks to the various on-chip modules. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include /** * ti_*_clk_devmap - Array of clock devices, should be defined one per SoC * * This array is typically defined in one of the targeted *_prcm_clk.c * files and is specific to the given SoC platform. Each entry in the array * corresponds to an individual clock device. */ extern struct ti_clock_dev ti_omap4_clk_devmap[]; extern struct ti_clock_dev ti_am335x_clk_devmap[]; /** * ti_prcm_clk_dev - returns a pointer to the clock device with given id * @clk: the ID of the clock device to get * * Simply iterates through the clk_devmap global array and returns a pointer * to the clock device if found. * * LOCKING: * None * * RETURNS: * The pointer to the clock device on success, on failure NULL is returned. */ static struct ti_clock_dev * ti_prcm_clk_dev(clk_ident_t clk) { struct ti_clock_dev *clk_dev; /* Find the clock within the devmap - it's a bit inefficent having a for * loop for this, but this function should only called when a driver is * being activated so IMHO not a big issue. */ clk_dev = NULL; switch(ti_chip()) { #ifdef SOC_OMAP4 case CHIP_OMAP_4: clk_dev = &(ti_omap4_clk_devmap[0]); break; #endif #ifdef SOC_TI_AM335X case CHIP_AM335X: clk_dev = &(ti_am335x_clk_devmap[0]); break; #endif } if (clk_dev == NULL) panic("No clock devmap found"); while (clk_dev->id != INVALID_CLK_IDENT) { if (clk_dev->id == clk) { return (clk_dev); } clk_dev++; } /* Sanity check we managed to find the clock */ printf("ti_prcm: Failed to find clock device (%d)\n", clk); return (NULL); } /** * ti_prcm_clk_valid - enables a clock for a particular module * @clk: identifier for the module to enable, see ti_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1_ICLK or OMAP3_MODULE_GPTIMER10_FCLK. * * This function can enable either a functional or interface clock. * * The real work done to enable the clock is really done in the callback * function associated with the clock, this function is simply a wrapper * around that. * * LOCKING: * Internally locks the driver context. * * RETURNS: * Returns 0 on success or positive error code on failure. */ int ti_prcm_clk_valid(clk_ident_t clk) { int ret = 0; if (ti_prcm_clk_dev(clk) == NULL) ret = EINVAL; return (ret); } /** * ti_prcm_clk_enable - enables a clock for a particular module * @clk: identifier for the module to enable, see ti_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1_ICLK or OMAP3_MODULE_GPTIMER10_FCLK. * * This function can enable either a functional or interface clock. 
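 *
 * Illustrative usage (the clock identifier here is the example name quoted
 * above; see ti_prcm.h for the identifiers that actually exist):
 *
 *	if (ti_prcm_clk_enable(OMAP3_MODULE_MMC1_ICLK) != 0)
 *		return (ENXIO);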
* * The real work done to enable the clock is really done in the callback * function associated with the clock, this function is simply a wrapper * around that. * * LOCKING: * Internally locks the driver context. * * RETURNS: * Returns 0 on success or positive error code on failure. */ int ti_prcm_clk_enable(clk_ident_t clk) { struct ti_clock_dev *clk_dev; int ret; /* Find the clock within the devmap - it's a bit inefficent having a for * loop for this, but this function should only called when a driver is * being activated so IMHO not a big issue. */ clk_dev = ti_prcm_clk_dev(clk); /* Sanity check we managed to find the clock */ if (clk_dev == NULL) return (EINVAL); /* Activate the clock */ if (clk_dev->clk_activate) ret = clk_dev->clk_activate(clk_dev); else ret = EINVAL; return (ret); } /** * ti_prcm_clk_disable - disables a clock for a particular module * @clk: identifier for the module to enable, see ti_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1_ICLK or OMAP3_MODULE_GPTIMER10_FCLK. * * This function can enable either a functional or interface clock. * * The real work done to enable the clock is really done in the callback * function associated with the clock, this function is simply a wrapper * around that. * * LOCKING: * Internally locks the driver context. * * RETURNS: * Returns 0 on success or positive error code on failure. */ int ti_prcm_clk_disable(clk_ident_t clk) { struct ti_clock_dev *clk_dev; int ret; /* Find the clock within the devmap - it's a bit inefficent having a for * loop for this, but this function should only called when a driver is * being activated so IMHO not a big issue. */ clk_dev = ti_prcm_clk_dev(clk); /* Sanity check we managed to find the clock */ if (clk_dev == NULL) return (EINVAL); /* Activate the clock */ if (clk_dev->clk_deactivate) ret = clk_dev->clk_deactivate(clk_dev); else ret = EINVAL; return (ret); } /** * ti_prcm_clk_set_source - sets the source * @clk: identifier for the module to enable, see ti_prcm.h for a list * of possible modules. * Example: OMAP3_MODULE_MMC1_ICLK or OMAP3_MODULE_GPTIMER10_FCLK. * * This function can enable either a functional or interface clock. * * The real work done to enable the clock is really done in the callback * function associated with the clock, this function is simply a wrapper * around that. * * LOCKING: * Internally locks the driver context. * * RETURNS: * Returns 0 on success or positive error code on failure. */ int ti_prcm_clk_set_source(clk_ident_t clk, clk_src_t clksrc) { struct ti_clock_dev *clk_dev; int ret; /* Find the clock within the devmap - it's a bit inefficent having a for * loop for this, but this function should only called when a driver is * being activated so IMHO not a big issue. */ clk_dev = ti_prcm_clk_dev(clk); /* Sanity check we managed to find the clock */ if (clk_dev == NULL) return (EINVAL); /* Activate the clock */ if (clk_dev->clk_set_source) ret = clk_dev->clk_set_source(clk_dev, clksrc); else ret = EINVAL; return (ret); } /** * ti_prcm_clk_get_source_freq - gets the source clock frequency * @clk: identifier for the module to enable, see ti_prcm.h for a list * of possible modules. * @freq: pointer to an integer that upon return will contain the src freq * * This function returns the frequency of the source clock. * * The real work done to enable the clock is really done in the callback * function associated with the clock, this function is simply a wrapper * around that. * * LOCKING: * Internally locks the driver context. 
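 *
 * Illustrative usage (same example clock identifier as above):
 *
 *	unsigned int freq;
 *	if (ti_prcm_clk_get_source_freq(OMAP3_MODULE_MMC1_ICLK, &freq) == 0)
 *		device_printf(dev, "source clock frequency: %u\n", freq);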
* * RETURNS: * Returns 0 on success or positive error code on failure. */ int ti_prcm_clk_get_source_freq(clk_ident_t clk, unsigned int *freq) { struct ti_clock_dev *clk_dev; int ret; /* Find the clock within the devmap - it's a bit inefficent having a for * loop for this, but this function should only called when a driver is * being activated so IMHO not a big issue. */ clk_dev = ti_prcm_clk_dev(clk); /* Sanity check we managed to find the clock */ if (clk_dev == NULL) return (EINVAL); /* Get the source frequency of the clock */ if (clk_dev->clk_get_source_freq) ret = clk_dev->clk_get_source_freq(clk_dev, freq); else ret = EINVAL; return (ret); } /** * ti_prcm_clk_set_source_freq - sets the source clock frequency as close to freq as possible * @clk: identifier for the module to enable, see ti_prcm.h for a list * of possible modules. * @freq: requested freq * * LOCKING: * Internally locks the driver context. * * RETURNS: * Returns 0 on success or positive error code on failure. */ int ti_prcm_clk_set_source_freq(clk_ident_t clk, unsigned int freq) { struct ti_clock_dev *clk_dev; int ret; clk_dev = ti_prcm_clk_dev(clk); /* Sanity check we managed to find the clock */ if (clk_dev == NULL) return (EINVAL); /* Get the source frequency of the clock */ if (clk_dev->clk_set_source_freq) ret = clk_dev->clk_set_source_freq(clk_dev, freq); else ret = EINVAL; return (ret); } Index: head/sys/arm/ti/ti_scm.c =================================================================== --- head/sys/arm/ti/ti_scm.c (revision 306901) +++ head/sys/arm/ti/ti_scm.c (revision 306902) @@ -1,177 +1,175 @@ /* * Copyright (c) 2010 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Ben Gray. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * SCM - System Control Module * * Hopefully in the end this module will contain a bunch of utility functions * for configuring and querying the general system control registers, but for * now it only does pin(pad) multiplexing. 
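 *
 * Note that in this tree the pad multiplexing itself is handled by
 * ti_pinmux.c; what remains here is the register window plus the
 * ti_scm_reg_read_4()/ti_scm_reg_write_4() helpers used by chip-specific
 * code, e.g. (register offset illustrative only):
 *
 *	uint32_t val;
 *	if (ti_scm_reg_read_4(0x600, &val) == 0)
 *		(void)ti_scm_reg_write_4(0x600, val | 0x1);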
* * This is different from the GPIO module in that it is used to configure the * pins between modules not just GPIO input/output. * * This file contains the generic top level driver, however it relies on chip * specific settings and therefore expects an array of ti_scm_padconf structs * call ti_padconf_devmap to be located somewhere in the kernel. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include "ti_scm.h" static struct resource_spec ti_scm_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Control memory window */ { -1, 0 } }; static struct ti_scm_softc *ti_scm_sc; #define ti_scm_read_4(sc, reg) \ bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg)) #define ti_scm_write_4(sc, reg, val) \ bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val)) /* * Device part of OMAP SCM driver */ static int ti_scm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "syscon")) return (ENXIO); if (ti_scm_sc) { return (EEXIST); } device_set_desc(dev, "TI Control Module"); return (BUS_PROBE_DEFAULT); } /** * ti_scm_attach - attaches the timer to the simplebus * @dev: new device * * Reserves memory and interrupt resources, stores the softc structure * globally and registers both the timecount and eventtimer objects. * * RETURNS * Zero on success or ENXIO if an error occuried. */ static int ti_scm_attach(device_t dev) { struct ti_scm_softc *sc = device_get_softc(dev); sc->sc_dev = dev; if (bus_alloc_resources(dev, ti_scm_res_spec, sc->sc_res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } /* Global timer interface */ sc->sc_bst = rman_get_bustag(sc->sc_res[0]); sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]); ti_scm_sc = sc; /* Attach platform extensions, if any. */ bus_generic_probe(dev); return (bus_generic_attach(dev)); } int ti_scm_reg_read_4(uint32_t reg, uint32_t *val) { if (!ti_scm_sc) return (ENXIO); *val = ti_scm_read_4(ti_scm_sc, reg); return (0); } int ti_scm_reg_write_4(uint32_t reg, uint32_t val) { if (!ti_scm_sc) return (ENXIO); ti_scm_write_4(ti_scm_sc, reg, val); return (0); } static device_method_t ti_scm_methods[] = { DEVMETHOD(device_probe, ti_scm_probe), DEVMETHOD(device_attach, ti_scm_attach), { 0, 0 } }; static driver_t ti_scm_driver = { "ti_scm", ti_scm_methods, sizeof(struct ti_scm_softc), }; static devclass_t ti_scm_devclass; EARLY_DRIVER_MODULE(ti_scm, simplebus, ti_scm_driver, ti_scm_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); Index: head/sys/arm/ti/twl/twl.c =================================================================== --- head/sys/arm/ti/twl/twl.c (revision 306901) +++ head/sys/arm/ti/twl/twl.c (revision 306902) @@ -1,463 +1,461 @@ /*- * Copyright (c) 2011 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Texas Instruments TWL4030/TWL5030/TWL60x0/TPS659x0 Power Management and * Audio CODEC devices. * * This code is based on the Linux TWL multifunctional device driver, which is * copyright (C) 2005-2006 Texas Instruments, Inc. * * These chips are typically used as support ICs for the OMAP range of embedded * ARM processes/SOC from Texas Instruments. They are typically used to control * on board voltages, however some variants have other features like audio * codecs, USB OTG transceivers, RTC, PWM, etc. * * This driver acts as a bus for more specific companion devices. * */ #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include "arm/ti/twl/twl.h" /* TWL device IDs */ #define TWL_DEVICE_UNKNOWN 0xffff #define TWL_DEVICE_4030 0x4030 #define TWL_DEVICE_6025 0x6025 #define TWL_DEVICE_6030 0x6030 /* Each TWL device typically has more than one I2C address */ #define TWL_MAX_SUBADDRS 4 /* The maxium number of bytes that can be written in one call */ #define TWL_MAX_IIC_DATA_SIZE 63 /* The TWL devices typically use 4 I2C address for the different internal * register sets, plus one SmartReflex I2C address. */ #define TWL_CHIP_ID0 0x48 #define TWL_CHIP_ID1 0x49 #define TWL_CHIP_ID2 0x4A #define TWL_CHIP_ID3 0x4B #define TWL_SMARTREFLEX_CHIP_ID 0x12 #define TWL_INVALID_CHIP_ID 0xff struct twl_softc { device_t sc_dev; struct mtx sc_mtx; unsigned int sc_type; uint8_t sc_subaddr_map[TWL_MAX_SUBADDRS]; struct intr_config_hook sc_scan_hook; device_t sc_vreg; device_t sc_clks; }; /** * Macros for driver mutex locking */ #define TWL_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define TWL_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define TWL_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \ "twl", MTX_DEF) #define TWL_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define TWL_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define TWL_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); /** * twl_is_4030 - returns true if the device is TWL4030 * twl_is_6025 - returns true if the device is TWL6025 * twl_is_6030 - returns true if the device is TWL6030 * @sc: device soft context * * Returns a non-zero value if the device matches. * * RETURNS: * Returns a non-zero value if the device matches, otherwise zero. 
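 *
 * Illustrative usage from a child driver (this is the pattern used by
 * twl_clks and twl_vreg below):
 *
 *	if (twl_is_4030(device_get_parent(dev)))
 *		device_set_desc(dev, "TI TWL4030 PMIC External Clocks");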
*/ int twl_is_4030(device_t dev) { struct twl_softc *sc = device_get_softc(dev); return (sc->sc_type == TWL_DEVICE_4030); } int twl_is_6025(device_t dev) { struct twl_softc *sc = device_get_softc(dev); return (sc->sc_type == TWL_DEVICE_6025); } int twl_is_6030(device_t dev) { struct twl_softc *sc = device_get_softc(dev); return (sc->sc_type == TWL_DEVICE_6030); } /** * twl_read - read one or more registers from the TWL device * @sc: device soft context * @nsub: the sub-module to read from * @reg: the register offset within the module to read * @buf: buffer to store the bytes in * @cnt: the number of bytes to read * * Reads one or more registers and stores the result in the supplied buffer. * * RETURNS: * Zero on success or an error code on failure. */ int twl_read(device_t dev, uint8_t nsub, uint8_t reg, uint8_t *buf, uint16_t cnt) { struct twl_softc *sc; struct iic_msg msg[2]; uint8_t addr; int rc; sc = device_get_softc(dev); TWL_LOCK(sc); addr = sc->sc_subaddr_map[nsub]; TWL_UNLOCK(sc); if (addr == TWL_INVALID_CHIP_ID) return (EIO); /* Set the address to read from */ msg[0].slave = addr; msg[0].flags = IIC_M_WR | IIC_M_NOSTOP; msg[0].len = 1; msg[0].buf = &reg; /* Read the data back */ msg[1].slave = addr; msg[1].flags = IIC_M_RD; msg[1].len = cnt; msg[1].buf = buf; rc = iicbus_transfer(dev, msg, 2); if (rc != 0) { device_printf(dev, "iicbus read failed (adr:0x%02x, reg:0x%02x)\n", addr, reg); return (EIO); } return (0); } /** * twl_write - writes one or more registers to the TWL device * @sc: device soft context * @nsub: the sub-module to read from * @reg: the register offset within the module to read * @buf: data to write * @cnt: the number of bytes to write * * Writes one or more registers. * * RETURNS: * Zero on success or an error code on failure. */ int twl_write(device_t dev, uint8_t nsub, uint8_t reg, uint8_t *buf, uint16_t cnt) { struct twl_softc *sc; struct iic_msg msg; uint8_t addr; uint8_t tmp_buf[TWL_MAX_IIC_DATA_SIZE + 1]; int rc; if (cnt > TWL_MAX_IIC_DATA_SIZE) return (ENOMEM); /* Set the register address as the first byte */ tmp_buf[0] = reg; memcpy(&tmp_buf[1], buf, cnt); sc = device_get_softc(dev); TWL_LOCK(sc); addr = sc->sc_subaddr_map[nsub]; TWL_UNLOCK(sc); if (addr == TWL_INVALID_CHIP_ID) return (EIO); /* Setup the transfer and execute it */ msg.slave = addr; msg.flags = IIC_M_WR; msg.len = cnt + 1; msg.buf = tmp_buf; rc = iicbus_transfer(dev, &msg, 1); if (rc != 0) { device_printf(sc->sc_dev, "iicbus write failed (adr:0x%02x, reg:0x%02x)\n", addr, reg); return (EIO); } return (0); } /** * twl_test_present - checks if a device with given address is present * @sc: device soft context * @addr: the address of the device to scan for * * Sends just the address byte and checks for an ACK. If no ACK then device * is assumed to not be present. * * RETURNS: * EIO if device is not present, otherwise 0 is returned. */ static int twl_test_present(struct twl_softc *sc, uint8_t addr) { struct iic_msg msg; uint8_t tmp; /* Set the address to read from */ msg.slave = addr; msg.flags = IIC_M_RD; msg.len = 1; msg.buf = &tmp; if (iicbus_transfer(sc->sc_dev, &msg, 1) != 0) return (EIO); return (0); } /** * twl_scan - scans the i2c bus for sub modules * @dev: the twl device * * TWL devices don't just have one i2c slave address, rather they have up to * 5 other addresses, each one for a separate module within the device. This * function scans the bus for 4 possible sub-devices and stores the info * internally.
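 *
 * The discovered addresses land in sc_subaddr_map[], which is what
 * twl_read()/twl_write() index with their 'nsub' argument, e.g.
 * (sub-device index and register offset illustrative only):
 *
 *	uint8_t val;
 *	if (twl_read(sc->sc_dev, 0, 0x00, &val, 1) == 0)
 *		device_printf(sc->sc_dev, "reg 0x00 = 0x%02x\n", val);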
* */ static void twl_scan(void *dev) { struct twl_softc *sc; unsigned i; uint8_t devs[TWL_MAX_SUBADDRS]; uint8_t base = TWL_CHIP_ID0; sc = device_get_softc((device_t)dev); memset(devs, TWL_INVALID_CHIP_ID, TWL_MAX_SUBADDRS); /* Try each of the addresses (0x48, 0x49, 0x4a & 0x4b) to determine which * sub modules we have. */ for (i = 0; i < TWL_MAX_SUBADDRS; i++) { if (twl_test_present(sc, (base + i)) == 0) { devs[i] = (base + i); device_printf(sc->sc_dev, "Found (sub)device at 0x%02x\n", (base + i)); } } TWL_LOCK(sc); memcpy(sc->sc_subaddr_map, devs, TWL_MAX_SUBADDRS); TWL_UNLOCK(sc); /* Finished with the interrupt hook */ config_intrhook_disestablish(&sc->sc_scan_hook); } /** * twl_probe - * @dev: the twl device * * Scans the FDT for a match for the device, possible compatible device * strings are; "ti,twl6030", "ti,twl6025", "ti,twl4030". * * The FDT compat string also determines the type of device (it is currently * not possible to dynamically determine the device type). * */ static int twl_probe(device_t dev) { phandle_t node; const char *compat; int len, l; struct twl_softc *sc; if ((compat = ofw_bus_get_compat(dev)) == NULL) return (ENXIO); if ((node = ofw_bus_get_node(dev)) == 0) return (ENXIO); /* Get total 'compatible' prop len */ if ((len = OF_getproplen(node, "compatible")) <= 0) return (ENXIO); sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_type = TWL_DEVICE_UNKNOWN; while (len > 0) { if (strncasecmp(compat, "ti,twl6030", 10) == 0) sc->sc_type = TWL_DEVICE_6030; else if (strncasecmp(compat, "ti,twl6025", 10) == 0) sc->sc_type = TWL_DEVICE_6025; else if (strncasecmp(compat, "ti,twl4030", 10) == 0) sc->sc_type = TWL_DEVICE_4030; if (sc->sc_type != TWL_DEVICE_UNKNOWN) break; /* Slide to the next sub-string. */ l = strlen(compat) + 1; compat += l; len -= l; } switch (sc->sc_type) { case TWL_DEVICE_4030: device_set_desc(dev, "TI TWL4030/TPS659x0 Companion IC"); break; case TWL_DEVICE_6025: device_set_desc(dev, "TI TWL6025 Companion IC"); break; case TWL_DEVICE_6030: device_set_desc(dev, "TI TWL6030 Companion IC"); break; case TWL_DEVICE_UNKNOWN: default: return (ENXIO); } return (0); } static int twl_attach(device_t dev) { struct twl_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; TWL_LOCK_INIT(sc); /* We have to wait until interrupts are enabled. I2C read and write * only works if the interrupts are available. 
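 * The bus scan is therefore deferred with config_intrhook_establish();
 * twl_scan() runs once interrupts are up and disestablishes the hook when
 * it is done.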
*/ sc->sc_scan_hook.ich_func = twl_scan; sc->sc_scan_hook.ich_arg = dev; if (config_intrhook_establish(&sc->sc_scan_hook) != 0) return (ENOMEM); /* FIXME: should be in DTS file */ if ((sc->sc_vreg = device_add_child(dev, "twl_vreg", -1)) == NULL) device_printf(dev, "could not allocate twl_vreg instance\n"); if ((sc->sc_clks = device_add_child(dev, "twl_clks", -1)) == NULL) device_printf(dev, "could not allocate twl_clks instance\n"); return (bus_generic_attach(dev)); } static int twl_detach(device_t dev) { struct twl_softc *sc; sc = device_get_softc(dev); if (sc->sc_vreg) device_delete_child(dev, sc->sc_vreg); if (sc->sc_clks) device_delete_child(dev, sc->sc_clks); TWL_LOCK_DESTROY(sc); return (0); } static device_method_t twl_methods[] = { DEVMETHOD(device_probe, twl_probe), DEVMETHOD(device_attach, twl_attach), DEVMETHOD(device_detach, twl_detach), {0, 0}, }; static driver_t twl_driver = { "twl", twl_methods, sizeof(struct twl_softc), }; static devclass_t twl_devclass; DRIVER_MODULE(twl, iicbus, twl_driver, twl_devclass, 0, 0); MODULE_VERSION(twl, 1); Index: head/sys/arm/ti/twl/twl_clks.c =================================================================== --- head/sys/arm/ti/twl/twl_clks.c (revision 306901) +++ head/sys/arm/ti/twl/twl_clks.c (revision 306902) @@ -1,674 +1,672 @@ /*- * Copyright (c) 2012 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Texas Instruments TWL4030/TWL5030/TWL60x0/TPS659x0 Power Management. * * This driver covers the external clocks, allows for enabling & * disabling their output. * * * * FLATTENED DEVICE TREE (FDT) * Startup override settings can be specified in the FDT, if they are they * should be under the twl parent device and take the following form: * * external-clocks = "name1", "state1", * "name2", "state2", * etc; * * Each override should be a pair, the first entry is the name of the clock * the second is the state to set, possible strings are either "on" or "off". 
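 *
 * For example (clock names as defined for the TWL6030 further down in this
 * file):
 *
 *	external-clocks = "clk32kg", "on",
 *	                  "clk32kaudio", "off";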
* */ #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include "twl.h" #include "twl_clks.h" static int twl_clks_debug = 1; /* * Power Groups bits for the 4030 and 6030 devices */ #define TWL4030_P3_GRP 0x80 /* Peripherals, power group */ #define TWL4030_P2_GRP 0x40 /* Modem power group */ #define TWL4030_P1_GRP 0x20 /* Application power group (FreeBSD control) */ #define TWL6030_P3_GRP 0x04 /* Modem power group */ #define TWL6030_P2_GRP 0x02 /* Connectivity power group */ #define TWL6030_P1_GRP 0x01 /* Application power group (FreeBSD control) */ /* * Register offsets within a clk regulator register set */ #define TWL_CLKS_GRP 0x00 /* Regulator GRP register */ #define TWL_CLKS_STATE 0x02 /* TWL6030 only */ /** * Support voltage regulators for the different IC's */ struct twl_clock { const char *name; uint8_t subdev; uint8_t regbase; }; static const struct twl_clock twl4030_clocks[] = { { "32kclkout", 0, 0x8e }, { NULL, 0, 0x00 } }; static const struct twl_clock twl6030_clocks[] = { { "clk32kg", 0, 0xbc }, { "clk32kao", 0, 0xb9 }, { "clk32kaudio", 0, 0xbf }, { NULL, 0, 0x00 } }; #define TWL_CLKS_MAX_NAMELEN 32 struct twl_clk_entry { LIST_ENTRY(twl_clk_entry) link; struct sysctl_oid *oid; char name[TWL_CLKS_MAX_NAMELEN]; uint8_t sub_dev; /* the sub-device number for the clock */ uint8_t reg_off; /* register base address of the clock */ }; struct twl_clks_softc { device_t sc_dev; /* twl_clk device */ device_t sc_pdev; /* parent device (twl) */ struct sx sc_sx; /* internal locking */ struct intr_config_hook sc_init_hook; LIST_HEAD(twl_clk_list, twl_clk_entry) sc_clks_list; }; /** * Macros for driver shared locking */ #define TWL_CLKS_XLOCK(_sc) sx_xlock(&(_sc)->sc_sx) #define TWL_CLKS_XUNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx) #define TWL_CLKS_SLOCK(_sc) sx_slock(&(_sc)->sc_sx) #define TWL_CLKS_SUNLOCK(_sc) sx_sunlock(&(_sc)->sc_sx) #define TWL_CLKS_LOCK_INIT(_sc) sx_init(&(_sc)->sc_sx, "twl_clks") #define TWL_CLKS_LOCK_DESTROY(_sc) sx_destroy(&(_sc)->sc_sx); #define TWL_CLKS_ASSERT_LOCKED(_sc) sx_assert(&(_sc)->sc_sx, SA_LOCKED); #define TWL_CLKS_LOCK_UPGRADE(_sc) \ do { \ while (!sx_try_upgrade(&(_sc)->sc_sx)) \ pause("twl_clks_ex", (hz / 100)); \ } while(0) #define TWL_CLKS_LOCK_DOWNGRADE(_sc) sx_downgrade(&(_sc)->sc_sx); /** * twl_clks_read_1 - read single register from the TWL device * twl_clks_write_1 - writes a single register in the TWL device * @sc: device context * @clk: the clock device we're reading from / writing to * @off: offset within the clock's register set * @val: the value to write or a pointer to a variable to store the result * * RETURNS: * Zero on success or an error code on failure. */ static inline int twl_clks_read_1(struct twl_clks_softc *sc, struct twl_clk_entry *clk, uint8_t off, uint8_t *val) { return (twl_read(sc->sc_pdev, clk->sub_dev, clk->reg_off + off, val, 1)); } static inline int twl_clks_write_1(struct twl_clks_softc *sc, struct twl_clk_entry *clk, uint8_t off, uint8_t val) { return (twl_write(sc->sc_pdev, clk->sub_dev, clk->reg_off + off, &val, 1)); } /** * twl_clks_is_enabled - determines if a clock is enabled * @dev: TWL CLK device * @name: the name of the clock * @enabled: upon return will contain the 'enabled' state * * LOCKING: * Internally the function takes and releases the TWL lock. * * RETURNS: * Zero on success or a negative error code on failure. 
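 *
 * Illustrative usage ("clk32kg" is one of the TWL6030 clock names defined
 * above):
 *
 *	int on;
 *	if (twl_clks_is_enabled(dev, "clk32kg", &on) == 0 && !on)
 *		(void)twl_clks_enable(dev, "clk32kg");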
*/ int twl_clks_is_enabled(device_t dev, const char *name, int *enabled) { struct twl_clks_softc *sc = device_get_softc(dev); struct twl_clk_entry *clk; int found = 0; int err; uint8_t grp, state; TWL_CLKS_SLOCK(sc); LIST_FOREACH(clk, &sc->sc_clks_list, link) { if (strcmp(clk->name, name) == 0) { found = 1; break; } } if (!found) { TWL_CLKS_SUNLOCK(sc); return (EINVAL); } if (twl_is_4030(sc->sc_pdev)) { err = twl_clks_read_1(sc, clk, TWL_CLKS_GRP, &grp); if (!err) *enabled = (grp & TWL4030_P1_GRP); } else if (twl_is_6030(sc->sc_pdev) || twl_is_6025(sc->sc_pdev)) { TWL_CLKS_LOCK_UPGRADE(sc); /* Check the clock is in the application group */ if (twl_is_6030(sc->sc_pdev)) { err = twl_clks_read_1(sc, clk, TWL_CLKS_GRP, &grp); if (err) { TWL_CLKS_LOCK_DOWNGRADE(sc); goto done; } if (!(grp & TWL6030_P1_GRP)) { TWL_CLKS_LOCK_DOWNGRADE(sc); *enabled = 0; /* disabled */ goto done; } } /* Read the application mode state and verify it's ON */ err = twl_clks_read_1(sc, clk, TWL_CLKS_STATE, &state); if (!err) *enabled = ((state & 0x0C) == 0x04); TWL_CLKS_LOCK_DOWNGRADE(sc); } else { err = EINVAL; } done: TWL_CLKS_SUNLOCK(sc); return (err); } /** * twl_clks_set_state - enables/disables a clock output * @sc: device context * @clk: the clock entry to enable/disable * @enable: non-zero the clock is enabled, zero the clock is disabled * * LOCKING: * The TWL CLK lock must be held before this function is called. * * RETURNS: * Zero on success or an error code on failure. */ static int twl_clks_set_state(struct twl_clks_softc *sc, struct twl_clk_entry *clk, int enable) { int xlocked; int err; uint8_t grp; TWL_CLKS_ASSERT_LOCKED(sc); /* Upgrade the lock to exclusive because about to perform read-mod-write */ xlocked = sx_xlocked(&sc->sc_sx); if (!xlocked) TWL_CLKS_LOCK_UPGRADE(sc); err = twl_clks_read_1(sc, clk, TWL_CLKS_GRP, &grp); if (err) goto done; if (twl_is_4030(sc->sc_pdev)) { /* On the TWL4030 we just need to ensure the clock is in the right * power domain, don't need to turn on explicitly like TWL6030. */ if (enable) grp |= TWL4030_P1_GRP; else grp &= ~(TWL4030_P1_GRP | TWL4030_P2_GRP | TWL4030_P3_GRP); err = twl_clks_write_1(sc, clk, TWL_CLKS_GRP, grp); } else if (twl_is_6030(sc->sc_pdev) || twl_is_6025(sc->sc_pdev)) { /* Make sure the clock belongs to at least the APP power group */ if (twl_is_6030(sc->sc_pdev) && !(grp & TWL6030_P1_GRP)) { grp |= TWL6030_P1_GRP; err = twl_clks_write_1(sc, clk, TWL_CLKS_GRP, grp); if (err) goto done; } /* On TWL6030 we need to make sure we disable power for all groups */ if (twl_is_6030(sc->sc_pdev)) grp = TWL6030_P1_GRP | TWL6030_P2_GRP | TWL6030_P3_GRP; else grp = 0x00; /* Set the state of the clock */ if (enable) err = twl_clks_write_1(sc, clk, TWL_CLKS_STATE, (grp << 5) | 0x01); else err = twl_clks_write_1(sc, clk, TWL_CLKS_STATE, (grp << 5)); } else { err = EINVAL; } done: if (!xlocked) TWL_CLKS_LOCK_DOWNGRADE(sc); if ((twl_clks_debug > 1) && !err) device_printf(sc->sc_dev, "%s : %sabled\n", clk->name, enable ? "en" : "dis"); return (err); } /** * twl_clks_disable - disables a clock output * @dev: TWL clk device * @name: the name of the clock * * LOCKING: * Internally the function takes and releases the TWL lock. * * RETURNS: * Zero on success or an error code on failure. 
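 *
 * On the TWL4030 this removes the clock from all three power groups; on the
 * TWL6030/TWL6025 it clears the ON bit in the STATE register instead (see
 * twl_clks_set_state() above).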
*/ int twl_clks_disable(device_t dev, const char *name) { struct twl_clks_softc *sc = device_get_softc(dev); struct twl_clk_entry *clk; int err = EINVAL; TWL_CLKS_SLOCK(sc); LIST_FOREACH(clk, &sc->sc_clks_list, link) { if (strcmp(clk->name, name) == 0) { err = twl_clks_set_state(sc, clk, 0); break; } } TWL_CLKS_SUNLOCK(sc); return (err); } /** * twl_clks_enable - enables a clock output * @dev: TWL clk device * @name: the name of the clock * * LOCKING: * Internally the function takes and releases the TWL CLKS lock. * * RETURNS: * Zero on success or an error code on failure. */ int twl_clks_enable(device_t dev, const char *name) { struct twl_clks_softc *sc = device_get_softc(dev); struct twl_clk_entry *clk; int err = EINVAL; TWL_CLKS_SLOCK(sc); LIST_FOREACH(clk, &sc->sc_clks_list, link) { if (strcmp(clk->name, name) == 0) { err = twl_clks_set_state(sc, clk, 1); break; } } TWL_CLKS_SUNLOCK(sc); return (err); } /** * twl_clks_sysctl_clock - reads the state of the clock * @SYSCTL_HANDLER_ARGS: arguments for the callback * * Returns the clock status; disabled is zero and enabled is non-zero. * * LOCKING: * It's expected the TWL lock is held while this function is called. * * RETURNS: * EIO if device is not present, otherwise 0 is returned. */ static int twl_clks_sysctl_clock(SYSCTL_HANDLER_ARGS) { struct twl_clks_softc *sc = (struct twl_clks_softc*)arg1; int err; int enabled = 0; if ((err = twl_clks_is_enabled(sc->sc_dev, oidp->oid_name, &enabled)) != 0) return err; return sysctl_handle_int(oidp, &enabled, 0, req); } /** * twl_clks_add_clock - adds single clock sysctls for the device * @sc: device soft context * @name: the name of the regulator * @nsub: the number of the subdevice * @regbase: the base address of the clocks registers * * Adds a single clock to the device and also a sysctl interface for * querying it's status. * * LOCKING: * It's expected the exclusive lock is held while this function is called. * * RETURNS: * Pointer to the new clock entry on success, otherwise NULL on failure. */ static struct twl_clk_entry* twl_clks_add_clock(struct twl_clks_softc *sc, const char *name, uint8_t nsub, uint8_t regbase) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct twl_clk_entry *new; TWL_CLKS_ASSERT_LOCKED(sc); new = malloc(sizeof(struct twl_clk_entry), M_DEVBUF, M_NOWAIT | M_ZERO); if (new == NULL) return (NULL); strncpy(new->name, name, TWL_CLKS_MAX_NAMELEN); new->name[TWL_CLKS_MAX_NAMELEN - 1] = '\0'; new->sub_dev = nsub; new->reg_off = regbase; /* Add a sysctl entry for the clock */ new->oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RD, sc, 0, twl_clks_sysctl_clock, "I", "external clock"); /* Finally add the regulator to list of supported regulators */ LIST_INSERT_HEAD(&sc->sc_clks_list, new, link); return (new); } /** * twl_clks_add_clocks - populates the internal list of clocks * @sc: device soft context * @chip: the name of the chip used in the hints * @clks the list of clocks supported by the device * * Loops over the list of clocks and adds them to the device context. Also * scans the FDT to determine if there are any clocks that should be * enabled/disabled automatically. * * LOCKING: * Internally takes the exclusive lock while adding the clocks to the * device context. * * RETURNS: * Always returns 0. 
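 *
 * Each clock added here is also exposed as a read-only integer sysctl via
 * twl_clks_sysctl_clock() above, so its state can be inspected from
 * userland; the exact node name depends on the unit, e.g.
 * dev.twl_clks.0.clk32kg (illustrative).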
*/ static int twl_clks_add_clocks(struct twl_clks_softc *sc, const struct twl_clock *clks) { int err; const struct twl_clock *walker; struct twl_clk_entry *entry; phandle_t child; char rnames[256]; char *name, *state; int len = 0, prop_len; int enable; TWL_CLKS_XLOCK(sc); /* Add the clocks from the list */ walker = &clks[0]; while (walker->name != NULL) { /* Add the clock to the list */ entry = twl_clks_add_clock(sc, walker->name, walker->subdev, walker->regbase); if (entry == NULL) break; /* allocation failed, stop rather than loop forever */ walker++; } /* Check for any FDT settings that need to be applied */ child = ofw_bus_get_node(sc->sc_pdev); if (child) { prop_len = OF_getprop(child, "external-clocks", rnames, sizeof(rnames)); while (len < prop_len) { name = rnames + len; len += strlen(name) + 1; if ((len >= prop_len) || (name[0] == '\0')) break; state = rnames + len; len += strlen(state) + 1; if (state[0] == '\0') break; enable = !strncmp(state, "on", 2); LIST_FOREACH(entry, &sc->sc_clks_list, link) { if (strcmp(entry->name, name) == 0) { twl_clks_set_state(sc, entry, enable); break; } } } } TWL_CLKS_XUNLOCK(sc); if (twl_clks_debug) { LIST_FOREACH(entry, &sc->sc_clks_list, link) { err = twl_clks_is_enabled(sc->sc_dev, entry->name, &enable); if (!err) device_printf(sc->sc_dev, "%s : %s\n", entry->name, enable ? "on" : "off"); } } return (0); } /** * twl_clks_init - initialises the list of clocks * @dev: the twl_clks device * * This function is called as an intrhook once interrupts have been enabled; * this is done so that the driver has the option to enable/disable a clock * based on settings provided in the FDT. * * LOCKING: * May take the exclusive lock in the function. */ static void twl_clks_init(void *dev) { struct twl_clks_softc *sc; sc = device_get_softc((device_t)dev); if (twl_is_4030(sc->sc_pdev)) twl_clks_add_clocks(sc, twl4030_clocks); else if (twl_is_6030(sc->sc_pdev) || twl_is_6025(sc->sc_pdev)) twl_clks_add_clocks(sc, twl6030_clocks); config_intrhook_disestablish(&sc->sc_init_hook); } static int twl_clks_probe(device_t dev) { if (twl_is_4030(device_get_parent(dev))) device_set_desc(dev, "TI TWL4030 PMIC External Clocks"); else if (twl_is_6025(device_get_parent(dev)) || twl_is_6030(device_get_parent(dev))) device_set_desc(dev, "TI TWL6025/TWL6030 PMIC External Clocks"); else return (ENXIO); return (0); } static int twl_clks_attach(device_t dev) { struct twl_clks_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_pdev = device_get_parent(dev); TWL_CLKS_LOCK_INIT(sc); LIST_INIT(&sc->sc_clks_list); sc->sc_init_hook.ich_func = twl_clks_init; sc->sc_init_hook.ich_arg = dev; if (config_intrhook_establish(&sc->sc_init_hook) != 0) return (ENOMEM); return (0); } static int twl_clks_detach(device_t dev) { struct twl_clks_softc *sc; struct twl_clk_entry *clk; struct twl_clk_entry *tmp; sc = device_get_softc(dev); TWL_CLKS_XLOCK(sc); LIST_FOREACH_SAFE(clk, &sc->sc_clks_list, link, tmp) { LIST_REMOVE(clk, link); sysctl_remove_oid(clk->oid, 1, 0); free(clk, M_DEVBUF); } TWL_CLKS_XUNLOCK(sc); TWL_CLKS_LOCK_DESTROY(sc); return (0); } static device_method_t twl_clks_methods[] = { DEVMETHOD(device_probe, twl_clks_probe), DEVMETHOD(device_attach, twl_clks_attach), DEVMETHOD(device_detach, twl_clks_detach), {0, 0}, }; static driver_t twl_clks_driver = { "twl_clks", twl_clks_methods, sizeof(struct twl_clks_softc), }; static devclass_t twl_clks_devclass; DRIVER_MODULE(twl_clks, twl, twl_clks_driver, twl_clks_devclass, 0, 0); MODULE_VERSION(twl_clks, 1); Index: head/sys/arm/ti/twl/twl_vreg.c
=================================================================== --- head/sys/arm/ti/twl/twl_vreg.c (revision 306901) +++ head/sys/arm/ti/twl/twl_vreg.c (revision 306902) @@ -1,1054 +1,1052 @@ /*- * Copyright (c) 2011 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Texas Instruments TWL4030/TWL5030/TWL60x0/TPS659x0 Power Management. * * This driver covers the voltages regulators (LDO), allows for enabling & * disabling the voltage output and adjusting the voltage level. * * Voltage regulators can belong to different power groups, in this driver we * put the regulators under our control in the "Application power group". * * * FLATTENED DEVICE TREE (FDT) * Startup override settings can be specified in the FDT, if they are they * should be under the twl parent device and take the following form: * * voltage-regulators = "name1", "millivolts1", * "name2", "millivolts2"; * * Each override should be a pair, the first entry is the name of the regulator * the second is the voltage (in millivolts) to set for the given regulator. 
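 *
 * For example (the values are illustrative only, not taken from a real
 * board DTS):
 *
 *   voltage-regulators = "vmmc1", "3000",
 *                        "vaux3", "2800";
 *
 * would request 3.0V on vmmc1 and 2.8V on vaux3 when the driver
 * initialises.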
* */ #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include "twl.h" #include "twl_vreg.h" static int twl_vreg_debug = 1; /* * Power Groups bits for the 4030 and 6030 devices */ #define TWL4030_P3_GRP 0x80 /* Peripherals, power group */ #define TWL4030_P2_GRP 0x40 /* Modem power group */ #define TWL4030_P1_GRP 0x20 /* Application power group (FreeBSD control) */ #define TWL6030_P3_GRP 0x04 /* Modem power group */ #define TWL6030_P2_GRP 0x02 /* Connectivity power group */ #define TWL6030_P1_GRP 0x01 /* Application power group (FreeBSD control) */ /* * Register offsets within a LDO regulator register set */ #define TWL_VREG_GRP 0x00 /* Regulator GRP register */ #define TWL_VREG_STATE 0x02 #define TWL_VREG_VSEL 0x03 /* Voltage select register */ #define UNDF 0xFFFF static const uint16_t twl6030_voltages[] = { 0000, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300, UNDF, UNDF, UNDF, UNDF, UNDF, UNDF, 2750 }; static const uint16_t twl4030_vaux1_voltages[] = { 1500, 1800, 2500, 2800, 3000, 3000, 3000, 3000 }; static const uint16_t twl4030_vaux2_voltages[] = { 1700, 1700, 1900, 1300, 1500, 1800, 2000, 2500, 2100, 2800, 2200, 2300, 2400, 2400, 2400, 2400 }; static const uint16_t twl4030_vaux3_voltages[] = { 1500, 1800, 2500, 2800, 3000, 3000, 3000, 3000 }; static const uint16_t twl4030_vaux4_voltages[] = { 700, 1000, 1200, 1300, 1500, 1800, 1850, 2500, 2600, 2800, 2850, 3000, 3150, 3150, 3150, 3150 }; static const uint16_t twl4030_vmmc1_voltages[] = { 1850, 2850, 3000, 3150 }; static const uint16_t twl4030_vmmc2_voltages[] = { 1000, 1000, 1200, 1300, 1500, 1800, 1850, 2500, 2600, 2800, 2850, 3000, 3150, 3150, 3150, 3150 }; static const uint16_t twl4030_vpll1_voltages[] = { 1000, 1200, 1300, 1800, 2800, 3000, 3000, 3000 }; static const uint16_t twl4030_vpll2_voltages[] = { 700, 1000, 1200, 1300, 1500, 1800, 1850, 2500, 2600, 2800, 2850, 3000, 3150, 3150, 3150, 3150 }; static const uint16_t twl4030_vsim_voltages[] = { 1000, 1200, 1300, 1800, 2800, 3000, 3000, 3000 }; static const uint16_t twl4030_vdac_voltages[] = { 1200, 1300, 1800, 1800 }; #if 0 /* vdd1, vdd2, vdio, not currently used. 
*/ static const uint16_t twl4030_vdd1_voltages[] = { 800, 1450 }; static const uint16_t twl4030_vdd2_voltages[] = { 800, 1450, 1500 }; static const uint16_t twl4030_vio_voltages[] = { 1800, 1850 }; #endif static const uint16_t twl4030_vintana2_voltages[] = { 2500, 2750 }; /** * Support voltage regulators for the different IC's */ struct twl_regulator { const char *name; uint8_t subdev; uint8_t regbase; uint16_t fixedvoltage; const uint16_t *voltages; uint32_t num_voltages; }; #define TWL_REGULATOR_ADJUSTABLE(name, subdev, reg, voltages) \ { name, subdev, reg, 0, voltages, (sizeof(voltages)/sizeof(voltages[0])) } #define TWL_REGULATOR_FIXED(name, subdev, reg, voltage) \ { name, subdev, reg, voltage, NULL, 0 } static const struct twl_regulator twl4030_regulators[] = { TWL_REGULATOR_ADJUSTABLE("vaux1", 0, 0x17, twl4030_vaux1_voltages), TWL_REGULATOR_ADJUSTABLE("vaux2", 0, 0x1B, twl4030_vaux2_voltages), TWL_REGULATOR_ADJUSTABLE("vaux3", 0, 0x1F, twl4030_vaux3_voltages), TWL_REGULATOR_ADJUSTABLE("vaux4", 0, 0x23, twl4030_vaux4_voltages), TWL_REGULATOR_ADJUSTABLE("vmmc1", 0, 0x27, twl4030_vmmc1_voltages), TWL_REGULATOR_ADJUSTABLE("vmmc2", 0, 0x2B, twl4030_vmmc2_voltages), TWL_REGULATOR_ADJUSTABLE("vpll1", 0, 0x2F, twl4030_vpll1_voltages), TWL_REGULATOR_ADJUSTABLE("vpll2", 0, 0x33, twl4030_vpll2_voltages), TWL_REGULATOR_ADJUSTABLE("vsim", 0, 0x37, twl4030_vsim_voltages), TWL_REGULATOR_ADJUSTABLE("vdac", 0, 0x3B, twl4030_vdac_voltages), TWL_REGULATOR_ADJUSTABLE("vintana2", 0, 0x43, twl4030_vintana2_voltages), TWL_REGULATOR_FIXED("vintana1", 0, 0x3F, 1500), TWL_REGULATOR_FIXED("vintdig", 0, 0x47, 1500), TWL_REGULATOR_FIXED("vusb1v5", 0, 0x71, 1500), TWL_REGULATOR_FIXED("vusb1v8", 0, 0x74, 1800), TWL_REGULATOR_FIXED("vusb3v1", 0, 0x77, 3100), { NULL, 0, 0x00, 0, NULL, 0 } }; static const struct twl_regulator twl6030_regulators[] = { TWL_REGULATOR_ADJUSTABLE("vaux1", 0, 0x84, twl6030_voltages), TWL_REGULATOR_ADJUSTABLE("vaux2", 0, 0x89, twl6030_voltages), TWL_REGULATOR_ADJUSTABLE("vaux3", 0, 0x8C, twl6030_voltages), TWL_REGULATOR_ADJUSTABLE("vmmc", 0, 0x98, twl6030_voltages), TWL_REGULATOR_ADJUSTABLE("vpp", 0, 0x9C, twl6030_voltages), TWL_REGULATOR_ADJUSTABLE("vusim", 0, 0xA4, twl6030_voltages), TWL_REGULATOR_FIXED("vmem", 0, 0x64, 1800), TWL_REGULATOR_FIXED("vusb", 0, 0xA0, 3300), TWL_REGULATOR_FIXED("v1v8", 0, 0x46, 1800), TWL_REGULATOR_FIXED("v2v1", 0, 0x4C, 2100), TWL_REGULATOR_FIXED("v1v29", 0, 0x40, 1290), TWL_REGULATOR_FIXED("vcxio", 0, 0x90, 1800), TWL_REGULATOR_FIXED("vdac", 0, 0x94, 1800), TWL_REGULATOR_FIXED("vana", 0, 0x80, 2100), { NULL, 0, 0x00, 0, NULL, 0 } }; #define TWL_VREG_MAX_NAMELEN 32 struct twl_regulator_entry { LIST_ENTRY(twl_regulator_entry) entries; char name[TWL_VREG_MAX_NAMELEN]; struct sysctl_oid *oid; uint8_t sub_dev; /* TWL sub-device group */ uint8_t reg_off; /* base register offset for the LDO */ uint16_t fixed_voltage; /* the (milli)voltage if LDO is fixed */ const uint16_t *supp_voltages; /* pointer to an array of possible voltages */ uint32_t num_supp_voltages; /* the number of supplied voltages */ }; struct twl_vreg_softc { device_t sc_dev; device_t sc_pdev; struct sx sc_sx; struct intr_config_hook sc_init_hook; LIST_HEAD(twl_regulator_list, twl_regulator_entry) sc_vreg_list; }; #define TWL_VREG_XLOCK(_sc) sx_xlock(&(_sc)->sc_sx) #define TWL_VREG_XUNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx) #define TWL_VREG_SLOCK(_sc) sx_slock(&(_sc)->sc_sx) #define TWL_VREG_SUNLOCK(_sc) sx_sunlock(&(_sc)->sc_sx) #define TWL_VREG_LOCK_INIT(_sc) sx_init(&(_sc)->sc_sx, "twl_vreg") #define 
TWL_VREG_LOCK_DESTROY(_sc) sx_destroy(&(_sc)->sc_sx); #define TWL_VREG_ASSERT_LOCKED(_sc) sx_assert(&(_sc)->sc_sx, SA_LOCKED); #define TWL_VREG_LOCK_UPGRADE(_sc) \ do { \ while (!sx_try_upgrade(&(_sc)->sc_sx)) \ pause("twl_vreg_ex", (hz / 100)); \ } while(0) #define TWL_VREG_LOCK_DOWNGRADE(_sc) sx_downgrade(&(_sc)->sc_sx); /** * twl_vreg_read_1 - read single register from the TWL device * twl_vreg_write_1 - write a single register in the TWL device * @sc: device context * @clk: the clock device we're reading from / writing to * @off: offset within the clock's register set * @val: the value to write or a pointer to a variable to store the result * * RETURNS: * Zero on success or an error code on failure. */ static inline int twl_vreg_read_1(struct twl_vreg_softc *sc, struct twl_regulator_entry *regulator, uint8_t off, uint8_t *val) { return (twl_read(sc->sc_pdev, regulator->sub_dev, regulator->reg_off + off, val, 1)); } static inline int twl_vreg_write_1(struct twl_vreg_softc *sc, struct twl_regulator_entry *regulator, uint8_t off, uint8_t val) { return (twl_write(sc->sc_pdev, regulator->sub_dev, regulator->reg_off + off, &val, 1)); } /** * twl_millivolt_to_vsel - gets the vsel bit value to write into the register * for a desired voltage and regulator * @sc: the device soft context * @regulator: pointer to the regulator device * @millivolts: the millivolts to find the bit value for * @vsel: upon return will contain the corresponding register value * * Accepts a (milli)voltage value and tries to find the closest match to the * actual supported voltages for the given regulator. If a match is found * within 100mv of the target, @vsel is written with the match and 0 is * returned. If no voltage match is found the function returns an non-zero * value. * * RETURNS: * Zero on success or an error code on failure. */ static int twl_vreg_millivolt_to_vsel(struct twl_vreg_softc *sc, struct twl_regulator_entry *regulator, int millivolts, uint8_t *vsel) { int delta, smallest_delta; unsigned i, closest_idx; TWL_VREG_ASSERT_LOCKED(sc); if (regulator->supp_voltages == NULL) return (EINVAL); /* Loop over the support voltages and try and find the closest match */ closest_idx = 0; smallest_delta = 0x7fffffff; for (i = 0; i < regulator->num_supp_voltages; i++) { /* Ignore undefined values */ if (regulator->supp_voltages[i] == UNDF) continue; /* Calculate the difference */ delta = millivolts - (int)regulator->supp_voltages[i]; if (abs(delta) < smallest_delta) { smallest_delta = abs(delta); closest_idx = i; } } /* Check we got a voltage that was within 100mv of the actual target, this * is just a value I picked out of thin air. */ if ((smallest_delta > 100) && (closest_idx < 0x100)) return (EINVAL); *vsel = closest_idx; return (0); } /** * twl_vreg_is_regulator_enabled - returns the enabled status of the regulator * @sc: the device soft context * @regulator: pointer to the regulator device * @enabled: stores the enabled status, zero disabled, non-zero enabled * * LOCKING: * On entry expects the TWL VREG lock to be held. Will upgrade the lock to * exclusive if not already but, if so, it will be downgraded again before * returning. * * RETURNS: * Zero on success or an error code on failure. 
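 *
 * NOTE:
 * What "enabled" means differs per chip: on the TWL4030 it is membership
 * of the application (P1) power group, while on the TWL6030/TWL6025 the
 * regulator's resource STATE field must additionally read back as "ON".
 * This simply summarises the checks performed in the body below.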
*/ static int twl_vreg_is_regulator_enabled(struct twl_vreg_softc *sc, struct twl_regulator_entry *regulator, int *enabled) { int err; uint8_t grp; uint8_t state; int xlocked; if (enabled == NULL) return (EINVAL); TWL_VREG_ASSERT_LOCKED(sc); xlocked = sx_xlocked(&sc->sc_sx); if (!xlocked) TWL_VREG_LOCK_UPGRADE(sc); /* The status reading is different for the different devices */ if (twl_is_4030(sc->sc_pdev)) { err = twl_vreg_read_1(sc, regulator, TWL_VREG_GRP, &state); if (err) goto done; *enabled = (state & TWL4030_P1_GRP); } else if (twl_is_6030(sc->sc_pdev) || twl_is_6025(sc->sc_pdev)) { /* Check the regulator is in the application group */ if (twl_is_6030(sc->sc_pdev)) { err = twl_vreg_read_1(sc, regulator, TWL_VREG_GRP, &grp); if (err) goto done; if (!(grp & TWL6030_P1_GRP)) { *enabled = 0; /* disabled */ goto done; } } /* Read the application mode state and verify it's ON */ err = twl_vreg_read_1(sc, regulator, TWL_VREG_STATE, &state); if (err) goto done; *enabled = ((state & 0x0C) == 0x04); } else { err = EINVAL; } done: if (!xlocked) TWL_VREG_LOCK_DOWNGRADE(sc); return (err); } /** * twl_vreg_disable_regulator - disables a voltage regulator * @sc: the device soft context * @regulator: pointer to the regulator device * * Disables the regulator which will stop the output drivers. * * LOCKING: * On entry expects the TWL VREG lock to be held. Will upgrade the lock to * exclusive if not already but, if so, it will be downgraded again before * returning. * * RETURNS: * Zero on success or a positive error code on failure. */ static int twl_vreg_disable_regulator(struct twl_vreg_softc *sc, struct twl_regulator_entry *regulator) { int err = 0; uint8_t grp; int xlocked; TWL_VREG_ASSERT_LOCKED(sc); xlocked = sx_xlocked(&sc->sc_sx); if (!xlocked) TWL_VREG_LOCK_UPGRADE(sc); if (twl_is_4030(sc->sc_pdev)) { /* Read the regulator CFG_GRP register */ err = twl_vreg_read_1(sc, regulator, TWL_VREG_GRP, &grp); if (err) goto done; /* On the TWL4030 we just need to remove the regulator from all the * power groups. */ grp &= ~(TWL4030_P1_GRP | TWL4030_P2_GRP | TWL4030_P3_GRP); err = twl_vreg_write_1(sc, regulator, TWL_VREG_GRP, grp); } else if (twl_is_6030(sc->sc_pdev) || twl_is_6025(sc->sc_pdev)) { /* On TWL6030 we need to make sure we disable power for all groups */ if (twl_is_6030(sc->sc_pdev)) grp = TWL6030_P1_GRP | TWL6030_P2_GRP | TWL6030_P3_GRP; else grp = 0x00; /* Write the resource state to "OFF" */ err = twl_vreg_write_1(sc, regulator, TWL_VREG_STATE, (grp << 5)); } done: if (!xlocked) TWL_VREG_LOCK_DOWNGRADE(sc); return (err); } /** * twl_vreg_enable_regulator - enables the voltage regulator * @sc: the device soft context * @regulator: pointer to the regulator device * * Enables the regulator which will enable the voltage out at the currently * set voltage. Set the voltage before calling this function to avoid * driving the voltage too high/low by mistake. * * LOCKING: * On entry expects the TWL VREG lock to be held. Will upgrade the lock to * exclusive if not already but, if so, it will be downgraded again before * returning. * * RETURNS: * Zero on success or a positive error code on failure. 
*/ static int twl_vreg_enable_regulator(struct twl_vreg_softc *sc, struct twl_regulator_entry *regulator) { int err; uint8_t grp; int xlocked; TWL_VREG_ASSERT_LOCKED(sc); xlocked = sx_xlocked(&sc->sc_sx); if (!xlocked) TWL_VREG_LOCK_UPGRADE(sc); err = twl_vreg_read_1(sc, regulator, TWL_VREG_GRP, &grp); if (err) goto done; /* Enable the regulator by ensuring it's in the application power group * and is in the "on" state. */ if (twl_is_4030(sc->sc_pdev)) { /* On the TWL4030 we just need to ensure the regulator is in the right * power domain, don't need to turn on explicitly like TWL6030. */ grp |= TWL4030_P1_GRP; err = twl_vreg_write_1(sc, regulator, TWL_VREG_GRP, grp); } else if (twl_is_6030(sc->sc_pdev) || twl_is_6025(sc->sc_pdev)) { if (twl_is_6030(sc->sc_pdev) && !(grp & TWL6030_P1_GRP)) { grp |= TWL6030_P1_GRP; err = twl_vreg_write_1(sc, regulator, TWL_VREG_GRP, grp); if (err) goto done; } /* Write the resource state to "ON" */ err = twl_vreg_write_1(sc, regulator, TWL_VREG_STATE, (grp << 5) | 0x01); } done: if (!xlocked) TWL_VREG_LOCK_DOWNGRADE(sc); return (err); } /** * twl_vreg_write_regulator_voltage - sets the voltage level on a regulator * @sc: the device soft context * @regulator: pointer to the regulator structure * @millivolts: the voltage to set * * Sets the voltage output on a given regulator, if the regulator is not * enabled, it will be enabled. * * LOCKING: * On entry expects the TWL VREG lock to be held, may upgrade the lock to * exclusive but if so it will be downgraded once again before returning. * * RETURNS: * Zero on success or an error code on failure. */ static int twl_vreg_write_regulator_voltage(struct twl_vreg_softc *sc, struct twl_regulator_entry *regulator, int millivolts) { int err; uint8_t vsel; int xlocked; TWL_VREG_ASSERT_LOCKED(sc); /* If millivolts is zero then we simply disable the output */ if (millivolts == 0) return (twl_vreg_disable_regulator(sc, regulator)); /* If the regulator has a fixed voltage then check the setting matches * and simply enable. */ if (regulator->supp_voltages == NULL || regulator->num_supp_voltages == 0) { if (millivolts != regulator->fixed_voltage) return (EINVAL); return (twl_vreg_enable_regulator(sc, regulator)); } /* Get the VSEL value for the given voltage */ err = twl_vreg_millivolt_to_vsel(sc, regulator, millivolts, &vsel); if (err) return (err); /* Need to upgrade because writing the voltage and enabling should be atomic */ xlocked = sx_xlocked(&sc->sc_sx); if (!xlocked) TWL_VREG_LOCK_UPGRADE(sc); /* Set voltage and enable (atomically) */ err = twl_vreg_write_1(sc, regulator, TWL_VREG_VSEL, (vsel & 0x1f)); if (!err) { err = twl_vreg_enable_regulator(sc, regulator); } if (!xlocked) TWL_VREG_LOCK_DOWNGRADE(sc); if ((twl_vreg_debug > 1) && !err) device_printf(sc->sc_dev, "%s : setting voltage to %dmV (vsel: 0x%x)\n", regulator->name, millivolts, vsel); return (err); } /** * twl_vreg_read_regulator_voltage - reads the voltage on a given regulator * @sc: the device soft context * @regulator: pointer to the regulator structure * @millivolts: upon return will contain the voltage on the regulator * * LOCKING: * On entry expects the TWL VREG lock to be held. It will upgrade the lock to * exclusive if not already, but if so, it will be downgraded again before * returning. * * RETURNS: * Zero on success, or otherwise an error code. 
*/ static int twl_vreg_read_regulator_voltage(struct twl_vreg_softc *sc, struct twl_regulator_entry *regulator, int *millivolts) { int err; int en = 0; int xlocked; uint8_t vsel; TWL_VREG_ASSERT_LOCKED(sc); /* Need to upgrade the lock because checking enabled state and voltage * should be atomic. */ xlocked = sx_xlocked(&sc->sc_sx); if (!xlocked) TWL_VREG_LOCK_UPGRADE(sc); /* Check if the regulator is currently enabled */ err = twl_vreg_is_regulator_enabled(sc, regulator, &en); if (err) goto done; *millivolts = 0; if (!en) goto done; /* Not all voltages are adjustable */ if (regulator->supp_voltages == NULL || !regulator->num_supp_voltages) { *millivolts = regulator->fixed_voltage; goto done; } /* For variable voltages read the voltage register */ err = twl_vreg_read_1(sc, regulator, TWL_VREG_VSEL, &vsel); if (err) goto done; vsel &= (regulator->num_supp_voltages - 1); if (regulator->supp_voltages[vsel] == UNDF) { err = EINVAL; goto done; } *millivolts = regulator->supp_voltages[vsel]; done: if (!xlocked) TWL_VREG_LOCK_DOWNGRADE(sc); if ((twl_vreg_debug > 1) && !err) device_printf(sc->sc_dev, "%s : reading voltage is %dmV (vsel: 0x%x)\n", regulator->name, *millivolts, vsel); return (err); } /** * twl_vreg_get_voltage - public interface to read the voltage on a regulator * @dev: TWL VREG device * @name: the name of the regulator to read the voltage of * @millivolts: pointer to an integer that upon return will contain the mV * * If the regulator is disabled the function will set the @millivolts to zero. * * LOCKING: * Internally the function takes and releases the TWL VREG lock. * * RETURNS: * Zero on success or a negative error code on failure. */ int twl_vreg_get_voltage(device_t dev, const char *name, int *millivolts) { struct twl_vreg_softc *sc; struct twl_regulator_entry *regulator; int err = EINVAL; if (millivolts == NULL) return (EINVAL); sc = device_get_softc(dev); TWL_VREG_SLOCK(sc); LIST_FOREACH(regulator, &sc->sc_vreg_list, entries) { if (strcmp(regulator->name, name) == 0) { err = twl_vreg_read_regulator_voltage(sc, regulator, millivolts); break; } } TWL_VREG_SUNLOCK(sc); return (err); } /** * twl_vreg_set_voltage - public interface to write the voltage on a regulator * @dev: TWL VREG device * @name: the name of the regulator to read the voltage of * @millivolts: the voltage to set in millivolts * * Sets the output voltage on a given regulator. If the regulator is a fixed * voltage reg then the @millivolts value should match the fixed voltage. If * a variable regulator then the @millivolt value must fit within the max/min * range of the given regulator. * * LOCKING: * Internally the function takes and releases the TWL VREG lock. * * RETURNS: * Zero on success or a negative error code on failure. */ int twl_vreg_set_voltage(device_t dev, const char *name, int millivolts) { struct twl_vreg_softc *sc; struct twl_regulator_entry *regulator; int err = EINVAL; sc = device_get_softc(dev); TWL_VREG_SLOCK(sc); LIST_FOREACH(regulator, &sc->sc_vreg_list, entries) { if (strcmp(regulator->name, name) == 0) { err = twl_vreg_write_regulator_voltage(sc, regulator, millivolts); break; } } TWL_VREG_SUNLOCK(sc); return (err); } /** * twl_sysctl_voltage - reads or writes the voltage for a regulator * @SYSCTL_HANDLER_ARGS: arguments for the callback * * Callback for the sysctl entry for the regulator, simply used to return * the voltage on a particular regulator. * * LOCKING: * Takes the TWL_VREG shared lock internally. * * RETURNS: * Zero on success or an error code on failure. 
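 *
 * EXAMPLE:
 * Each regulator is exported as a read-only integer node under the
 * device's sysctl tree, so the current level should be readable from
 * userland with something like (assuming unit 0):
 *
 *   sysctl dev.twl_vreg.0.vmmc1
 *
 * which reports the output in millivolts, or 0 if the regulator is
 * disabled.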
*/ static int twl_vreg_sysctl_voltage(SYSCTL_HANDLER_ARGS) { struct twl_vreg_softc *sc = (struct twl_vreg_softc*)arg1; struct twl_regulator_entry *regulator; int voltage; int found = 0; TWL_VREG_SLOCK(sc); /* Find the regulator with the matching name */ LIST_FOREACH(regulator, &sc->sc_vreg_list, entries) { if (strcmp(regulator->name, oidp->oid_name) == 0) { found = 1; break; } } /* Sanity check that we found the regulator */ if (!found) { TWL_VREG_SUNLOCK(sc); return (EINVAL); } twl_vreg_read_regulator_voltage(sc, regulator, &voltage); TWL_VREG_SUNLOCK(sc); return sysctl_handle_int(oidp, &voltage, 0, req); } /** * twl_add_regulator - adds single voltage regulator sysctls for the device * @sc: device soft context * @name: the name of the regulator * @nsub: the number of the subdevice * @regbase: the base address of the voltage regulator registers * @fixed_voltage: if a fixed voltage regulator this defines it's voltage * @voltages: if a variable voltage regulator, an array of possible voltages * @num_voltages: the number of entries @voltages * * Adds a voltage regulator to the device and also a sysctl interface for the * regulator. * * LOCKING: * The TWL_VEG exclusive lock must be held while this function is called. * * RETURNS: * Pointer to the new regulator entry on success, otherwise on failure NULL. */ static struct twl_regulator_entry* twl_vreg_add_regulator(struct twl_vreg_softc *sc, const char *name, uint8_t nsub, uint8_t regbase, uint16_t fixed_voltage, const uint16_t *voltages, uint32_t num_voltages) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct twl_regulator_entry *new; new = malloc(sizeof(struct twl_regulator_entry), M_DEVBUF, M_NOWAIT | M_ZERO); if (new == NULL) return (NULL); strncpy(new->name, name, TWL_VREG_MAX_NAMELEN); new->name[TWL_VREG_MAX_NAMELEN - 1] = '\0'; new->sub_dev = nsub; new->reg_off = regbase; new->fixed_voltage = fixed_voltage; new->supp_voltages = voltages; new->num_supp_voltages = num_voltages; /* Add a sysctl entry for the voltage */ new->oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RD, sc, 0, twl_vreg_sysctl_voltage, "I", "voltage regulator"); /* Finally add the regulator to list of supported regulators */ LIST_INSERT_HEAD(&sc->sc_vreg_list, new, entries); return (new); } /** * twl_vreg_add_regulators - adds any voltage regulators to the device * @sc: device soft context * @chip: the name of the chip used in the hints * @regulators: the list of possible voltage regulators * * Loops over the list of regulators and matches up with the FDT values, * adjusting the actual voltage based on the supplied values. * * LOCKING: * The TWL_VEG exclusive lock must be held while this function is called. * * RETURNS: * Always returns 0. 
*/ static int twl_vreg_add_regulators(struct twl_vreg_softc *sc, const struct twl_regulator *regulators) { int err; int millivolts; const struct twl_regulator *walker; struct twl_regulator_entry *entry; phandle_t child; char rnames[256]; char *name, *voltage; int len = 0, prop_len; /* Add the regulators from the list */ walker = ®ulators[0]; while (walker->name != NULL) { /* Add the regulator to the list */ entry = twl_vreg_add_regulator(sc, walker->name, walker->subdev, walker->regbase, walker->fixedvoltage, walker->voltages, walker->num_voltages); if (entry == NULL) continue; walker++; } /* Check if the FDT is telling us to set any voltages */ child = ofw_bus_get_node(sc->sc_pdev); if (child) { prop_len = OF_getprop(child, "voltage-regulators", rnames, sizeof(rnames)); while (len < prop_len) { name = rnames + len; len += strlen(name) + 1; if ((len >= prop_len) || (name[0] == '\0')) break; voltage = rnames + len; len += strlen(voltage) + 1; if (voltage[0] == '\0') break; millivolts = strtoul(voltage, NULL, 0); LIST_FOREACH(entry, &sc->sc_vreg_list, entries) { if (strcmp(entry->name, name) == 0) { twl_vreg_write_regulator_voltage(sc, entry, millivolts); break; } } } } if (twl_vreg_debug) { LIST_FOREACH(entry, &sc->sc_vreg_list, entries) { err = twl_vreg_read_regulator_voltage(sc, entry, &millivolts); if (!err) device_printf(sc->sc_dev, "%s : %d mV\n", entry->name, millivolts); } } return (0); } /** * twl_vreg_init - initialises the list of regulators * @dev: the twl_vreg device * * This function is called as an intrhook once interrupts have been enabled, * this is done so that the driver has the option to enable/disable or set * the voltage level based on settings providied in the FDT. * * LOCKING: * Takes the exclusive lock in the function. */ static void twl_vreg_init(void *dev) { struct twl_vreg_softc *sc; sc = device_get_softc((device_t)dev); TWL_VREG_XLOCK(sc); if (twl_is_4030(sc->sc_pdev)) twl_vreg_add_regulators(sc, twl4030_regulators); else if (twl_is_6030(sc->sc_pdev) || twl_is_6025(sc->sc_pdev)) twl_vreg_add_regulators(sc, twl6030_regulators); TWL_VREG_XUNLOCK(sc); config_intrhook_disestablish(&sc->sc_init_hook); } static int twl_vreg_probe(device_t dev) { if (twl_is_4030(device_get_parent(dev))) device_set_desc(dev, "TI TWL4030 PMIC Voltage Regulators"); else if (twl_is_6025(device_get_parent(dev)) || twl_is_6030(device_get_parent(dev))) device_set_desc(dev, "TI TWL6025/TWL6030 PMIC Voltage Regulators"); else return (ENXIO); return (0); } static int twl_vreg_attach(device_t dev) { struct twl_vreg_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_pdev = device_get_parent(dev); TWL_VREG_LOCK_INIT(sc); LIST_INIT(&sc->sc_vreg_list); /* We have to wait until interrupts are enabled. I2C read and write * only works if the interrupts are available. 
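	 * config_intrhook_establish() below defers twl_vreg_init() until that
	 * point, and the hook disestablishes itself once the regulator list
	 * has been populated.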
*/ sc->sc_init_hook.ich_func = twl_vreg_init; sc->sc_init_hook.ich_arg = dev; if (config_intrhook_establish(&sc->sc_init_hook) != 0) return (ENOMEM); return (0); } static int twl_vreg_detach(device_t dev) { struct twl_vreg_softc *sc; struct twl_regulator_entry *regulator; struct twl_regulator_entry *tmp; sc = device_get_softc(dev); /* Take the lock and free all the added regulators */ TWL_VREG_XLOCK(sc); LIST_FOREACH_SAFE(regulator, &sc->sc_vreg_list, entries, tmp) { LIST_REMOVE(regulator, entries); sysctl_remove_oid(regulator->oid, 1, 0); free(regulator, M_DEVBUF); } TWL_VREG_XUNLOCK(sc); TWL_VREG_LOCK_DESTROY(sc); return (0); } static device_method_t twl_vreg_methods[] = { DEVMETHOD(device_probe, twl_vreg_probe), DEVMETHOD(device_attach, twl_vreg_attach), DEVMETHOD(device_detach, twl_vreg_detach), {0, 0}, }; static driver_t twl_vreg_driver = { "twl_vreg", twl_vreg_methods, sizeof(struct twl_vreg_softc), }; static devclass_t twl_vreg_devclass; DRIVER_MODULE(twl_vreg, twl, twl_vreg_driver, twl_vreg_devclass, 0, 0); MODULE_VERSION(twl_vreg, 1); Index: head/sys/arm/xscale/i8134x/i80321_timer.c =================================================================== --- head/sys/arm/xscale/i8134x/i80321_timer.c (revision 306901) +++ head/sys/arm/xscale/i8134x/i80321_timer.c (revision 306902) @@ -1,485 +1,484 @@ /* $NetBSD: i80321_timer.c,v 1.7 2003/07/27 04:52:28 thorpej Exp $ */ /*- * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Timer/clock support for the Intel i80321 I/O processor. 
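 *
 * TMR0 supplies the periodic hardclock interrupt, while the timecounter is
 * backed either by the XScale CCNT cycle counter or, when that is not
 * usable (XSCALE_DISABLE_CCNT / CPU_XSCALE_81342), by TCR0 itself; see
 * i80321_timer_get_timecount() below.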
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #ifdef CPU_XSCALE_81342 #define ICU_INT_TIMER0 (8) /* XXX: Can't include i81342reg.h because definitions overrides the ones from i80321reg.h */ #endif #include "opt_timer.h" void (*i80321_hardclock_hook)(void) = NULL; struct i80321_timer_softc { device_t dev; } timer_softc; static unsigned i80321_timer_get_timecount(struct timecounter *tc); static uint32_t counts_per_hz; #if defined(XSCALE_DISABLE_CCNT) || defined(CPU_XSCALE_81342) static uint32_t offset; static uint32_t last = -1; #endif static int ticked = 0; #ifndef COUNTS_PER_SEC #define COUNTS_PER_SEC 200000000 /* 200MHz */ #endif #define COUNTS_PER_USEC (COUNTS_PER_SEC / 1000000) static struct timecounter i80321_timer_timecounter = { i80321_timer_get_timecount, /* get_timecount */ NULL, /* no poll_pps */ ~0u, /* counter_mask */ #if defined(XSCALE_DISABLE_CCNT) || defined(CPU_XSCALE_81342) COUNTS_PER_SEC, #else COUNTS_PER_SEC * 3, /* frequency */ #endif "i80321 timer", /* name */ 1000 /* quality */ }; static int i80321_timer_probe(device_t dev) { device_set_desc(dev, "i80321 timer"); return (0); } static int i80321_timer_attach(device_t dev) { timer_softc.dev = dev; return (0); } static device_method_t i80321_timer_methods[] = { DEVMETHOD(device_probe, i80321_timer_probe), DEVMETHOD(device_attach, i80321_timer_attach), {0, 0}, }; static driver_t i80321_timer_driver = { "itimer", i80321_timer_methods, sizeof(struct i80321_timer_softc), }; static devclass_t i80321_timer_devclass; DRIVER_MODULE(itimer, iq, i80321_timer_driver, i80321_timer_devclass, 0, 0); int clockhandler(void *); static __inline uint32_t tmr1_read(void) { uint32_t rv; #ifdef CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c1, c9, 0" #else __asm __volatile("mrc p6, 0, %0, c1, c1, 0" #endif : "=r" (rv)); return (rv); } static __inline void tmr1_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c1, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c1, c1, 0" #endif : : "r" (val)); } static __inline uint32_t tcr1_read(void) { uint32_t rv; #ifdef CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c3, c9, 0" #else __asm __volatile("mrc p6, 0, %0, c3, c1, 0" #endif : "=r" (rv)); return (rv); } static __inline void tcr1_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c3, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c3, c1, 0" #endif : : "r" (val)); } static __inline void trr1_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c5, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c5, c1, 0" #endif : : "r" (val)); } static __inline uint32_t tmr0_read(void) { uint32_t rv; #ifdef CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c0, c9, 0" #else __asm __volatile("mrc p6, 0, %0, c0, c1, 0" #endif : "=r" (rv)); return (rv); } static __inline void tmr0_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c0, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c0, c1, 0" #endif : : "r" (val)); } static __inline uint32_t tcr0_read(void) { uint32_t rv; #ifdef CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c2, c9, 0" #else __asm __volatile("mrc p6, 0, %0, c2, c1, 0" #endif : "=r" (rv)); return (rv); } static __inline void tcr0_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c2, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c2, c1, 0" #endif : : "r" (val)); } 
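/*
 * The inline accessors above and below wrap the i80321 timer registers
 * (TMRx mode, TCRx count, TRRx reload, TISR interrupt status), which are
 * reached through coprocessor 6 with mrc/mcr rather than memory-mapped
 * I/O; the 81342 variant uses CRm c9 instead of c1.
 */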
static __inline void trr0_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c4, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c4, c1, 0" #endif : : "r" (val)); } static __inline void tisr_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c6, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c6, c1, 0" #endif : : "r" (val)); } static __inline uint32_t tisr_read(void) { int ret; #ifdef CPU_XSCALE_81342 __asm __volatile("mrc p6, 0, %0, c6, c9, 0" : "=r" (ret)); #else __asm __volatile("mrc p6, 0, %0, c6, c1, 0" : "=r" (ret)); #endif return (ret); } static unsigned i80321_timer_get_timecount(struct timecounter *tc) { #if defined(XSCALE_DISABLE_CCNT) || defined(CPU_XSCALE_81342) uint32_t cur = tcr0_read(); if (cur > last && last != -1) { offset += counts_per_hz; if (ticked > 0) ticked--; } if (ticked) { offset += ticked * counts_per_hz; ticked = 0; } return (counts_per_hz - cur + offset); #else uint32_t ret; __asm __volatile("mrc p14, 0, %0, c1, c0, 0\n" : "=r" (ret)); return (ret); #endif } /* * i80321_calibrate_delay: * * Calibrate the delay loop. */ void i80321_calibrate_delay(void) { /* * Just use hz=100 for now -- we'll adjust it, if necessary, * in cpu_initclocks(). */ counts_per_hz = COUNTS_PER_SEC / 100; tmr0_write(0); /* stop timer */ tisr_write(TISR_TMR0); /* clear interrupt */ trr0_write(counts_per_hz); /* reload value */ tcr0_write(counts_per_hz); /* current value */ tmr0_write(TMRx_ENABLE|TMRx_RELOAD|TMRx_CSEL_CORE); } /* * cpu_initclocks: * * Initialize the clock and get them going. */ void cpu_initclocks(void) { u_int oldirqstate; struct resource *irq; int rid = 0; void *ihl; device_t dev = timer_softc.dev; if (hz < 50 || COUNTS_PER_SEC % hz) { printf("Cannot get %d Hz clock; using 100 Hz\n", hz); hz = 100; } tick = 1000000 / hz; /* number of microseconds between interrupts */ /* * We only have one timer available; stathz and profhz are * always left as 0 (the upper-layer clock code deals with * this situation). */ if (stathz != 0) printf("Cannot get %d Hz statclock\n", stathz); stathz = 0; if (profhz != 0) printf("Cannot get %d Hz profclock\n", profhz); profhz = 0; /* Report the clock frequency. */ oldirqstate = disable_interrupts(PSR_I); irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, #ifdef CPU_XSCALE_81342 ICU_INT_TIMER0, ICU_INT_TIMER0, #else ICU_INT_TMR0, ICU_INT_TMR0, #endif 1, RF_ACTIVE); if (!irq) panic("Unable to setup the clock irq handler.\n"); else bus_setup_intr(dev, irq, INTR_TYPE_CLK, clockhandler, NULL, NULL, &ihl); tmr0_write(0); /* stop timer */ tisr_write(TISR_TMR0); /* clear interrupt */ counts_per_hz = COUNTS_PER_SEC / hz; trr0_write(counts_per_hz); /* reload value */ tcr0_write(counts_per_hz); /* current value */ tmr0_write(TMRx_ENABLE|TMRx_RELOAD|TMRx_CSEL_CORE); tc_init(&i80321_timer_timecounter); restore_interrupts(oldirqstate); rid = 0; #if !defined(XSCALE_DISABLE_CCNT) && !defined(CPU_XSCALE_81342) /* Enable the clock count register. */ __asm __volatile("mrc p14, 0, %0, c0, c0, 0\n" : "=r" (rid)); rid &= ~(1 << 3); rid |= (1 << 2) | 1; __asm __volatile("mcr p14, 0, %0, c0, c0, 0\n" : : "r" (rid)); #endif } /* * DELAY: * * Delay for at least N microseconds. */ void DELAY(int n) { uint32_t cur, last, delta, usecs; /* * This works by polling the timer and counting the * number of microseconds that go by. */ last = tcr0_read(); delta = usecs = 0; while (n > usecs) { cur = tcr0_read(); /* Check to see if the timer has wrapped around. 
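		 * TCR0 counts down from counts_per_hz, so reading a value
		 * larger than the previous one means the counter reloaded:
		 * the elapsed ticks are then what was left of the old period
		 * plus what has already run off the new one.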
*/ if (last < cur) delta += (last + (counts_per_hz - cur)); else delta += (last - cur); last = cur; if (delta >= COUNTS_PER_USEC) { usecs += delta / COUNTS_PER_USEC; delta %= COUNTS_PER_USEC; } } } /* * clockhandler: * * Handle the hardclock interrupt. */ int clockhandler(void *arg) { struct trapframe *frame = arg; ticked++; tisr_write(TISR_TMR0); hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); if (i80321_hardclock_hook != NULL) (*i80321_hardclock_hook)(); return (FILTER_HANDLED); } void cpu_startprofclock(void) { } void cpu_stopprofclock(void) { } Index: head/sys/arm/xscale/i8134x/i80321_wdog.c =================================================================== --- head/sys/arm/xscale/i8134x/i80321_wdog.c (revision 306901) +++ head/sys/arm/xscale/i8134x/i80321_wdog.c (revision 306902) @@ -1,154 +1,153 @@ /* $NetBSD: i80321_wdog.c,v 1.6 2003/07/15 00:24:54 lukem Exp $ */ /*- * Copyright (c) 2005 Olivier Houchard * Copyright (c) 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Watchdog timer support for the Intel i80321 I/O processor. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include -#include #include #include #include struct iopwdog_softc { device_t dev; int armed; int wdog_period; }; static __inline void wdtcr_write(uint32_t val) { #ifdef CPU_XSCALE_81342 __asm __volatile("mcr p6, 0, %0, c7, c9, 0" #else __asm __volatile("mcr p6, 0, %0, c7, c1, 0" #endif : : "r" (val)); } static void iopwdog_tickle(void *arg) { struct iopwdog_softc *sc = arg; if (!sc->armed) return; wdtcr_write(WDTCR_ENABLE1); wdtcr_write(WDTCR_ENABLE2); } static int iopwdog_probe(device_t dev) { struct iopwdog_softc *sc = device_get_softc(dev); char buf[128]; /* * XXX Should compute the period based on processor speed. * For a 600MHz XScale core, the wdog must be tickled approx. * every 7 seconds. 
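	 * watchdog(9) timeouts arrive as a power of two in nanoseconds (see
	 * iopwdog_watchdog_fn() below); any request that fits under this
	 * period keeps the dog armed, and once armed it cannot be disabled.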
*/ sc->wdog_period = 7; sprintf(buf, "i80321 Watchdog, must be tickled every %d seconds", sc->wdog_period); device_set_desc_copy(dev, buf); return (0); } static void iopwdog_watchdog_fn(void *private, u_int cmd, int *error) { struct iopwdog_softc *sc = private; cmd &= WD_INTERVAL; if (cmd > 0 && cmd <= 63 && (uint64_t)1<wdog_period * 1000000000) { /* Valid value -> Enable watchdog */ iopwdog_tickle(sc); sc->armed = 1; *error = 0; } else { /* Can't disable this watchdog! */ if (sc->armed) *error = EOPNOTSUPP; } } static int iopwdog_attach(device_t dev) { struct iopwdog_softc *sc = device_get_softc(dev); sc->dev = dev; sc->armed = 0; EVENTHANDLER_REGISTER(watchdog_list, iopwdog_watchdog_fn, sc, 0); return (0); } static device_method_t iopwdog_methods[] = { DEVMETHOD(device_probe, iopwdog_probe), DEVMETHOD(device_attach, iopwdog_attach), {0, 0}, }; static driver_t iopwdog_driver = { "iopwdog", iopwdog_methods, sizeof(struct iopwdog_softc), }; static devclass_t iopwdog_devclass; DRIVER_MODULE(iopwdog, iq, iopwdog_driver, iopwdog_devclass, 0, 0); Index: head/sys/arm/xscale/ixp425/avila_ata.c =================================================================== --- head/sys/arm/xscale/ixp425/avila_ata.c (revision 306901) +++ head/sys/arm/xscale/ixp425/avila_ata.c (revision 306902) @@ -1,553 +1,551 @@ /*- * Copyright (c) 2006 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Compact Flash Support for the Avila Gateworks XScale boards. * The CF slot is operated in "True IDE" mode. Registers are on * the Expansion Bus connected to CS1 and CS2. Interrupts are * tied to GPIO pin 12. No DMA, just PIO. * * The ADI Pronghorn Metro is very similar. It use CS3 and CS4 and * GPIO pin 0 for interrupts. * * See also http://www.intel.com/design/network/applnots/302456.htm. 
*/ #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include #include #include #include #include #define AVILA_IDE_CTRL 0x06 struct ata_config { const char *desc; /* description for probe */ uint8_t gpin; /* GPIO pin */ uint8_t irq; /* IRQ */ uint32_t base16; /* CS base addr for 16-bit */ uint32_t size16; /* CS size for 16-bit */ uint32_t off16; /* CS offset for 16-bit */ uint32_t basealt; /* CS base addr for alt */ uint32_t sizealt; /* CS size for alt */ uint32_t offalt; /* CS offset for alt */ }; static const struct ata_config * ata_getconfig(struct ixp425_softc *sa) { static const struct ata_config configs[] = { { .desc = "Gateworks Avila IDE/CF Controller", .gpin = 12, .irq = IXP425_INT_GPIO_12, .base16 = IXP425_EXP_BUS_CS1_HWBASE, .size16 = IXP425_EXP_BUS_CS1_SIZE, .off16 = EXP_TIMING_CS1_OFFSET, .basealt = IXP425_EXP_BUS_CS2_HWBASE, .sizealt = IXP425_EXP_BUS_CS2_SIZE, .offalt = EXP_TIMING_CS2_OFFSET, }, { .desc = "Gateworks Cambria IDE/CF Controller", .gpin = 12, .irq = IXP425_INT_GPIO_12, .base16 = CAMBRIA_CFSEL0_HWBASE, .size16 = CAMBRIA_CFSEL0_SIZE, .off16 = EXP_TIMING_CS3_OFFSET, .basealt = CAMBRIA_CFSEL1_HWBASE, .sizealt = CAMBRIA_CFSEL1_SIZE, .offalt = EXP_TIMING_CS4_OFFSET, }, { .desc = "ADI Pronghorn Metro IDE/CF Controller", .gpin = 0, .irq = IXP425_INT_GPIO_0, .base16 = IXP425_EXP_BUS_CS3_HWBASE, .size16 = IXP425_EXP_BUS_CS3_SIZE, .off16 = EXP_TIMING_CS3_OFFSET, .basealt = IXP425_EXP_BUS_CS4_HWBASE, .sizealt = IXP425_EXP_BUS_CS4_SIZE, .offalt = EXP_TIMING_CS4_OFFSET, }, }; /* XXX honor hint? (but then no multi-board support) */ /* XXX total hack */ if (cpu_is_ixp43x()) return &configs[1]; /* Cambria */ if (EXP_BUS_READ_4(sa, EXP_TIMING_CS2_OFFSET) != 0) return &configs[0]; /* Avila */ return &configs[2]; /* Pronghorn */ } struct ata_avila_softc { device_t sc_dev; bus_space_tag_t sc_iot; bus_space_handle_t sc_exp_ioh; /* Exp Bus config registers */ bus_space_handle_t sc_ioh; /* CS1/3 data registers */ bus_space_handle_t sc_alt_ioh; /* CS2/4 data registers */ struct bus_space sc_expbus_tag; struct resource sc_ata; /* hand-crafted for ATA */ struct resource sc_alt_ata; /* hand-crafted for ATA */ u_int32_t sc_16bit_off; /* EXP_TIMING_CSx_OFFSET */ int sc_rid; /* rid for IRQ */ struct resource *sc_irq; /* IRQ resource */ void *sc_ih; /* interrupt handler */ struct { void (*cb)(void *); void *arg; } sc_intr[1]; /* NB: 1/channel */ }; static void ata_avila_intr(void *); bs_protos(ata); static void ata_bs_rm_2_s(bus_space_tag_t tag, bus_space_handle_t, bus_size_t, u_int16_t *, bus_size_t); static void ata_bs_wm_2_s(bus_space_tag_t tag, bus_space_handle_t, bus_size_t, const u_int16_t *, bus_size_t); static int ata_avila_probe(device_t dev) { struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); const struct ata_config *config; config = ata_getconfig(sa); if (config != NULL) { device_set_desc_copy(dev, config->desc); return 0; } return ENXIO; } static int ata_avila_attach(device_t dev) { struct ata_avila_softc *sc = device_get_softc(dev); struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); const struct ata_config *config; config = ata_getconfig(sa); KASSERT(config != NULL, ("no board config")); sc->sc_dev = dev; /* NB: borrow from parent */ sc->sc_iot = sa->sc_iot; sc->sc_exp_ioh = sa->sc_exp_ioh; if (bus_space_map(sc->sc_iot, config->base16, config->size16, 0, &sc->sc_ioh)) panic("%s: cannot map 16-bit window (0x%x/0x%x)", __func__, 
config->base16, config->size16); if (bus_space_map(sc->sc_iot, config->basealt, config->sizealt, 0, &sc->sc_alt_ioh)) panic("%s: cannot map alt window (0x%x/0x%x)", __func__, config->basealt, config->sizealt); sc->sc_16bit_off = config->off16; if (config->base16 != CAMBRIA_CFSEL0_HWBASE) { /* * Craft special resource for ATA bus space ops * that go through the expansion bus and require * special hackery to ena/dis 16-bit operations. * * XXX probably should just make this generic for * accessing the expansion bus. */ sc->sc_expbus_tag.bs_privdata = sc; /* NB: backpointer */ /* read single */ sc->sc_expbus_tag.bs_r_1 = ata_bs_r_1; sc->sc_expbus_tag.bs_r_2 = ata_bs_r_2; /* read multiple */ sc->sc_expbus_tag.bs_rm_2 = ata_bs_rm_2; sc->sc_expbus_tag.bs_rm_2_s = ata_bs_rm_2_s; /* write (single) */ sc->sc_expbus_tag.bs_w_1 = ata_bs_w_1; sc->sc_expbus_tag.bs_w_2 = ata_bs_w_2; /* write multiple */ sc->sc_expbus_tag.bs_wm_2 = ata_bs_wm_2; sc->sc_expbus_tag.bs_wm_2_s = ata_bs_wm_2_s; rman_set_bustag(&sc->sc_ata, &sc->sc_expbus_tag); rman_set_bustag(&sc->sc_alt_ata, &sc->sc_expbus_tag); } else { /* * On Cambria use the shared CS3 expansion bus tag * that handles interlock for sharing access with the * optional UART's. */ rman_set_bustag(&sc->sc_ata, &cambria_exp_bs_tag); rman_set_bustag(&sc->sc_alt_ata, &cambria_exp_bs_tag); } rman_set_bushandle(&sc->sc_ata, sc->sc_ioh); rman_set_bushandle(&sc->sc_alt_ata, sc->sc_alt_ioh); ixp425_set_gpio(sa, config->gpin, GPIO_TYPE_EDG_RISING); /* configure CS1/3 window, leaving timing unchanged */ EXP_BUS_WRITE_4(sc, sc->sc_16bit_off, EXP_BUS_READ_4(sc, sc->sc_16bit_off) | EXP_BYTE_EN | EXP_WR_EN | EXP_BYTE_RD16 | EXP_CS_EN); /* configure CS2/4 window, leaving timing unchanged */ EXP_BUS_WRITE_4(sc, config->offalt, EXP_BUS_READ_4(sc, config->offalt) | EXP_BYTE_EN | EXP_WR_EN | EXP_BYTE_RD16 | EXP_CS_EN); /* setup interrupt */ sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid, config->irq, config->irq, 1, RF_ACTIVE); if (!sc->sc_irq) panic("Unable to allocate irq %u.\n", config->irq); bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_BIO | INTR_MPSAFE | INTR_ENTROPY, NULL, ata_avila_intr, sc, &sc->sc_ih); /* attach channel on this controller */ device_add_child(dev, "ata", -1); bus_generic_attach(dev); return 0; } static int ata_avila_detach(device_t dev) { struct ata_avila_softc *sc = device_get_softc(dev); /* XXX quiesce gpio? 
*/ /* detach & delete all children */ device_delete_children(dev); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq); return 0; } static void ata_avila_intr(void *xsc) { struct ata_avila_softc *sc = xsc; if (sc->sc_intr[0].cb != NULL) sc->sc_intr[0].cb(sc->sc_intr[0].arg); } static struct resource * ata_avila_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ata_avila_softc *sc = device_get_softc(dev); KASSERT(type == SYS_RES_IRQ && *rid == ATA_IRQ_RID, ("type %u rid %u start %ju end %ju count %ju flags %u", type, *rid, start, end, count, flags)); /* doesn't matter what we return so reuse the real thing */ return sc->sc_irq; } static int ata_avila_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { KASSERT(type == SYS_RES_IRQ && rid == ATA_IRQ_RID, ("type %u rid %u", type, rid)); return 0; } static int ata_avila_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *function, void *argument, void **cookiep) { struct ata_avila_softc *sc = device_get_softc(dev); int unit = ((struct ata_channel *)device_get_softc(child))->unit; KASSERT(unit == 0, ("unit %d", unit)); sc->sc_intr[unit].cb = function; sc->sc_intr[unit].arg = argument; *cookiep = sc; return 0; } static int ata_avila_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct ata_avila_softc *sc = device_get_softc(dev); int unit = ((struct ata_channel *)device_get_softc(child))->unit; KASSERT(unit == 0, ("unit %d", unit)); sc->sc_intr[unit].cb = NULL; sc->sc_intr[unit].arg = NULL; return 0; } /* * Bus space accessors for CF-IDE PIO operations. */ /* * Enable/disable 16-bit ops on the expansion bus. */ static __inline void enable_16(struct ata_avila_softc *sc) { EXP_BUS_WRITE_4(sc, sc->sc_16bit_off, EXP_BUS_READ_4(sc, sc->sc_16bit_off) &~ EXP_BYTE_EN); DELAY(100); /* XXX? */ } static __inline void disable_16(struct ata_avila_softc *sc) { DELAY(100); /* XXX? 
*/ EXP_BUS_WRITE_4(sc, sc->sc_16bit_off, EXP_BUS_READ_4(sc, sc->sc_16bit_off) | EXP_BYTE_EN); } uint8_t ata_bs_r_1(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t o) { struct ata_avila_softc *sc = tag->bs_privdata; return bus_space_read_1(sc->sc_iot, h, o); } void ata_bs_w_1(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t o, u_int8_t v) { struct ata_avila_softc *sc = tag->bs_privdata; bus_space_write_1(sc->sc_iot, h, o, v); } uint16_t ata_bs_r_2(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t o) { struct ata_avila_softc *sc = tag->bs_privdata; uint16_t v; enable_16(sc); v = bus_space_read_2(sc->sc_iot, h, o); disable_16(sc); return v; } void ata_bs_w_2(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t o, uint16_t v) { struct ata_avila_softc *sc = tag->bs_privdata; enable_16(sc); bus_space_write_2(sc->sc_iot, h, o, v); disable_16(sc); } void ata_bs_rm_2(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t o, u_int16_t *d, bus_size_t c) { struct ata_avila_softc *sc = tag->bs_privdata; enable_16(sc); bus_space_read_multi_2(sc->sc_iot, h, o, d, c); disable_16(sc); } void ata_bs_wm_2(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t o, const u_int16_t *d, bus_size_t c) { struct ata_avila_softc *sc = tag->bs_privdata; enable_16(sc); bus_space_write_multi_2(sc->sc_iot, h, o, d, c); disable_16(sc); } /* XXX workaround ata driver by (incorrectly) byte swapping stream cases */ void ata_bs_rm_2_s(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t o, u_int16_t *d, bus_size_t c) { struct ata_avila_softc *sc = tag->bs_privdata; uint16_t v; bus_size_t i; enable_16(sc); #if 1 for (i = 0; i < c; i++) { v = bus_space_read_2(sc->sc_iot, h, o); d[i] = bswap16(v); } #else bus_space_read_multi_stream_2(sc->sc_iot, h, o, d, c); #endif disable_16(sc); } void ata_bs_wm_2_s(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t o, const u_int16_t *d, bus_size_t c) { struct ata_avila_softc *sc = tag->bs_privdata; bus_size_t i; enable_16(sc); #if 1 for (i = 0; i < c; i++) bus_space_write_2(sc->sc_iot, h, o, bswap16(d[i])); #else bus_space_write_multi_stream_2(sc->sc_iot, h, o, d, c); #endif disable_16(sc); } static device_method_t ata_avila_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_avila_probe), DEVMETHOD(device_attach, ata_avila_attach), DEVMETHOD(device_detach, ata_avila_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* bus methods */ DEVMETHOD(bus_alloc_resource, ata_avila_alloc_resource), DEVMETHOD(bus_release_resource, ata_avila_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ata_avila_setup_intr), DEVMETHOD(bus_teardown_intr, ata_avila_teardown_intr), { 0, 0 } }; devclass_t ata_avila_devclass; static driver_t ata_avila_driver = { "ata_avila", ata_avila_methods, sizeof(struct ata_avila_softc), }; DRIVER_MODULE(ata_avila, ixp, ata_avila_driver, ata_avila_devclass, 0, 0); MODULE_VERSION(ata_avila, 1); MODULE_DEPEND(ata_avila, ata, 1, 1, 1); static int avila_channel_probe(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ch->unit = 0; ch->flags |= ATA_USE_16BIT | ATA_NO_SLAVE; device_set_desc_copy(dev, "ATA channel 0"); return ata_probe(dev); } static int avila_channel_attach(device_t dev) { struct ata_avila_softc *sc = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int i; 
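	/*
	 * Point every ATA register slot at the hand-crafted 16-bit data
	 * resource; the per-register offsets and the alt-status/control
	 * exceptions are filled in just below.
	 */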
for (i = 0; i < ATA_MAX_RES; i++) ch->r_io[i].res = &sc->sc_ata; ch->r_io[ATA_DATA].offset = ATA_DATA; ch->r_io[ATA_FEATURE].offset = ATA_FEATURE; ch->r_io[ATA_COUNT].offset = ATA_COUNT; ch->r_io[ATA_SECTOR].offset = ATA_SECTOR; ch->r_io[ATA_CYL_LSB].offset = ATA_CYL_LSB; ch->r_io[ATA_CYL_MSB].offset = ATA_CYL_MSB; ch->r_io[ATA_DRIVE].offset = ATA_DRIVE; ch->r_io[ATA_COMMAND].offset = ATA_COMMAND; ch->r_io[ATA_ERROR].offset = ATA_FEATURE; /* NB: should be used only for ATAPI devices */ ch->r_io[ATA_IREASON].offset = ATA_COUNT; ch->r_io[ATA_STATUS].offset = ATA_COMMAND; /* NB: the control and alt status registers are special */ ch->r_io[ATA_ALTSTAT].res = &sc->sc_alt_ata; ch->r_io[ATA_ALTSTAT].offset = AVILA_IDE_CTRL; ch->r_io[ATA_CONTROL].res = &sc->sc_alt_ata; ch->r_io[ATA_CONTROL].offset = AVILA_IDE_CTRL; /* NB: by convention this points at the base of registers */ ch->r_io[ATA_IDX_ADDR].offset = 0; ata_generic_hw(dev); return ata_attach(dev); } static device_method_t avila_channel_methods[] = { /* device interface */ DEVMETHOD(device_probe, avila_channel_probe), DEVMETHOD(device_attach, avila_channel_attach), DEVMETHOD(device_detach, ata_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, ata_suspend), DEVMETHOD(device_resume, ata_resume), { 0, 0 } }; driver_t avila_channel_driver = { "ata", avila_channel_methods, sizeof(struct ata_channel), }; DRIVER_MODULE(ata, ata_avila, avila_channel_driver, ata_devclass, 0, 0); Index: head/sys/arm/xscale/ixp425/ixp425_intr.h =================================================================== --- head/sys/arm/xscale/ixp425/ixp425_intr.h (revision 306901) +++ head/sys/arm/xscale/ixp425/ixp425_intr.h (revision 306902) @@ -1,89 +1,88 @@ /* $NetBSD: ixp425_intr.h,v 1.6 2005/12/24 20:06:52 perry Exp $ */ /* * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ * */ #ifndef _IXP425_INTR_H_ #define _IXP425_INTR_H_ #define ARM_IRQ_HANDLER _C_LABEL(ixp425_intr_dispatch) #ifndef _LOCORE #include -#include #include #define IXPREG(reg) *((__volatile u_int32_t*) (reg)) void ixp425_do_pending(void); extern __volatile uint32_t intr_enabled; extern uint32_t intr_steer; static __inline void __attribute__((__unused__)) ixp425_set_intrmask(void) { IXPREG(IXP425_INT_ENABLE) = intr_enabled & IXP425_INT_HWMASK; } static __inline void ixp425_set_intrsteer(void) { IXPREG(IXP425_INT_SELECT) = intr_steer & IXP425_INT_HWMASK; } extern __volatile uint32_t intr_enabled2; extern uint32_t intr_steer2; static __inline void __attribute__((__unused__)) ixp435_set_intrmask(void) { IXPREG(IXP435_INT_ENABLE2) = intr_enabled2 & IXP435_INT_HWMASK; } static __inline void ixp435_set_intrsteer(void) { IXPREG(IXP435_INT_SELECT2) = intr_steer2 & IXP435_INT_HWMASK; } #endif /* _LOCORE */ #endif /* _IXP425_INTR_H_ */ Index: head/sys/arm/xscale/ixp425/ixp425_npe.c =================================================================== --- head/sys/arm/xscale/ixp425/ixp425_npe.c (revision 306901) +++ head/sys/arm/xscale/ixp425/ixp425_npe.c (revision 306902) @@ -1,1574 +1,1572 @@ /*- * Copyright (c) 2006-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ /*- * Copyright (c) 2001-2005, Intel Corporation. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Intel Corporation nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Intel XScale Network Processing Engine (NPE) support. * * Each NPE has an ixpnpeX device associated with it that is * attached at boot. Depending on the microcode loaded into * an NPE there may be an Ethernet interface (npeX) or some * other network interface (e.g. for ATM). This file has support * for loading microcode images and the associated NPE CPU * manipulations (start, stop, reset). * * The code here basically replaces the npeDl and npeMh classes * in the Intel Access Library (IAL). * * NB: Microcode images are loaded with firmware(9). To * include microcode in a static kernel include the * ixpnpe_fw device. Otherwise the firmware will be * automatically loaded from the filesystem. */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include #include struct ixpnpe_softc { device_t sc_dev; bus_space_tag_t sc_iot; bus_space_handle_t sc_ioh; bus_size_t sc_size; /* size of mapped register window */ struct resource *sc_irq; /* IRQ resource */ void *sc_ih; /* interrupt handler */ struct mtx sc_mtx; /* mailbox lock */ uint32_t sc_msg[2]; /* reply msg collected in ixpnpe_intr */ int sc_msgwaiting; /* sc_msg holds valid data */ int sc_npeid; int sc_nrefs; /* # of references */ int validImage; /* valid ucode image loaded */ int started; /* NPE is started */ uint8_t functionalityId;/* ucode functionality ID */ int insMemSize; /* size of instruction memory */ int dataMemSize; /* size of data memory */ uint32_t savedExecCount; uint32_t savedEcsDbgCtxtReg2; }; static struct ixpnpe_softc *npes[NPE_MAX]; #define IX_NPEDL_NPEIMAGE_FIELD_MASK 0xff /* used to read download map from version in microcode image */ #define IX_NPEDL_BLOCK_TYPE_INSTRUCTION 0x00000000 #define IX_NPEDL_BLOCK_TYPE_DATA 0x00000001 #define IX_NPEDL_BLOCK_TYPE_STATE 0x00000002 #define IX_NPEDL_END_OF_DOWNLOAD_MAP 0x0000000F /* * masks used to extract address info from State information context * register addresses as read from microcode image */ #define IX_NPEDL_MASK_STATE_ADDR_CTXT_REG 0x0000000F #define IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM 0x000000F0 /* LSB offset of Context Number field in State-Info Context Address */ #define IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM 4 /* size (in words) of single State Information entry (ctxt reg address|data) */ #define IX_NPEDL_STATE_INFO_ENTRY_SIZE 2 typedef struct { uint32_t type; uint32_t offset; } IxNpeDlNpeMgrDownloadMapBlockEntry; typedef union { IxNpeDlNpeMgrDownloadMapBlockEntry block; uint32_t eodmMarker; } IxNpeDlNpeMgrDownloadMapEntry; typedef struct { /* 1st entry in the download map (there may be more than one) */ IxNpeDlNpeMgrDownloadMapEntry entry[1]; } 
IxNpeDlNpeMgrDownloadMap; /* used to access an instruction or data block in a microcode image */ typedef struct { uint32_t npeMemAddress; uint32_t size; uint32_t data[1]; } IxNpeDlNpeMgrCodeBlock; /* used to access each Context Reg entry state-information block */ typedef struct { uint32_t addressInfo; uint32_t value; } IxNpeDlNpeMgrStateInfoCtxtRegEntry; /* used to access a state-information block in a microcode image */ typedef struct { uint32_t size; IxNpeDlNpeMgrStateInfoCtxtRegEntry ctxtRegEntry[1]; } IxNpeDlNpeMgrStateInfoBlock; static int npe_debug = 0; SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RWTUN, &npe_debug, 0, "IXP4XX NPE debug msgs"); #define DPRINTF(dev, fmt, ...) do { \ if (npe_debug) device_printf(dev, fmt, __VA_ARGS__); \ } while (0) #define DPRINTFn(n, dev, fmt, ...) do { \ if (npe_debug >= n) printf(fmt, __VA_ARGS__); \ } while (0) static int npe_checkbits(struct ixpnpe_softc *, uint32_t reg, uint32_t); static int npe_isstopped(struct ixpnpe_softc *); static int npe_load_ins(struct ixpnpe_softc *, const IxNpeDlNpeMgrCodeBlock *bp, int verify); static int npe_load_data(struct ixpnpe_softc *, const IxNpeDlNpeMgrCodeBlock *bp, int verify); static int npe_load_stateinfo(struct ixpnpe_softc *, const IxNpeDlNpeMgrStateInfoBlock *bp, int verify); static int npe_load_image(struct ixpnpe_softc *, const uint32_t *imageCodePtr, int verify); static int npe_cpu_reset(struct ixpnpe_softc *); static int npe_cpu_start(struct ixpnpe_softc *); static int npe_cpu_stop(struct ixpnpe_softc *); static void npe_cmd_issue_write(struct ixpnpe_softc *, uint32_t cmd, uint32_t addr, uint32_t data); static uint32_t npe_cmd_issue_read(struct ixpnpe_softc *, uint32_t cmd, uint32_t addr); static int npe_ins_write(struct ixpnpe_softc *, uint32_t addr, uint32_t data, int verify); static int npe_data_write(struct ixpnpe_softc *, uint32_t addr, uint32_t data, int verify); static void npe_ecs_reg_write(struct ixpnpe_softc *, uint32_t reg, uint32_t data); static uint32_t npe_ecs_reg_read(struct ixpnpe_softc *, uint32_t reg); static void npe_issue_cmd(struct ixpnpe_softc *, uint32_t command); static void npe_cpu_step_save(struct ixpnpe_softc *); static int npe_cpu_step(struct ixpnpe_softc *, uint32_t npeInstruction, uint32_t ctxtNum, uint32_t ldur); static void npe_cpu_step_restore(struct ixpnpe_softc *); static int npe_logical_reg_read(struct ixpnpe_softc *, uint32_t regAddr, uint32_t regSize, uint32_t ctxtNum, uint32_t *regVal); static int npe_logical_reg_write(struct ixpnpe_softc *, uint32_t regAddr, uint32_t regVal, uint32_t regSize, uint32_t ctxtNum, int verify); static int npe_physical_reg_write(struct ixpnpe_softc *, uint32_t regAddr, uint32_t regValue, int verify); static int npe_ctx_reg_write(struct ixpnpe_softc *, uint32_t ctxtNum, uint32_t ctxtReg, uint32_t ctxtRegVal, int verify); static void ixpnpe_intr(void *arg); static uint32_t npe_reg_read(struct ixpnpe_softc *sc, bus_size_t off) { uint32_t v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); DPRINTFn(9, sc->sc_dev, "%s(0x%lx) => 0x%x\n", __func__, off, v); return v; } static void npe_reg_write(struct ixpnpe_softc *sc, bus_size_t off, uint32_t val) { DPRINTFn(9, sc->sc_dev, "%s(0x%lx, 0x%x)\n", __func__, off, val); bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); } struct ixpnpe_softc * ixpnpe_attach(device_t dev, int npeid) { struct npeconfig { uint32_t base; uint32_t size; int irq; uint32_t ins_memsize; uint32_t data_memsize; }; static const struct npeconfig npeconfigs[NPE_MAX] = { [NPE_A] = { .base = IXP425_NPE_A_HWBASE, .size = 
IXP425_NPE_A_SIZE, .irq = IXP425_INT_NPE_A, .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEA, .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEA }, [NPE_B] = { .base = IXP425_NPE_B_HWBASE, .size = IXP425_NPE_B_SIZE, .irq = IXP425_INT_NPE_B, .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEB, .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEB }, [NPE_C] = { .base = IXP425_NPE_C_HWBASE, .size = IXP425_NPE_C_SIZE, .irq = IXP425_INT_NPE_C, .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEC, .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEC }, }; struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); struct ixpnpe_softc *sc; const struct npeconfig *config; int rid; if (npeid >= NPE_MAX) { device_printf(dev, "%s: bad npeid %d\n", __func__, npeid); return NULL; } sc = npes[npeid]; if (sc != NULL) { sc->sc_nrefs++; return sc; } config = &npeconfigs[npeid]; /* XXX M_BUS */ sc = malloc(sizeof(struct ixpnpe_softc), M_TEMP, M_WAITOK | M_ZERO); sc->sc_dev = dev; sc->sc_iot = sa->sc_iot; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "npe driver", MTX_DEF); sc->sc_npeid = npeid; sc->sc_nrefs = 1; sc->sc_size = config->size; if (cpu_is_ixp42x()) { /* NB: instruction/data memory sizes are NPE-dependent */ sc->insMemSize = config->ins_memsize; sc->dataMemSize = config->data_memsize; } else { sc->insMemSize = IXP46X_NPEDL_INS_MEMSIZE_WORDS; sc->dataMemSize = IXP46X_NPEDL_DATA_MEMSIZE_WORDS; } if (bus_space_map(sc->sc_iot, config->base, sc->sc_size, 0, &sc->sc_ioh)) panic("%s: Cannot map registers", device_get_name(dev)); /* * Setup IRQ and handler for NPE message support. */ rid = 0; sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, config->irq, config->irq, 1, RF_ACTIVE); if (sc->sc_irq == NULL) panic("%s: Unable to allocate irq %u", device_get_name(dev), config->irq); /* XXX could be a source of entropy */ bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixpnpe_intr, sc, &sc->sc_ih); /* * Enable output fifo interrupts (NB: must also set OFIFO Write Enable) */ npe_reg_write(sc, IX_NPECTL, npe_reg_read(sc, IX_NPECTL) | (IX_NPECTL_OFE | IX_NPECTL_OFWE)); npes[npeid] = sc; return sc; } void ixpnpe_detach(struct ixpnpe_softc *sc) { if (--sc->sc_nrefs == 0) { npes[sc->sc_npeid] = NULL; /* disable output fifo interrupts */ npe_reg_write(sc, IX_NPECTL, npe_reg_read(sc, IX_NPECTL) &~ (IX_NPECTL_OFE | IX_NPECTL_OFWE)); bus_teardown_intr(sc->sc_dev, sc->sc_irq, sc->sc_ih); bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size); mtx_destroy(&sc->sc_mtx); free(sc, M_TEMP); } } int ixpnpe_stopandreset(struct ixpnpe_softc *sc) { int error; mtx_lock(&sc->sc_mtx); error = npe_cpu_stop(sc); /* stop NPE */ if (error == 0) error = npe_cpu_reset(sc); /* reset it */ if (error == 0) sc->started = 0; /* mark stopped */ mtx_unlock(&sc->sc_mtx); DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error); return error; } static int ixpnpe_start_locked(struct ixpnpe_softc *sc) { int error; if (!sc->started) { error = npe_cpu_start(sc); if (error == 0) sc->started = 1; } else error = 0; DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error); return error; } int ixpnpe_start(struct ixpnpe_softc *sc) { int ret; mtx_lock(&sc->sc_mtx); ret = ixpnpe_start_locked(sc); mtx_unlock(&sc->sc_mtx); return (ret); } int ixpnpe_stop(struct ixpnpe_softc *sc) { int error; mtx_lock(&sc->sc_mtx); error = npe_cpu_stop(sc); if (error == 0) sc->started = 0; mtx_unlock(&sc->sc_mtx); DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error); return error; } /* * Indicates the start of an NPE Image, in new NPE Image Library format. 
* 2 consecutive occurrences indicates the end of the NPE Image Library */ #define NPE_IMAGE_MARKER 0xfeedf00d /* * NPE Image Header definition, used in new NPE Image Library format */ typedef struct { uint32_t marker; uint32_t id; uint32_t size; } IxNpeDlImageMgrImageHeader; static int npe_findimage(struct ixpnpe_softc *sc, const uint32_t *imageLibrary, uint32_t imageId, const uint32_t **imagePtr, uint32_t *imageSize) { const IxNpeDlImageMgrImageHeader *image; uint32_t offset = 0; while (imageLibrary[offset] == NPE_IMAGE_MARKER) { image = (const IxNpeDlImageMgrImageHeader *) &imageLibrary[offset]; offset += sizeof(IxNpeDlImageMgrImageHeader)/sizeof(uint32_t); DPRINTF(sc->sc_dev, "%s: off %u mark 0x%x id 0x%x size %u\n", __func__, offset, image->marker, image->id, image->size); if (image->id == imageId) { *imagePtr = imageLibrary + offset; *imageSize = image->size; return 0; } /* 2 consecutive NPE_IMAGE_MARKER's indicates end of library */ if (image->id == NPE_IMAGE_MARKER) { DPRINTF(sc->sc_dev, "imageId 0x%08x not found in " "image library header\n", imageId); /* reached end of library, image not found */ return ESRCH; } offset += image->size; } return ESRCH; } static int ixpnpe_load_firmware(struct ixpnpe_softc *sc, const char *imageName, uint32_t imageId) { static const char *devname[4] = { "IXP425", "IXP435/IXP465", "DeviceID#2", "DeviceID#3" }; uint32_t imageSize; const uint32_t *imageCodePtr; const struct firmware *fw; int error; DPRINTF(sc->sc_dev, "load %s, imageId 0x%08x\n", imageName, imageId); #if 0 IxFeatureCtrlDeviceId devid = IX_NPEDL_DEVICEID_FROM_IMAGEID_GET(imageId); /* * Checking if image being loaded is meant for device that is running. * Image is forward compatible. i.e Image built for IXP42X should run * on IXP46X but not vice versa. */ if (devid > (ixFeatureCtrlDeviceRead() & IX_FEATURE_CTRL_DEVICE_TYPE_MASK)) return EINVAL; #endif error = ixpnpe_stopandreset(sc); /* stop and reset the NPE */ if (error != 0) return error; fw = firmware_get(imageName); if (fw == NULL) return ENOENT; /* Locate desired image in files w/ combined images */ error = npe_findimage(sc, fw->data, imageId, &imageCodePtr, &imageSize); if (error != 0) goto done; device_printf(sc->sc_dev, "load fw image %s.NPE-%c Func 0x%x Rev %u.%u\n", devname[NPEIMAGE_DEVID(imageId)], 'A' + NPEIMAGE_NPEID(imageId), NPEIMAGE_FUNCID(imageId), NPEIMAGE_MAJOR(imageId), NPEIMAGE_MINOR(imageId)); /* * If download was successful, store image Id in list of * currently loaded images. 
If a critical error occurred * during download, record that the NPE has an invalid image */ mtx_lock(&sc->sc_mtx); error = npe_load_image(sc, imageCodePtr, 1 /*VERIFY*/); if (error == 0) { sc->validImage = 1; error = ixpnpe_start_locked(sc); } else { sc->validImage = 0; } sc->functionalityId = IX_NPEDL_FUNCTIONID_FROM_IMAGEID_GET(imageId); mtx_unlock(&sc->sc_mtx); done: firmware_put(fw, FIRMWARE_UNLOAD); DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error); return error; } static int override_imageid(device_t dev, const char *resname, uint32_t *val) { int unit = device_get_unit(dev); int resval; if (resource_int_value("npe", unit, resname, &resval) != 0) return 0; /* XXX validate */ if (bootverbose) device_printf(dev, "using npe.%d.%s=0x%x override\n", unit, resname, resval); *val = resval; return 1; } int ixpnpe_init(struct ixpnpe_softc *sc) { static const uint32_t npeconfig[NPE_MAX] = { [NPE_A] = IXP425_NPE_A_IMAGEID, [NPE_B] = IXP425_NPE_B_IMAGEID, [NPE_C] = IXP425_NPE_C_IMAGEID, }; uint32_t imageid, msg[2]; int error; if (sc->started) return 0; /* * Load NPE firmware and start it running. We assume * that minor version bumps remain compatible so probe * the firmware image starting with the expected version * and then bump the minor version up to the max. */ if (!override_imageid(sc->sc_dev, "imageid", &imageid)) imageid = npeconfig[sc->sc_npeid]; for (;;) { error = ixpnpe_load_firmware(sc, "npe_fw", imageid); if (error == 0) break; /* * ESRCH is returned when the requested image * is not present */ if (error != ESRCH) { device_printf(sc->sc_dev, "cannot init NPE (error %d)\n", error); return error; } /* bump the minor version up to the max possible */ if (NPEIMAGE_MINOR(imageid) == 0xff) { device_printf(sc->sc_dev, "cannot locate firmware " "(imageid 0x%08x)\n", imageid); return error; } imageid++; } /* NB: firmware should respond with a status msg */ if (ixpnpe_recvmsg_sync(sc, msg) != 0) { device_printf(sc->sc_dev, "firmware did not respond as expected\n"); return EIO; } return 0; } int ixpnpe_getfunctionality(struct ixpnpe_softc *sc) { return (sc->validImage ? 
sc->functionalityId : 0); } static int npe_checkbits(struct ixpnpe_softc *sc, uint32_t reg, uint32_t expectedBitsSet) { uint32_t val; val = npe_reg_read(sc, reg); DPRINTFn(5, sc->sc_dev, "%s(0x%x, 0x%x) => 0x%x (%u)\n", __func__, reg, expectedBitsSet, val, (val & expectedBitsSet) == expectedBitsSet); return ((val & expectedBitsSet) == expectedBitsSet); } static int npe_isstopped(struct ixpnpe_softc *sc) { return npe_checkbits(sc, IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP); } static int npe_load_ins(struct ixpnpe_softc *sc, const IxNpeDlNpeMgrCodeBlock *bp, int verify) { uint32_t npeMemAddress; int i, blockSize; npeMemAddress = bp->npeMemAddress; blockSize = bp->size; /* NB: instruction/data count */ if (npeMemAddress + blockSize > sc->insMemSize) { device_printf(sc->sc_dev, "Block size %u too big for NPE memory\n", blockSize); return EINVAL; /* XXX */ } for (i = 0; i < blockSize; i++, npeMemAddress++) { if (npe_ins_write(sc, npeMemAddress, bp->data[i], verify) != 0) { device_printf(sc->sc_dev, "NPE instruction write failed"); return EIO; } } return 0; } static int npe_load_data(struct ixpnpe_softc *sc, const IxNpeDlNpeMgrCodeBlock *bp, int verify) { uint32_t npeMemAddress; int i, blockSize; npeMemAddress = bp->npeMemAddress; blockSize = bp->size; /* NB: instruction/data count */ if (npeMemAddress + blockSize > sc->dataMemSize) { device_printf(sc->sc_dev, "Block size %u too big for NPE memory\n", blockSize); return EINVAL; } for (i = 0; i < blockSize; i++, npeMemAddress++) { if (npe_data_write(sc, npeMemAddress, bp->data[i], verify) != 0) { device_printf(sc->sc_dev, "NPE data write failed\n"); return EIO; } } return 0; } static int npe_load_stateinfo(struct ixpnpe_softc *sc, const IxNpeDlNpeMgrStateInfoBlock *bp, int verify) { int i, nentries, error; npe_cpu_step_save(sc); /* for each state-info context register entry in block */ nentries = bp->size / IX_NPEDL_STATE_INFO_ENTRY_SIZE; error = 0; for (i = 0; i < nentries; i++) { /* each state-info entry is 2 words (address, value) */ uint32_t regVal = bp->ctxtRegEntry[i].value; uint32_t addrInfo = bp->ctxtRegEntry[i].addressInfo; uint32_t reg = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_REG); uint32_t cNum = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM) >> IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM; /* error-check Context Register No. 
and Context Number values */ if (!(0 <= reg && reg < IX_NPEDL_CTXT_REG_MAX)) { device_printf(sc->sc_dev, "invalid Context Register %u\n", reg); error = EINVAL; break; } if (!(0 <= cNum && cNum < IX_NPEDL_CTXT_NUM_MAX)) { device_printf(sc->sc_dev, "invalid Context Number %u\n", cNum); error = EINVAL; break; } /* NOTE that there is no STEVT register for Context 0 */ if (cNum == 0 && reg == IX_NPEDL_CTXT_REG_STEVT) { device_printf(sc->sc_dev, "no STEVT for Context 0\n"); error = EINVAL; break; } if (npe_ctx_reg_write(sc, cNum, reg, regVal, verify) != 0) { device_printf(sc->sc_dev, "write of state-info to NPE failed\n"); error = EIO; break; } } npe_cpu_step_restore(sc); return error; } static int npe_load_image(struct ixpnpe_softc *sc, const uint32_t *imageCodePtr, int verify) { #define EOM(marker) ((marker) == IX_NPEDL_END_OF_DOWNLOAD_MAP) const IxNpeDlNpeMgrDownloadMap *downloadMap; int i, error; if (!npe_isstopped(sc)) { /* verify NPE is stopped */ device_printf(sc->sc_dev, "cannot load image, NPE not stopped\n"); return EIO; } /* * Read Download Map, checking each block type and calling * appropriate function to perform download */ error = 0; downloadMap = (const IxNpeDlNpeMgrDownloadMap *) imageCodePtr; for (i = 0; !EOM(downloadMap->entry[i].eodmMarker); i++) { /* calculate pointer to block to be downloaded */ const uint32_t *bp = imageCodePtr + downloadMap->entry[i].block.offset; switch (downloadMap->entry[i].block.type) { case IX_NPEDL_BLOCK_TYPE_INSTRUCTION: error = npe_load_ins(sc, (const IxNpeDlNpeMgrCodeBlock *) bp, verify); DPRINTF(sc->sc_dev, "%s: inst, error %d\n", __func__, error); break; case IX_NPEDL_BLOCK_TYPE_DATA: error = npe_load_data(sc, (const IxNpeDlNpeMgrCodeBlock *) bp, verify); DPRINTF(sc->sc_dev, "%s: data, error %d\n", __func__, error); break; case IX_NPEDL_BLOCK_TYPE_STATE: error = npe_load_stateinfo(sc, (const IxNpeDlNpeMgrStateInfoBlock *) bp, verify); DPRINTF(sc->sc_dev, "%s: state, error %d\n", __func__, error); break; default: device_printf(sc->sc_dev, "unknown block type 0x%x in download map\n", downloadMap->entry[i].block.type); error = EIO; /* XXX */ break; } if (error != 0) break; } return error; #undef EOM } /* contains Reset values for Context Store Registers */ static const struct { uint32_t regAddr; uint32_t regResetVal; } ixNpeDlEcsRegResetValues[] = { { IX_NPEDL_ECS_BG_CTXT_REG_0, IX_NPEDL_ECS_BG_CTXT_REG_0_RESET }, { IX_NPEDL_ECS_BG_CTXT_REG_1, IX_NPEDL_ECS_BG_CTXT_REG_1_RESET }, { IX_NPEDL_ECS_BG_CTXT_REG_2, IX_NPEDL_ECS_BG_CTXT_REG_2_RESET }, { IX_NPEDL_ECS_PRI_1_CTXT_REG_0, IX_NPEDL_ECS_PRI_1_CTXT_REG_0_RESET }, { IX_NPEDL_ECS_PRI_1_CTXT_REG_1, IX_NPEDL_ECS_PRI_1_CTXT_REG_1_RESET }, { IX_NPEDL_ECS_PRI_1_CTXT_REG_2, IX_NPEDL_ECS_PRI_1_CTXT_REG_2_RESET }, { IX_NPEDL_ECS_PRI_2_CTXT_REG_0, IX_NPEDL_ECS_PRI_2_CTXT_REG_0_RESET }, { IX_NPEDL_ECS_PRI_2_CTXT_REG_1, IX_NPEDL_ECS_PRI_2_CTXT_REG_1_RESET }, { IX_NPEDL_ECS_PRI_2_CTXT_REG_2, IX_NPEDL_ECS_PRI_2_CTXT_REG_2_RESET }, { IX_NPEDL_ECS_DBG_CTXT_REG_0, IX_NPEDL_ECS_DBG_CTXT_REG_0_RESET }, { IX_NPEDL_ECS_DBG_CTXT_REG_1, IX_NPEDL_ECS_DBG_CTXT_REG_1_RESET }, { IX_NPEDL_ECS_DBG_CTXT_REG_2, IX_NPEDL_ECS_DBG_CTXT_REG_2_RESET }, { IX_NPEDL_ECS_INSTRUCT_REG, IX_NPEDL_ECS_INSTRUCT_REG_RESET } }; /* contains Reset values for Context Store Registers */ static const uint32_t ixNpeDlCtxtRegResetValues[] = { IX_NPEDL_CTXT_REG_RESET_STEVT, IX_NPEDL_CTXT_REG_RESET_STARTPC, IX_NPEDL_CTXT_REG_RESET_REGMAP, IX_NPEDL_CTXT_REG_RESET_CINDEX, }; #define IX_NPEDL_PARITY_BIT_MASK 0x3F00FFFF #define 
IX_NPEDL_CONFIG_CTRL_REG_MASK 0x3F3FFFFF #if 0 /* * Reset the NPE and its coprocessor using the * fuse bits in the feature control register. */ static void npe_reset(int npeid) { uint32_t mask = EXP_FCTRL_NPEA << npeid; uint32_t v; v = ixp4xx_read_feature_bits(); ixp4xx_write_feature_bits(v &~ mask); /* un-fuse and un-reset the NPE & coprocessor */ ixp4xx_write_feature_bits(v | mask); } #endif static int npe_cpu_reset(struct ixpnpe_softc *sc) { #define N(a) (sizeof(a) / sizeof(a[0])) uint32_t ctxtReg; /* identifies Context Store reg (0-3) */ uint32_t regAddr; uint32_t regVal; uint32_t ixNpeConfigCtrlRegVal; int i, error = 0; /* pre-store the NPE Config Control Register Value */ ixNpeConfigCtrlRegVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL); ixNpeConfigCtrlRegVal |= 0x3F000000; /* disable the parity interrupt */ npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL, (ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK)); DPRINTFn(2, sc->sc_dev, "%s: dis parity int, CTL => 0x%x\n", __func__, ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK); npe_cpu_step_save(sc); /* * Clear the FIFOs. */ while (npe_checkbits(sc, IX_NPEDL_REG_OFFSET_WFIFO, IX_NPEDL_MASK_WFIFO_VALID)) { /* read from the Watch-point FIFO until empty */ (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WFIFO); } while (npe_checkbits(sc, IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_OFNE)) { /* read from the outFIFO until empty */ (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_FIFO); } while (npe_checkbits(sc, IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_IFNE)) { /* * Step execution of the NPE instruction to read inFIFO using * the Debug Executing Context stack. */ error = npe_cpu_step(sc, IX_NPEDL_INSTR_RD_FIFO, 0, 0); if (error != 0) { DPRINTF(sc->sc_dev, "%s: cannot step (1), error %u\n", __func__, error); npe_cpu_step_restore(sc); return error; } } /* * Reset the mailbox reg */ /* ...from XScale side */ npe_reg_write(sc, IX_NPEDL_REG_OFFSET_MBST, IX_NPEDL_REG_RESET_MBST); /* ...from NPE side */ error = npe_cpu_step(sc, IX_NPEDL_INSTR_RESET_MBOX, 0, 0); if (error != 0) { DPRINTF(sc->sc_dev, "%s: cannot step (2), error %u\n", __func__, error); npe_cpu_step_restore(sc); return error; } /* * Reset the physical registers in the NPE register file: * Note: no need to save/restore REGMAP for Context 0 here * since all Context Store regs are reset in subsequent code. 
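 * (Each write below goes through npe_physical_reg_write(), which selects
 * the register pair via REGMAP for context 0 and single-steps the NPE to
 * perform the actual store.)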
*/ for (regAddr = 0; regAddr < IX_NPEDL_TOTAL_NUM_PHYS_REG && error == 0; regAddr++) { /* for each physical register in the NPE reg file, write 0 : */ error = npe_physical_reg_write(sc, regAddr, 0, TRUE); if (error != 0) { DPRINTF(sc->sc_dev, "%s: cannot write phy reg," "error %u\n", __func__, error); npe_cpu_step_restore(sc); return error; /* abort reset */ } } /* * Reset the context store: */ for (i = IX_NPEDL_CTXT_NUM_MIN; i <= IX_NPEDL_CTXT_NUM_MAX; i++) { /* set each context's Context Store registers to reset values */ for (ctxtReg = 0; ctxtReg < IX_NPEDL_CTXT_REG_MAX; ctxtReg++) { /* NOTE that there is no STEVT register for Context 0 */ if (i == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STEVT) continue; regVal = ixNpeDlCtxtRegResetValues[ctxtReg]; error = npe_ctx_reg_write(sc, i, ctxtReg, regVal, TRUE); if (error != 0) { DPRINTF(sc->sc_dev, "%s: cannot write ctx reg," "error %u\n", __func__, error); npe_cpu_step_restore(sc); return error; /* abort reset */ } } } npe_cpu_step_restore(sc); /* write Reset values to Execution Context Stack registers */ for (i = 0; i < N(ixNpeDlEcsRegResetValues); i++) npe_ecs_reg_write(sc, ixNpeDlEcsRegResetValues[i].regAddr, ixNpeDlEcsRegResetValues[i].regResetVal); /* clear the profile counter */ npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT); /* clear registers EXCT, AP0, AP1, AP2 and AP3 */ for (regAddr = IX_NPEDL_REG_OFFSET_EXCT; regAddr <= IX_NPEDL_REG_OFFSET_AP3; regAddr += sizeof(uint32_t)) npe_reg_write(sc, regAddr, 0); /* Reset the Watch-count register */ npe_reg_write(sc, IX_NPEDL_REG_OFFSET_WC, 0); #if 0 /* * WR IXA00055043 - Remove IMEM Parity Introduced by NPE Reset Operation * XXX Removed because it breaks IXP435 operation; e.g. on Gateworks * XXX 2358 boards reseting NPE-A after NPE-C is running causes both * XXX npe's to stop working */ npe_reset(sc->sc_npeid); #endif /* * Call NpeMgr function to stop the NPE again after the Feature Control * has unfused and Un-Reset the NPE and its associated Coprocessors. */ error = npe_cpu_stop(sc); /* restore NPE configuration bus Control Register - Parity Settings */ npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL, (ixNpeConfigCtrlRegVal & IX_NPEDL_CONFIG_CTRL_REG_MASK)); DPRINTFn(2, sc->sc_dev, "%s: restore CTL => 0x%x\n", __func__, npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL)); return error; #undef N } static int npe_cpu_start(struct ixpnpe_softc *sc) { uint32_t ecsRegVal; /* * Ensure only Background Context Stack Level is Active by turning off * the Active bit in each of the other Executing Context Stack levels. */ ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0); ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE; npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0, ecsRegVal); ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0); ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE; npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0, ecsRegVal); ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0); ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE; npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsRegVal); /* clear the pipeline */ npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE); /* start NPE execution by issuing cmd through EXCTL register on NPE */ npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_START); /* * Check execution status of NPE to verify operation was successful. */ return npe_checkbits(sc, IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_RUN) ? 
0 : EIO; } static int npe_cpu_stop(struct ixpnpe_softc *sc) { /* stop NPE execution by issuing cmd through EXCTL register on NPE */ npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STOP); /* verify that NPE Stop was successful */ return npe_checkbits(sc, IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP) ? 0 : EIO; } #define IX_NPEDL_REG_SIZE_BYTE 8 #define IX_NPEDL_REG_SIZE_SHORT 16 #define IX_NPEDL_REG_SIZE_WORD 32 /* * Introduce extra read cycles after issuing read command to NPE * so that we read the register after the NPE has updated it * This is to overcome race condition between XScale and NPE */ #define IX_NPEDL_DELAY_READ_CYCLES 2 /* * To mask top three MSBs of 32bit word to download into NPE IMEM */ #define IX_NPEDL_MASK_UNUSED_IMEM_BITS 0x1FFFFFFF; static void npe_cmd_issue_write(struct ixpnpe_softc *sc, uint32_t cmd, uint32_t addr, uint32_t data) { npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, data); npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr); npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd); } static uint32_t npe_cmd_issue_read(struct ixpnpe_softc *sc, uint32_t cmd, uint32_t addr) { uint32_t data; int i; npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr); npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd); for (i = 0; i <= IX_NPEDL_DELAY_READ_CYCLES; i++) data = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA); return data; } static int npe_ins_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify) { DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data); npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_INS_MEM, addr, data); if (verify) { uint32_t rdata; /* * Write invalid data to this reg, so we can see if we're * reading the EXDATA register too early. */ npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data); /* * Disabled since top 3 MSB are not used for Azusa * hardware Refer WR:IXA00053900 */ data &= IX_NPEDL_MASK_UNUSED_IMEM_BITS; rdata = npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_INS_MEM, addr); rdata &= IX_NPEDL_MASK_UNUSED_IMEM_BITS; if (data != rdata) return EIO; } return 0; } static int npe_data_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify) { DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data); npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_DATA_MEM, addr, data); if (verify) { /* * Write invalid data to this reg, so we can see if we're * reading the EXDATA register too early. */ npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data); if (data != npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_DATA_MEM, addr)) return EIO; } return 0; } static void npe_ecs_reg_write(struct ixpnpe_softc *sc, uint32_t reg, uint32_t data) { npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_ECS_REG, reg, data); } static uint32_t npe_ecs_reg_read(struct ixpnpe_softc *sc, uint32_t reg) { return npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_ECS_REG, reg); } static void npe_issue_cmd(struct ixpnpe_softc *sc, uint32_t command) { npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, command); } static void npe_cpu_step_save(struct ixpnpe_softc *sc) { /* turn off the halt bit by clearing Execution Count register. 
*/ /* save reg contents 1st and restore later */ sc->savedExecCount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXCT); npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, 0); /* ensure that IF and IE are on (temporarily), so that we don't end up * stepping forever */ sc->savedEcsDbgCtxtReg2 = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2); npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2, (sc->savedEcsDbgCtxtReg2 | IX_NPEDL_MASK_ECS_DBG_REG_2_IF | IX_NPEDL_MASK_ECS_DBG_REG_2_IE)); } static int npe_cpu_step(struct ixpnpe_softc *sc, uint32_t npeInstruction, uint32_t ctxtNum, uint32_t ldur) { #define IX_NPE_DL_MAX_NUM_OF_RETRIES 1000000 uint32_t ecsDbgRegVal; uint32_t oldWatchcount, newWatchcount; int tries; /* set the Active bit, and the LDUR, in the debug level */ ecsDbgRegVal = IX_NPEDL_MASK_ECS_REG_0_ACTIVE | (ldur << IX_NPEDL_OFFSET_ECS_REG_0_LDUR); npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsDbgRegVal); /* * Set CCTXT at ECS DEBUG L3 to specify in which context to execute the * instruction, and set SELCTXT at ECS DEBUG Level to specify which * context store to access. * Debug ECS Level Reg 1 has form 0x000n000n, where n = context number */ ecsDbgRegVal = (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_CCTXT) | (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_SELCTXT); npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_1, ecsDbgRegVal); /* clear the pipeline */ npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE); /* load NPE instruction into the instruction register */ npe_ecs_reg_write(sc, IX_NPEDL_ECS_INSTRUCT_REG, npeInstruction); /* need this value later to wait for completion of NPE execution step */ oldWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC); /* issue a Step One command via the Execution Control register */ npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STEP); /* * Force the XScale to wait until the NPE has finished execution step * NOTE that this delay will be very small, just long enough to allow a * single NPE instruction to complete execution; if instruction * execution is not completed before timeout retries, exit the while * loop. */ newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC); for (tries = 0; tries < IX_NPE_DL_MAX_NUM_OF_RETRIES && newWatchcount == oldWatchcount; tries++) { /* Watch Count register incr's when NPE completes an inst */ newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC); } return (tries < IX_NPE_DL_MAX_NUM_OF_RETRIES) ? 0 : EIO; #undef IX_NPE_DL_MAX_NUM_OF_RETRIES } static void npe_cpu_step_restore(struct ixpnpe_softc *sc) { /* clear active bit in debug level */ npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, 0); /* clear the pipeline */ npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE); /* restore Execution Count register contents. */ npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, sc->savedExecCount); /* restore IF and IE bits to original values */ npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2, sc->savedEcsDbgCtxtReg2); } static int npe_logical_reg_read(struct ixpnpe_softc *sc, uint32_t regAddr, uint32_t regSize, uint32_t ctxtNum, uint32_t *regVal) { uint32_t npeInstruction, mask; int error; switch (regSize) { case IX_NPEDL_REG_SIZE_BYTE: npeInstruction = IX_NPEDL_INSTR_RD_REG_BYTE; mask = 0xff; break; case IX_NPEDL_REG_SIZE_SHORT: npeInstruction = IX_NPEDL_INSTR_RD_REG_SHORT; mask = 0xffff; break; case IX_NPEDL_REG_SIZE_WORD: npeInstruction = IX_NPEDL_INSTR_RD_REG_WORD; mask = 0xffffffff; break; default: return EINVAL; } /* make regAddr be the SRC and DEST operands (e.g. 
movX d0, d0) */ npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_SRC) | (regAddr << IX_NPEDL_OFFSET_INSTR_DEST); /* step execution of NPE inst using Debug Executing Context stack */ error = npe_cpu_step(sc, npeInstruction, ctxtNum, IX_NPEDL_RD_INSTR_LDUR); if (error != 0) { DPRINTF(sc->sc_dev, "%s(0x%x, %u, %u), cannot step, error %d\n", __func__, regAddr, regSize, ctxtNum, error); return error; } /* read value of register from Execution Data register */ *regVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA); /* align value from left to right */ *regVal = (*regVal >> (IX_NPEDL_REG_SIZE_WORD - regSize)) & mask; return 0; } static int npe_logical_reg_write(struct ixpnpe_softc *sc, uint32_t regAddr, uint32_t regVal, uint32_t regSize, uint32_t ctxtNum, int verify) { int error; DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x, %u, %u)\n", __func__, regAddr, regVal, regSize, ctxtNum); if (regSize == IX_NPEDL_REG_SIZE_WORD) { /* * NPE register addressing is left-to-right: e.g. |d0|d1|d2|d3| * Write upper half-word (short) to |d0|d1| */ error = npe_logical_reg_write(sc, regAddr, regVal >> IX_NPEDL_REG_SIZE_SHORT, IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify); if (error != 0) return error; /* Write lower half-word (short) to |d2|d3| */ error = npe_logical_reg_write(sc, regAddr + sizeof(uint16_t), regVal & 0xffff, IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify); } else { uint32_t npeInstruction; switch (regSize) { case IX_NPEDL_REG_SIZE_BYTE: npeInstruction = IX_NPEDL_INSTR_WR_REG_BYTE; regVal &= 0xff; break; case IX_NPEDL_REG_SIZE_SHORT: npeInstruction = IX_NPEDL_INSTR_WR_REG_SHORT; regVal &= 0xffff; break; default: return EINVAL; } /* fill dest operand field of inst with dest reg addr */ npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_DEST); /* fill src operand field of inst with least-sig 5 bits of val*/ npeInstruction |= ((regVal & IX_NPEDL_MASK_IMMED_INSTR_SRC_DATA) << IX_NPEDL_OFFSET_INSTR_SRC); /* fill coprocessor field of inst with most-sig 11 bits of val*/ npeInstruction |= ((regVal & IX_NPEDL_MASK_IMMED_INSTR_COPROC_DATA) << IX_NPEDL_DISPLACE_IMMED_INSTR_COPROC_DATA); /* step execution of NPE instruction using Debug ECS */ error = npe_cpu_step(sc, npeInstruction, ctxtNum, IX_NPEDL_WR_INSTR_LDUR); } if (error != 0) { DPRINTF(sc->sc_dev, "%s(0x%x, 0x%x, %u, %u), error %u " "writing reg\n", __func__, regAddr, regVal, regSize, ctxtNum, error); return error; } if (verify) { uint32_t retRegVal; error = npe_logical_reg_read(sc, regAddr, regSize, ctxtNum, &retRegVal); if (error == 0 && regVal != retRegVal) error = EIO; /* XXX ambiguous */ } return error; } /* * There are 32 physical registers used in an NPE. These are * treated as 16 pairs of 32-bit registers. To write one of the pair, * write the pair number (0-16) to the REGMAP for Context 0. Then write * the value to register 0 or 4 in the regfile, depending on which * register of the pair is to be written */ static int npe_physical_reg_write(struct ixpnpe_softc *sc, uint32_t regAddr, uint32_t regValue, int verify) { int error; /* * Set REGMAP for context 0 to (regAddr >> 1) to choose which pair * (0-16) of physical registers to write . 
*/ error = npe_logical_reg_write(sc, IX_NPEDL_CTXT_REG_ADDR_REGMAP, (regAddr >> IX_NPEDL_OFFSET_PHYS_REG_ADDR_REGMAP), IX_NPEDL_REG_SIZE_SHORT, 0, verify); if (error == 0) { /* regAddr = 0 or 4 */ regAddr = (regAddr & IX_NPEDL_MASK_PHYS_REG_ADDR_LOGICAL_ADDR) * sizeof(uint32_t); error = npe_logical_reg_write(sc, regAddr, regValue, IX_NPEDL_REG_SIZE_WORD, 0, verify); } return error; } static int npe_ctx_reg_write(struct ixpnpe_softc *sc, uint32_t ctxtNum, uint32_t ctxtReg, uint32_t ctxtRegVal, int verify) { DPRINTFn(4, sc->sc_dev, "%s(%u, %u, %u)\n", __func__, ctxtNum, ctxtReg, ctxtRegVal); /* * Context 0 has no STARTPC. Instead, this value is used to set * NextPC for Background ECS, to set where NPE starts executing code */ if (ctxtNum == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STARTPC) { /* read BG_CTXT_REG_0, update NEXTPC bits, & write back to reg*/ uint32_t v = npe_ecs_reg_read(sc, IX_NPEDL_ECS_BG_CTXT_REG_0); v &= ~IX_NPEDL_MASK_ECS_REG_0_NEXTPC; v |= (ctxtRegVal << IX_NPEDL_OFFSET_ECS_REG_0_NEXTPC) & IX_NPEDL_MASK_ECS_REG_0_NEXTPC; npe_ecs_reg_write(sc, IX_NPEDL_ECS_BG_CTXT_REG_0, v); return 0; } else { static const struct { uint32_t regAddress; uint32_t regSize; } regAccInfo[IX_NPEDL_CTXT_REG_MAX] = { { IX_NPEDL_CTXT_REG_ADDR_STEVT, IX_NPEDL_REG_SIZE_BYTE }, { IX_NPEDL_CTXT_REG_ADDR_STARTPC, IX_NPEDL_REG_SIZE_SHORT }, { IX_NPEDL_CTXT_REG_ADDR_REGMAP, IX_NPEDL_REG_SIZE_SHORT }, { IX_NPEDL_CTXT_REG_ADDR_CINDEX, IX_NPEDL_REG_SIZE_BYTE } }; return npe_logical_reg_write(sc, regAccInfo[ctxtReg].regAddress, ctxtRegVal, regAccInfo[ctxtReg].regSize, ctxtNum, verify); } } /* * NPE Mailbox support. */ #define IX_NPEMH_MAXTRIES 100000 static int ofifo_wait(struct ixpnpe_softc *sc) { int i; for (i = 0; i < IX_NPEMH_MAXTRIES; i++) { if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_OFNE) return 1; DELAY(10); } device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n", __func__, npe_reg_read(sc, IX_NPESTAT)); return 0; } static int getmsg(struct ixpnpe_softc *sc, uint32_t msg[2]) { mtx_assert(&sc->sc_mtx, MA_OWNED); if (!ofifo_wait(sc)) return EAGAIN; msg[0] = npe_reg_read(sc, IX_NPEFIFO); DPRINTF(sc->sc_dev, "%s: msg0 0x%x\n", __func__, msg[0]); if (!ofifo_wait(sc)) return EAGAIN; msg[1] = npe_reg_read(sc, IX_NPEFIFO); DPRINTF(sc->sc_dev, "%s: msg1 0x%x\n", __func__, msg[1]); return 0; } static void ixpnpe_intr(void *arg) { struct ixpnpe_softc *sc = arg; uint32_t status; mtx_lock(&sc->sc_mtx); status = npe_reg_read(sc, IX_NPESTAT); DPRINTF(sc->sc_dev, "%s: status 0x%x\n", __func__, status); if ((status & IX_NPESTAT_OFINT) == 0) { /* NB: should not happen */ device_printf(sc->sc_dev, "%s: status 0x%x\n", __func__, status); /* XXX must silence interrupt? */ mtx_unlock(&sc->sc_mtx); return; } /* * A message is waiting in the output FIFO, copy it so * the interrupt will be silenced. 
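 * The saved copy is handed back later by recvmsg_locked() on behalf of
 * ixpnpe_recvmsg_async()/ixpnpe_recvmsg_sync().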
*/ if (getmsg(sc, sc->sc_msg) == 0) sc->sc_msgwaiting = 1; mtx_unlock(&sc->sc_mtx); } static int ififo_wait(struct ixpnpe_softc *sc) { int i; for (i = 0; i < IX_NPEMH_MAXTRIES; i++) { if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_IFNF) return 1; DELAY(10); } device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n", __func__, npe_reg_read(sc, IX_NPESTAT)); return 0; } static int putmsg(struct ixpnpe_softc *sc, const uint32_t msg[2]) { mtx_assert(&sc->sc_mtx, MA_OWNED); DPRINTF(sc->sc_dev, "%s: msg 0x%x:0x%x\n", __func__, msg[0], msg[1]); if (!ififo_wait(sc)) return EIO; npe_reg_write(sc, IX_NPEFIFO, msg[0]); if (!ififo_wait(sc)) return EIO; npe_reg_write(sc, IX_NPEFIFO, msg[1]); return 0; } /* * Send a msg to the NPE and wait for a reply. We spin as * we may be called early with interrupts not properly setup. */ int ixpnpe_sendandrecvmsg_sync(struct ixpnpe_softc *sc, const uint32_t send[2], uint32_t recv[2]) { int error; mtx_lock(&sc->sc_mtx); error = putmsg(sc, send); if (error == 0) error = getmsg(sc, recv); mtx_unlock(&sc->sc_mtx); return error; } /* * Send a msg to the NPE w/o waiting for a reply. */ int ixpnpe_sendmsg_async(struct ixpnpe_softc *sc, const uint32_t msg[2]) { int error; mtx_lock(&sc->sc_mtx); error = putmsg(sc, msg); mtx_unlock(&sc->sc_mtx); return error; } static int recvmsg_locked(struct ixpnpe_softc *sc, uint32_t msg[2]) { mtx_assert(&sc->sc_mtx, MA_OWNED); DPRINTF(sc->sc_dev, "%s: msgwaiting %d\n", __func__, sc->sc_msgwaiting); if (sc->sc_msgwaiting) { msg[0] = sc->sc_msg[0]; msg[1] = sc->sc_msg[1]; sc->sc_msgwaiting = 0; return 0; } return EAGAIN; } /* * Receive any msg previously received from the NPE. If nothing * is available we return EAGAIN and the caller is required to * do a synchronous receive or try again later. */ int ixpnpe_recvmsg_async(struct ixpnpe_softc *sc, uint32_t msg[2]) { int error; mtx_lock(&sc->sc_mtx); error = recvmsg_locked(sc, msg); mtx_unlock(&sc->sc_mtx); return error; } /* * Receive a msg from the NPE. If one was received asynchronously * then it's returned; otherwise we poll synchronously. */ int ixpnpe_recvmsg_sync(struct ixpnpe_softc *sc, uint32_t msg[2]) { int error; mtx_lock(&sc->sc_mtx); error = recvmsg_locked(sc, msg); if (error == EAGAIN) error = getmsg(sc, msg); mtx_unlock(&sc->sc_mtx); return error; } Index: head/sys/arm/xscale/ixp425/ixp425_qmgr.c =================================================================== --- head/sys/arm/xscale/ixp425/ixp425_qmgr.c (revision 306901) +++ head/sys/arm/xscale/ixp425/ixp425_qmgr.c (revision 306902) @@ -1,1104 +1,1102 @@ /*- * Copyright (c) 2006 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ /*- * Copyright (c) 2001-2005, Intel Corporation. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Intel Corporation nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Intel XScale Queue Manager support. * * Each IXP4XXX device has a hardware block that implements a priority * queue manager that is shared between the XScale cpu and the backend * devices (such as the NPE). Queues are accessed by reading/writing * special memory locations. The queue contents are mapped into a shared * SRAM region with entries managed in a circular buffer. The XScale * processor can receive interrupts based on queue contents (a condition * code determines when interrupts should be delivered). * * The code here basically replaces the qmgr class in the Intel Access * Library (IAL). */ #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include #include /* * State per AQM hw queue. * This structure holds q configuration and dispatch state. 
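 * One of these is kept per hardware queue; the register addresses and bit
 * masks are precomputed at attach time so the fast-path read/write/status
 * routines need not recalculate them.
 *
 * A minimal usage sketch (the queue id, size, watermarks, source select and
 * callback below are illustrative placeholders, not values from a real
 * client):
 *
 *	ixpqmgr_qconfig(qid, 128, 0, 1, srcsel, my_callback, my_arg);
 *	error = ixpqmgr_qwrite(qid, entry);
 *	error = ixpqmgr_qread(qid, &entry);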
*/ struct qmgrInfo { int qSizeInWords; /* queue size in words */ uint32_t qOflowStatBitMask; /* overflow status mask */ int qWriteCount; /* queue write count */ bus_size_t qAccRegAddr; /* access register */ bus_size_t qUOStatRegAddr; /* status register */ bus_size_t qConfigRegAddr; /* config register */ int qSizeInEntries; /* queue size in entries */ uint32_t qUflowStatBitMask; /* underflow status mask */ int qReadCount; /* queue read count */ /* XXX union */ uint32_t qStatRegAddr; uint32_t qStatBitsOffset; uint32_t qStat0BitMask; uint32_t qStat1BitMask; uint32_t intRegCheckMask; /* interrupt reg check mask */ void (*cb)(int, void *); /* callback function */ void *cbarg; /* callback argument */ int priority; /* dispatch priority */ #if 0 /* NB: needed only for A0 parts */ u_int statusWordOffset; /* status word offset */ uint32_t statusMask; /* status mask */ uint32_t statusCheckValue; /* status check value */ #endif }; struct ixpqmgr_softc { device_t sc_dev; bus_space_tag_t sc_iot; bus_space_handle_t sc_ioh; struct resource *sc_irq1; /* IRQ resource */ void *sc_ih1; /* interrupt handler */ int sc_rid1; /* resource id for irq */ struct resource *sc_irq2; void *sc_ih2; int sc_rid2; struct qmgrInfo qinfo[IX_QMGR_MAX_NUM_QUEUES]; /* * This array contains a list of queue identifiers ordered by * priority. The table is split logically between queue * identifiers 0-31 and 32-63. To optimize lookups bit masks * are kept for the first-32 and last-32 q's. When the * table needs to be rebuilt mark rebuildTable and it'll * happen after the next interrupt. */ int priorityTable[IX_QMGR_MAX_NUM_QUEUES]; uint32_t lowPriorityTableFirstHalfMask; uint32_t uppPriorityTableFirstHalfMask; int rebuildTable; /* rebuild priorityTable */ uint32_t aqmFreeSramAddress; /* SRAM free space */ }; static int qmgr_debug; SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RWTUN, &qmgr_debug, 0, "IXP4XX Q-Manager debug msgs"); #define DPRINTF(dev, fmt, ...) do { \ if (qmgr_debug) printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFn(n, dev, fmt, ...) 
do { \ if (qmgr_debug >= n) printf(fmt, __VA_ARGS__); \ } while (0) static struct ixpqmgr_softc *ixpqmgr_sc = NULL; static void ixpqmgr_rebuild(struct ixpqmgr_softc *); static void ixpqmgr_intr(void *); static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId); static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId); static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf); static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId); static void aqm_reset(struct ixpqmgr_softc *sc); static void dummyCallback(int qId, void *arg) { /* XXX complain */ } static uint32_t aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off) { DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off); return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); } static void aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val) { DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val); bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); } static int ixpqmgr_probe(device_t dev) { device_set_desc(dev, "IXP4XX Q-Manager"); return 0; } static int ixpqmgr_attach(device_t dev) { struct ixpqmgr_softc *sc = device_get_softc(dev); struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); int i, err; ixpqmgr_sc = sc; sc->sc_dev = dev; sc->sc_iot = sa->sc_iot; if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, 0, &sc->sc_ioh)) panic("%s: Cannot map registers", device_get_name(dev)); /* NB: we only use the lower 32 q's */ /* Set up QMGR interrupts */ sc->sc_rid1 = 0; sc->sc_irq1 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid1, IXP425_INT_QUE1_32, IXP425_INT_QUE1_32, 1, RF_ACTIVE); sc->sc_rid2 = 1; sc->sc_irq2 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid2, IXP425_INT_QUE33_64, IXP425_INT_QUE33_64, 1, RF_ACTIVE); if (sc->sc_irq1 == NULL || sc->sc_irq2 == NULL) panic("Unable to allocate the qmgr irqs.\n"); err = bus_setup_intr(dev, sc->sc_irq1, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixpqmgr_intr, NULL, &sc->sc_ih1); if (err) { device_printf(dev, "failed to set up qmgr irq=%d\n", IXP425_INT_QUE1_32); return (ENXIO); } err = bus_setup_intr(dev, sc->sc_irq2, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixpqmgr_intr, NULL, &sc->sc_ih2); if (err) { device_printf(dev, "failed to set up qmgr irq=%d\n", IXP425_INT_QUE33_64); return (ENXIO); } /* NB: softc is pre-zero'd */ for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) { struct qmgrInfo *qi = &sc->qinfo[i]; qi->cb = dummyCallback; qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */ /* * There are two interrupt registers, 32 bits each. One * for the lower queues(0-31) and one for the upper * queues(32-63). Therefore need to mod by 32 i.e the * min upper queue identifier. */ qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID))); /* * Register addresses and bit masks are calculated and * stored here to optimize QRead, QWrite and QStatusGet * functions. 
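 * Queues 0-31 get per-queue underflow/overflow and low-group status
 * register addresses; queues 32-63 only have bits in the shared
 * nearly-empty/full status words.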
*/ /* AQM Queue access reg addresses, per queue */ qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i); qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i); qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i); /* AQM Queue lower-group (0-31), only */ if (i < IX_QMGR_MIN_QUEUPP_QID) { /* AQM Q underflow/overflow status reg address, per queue */ qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET + ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) * sizeof(uint32_t)); /* AQM Q underflow status bit masks for status reg per queue */ qi->qUflowStatBitMask = (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) << ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) * (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD)); /* AQM Q overflow status bit masks for status reg, per queue */ qi->qOflowStatBitMask = (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) << ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) * (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD)); /* AQM Q lower-group (0-31) status reg addresses, per queue */ qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET + ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) * sizeof(uint32_t)); /* AQM Q lower-group (0-31) status register bit offset */ qi->qStatBitsOffset = (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) * (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD); } else { /* AQM Q upper-group (32-63), only */ qi->qUOStatRegAddr = 0; /* XXX */ /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */ qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID)); /* AQM Q upper-group (32-63) Full status register bitmasks */ qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID)); } } sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */ ixpqmgr_rebuild(sc); /* build initial priority table */ aqm_reset(sc); /* reset h/w */ return (0); } static int ixpqmgr_detach(device_t dev) { struct ixpqmgr_softc *sc = device_get_softc(dev); aqm_reset(sc); /* disable interrupts */ bus_teardown_intr(dev, sc->sc_irq1, sc->sc_ih1); bus_teardown_intr(dev, sc->sc_irq2, sc->sc_ih2); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid1, sc->sc_irq1); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid2, sc->sc_irq2); bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE); return (0); } int ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel, qconfig_hand_t *cb, void *cbarg) { struct ixpqmgr_softc *sc = ixpqmgr_sc; struct qmgrInfo *qi = &sc->qinfo[qId]; DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n", __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg); /* NB: entry size is always 1 */ qi->qSizeInWords = qEntries; qi->qReadCount = 0; qi->qWriteCount = 0; qi->qSizeInEntries = qEntries; /* XXX kept for code clarity */ if (cb == NULL) { /* Reset to dummy callback */ qi->cb = dummyCallback; qi->cbarg = NULL; } else { qi->cb = cb; qi->cbarg = cbarg; } /* Write the config register; NB must be AFTER qinfo setup */ aqm_qcfg(sc, qId, ne, nf); /* * Account for space just allocated to queue. 
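 * aqmFreeSramAddress is a running byte offset into the AQM SRAM region
 * (queue storage begins at offset 0x100, i.e. 0x2100 in register space);
 * ixpqmgr_dump() walks up to it when dumping allocated SRAM.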
*/ sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t)); /* Set the interrupt source if this queue is in the range 0-31 */ if (qId < IX_QMGR_MIN_QUEUPP_QID) aqm_srcsel_write(sc, qId, srcSel); if (cb != NULL) /* Enable the interrupt */ aqm_int_enable(sc, qId); sc->rebuildTable = TRUE; return 0; /* XXX */ } int ixpqmgr_qwrite(int qId, uint32_t entry) { struct ixpqmgr_softc *sc = ixpqmgr_sc; struct qmgrInfo *qi = &sc->qinfo[qId]; DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n", __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries); /* write the entry */ aqm_reg_write(sc, qi->qAccRegAddr, entry); /* NB: overflow is available for lower queues only */ if (qId < IX_QMGR_MIN_QUEUPP_QID) { int qSize = qi->qSizeInEntries; /* * Increment the current number of entries in the queue * and check for overflow . */ if (qi->qWriteCount++ == qSize) { /* check for overflow */ uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); int qPtrs; /* * Read the status twice because the status may * not be immediately ready after the write operation */ if ((status & qi->qOflowStatBitMask) || ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) { /* * The queue is full, clear the overflow status bit if set. */ aqm_reg_write(sc, qi->qUOStatRegAddr, status & ~qi->qOflowStatBitMask); qi->qWriteCount = qSize; DPRINTFn(5, sc->sc_dev, "%s(%u, 0x%x) Q full, overflow status cleared\n", __func__, qId, entry); return ENOSPC; } /* * No overflow occurred : someone is draining the queue * and the current counter needs to be * updated from the current number of entries in the queue */ /* calculate number of words in q */ qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr); DPRINTFn(2, sc->sc_dev, "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n", __func__, qId, entry, qPtrs); qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f; if (qPtrs == 0) { /* * The queue may be full at the time of the * snapshot. Next access will check * the overflow status again. */ qi->qWriteCount = qSize; } else { /* convert the number of words to a number of entries */ qi->qWriteCount = qPtrs & (qSize - 1); } } } return 0; } int ixpqmgr_qread(int qId, uint32_t *entry) { struct ixpqmgr_softc *sc = ixpqmgr_sc; struct qmgrInfo *qi = &sc->qinfo[qId]; bus_size_t off = qi->qAccRegAddr; *entry = aqm_reg_read(sc, off); /* * Reset the current read count : next access to the read function * will force a underflow status check. */ qi->qReadCount = 0; /* Check if underflow occurred on the read */ if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) { /* get the queue status */ uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); if (status & qi->qUflowStatBitMask) { /* clear underflow status */ aqm_reg_write(sc, qi->qUOStatRegAddr, status &~ qi->qUflowStatBitMask); return ENOSPC; } } return 0; } int ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p) { struct ixpqmgr_softc *sc = ixpqmgr_sc; struct qmgrInfo *qi = &sc->qinfo[qId]; uint32_t entry; bus_size_t off = qi->qAccRegAddr; entry = aqm_reg_read(sc, off); while (--n) { if (entry == 0) { /* if we read a NULL entry, stop. We have underflowed */ break; } *p++ = entry; /* store */ entry = aqm_reg_read(sc, off); } *p = entry; /* * Reset the current read count : next access to the read function * will force a underflow status check. 
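 *
 * [Editor's sketch -- not part of the original driver.  It shows how a
 *  client of this API is expected to treat the ENOSPC returns documented
 *  above: ENOSPC from ixpqmgr_qwrite() means the queue overflowed (the
 *  overflow status bit has already been cleared), ENOSPC from
 *  ixpqmgr_qread() means the queue was empty.  The queue id, depth,
 *  watermarks and entry value are arbitrary example values, and the
 *  prototypes are assumed to come from the q-manager header along with
 *  the usual kernel headers.
 *
 *	static int
 *	example_roundtrip(void)
 *	{
 *		uint32_t entry;
 *		int error;
 *
 *		error = ixpqmgr_qconfig(6, 128, 2, 2, 0, NULL, NULL);
 *		if (error != 0)
 *			return (error);
 *
 *		error = ixpqmgr_qwrite(6, 0xdeadbeef);
 *		if (error == ENOSPC)
 *			return (error);
 *
 *		error = ixpqmgr_qread(6, &entry);
 *		if (error == ENOSPC)
 *			return (error);
 *
 *		return (0);
 *	}
 *
 *  A queue configured with a non-NULL callback additionally has its
 *  interrupt enabled, so the callback runs from the dispatcher below when
 *  the selected source condition is met.]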
*/ qi->qReadCount = 0; /* Check if underflow occurred on the read */ if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) { /* get the queue status */ uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); if (status & qi->qUflowStatBitMask) { /* clear underflow status */ aqm_reg_write(sc, qi->qUOStatRegAddr, status &~ qi->qUflowStatBitMask); return ENOSPC; } } return 0; } uint32_t ixpqmgr_getqstatus(int qId) { #define QLOWSTATMASK \ ((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1) struct ixpqmgr_softc *sc = ixpqmgr_sc; const struct qmgrInfo *qi = &sc->qinfo[qId]; uint32_t status; if (qId < IX_QMGR_MIN_QUEUPP_QID) { /* read the status of a queue in the range 0-31 */ status = aqm_reg_read(sc, qi->qStatRegAddr); /* mask out the status bits relevant only to this queue */ status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK; } else { /* read status of a queue in the range 32-63 */ status = 0; if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask) status |= IX_QMGR_Q_STATUS_NE_BIT_MASK; /* nearly empty */ if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask) status |= IX_QMGR_Q_STATUS_F_BIT_MASK; /* full */ } return status; #undef QLOWSTATMASK } uint32_t ixpqmgr_getqconfig(int qId) { struct ixpqmgr_softc *sc = ixpqmgr_sc; return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)); } void ixpqmgr_dump(void) { struct ixpqmgr_softc *sc = ixpqmgr_sc; int i, a; /* status registers */ printf("0x%04x: %08x %08x %08x %08x\n" , 0x400 , aqm_reg_read(sc, 0x400) , aqm_reg_read(sc, 0x400+4) , aqm_reg_read(sc, 0x400+8) , aqm_reg_read(sc, 0x400+12) ); printf("0x%04x: %08x %08x %08x %08x\n" , 0x410 , aqm_reg_read(sc, 0x410) , aqm_reg_read(sc, 0x410+4) , aqm_reg_read(sc, 0x410+8) , aqm_reg_read(sc, 0x410+12) ); printf("0x%04x: %08x %08x %08x %08x\n" , 0x420 , aqm_reg_read(sc, 0x420) , aqm_reg_read(sc, 0x420+4) , aqm_reg_read(sc, 0x420+8) , aqm_reg_read(sc, 0x420+12) ); printf("0x%04x: %08x %08x %08x %08x\n" , 0x430 , aqm_reg_read(sc, 0x430) , aqm_reg_read(sc, 0x430+4) , aqm_reg_read(sc, 0x430+8) , aqm_reg_read(sc, 0x430+12) ); /* q configuration registers */ for (a = 0x2000; a < 0x20ff; a += 32) printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n" , a , aqm_reg_read(sc, a) , aqm_reg_read(sc, a+4) , aqm_reg_read(sc, a+8) , aqm_reg_read(sc, a+12) , aqm_reg_read(sc, a+16) , aqm_reg_read(sc, a+20) , aqm_reg_read(sc, a+24) , aqm_reg_read(sc, a+28) ); /* allocated SRAM */ for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) { a = 0x2000 + i; printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n" , a , aqm_reg_read(sc, a) , aqm_reg_read(sc, a+4) , aqm_reg_read(sc, a+8) , aqm_reg_read(sc, a+12) , aqm_reg_read(sc, a+16) , aqm_reg_read(sc, a+20) , aqm_reg_read(sc, a+24) , aqm_reg_read(sc, a+28) ); } for (i = 0; i < 16; i++) { printf("Q[%2d] config 0x%08x status 0x%02x " "Q[%2d] config 0x%08x status 0x%02x\n" , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i) , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16) ); } } void ixpqmgr_notify_enable(int qId, int srcSel) { struct ixpqmgr_softc *sc = ixpqmgr_sc; #if 0 /* Calculate the checkMask and checkValue for this q */ aqm_calc_statuscheck(sc, qId, srcSel); #endif /* Set the interrupt source if this queue is in the range 0-31 */ if (qId < IX_QMGR_MIN_QUEUPP_QID) aqm_srcsel_write(sc, qId, srcSel); /* Enable the interrupt */ aqm_int_enable(sc, qId); } void ixpqmgr_notify_disable(int qId) { struct ixpqmgr_softc *sc = ixpqmgr_sc; aqm_int_disable(sc, qId); } /* * Rebuild the priority table used by the dispatcher. 
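 *
 * [Editor's sketch -- not part of the original driver.  ixpqmgr_getqstatus()
 *  above returns, for queues 0-31, the queue's four status bits shifted
 *  down to the low nibble.  The sketch decodes that nibble; the bit values
 *  are my assumption for the IX_QMGR_Q_STATUS_{E,NE,NF,F}_BIT_MASK
 *  constants (empty, nearly empty, nearly full, full) and should be
 *  checked against the header before relying on them.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define Q_STATUS_E	0x1
 *	#define Q_STATUS_NE	0x2
 *	#define Q_STATUS_NF	0x4
 *	#define Q_STATUS_F	0x8
 *
 *	static void
 *	print_qstatus(int qid, uint32_t status)
 *	{
 *		printf("q%d:%s%s%s%s\n", qid,
 *		    (status & Q_STATUS_E) ? " empty" : "",
 *		    (status & Q_STATUS_NE) ? " nearly-empty" : "",
 *		    (status & Q_STATUS_NF) ? " nearly-full" : "",
 *		    (status & Q_STATUS_F) ? " full" : "");
 *	}
 *
 *  For queues 32-63 only the nearly-empty and full conditions exist, and
 *  ixpqmgr_getqstatus() synthesizes the same NE/F bits from the two
 *  upper-group status registers.]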
*/ static void ixpqmgr_rebuild(struct ixpqmgr_softc *sc) { int q, pri; int lowQuePriorityTableIndex, uppQuePriorityTableIndex; struct qmgrInfo *qi; sc->lowPriorityTableFirstHalfMask = 0; sc->uppPriorityTableFirstHalfMask = 0; lowQuePriorityTableIndex = 0; uppQuePriorityTableIndex = 32; for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) { /* low priority q's */ for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) { qi = &sc->qinfo[q]; if (qi->priority == pri) { /* * Build the priority table bitmask which match the * queues of the first half of the priority table. */ if (lowQuePriorityTableIndex < 16) { sc->lowPriorityTableFirstHalfMask |= qi->intRegCheckMask; } sc->priorityTable[lowQuePriorityTableIndex++] = q; } } /* high priority q's */ for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) { qi = &sc->qinfo[q]; if (qi->priority == pri) { /* * Build the priority table bitmask which match the * queues of the first half of the priority table . */ if (uppQuePriorityTableIndex < 48) { sc->uppPriorityTableFirstHalfMask |= qi->intRegCheckMask; } sc->priorityTable[uppQuePriorityTableIndex++] = q; } } } sc->rebuildTable = FALSE; } /* * Count the number of leading zero bits in a word, * and return the same value than the CLZ instruction. * Note this is similar to the standard ffs function but * it counts zero's from the MSB instead of the LSB. * * word (in) return value (out) * 0x80000000 0 * 0x40000000 1 * ,,, ,,, * 0x00000002 30 * 0x00000001 31 * 0x00000000 32 * * The C version of this function is used as a replacement * for system not providing the equivalent of the CLZ * assembly language instruction. * * Note that this version is big-endian */ static unsigned int _lzcount(uint32_t word) { unsigned int lzcount = 0; if (word == 0) return 32; while ((word & 0x80000000) == 0) { word <<= 1; lzcount++; } return lzcount; } static void ixpqmgr_intr(void *arg) { struct ixpqmgr_softc *sc = ixpqmgr_sc; uint32_t intRegVal; /* Interrupt reg val */ struct qmgrInfo *qi; int priorityTableIndex; /* Priority table index */ int qIndex; /* Current queue being processed */ /* Read the interrupt register */ intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET); /* Write back to clear interrupt */ aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal); DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n", __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET)); /* No queue has interrupt register set */ if (intRegVal != 0) { /* get the first queue Id from the interrupt register value */ qIndex = (32 - 1) - _lzcount(intRegVal); DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n", __func__, intRegVal, qIndex); /* * Optimize for single callback case. */ qi = &sc->qinfo[qIndex]; if (intRegVal == qi->intRegCheckMask) { /* * Only 1 queue event triggered a notification. * Call the callback function for this queue */ qi->cb(qIndex, qi->cbarg); } else { /* * The event is triggered by more than 1 queue, * the queue search will start from the beginning * or the middle of the priority table. * * The search will end when all the bits of the interrupt * register are cleared. There is no need to maintain * a separate value and test it at each iteration. */ if (intRegVal & sc->lowPriorityTableFirstHalfMask) { priorityTableIndex = 0; } else { priorityTableIndex = 16; } /* * Iterate over the priority table until all the bits * of the interrupt register are cleared. 
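 *
 * [Editor's sketch -- not part of the original driver.  The single-queue
 *  fast path above picks the pending queue as (32 - 1) - _lzcount(value);
 *  this standalone copy of the leading-zero count shows that this is just
 *  "index of the most significant set bit", and on GCC/Clang it agrees
 *  with 31 - __builtin_clz(value) for non-zero values (the builtin is an
 *  assumption about the toolchain, not something the driver relies on).
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static unsigned int
 *	lzcount(uint32_t word)
 *	{
 *		unsigned int n = 0;
 *
 *		if (word == 0)
 *			return (32);
 *		while ((word & 0x80000000u) == 0) {
 *			word <<= 1;
 *			n++;
 *		}
 *		return (n);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t v[] = { 0x80000000u, 0x00010000u, 0x00000003u, 0x1u };
 *		int i;
 *
 *		for (i = 0; i < 4; i++)
 *			printf("0x%08x: lzcount %u, highest pending q %u\n",
 *			    v[i], lzcount(v[i]), 31 - lzcount(v[i]));
 *		return (0);
 *	}
 *
 *  0x00000003 (queues 0 and 1 pending) reports queue 1 first, which is why
 *  the multi-queue case falls back to the priority-table walk below.]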
*/ do { qIndex = sc->priorityTable[priorityTableIndex++]; qi = &sc->qinfo[qIndex]; /* If this queue caused this interrupt to be raised */ if (intRegVal & qi->intRegCheckMask) { /* Call the callback function for this queue */ qi->cb(qIndex, qi->cbarg); /* Clear the interrupt register bit */ intRegVal &= ~qi->intRegCheckMask; } } while (intRegVal); } } /* Rebuild the priority table if needed */ if (sc->rebuildTable) ixpqmgr_rebuild(sc); } #if 0 /* * Generate the parameters used to check if a Q's status matches * the specified source select. We calculate which status word * to check (statusWordOffset), the value to check the status * against (statusCheckValue) and the mask (statusMask) to mask * out all but the bits to check in the status word. */ static void aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel) { struct qmgrInfo *qi = &qinfo[qId]; uint32_t shiftVal; if (qId < IX_QMGR_MIN_QUEUPP_QID) { switch (srcSel) { case IX_QMGR_Q_SOURCE_ID_E: qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK; qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NE: qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK; qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NF: qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK; qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_F: qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK; qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NOT_E: qi->statusCheckValue = 0; qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NOT_NE: qi->statusCheckValue = 0; qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NOT_NF: qi->statusCheckValue = 0; qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NOT_F: qi->statusCheckValue = 0; qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK; break; default: /* Should never hit */ IX_OSAL_ASSERT(0); break; } /* One nibble of status per queue so need to shift the * check value and mask out to the correct position. */ shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) * IX_QMGR_QUELOWSTAT_BITS_PER_Q; /* Calculate the which status word to check from the qId, * 8 Qs status per word */ qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD; qi->statusCheckValue <<= shiftVal; qi->statusMask <<= shiftVal; } else { /* One status word */ qi->statusWordOffset = 0; /* Single bits per queue and int source bit hardwired NE, * Qs start at 32. */ qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID); qi->statusCheckValue = qi->statusMask; } } #endif static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId) { bus_size_t reg; uint32_t v; if (qId < IX_QMGR_MIN_QUEUPP_QID) reg = IX_QMGR_QUEIEREG0_OFFSET; else reg = IX_QMGR_QUEIEREG1_OFFSET; v = aqm_reg_read(sc, reg); aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID))); DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n", __func__, qId, reg, v, aqm_reg_read(sc, reg)); } static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId) { bus_size_t reg; uint32_t v; if (qId < IX_QMGR_MIN_QUEUPP_QID) reg = IX_QMGR_QUEIEREG0_OFFSET; else reg = IX_QMGR_QUEIEREG1_OFFSET; v = aqm_reg_read(sc, reg); aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID))); DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n", __func__, qId, reg, v, aqm_reg_read(sc, reg)); } static unsigned log2(unsigned n) { unsigned count; /* * N.B. this function will return 0 if supplied 0. 
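 *
 * [Editor's sketch -- not part of the original driver.  A few worked
 *  values for this helper, since the encoders below lean on the 0 -> 0
 *  case called out above:
 *
 *	log2(0) = 0,  log2(1) = 0,  log2(2) = 1,  log2(4) = 2,
 *	log2(16) = 4, log2(64) = 6, log2(128) = 7
 *
 *  so toAqmWatermark(w) = log2(2 * w) maps the allowed watermarks
 *  0,1,2,4,8,16,32,64 to the 3-bit codes 0..7, and toAqmBufferSize() maps
 *  buffer sizes of 16/32/64/128 words to the 2-bit codes 0..3.  A minimal
 *  standalone check:
 *
 *	#include <assert.h>
 *
 *	static unsigned
 *	ilog2(unsigned n)
 *	{
 *		unsigned count;
 *
 *		for (count = 0; n / 2; count++)
 *			n /= 2;
 *		return (count);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		assert(ilog2(2 * 0) == 0);
 *		assert(ilog2(2 * 64) == 7);
 *		assert(ilog2(128 / 16) == 3);
 *		return (0);
 *	}
 *
 *  (ilog2 is renamed from log2 only so a userland build of the sketch does
 *  not clash with the math library.)]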
*/ for (count = 0; n/2; count++) n /= 2; return count; } static __inline unsigned toAqmEntrySize(int entrySize) { /* entrySize 1("00"),2("01"),4("10") */ return log2(entrySize); } static __inline unsigned toAqmBufferSize(unsigned bufferSizeInWords) { /* bufferSize 16("00"),32("01),64("10"),128("11") */ return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE); } static __inline unsigned toAqmWatermark(int watermark) { /* * Watermarks 0("000"),1("001"),2("010"),4("011"), * 8("100"),16("101"),32("110"),64("111") */ return log2(2 * watermark); } static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf) { const struct qmgrInfo *qi = &sc->qinfo[qId]; uint32_t qCfg; uint32_t baseAddress; /* Build config register */ qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) << IX_QMGR_Q_CONFIG_ESIZE_OFFSET) | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) << IX_QMGR_Q_CONFIG_BSIZE_OFFSET); /* baseAddress, calculated relative to start address */ baseAddress = sc->aqmFreeSramAddress; /* base address must be word-aligned */ KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0, ("address not word-aligned")); /* Now convert to a 16 word pointer as required by QUECONFIG register */ baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT; qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET; /* set watermarks */ qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET) | (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET); DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n", __func__, qId, ne, nf, aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)), qCfg, IX_QMGR_Q_CONFIG_ADDR_GET(qId)); aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg); } static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId) { bus_size_t off; uint32_t v; /* * Calculate the register offset; multiple queues split across registers */ off = IX_QMGR_INT0SRCSELREG0_OFFSET + ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t)); v = aqm_reg_read(sc, off); if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) { /* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */ v |= 0x7; } else { const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD; uint32_t mask; int qshift; qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq; mask = ((1 << bpq) - 1) << qshift; /* q's status mask */ /* merge sourceId */ v = (v &~ mask) | ((sourceId << qshift) & mask); } DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n", __func__, qId, sourceId, aqm_reg_read(sc, off), v, off); aqm_reg_write(sc, off, v); } /* * Reset AQM registers to default values. 
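 *
 * [Editor's sketch -- not part of the original driver.  It walks through
 *  the field values aqm_qcfg() above packs into a QUECONFIG word for one
 *  example queue: 1-word entries, a 128-word (128-entry) buffer, a base
 *  offset of 0x100 bytes into AQM SRAM, and nearly-empty/nearly-full
 *  watermarks of 2 entries each.  The field bit positions are left
 *  symbolic because their numeric values (IX_QMGR_Q_CONFIG_*_OFFSET) live
 *  in the header, not in this file.
 *
 *	entry size  : toAqmEntrySize(1)    = log2(1)        = 0
 *	buffer size : toAqmBufferSize(128) = log2(128 / 16) = 3
 *	base address: 0x100 bytes >> 6     = 4  (16-word units)
 *	watermarks  : toAqmWatermark(2)    = log2(2 * 2)    = 2
 *
 *  and the register value is the OR of each field shifted to its
 *  IX_QMGR_Q_CONFIG_{ESIZE,BSIZE,BADDR,NE,NF}_OFFSET position, e.g.:
 *
 *	qCfg  = (0 << ESIZE_OFFSET) | (3 << BSIZE_OFFSET);
 *	qCfg |= 4 << BADDR_OFFSET;
 *	qCfg |= (2 << NE_OFFSET) | (2 << NF_OFFSET);
 *
 *  The 16-word shift of 6 is my reading of IX_QMGR_BASE_ADDR_16_WORD_SHIFT
 *  (16 words of 4 bytes each is 64 bytes); verify it against the header
 *  before relying on it.]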
*/ static void aqm_reset(struct ixpqmgr_softc *sc) { int i; /* Reset queues 0..31 status registers 0..3 */ aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET, IX_QMGR_QUELOWSTAT_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET, IX_QMGR_QUELOWSTAT_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET, IX_QMGR_QUELOWSTAT_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET, IX_QMGR_QUELOWSTAT_RESET_VALUE); /* Reset underflow/overflow status registers 0..1 */ aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET, IX_QMGR_QUEUOSTAT_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET, IX_QMGR_QUEUOSTAT_RESET_VALUE); /* Reset queues 32..63 nearly empty status registers */ aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET, IX_QMGR_QUEUPPSTAT0_RESET_VALUE); /* Reset queues 32..63 full status registers */ aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET, IX_QMGR_QUEUPPSTAT1_RESET_VALUE); /* Reset int0 status flag source select registers 0..3 */ aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET, IX_QMGR_INT0SRCSELREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET, IX_QMGR_INT0SRCSELREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET, IX_QMGR_INT0SRCSELREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET, IX_QMGR_INT0SRCSELREG_RESET_VALUE); /* Reset queue interrupt enable register 0..1 */ aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET, IX_QMGR_QUEIEREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET, IX_QMGR_QUEIEREG_RESET_VALUE); /* Reset queue interrupt register 0..1 */ aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE); /* Reset queue configuration words 0..63 */ for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr, IX_QMGR_QUECONFIG_RESET_VALUE); /* XXX zero SRAM to simplify debugging */ for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET; i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t)) aqm_reg_write(sc, i, 0); } static device_method_t ixpqmgr_methods[] = { DEVMETHOD(device_probe, ixpqmgr_probe), DEVMETHOD(device_attach, ixpqmgr_attach), DEVMETHOD(device_detach, ixpqmgr_detach), { 0, 0 } }; static driver_t ixpqmgr_driver = { "ixpqmgr", ixpqmgr_methods, sizeof(struct ixpqmgr_softc), }; static devclass_t ixpqmgr_devclass; DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0); Index: head/sys/arm/xscale/ixp425/ixp425_timer.c =================================================================== --- head/sys/arm/xscale/ixp425/ixp425_timer.c (revision 306901) +++ head/sys/arm/xscale/ixp425/ixp425_timer.c (revision 306902) @@ -1,269 +1,268 @@ /* $NetBSD: ixp425_timer.c,v 1.11 2006/04/10 03:36:03 simonb Exp $ */ /* * Copyright (c) 2003 * Ichiro FUKUHARA . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Ichiro FUKUHARA. * 4. 
The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include static uint32_t counts_per_hz; /* callback functions for intr_functions */ int ixpclk_intr(void *); struct ixpclk_softc { device_t sc_dev; bus_addr_t sc_baseaddr; bus_space_tag_t sc_iot; bus_space_handle_t sc_ioh; }; static unsigned ixp425_timer_get_timecount(struct timecounter *tc); #ifndef IXP425_CLOCK_FREQ #define COUNTS_PER_SEC 66666600 /* 66MHz */ #else #define COUNTS_PER_SEC IXP425_CLOCK_FREQ #endif #define COUNTS_PER_USEC ((COUNTS_PER_SEC / 1000000) + 1) static struct ixpclk_softc *ixpclk_sc = NULL; #define GET_TS_VALUE(sc) (*(volatile u_int32_t *) \ (IXP425_TIMER_VBASE + IXP425_OST_TS)) static struct timecounter ixp425_timer_timecounter = { ixp425_timer_get_timecount, /* get_timecount */ NULL, /* no poll_pps */ ~0u, /* counter_mask */ COUNTS_PER_SEC, /* frequency */ "IXP4XX Timer", /* name */ 1000, /* quality */ }; static int ixpclk_probe(device_t dev) { device_set_desc(dev, "IXP4XX Timer"); return (0); } static int ixpclk_attach(device_t dev) { struct ixpclk_softc *sc = device_get_softc(dev); struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); ixpclk_sc = sc; sc->sc_dev = dev; sc->sc_iot = sa->sc_iot; sc->sc_baseaddr = IXP425_TIMER_HWBASE; if (bus_space_map(sc->sc_iot, sc->sc_baseaddr, 8, 0, &sc->sc_ioh)) panic("%s: Cannot map registers", device_get_name(dev)); return (0); } static device_method_t ixpclk_methods[] = { DEVMETHOD(device_probe, ixpclk_probe), DEVMETHOD(device_attach, ixpclk_attach), {0, 0}, }; static driver_t ixpclk_driver = { "ixpclk", ixpclk_methods, sizeof(struct ixpclk_softc), }; static devclass_t ixpclk_devclass; DRIVER_MODULE(ixpclk, ixp, ixpclk_driver, ixpclk_devclass, 0, 0); static unsigned ixp425_timer_get_timecount(struct timecounter *tc) { uint32_t ret; ret = GET_TS_VALUE(sc); return (ret); } /* * cpu_initclocks: * * Initialize the clock and get them going. */ void cpu_initclocks(void) { struct ixpclk_softc* sc = ixpclk_sc; struct resource *irq; device_t dev = sc->sc_dev; u_int oldirqstate; int rid = 0; void *ihl; if (hz < 50 || COUNTS_PER_SEC % hz) { printf("Cannot get %d Hz clock; using 100 Hz\n", hz); hz = 100; } tick = 1000000 / hz; /* number of microseconds between interrupts */ /* * We only have one timer available; stathz and profhz are * always left as 0 (the upper-layer clock code deals with * this situation). 
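 *
 * [Editor's sketch -- not part of the original driver.  It reproduces the
 *  hz sanity check and reload computation from this function as a
 *  standalone program, using the 66666600 Hz COUNTS_PER_SEC default from
 *  this file: hz must be at least 50 and must divide the timer frequency
 *  evenly, otherwise the code falls back to 100 Hz.
 *
 *	#include <stdio.h>
 *
 *	#define COUNTS_PER_SEC	66666600
 *
 *	static void
 *	show(int hz)
 *	{
 *		if (hz < 50 || COUNTS_PER_SEC % hz != 0) {
 *			printf("%d Hz rejected, using 100 Hz\n", hz);
 *			hz = 100;
 *		}
 *		printf("hz %d: counts_per_hz %d, tick %d us\n",
 *		    hz, COUNTS_PER_SEC / hz, 1000000 / hz);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		show(100);
 *		show(1000);
 *		return (0);
 *	}
 *
 *  100 Hz gives a reload value of 666666 counts and a 10000 us tick;
 *  1000 Hz is rejected because 66666600 is not a multiple of 1000
 *  (remainder 600), so the 100 Hz fallback is used instead.]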
*/ if (stathz != 0) printf("Cannot get %d Hz statclock\n", stathz); stathz = 0; if (profhz != 0) printf("Cannot get %d Hz profclock\n", profhz); profhz = 0; /* Report the clock frequency. */ oldirqstate = disable_interrupts(PSR_I); irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, IXP425_INT_TMR0, IXP425_INT_TMR0, 1, RF_ACTIVE); if (!irq) panic("Unable to setup the clock irq handler.\n"); else bus_setup_intr(dev, irq, INTR_TYPE_CLK, ixpclk_intr, NULL, NULL, &ihl); /* Set up the new clock parameters. */ /* clear interrupt */ bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS, OST_WARM_RESET | OST_WDOG_INT | OST_TS_INT | OST_TIM1_INT | OST_TIM0_INT); counts_per_hz = COUNTS_PER_SEC / hz; /* reload value & Timer enable */ bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_TIM0_RELOAD, (counts_per_hz & TIMERRELOAD_MASK) | OST_TIMER_EN); tc_init(&ixp425_timer_timecounter); restore_interrupts(oldirqstate); rid = 0; } /* * DELAY: * * Delay for at least N microseconds. */ void DELAY(int n) { u_int32_t first, last; int usecs; if (n == 0) return; /* * Clamp the timeout at a maximum value (about 32 seconds with * a 66MHz clock). *Nobody* should be delay()ing for anywhere * near that length of time and if they are, they should be hung * out to dry. */ if (n >= (0x80000000U / COUNTS_PER_USEC)) usecs = (0x80000000U / COUNTS_PER_USEC) - 1; else usecs = n * COUNTS_PER_USEC; /* Note: Timestamp timer counts *up*, unlike the other timers */ first = GET_TS_VALUE(); while (usecs > 0) { last = GET_TS_VALUE(); usecs -= (int)(last - first); first = last; } } /* * ixpclk_intr: * * Handle the hardclock interrupt. */ int ixpclk_intr(void *arg) { struct ixpclk_softc* sc = ixpclk_sc; struct trapframe *frame = arg; bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS, OST_TIM0_INT); hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); return (FILTER_HANDLED); } void cpu_startprofclock(void) { } void cpu_stopprofclock(void) { } Index: head/sys/arm/xscale/ixp425/ixp425_wdog.c =================================================================== --- head/sys/arm/xscale/ixp425/ixp425_wdog.c (revision 306901) +++ head/sys/arm/xscale/ixp425/ixp425_wdog.c (revision 306902) @@ -1,117 +1,115 @@ /*- * Copyright (c) 2006 Sam Leffler. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
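 *
 * [Editor's sketch, referring to the DELAY() routine in ixp425_timer.c
 *  above (it is not part of this watchdog file or of the original driver).
 *  DELAY() spins on the free-running, up-counting timestamp timer and
 *  measures elapsed time as (last - first) in unsigned arithmetic, so the
 *  counter may wrap during the wait without breaking the loop.  A
 *  standalone model, with read_counter() as a hypothetical stand-in for
 *  GET_TS_VALUE():
 *
 *	#include <stdint.h>
 *
 *	extern uint32_t read_counter(void);
 *
 *	#define COUNTS_PER_USEC	67
 *
 *	void
 *	delay_us(int us)
 *	{
 *		uint32_t first, last;
 *		int remaining = us * COUNTS_PER_USEC;
 *
 *		first = read_counter();
 *		while (remaining > 0) {
 *			last = read_counter();
 *			remaining -= (int)(last - first);
 *			first = last;
 *		}
 *	}
 *
 *  Because last and first are uint32_t, (last - first) stays correct
 *  modulo 2^32 across a counter wrap, which is also why DELAY() clamps the
 *  total wait well below the point where that difference could overflow
 *  the signed accumulator.]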
*/ #include __FBSDID("$FreeBSD$"); /* * IXP4XX Watchdog Timer Support. */ #include #include #include #include #include #include #include #include #include #include -#include -#include #include #include #include #include struct ixpwdog_softc { device_t sc_dev; }; static __inline uint32_t RD4(struct ixpwdog_softc *sc, bus_size_t off) { return bus_space_read_4(&ixp425_bs_tag, IXP425_TIMER_VBASE, off); } static __inline void WR4(struct ixpwdog_softc *sc, bus_size_t off, uint32_t val) { bus_space_write_4(&ixp425_bs_tag, IXP425_TIMER_VBASE, off, val); } static void ixp425_watchdog(void *arg, u_int cmd, int *error) { struct ixpwdog_softc *sc = arg; u_int u = cmd & WD_INTERVAL; WR4(sc, IXP425_OST_WDOG_KEY, OST_WDOG_KEY_MAJICK); if (4 <= u && u <= 35) { WR4(sc, IXP425_OST_WDOG_ENAB, 0); /* approximate 66.66MHz cycles */ WR4(sc, IXP425_OST_WDOG, 2<<(u - 4)); /* NB: reset on timer expiration */ WR4(sc, IXP425_OST_WDOG_ENAB, OST_WDOG_ENAB_CNT_ENA | OST_WDOG_ENAB_RST_ENA); *error = 0; } else { /* disable watchdog */ WR4(sc, IXP425_OST_WDOG_ENAB, 0); } WR4(sc, IXP425_OST_WDOG_KEY, 0); } static int ixpwdog_probe(device_t dev) { device_set_desc(dev, "IXP4XX Watchdog Timer"); return (0); } static int ixpwdog_attach(device_t dev) { struct ixpwdog_softc *sc = device_get_softc(dev); sc->sc_dev = dev; EVENTHANDLER_REGISTER(watchdog_list, ixp425_watchdog, sc, 0); return (0); } static device_method_t ixpwdog_methods[] = { DEVMETHOD(device_probe, ixpwdog_probe), DEVMETHOD(device_attach, ixpwdog_attach), {0, 0}, }; static driver_t ixpwdog_driver = { "ixpwdog", ixpwdog_methods, sizeof(struct ixpwdog_softc), }; static devclass_t ixpwdog_devclass; DRIVER_MODULE(ixpwdog, ixp, ixpwdog_driver, ixpwdog_devclass, 0, 0);
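/*
 * [Editor's sketch -- not part of the original driver.  ixp425_watchdog()
 *  above takes the watchdog(9) interval encoding, where (cmd & WD_INTERVAL)
 *  is the timeout expressed as a power of two in nanoseconds, and programs
 *  the counter with 2 << (u - 4), which the driver itself labels an
 *  approximation of 66.66 MHz cycles.  The sketch compares that value with
 *  the exact conversion for a few exponents; the power-of-two-nanoseconds
 *  reading of WD_INTERVAL (WD_TO_1SEC being 30) is quoted from my reading
 *  of sys/watchdog.h and should be verified there.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define WDOG_HZ	66666600ULL
 *
 *	int
 *	main(void)
 *	{
 *		int u;
 *
 *		for (u = 28; u <= 32; u++) {
 *			uint64_t ns = 1ULL << u;
 *			uint64_t exact = ns * WDOG_HZ / 1000000000ULL;
 *			uint64_t used = 2ULL << (u - 4);
 *			printf("u=%d (~%.1fs): exact %llu counts, "
 *			    "driver uses %llu\n", u, ns / 1e9,
 *			    (unsigned long long)exact,
 *			    (unsigned long long)used);
 *		}
 *		return (0);
 *	}
 *
 *  For u = 30 (nominally about 1.07 s) the exact count is roughly 71.6
 *  million while 2 << 26 is about 134 million, i.e. the cheap shift form
 *  errs on the side of a longer timeout, consistent with the "approximate"
 *  comment in the code above.]
 */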