diff --git a/sys/arm/arm/intr.c b/sys/arm/arm/intr.c index 366a27b30be8..b4c5b8d0e3ea 100644 --- a/sys/arm/arm/intr.c +++ b/sys/arm/arm/intr.c @@ -1,204 +1,204 @@ /* $NetBSD: intr.c,v 1.12 2003/07/15 00:24:41 lukem Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2004 Olivier Houchard. * Copyright (c) 1994-1998 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Soft interrupt and other generic interrupt functions. */ #include "opt_platform.h" #include "opt_hwpmc_hooks.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #define INTRNAME_LEN (MAXCOMLEN + 1) typedef void (*mask_fn)(void *); static struct intr_event *intr_events[NIRQ]; void intr_irq_handler(struct trapframe *); void (*arm_post_filter)(void *) = NULL; int (*arm_config_irq)(int irq, enum intr_trigger trig, enum intr_polarity pol) = NULL; /* Data for statistics reporting. */ u_long intrcnt[NIRQ]; char intrnames[(NIRQ * INTRNAME_LEN) + 1]; size_t sintrcnt = sizeof(intrcnt); size_t sintrnames = sizeof(intrnames); /* * Pre-format intrnames into an array of fixed-size strings containing spaces. * This allows us to avoid the need for an intermediate table of indices into * the names and counts arrays, while still meeting the requirements and * assumptions of vmstat(8) and the kdb "show intrcnt" command, the two * consumers of this data. */ static void intr_init(void *unused) { int i; for (i = 0; i < NIRQ; ++i) { snprintf(&intrnames[i * INTRNAME_LEN], INTRNAME_LEN, "%-*s", INTRNAME_LEN - 1, ""); } } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); #ifdef FDT int intr_fdt_map_irq(phandle_t iparent, pcell_t *intr, int icells) { fdt_pic_decode_t intr_decode; phandle_t intr_parent; int i, rv, interrupt, trig, pol; intr_parent = OF_node_from_xref(iparent); for (i = 0; i < icells; i++) intr[i] = cpu_to_fdt32(intr[i]); for (i = 0; fdt_pic_table[i] != NULL; i++) { intr_decode = fdt_pic_table[i]; rv = intr_decode(intr_parent, intr, &interrupt, &trig, &pol); if (rv == 0) { /* This was recognized as our PIC and decoded. 
*/ interrupt = FDT_MAP_IRQ(intr_parent, interrupt); return (interrupt); } } /* Not in table, so guess */ interrupt = FDT_MAP_IRQ(intr_parent, fdt32_to_cpu(intr[0])); return (interrupt); } #endif void arm_setup_irqhandler(const char *name, driver_filter_t *filt, void (*hand)(void*), void *arg, int irq, int flags, void **cookiep) { struct intr_event *event; int error; if (irq < 0 || irq >= NIRQ) return; event = intr_events[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, (mask_fn)arm_mask_irq, (mask_fn)arm_unmask_irq, arm_post_filter, NULL, "intr%d:", irq); if (error) return; intr_events[irq] = event; snprintf(&intrnames[irq * INTRNAME_LEN], INTRNAME_LEN, "irq%d: %-*s", irq, INTRNAME_LEN - 1, name); } intr_event_add_handler(event, name, filt, hand, arg, intr_priority(flags), flags, cookiep); } int arm_remove_irqhandler(int irq, void *cookie) { struct intr_event *event; int error; event = intr_events[irq]; arm_mask_irq(irq); error = intr_event_remove_handler(cookie); - if (!TAILQ_EMPTY(&event->ie_handlers)) + if (!CK_SLIST_EMPTY(&event->ie_handlers)) arm_unmask_irq(irq); return (error); } void dosoftints(void); void dosoftints(void) { } void intr_irq_handler(struct trapframe *frame) { struct intr_event *event; int i; VM_CNT_INC(v_intr); i = -1; while ((i = arm_get_next_irq(i)) != -1) { intrcnt[i]++; event = intr_events[i]; if (intr_event_handle(event, frame) != 0) { /* XXX: Log stray IRQs */ arm_mask_irq(i); } } #ifdef HWPMC_HOOKS if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN)) pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, frame); #endif } diff --git a/sys/arm/mv/gpio.c b/sys/arm/mv/gpio.c index 2ba6b0e78903..80155932deeb 100644 --- a/sys/arm/mv/gpio.c +++ b/sys/arm/mv/gpio.c @@ -1,1188 +1,1189 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Benno Rice. * Copyright (C) 2008 MARVELL INTERNATIONAL LTD. * Copyright (c) 2017 Semihalf. * All rights reserved. 
* * Adapted and extended for Marvell SoCs by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * from: FreeBSD: //depot/projects/arm/src/sys/arm/xscale/pxa2x0/pxa2x0_gpio.c, rev 1 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #define GPIO_MAX_INTR_COUNT 8 #define GPIO_PINS_PER_REG 32 #define GPIO_GENERIC_CAP (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \ GPIO_PIN_OPENDRAIN | GPIO_PIN_PUSHPULL | \ GPIO_PIN_TRISTATE | GPIO_PIN_PULLUP | \ GPIO_PIN_PULLDOWN | GPIO_PIN_INVIN | \ GPIO_PIN_INVOUT) #define DEBOUNCE_CHECK_MS 1 #define DEBOUNCE_LO_HI_MS 2 #define DEBOUNCE_HI_LO_MS 2 #define DEBOUNCE_CHECK_TICKS ((hz / 1000) * DEBOUNCE_CHECK_MS) struct mv_gpio_softc { device_t sc_busdev; struct resource * mem_res; int mem_rid; struct resource * irq_res[GPIO_MAX_INTR_COUNT]; int irq_rid[GPIO_MAX_INTR_COUNT]; struct intr_event * gpio_events[MV_GPIO_MAX_NPINS]; void *ih_cookie[GPIO_MAX_INTR_COUNT]; bus_space_tag_t bst; bus_space_handle_t bsh; struct mtx mutex; uint8_t pin_num; /* number of GPIO pins */ uint8_t irq_num; /* number of real IRQs occupied by GPIO controller */ struct gpio_pin gpio_setup[MV_GPIO_MAX_NPINS]; /* Used for debouncing. 
*/ uint32_t debounced_state_lo; uint32_t debounced_state_hi; struct callout **debounce_callouts; int *debounce_counters; }; struct mv_gpio_pindev { device_t dev; int pin; }; static int mv_gpio_probe(device_t); static int mv_gpio_attach(device_t); static int mv_gpio_intr(device_t, void *); static void mv_gpio_double_edge_init(device_t, int); static int mv_gpio_debounce_setup(device_t, int); static int mv_gpio_debounce_prepare(device_t, int); static int mv_gpio_debounce_init(device_t, int); static void mv_gpio_debounce_start(device_t, int); static void mv_gpio_debounce(void *); static void mv_gpio_debounced_state_set(device_t, int, uint8_t); static uint32_t mv_gpio_debounced_state_get(device_t, int); static void mv_gpio_exec_intr_handlers(device_t, uint32_t, int); static void mv_gpio_intr_handler(device_t, int); static uint32_t mv_gpio_reg_read(device_t, uint32_t); static void mv_gpio_reg_write(device_t, uint32_t, uint32_t); static void mv_gpio_reg_set(device_t, uint32_t, uint32_t); static void mv_gpio_reg_clear(device_t, uint32_t, uint32_t); static void mv_gpio_blink(device_t, uint32_t, uint8_t); static void mv_gpio_polarity(device_t, uint32_t, uint8_t, uint8_t); static void mv_gpio_level(device_t, uint32_t, uint8_t); static void mv_gpio_edge(device_t, uint32_t, uint8_t); static void mv_gpio_out_en(device_t, uint32_t, uint8_t); static void mv_gpio_int_ack(struct mv_gpio_pindev *); static void mv_gpio_value_set(device_t, uint32_t, uint8_t); static uint32_t mv_gpio_value_get(device_t, uint32_t, uint8_t); static void mv_gpio_intr_mask(struct mv_gpio_pindev *); static void mv_gpio_intr_unmask(struct mv_gpio_pindev *); void mv_gpio_finish_intrhandler(struct mv_gpio_pindev *); int mv_gpio_setup_intrhandler(device_t, const char *, driver_filter_t *, void (*)(void *), void *, int, int, void **); int mv_gpio_configure(device_t, uint32_t, uint32_t, uint32_t); void mv_gpio_out(device_t, uint32_t, uint8_t, uint8_t); uint8_t mv_gpio_in(device_t, uint32_t); /* * GPIO interface */ 
static device_t mv_gpio_get_bus(device_t); static int mv_gpio_pin_max(device_t, int *); static int mv_gpio_pin_getcaps(device_t, uint32_t, uint32_t *); static int mv_gpio_pin_getflags(device_t, uint32_t, uint32_t *); static int mv_gpio_pin_getname(device_t, uint32_t, char *); static int mv_gpio_pin_setflags(device_t, uint32_t, uint32_t); static int mv_gpio_pin_set(device_t, uint32_t, unsigned int); static int mv_gpio_pin_get(device_t, uint32_t, unsigned int *); static int mv_gpio_pin_toggle(device_t, uint32_t); static int mv_gpio_map_gpios(device_t, phandle_t, phandle_t, int, pcell_t *, uint32_t *, uint32_t *); #define MV_GPIO_LOCK() mtx_lock_spin(&sc->mutex) #define MV_GPIO_UNLOCK() mtx_unlock_spin(&sc->mutex) #define MV_GPIO_ASSERT_LOCKED() mtx_assert(&sc->mutex, MA_OWNED) static device_method_t mv_gpio_methods[] = { DEVMETHOD(device_probe, mv_gpio_probe), DEVMETHOD(device_attach, mv_gpio_attach), /* GPIO protocol */ DEVMETHOD(gpio_get_bus, mv_gpio_get_bus), DEVMETHOD(gpio_pin_max, mv_gpio_pin_max), DEVMETHOD(gpio_pin_getname, mv_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, mv_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, mv_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, mv_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, mv_gpio_pin_get), DEVMETHOD(gpio_pin_set, mv_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, mv_gpio_pin_toggle), DEVMETHOD(gpio_map_gpios, mv_gpio_map_gpios), DEVMETHOD_END }; static driver_t mv_gpio_driver = { "gpio", mv_gpio_methods, sizeof(struct mv_gpio_softc), }; static devclass_t mv_gpio_devclass; DRIVER_MODULE(mv_gpio, simplebus, mv_gpio_driver, mv_gpio_devclass, 0, 0); struct ofw_compat_data gpio_controllers[] = { { "mrvl,gpio", (uintptr_t)true }, { "marvell,orion-gpio", (uintptr_t)true }, { NULL, 0 } }; static int mv_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, gpio_controllers)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Marvell Integrated GPIO Controller"); return 
(0); } static int mv_gpio_attach(device_t dev) { int i, size; struct mv_gpio_softc *sc; pcell_t pincnt = 0; pcell_t irq_cells = 0; phandle_t iparent; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (sc == NULL) return (ENXIO); if (OF_getencprop(ofw_bus_get_node(dev), "pin-count", &pincnt, sizeof(pcell_t)) >= 0 || OF_getencprop(ofw_bus_get_node(dev), "ngpios", &pincnt, sizeof(pcell_t)) >= 0) { sc->pin_num = MIN(pincnt, MV_GPIO_MAX_NPINS); if (bootverbose) device_printf(dev, "%d pins available\n", sc->pin_num); } else { device_printf(dev, "ERROR: no pin-count or ngpios entry found!\n"); return (ENXIO); } /* Assign generic capabilities to every gpio pin */ for(i = 0; i < sc->pin_num; i++) sc->gpio_setup[i].gp_caps = GPIO_GENERIC_CAP; /* Find root interrupt controller */ iparent = ofw_bus_find_iparent(ofw_bus_get_node(dev)); if (iparent == 0) { device_printf(dev, "No interrupt-parrent found. " "Error in DTB\n"); return (ENXIO); } else { /* While at parent - store interrupt cells prop */ if (OF_searchencprop(OF_node_from_xref(iparent), "#interrupt-cells", &irq_cells, sizeof(irq_cells)) == -1) { device_printf(dev, "DTB: Missing #interrupt-cells " "property in interrupt parent node\n"); return (ENXIO); } } size = OF_getproplen(ofw_bus_get_node(dev), "interrupts"); if (size != -1) { size = size / sizeof(pcell_t); size = size / irq_cells; sc->irq_num = size; device_printf(dev, "%d IRQs available\n", sc->irq_num); } else { device_printf(dev, "ERROR: no interrupts entry found!\n"); return (ENXIO); } sc->debounce_callouts = (struct callout **)malloc(sc->pin_num * sizeof(struct callout *), M_DEVBUF, M_WAITOK | M_ZERO); if (sc->debounce_callouts == NULL) return (ENOMEM); sc->debounce_counters = (int *)malloc(sc->pin_num * sizeof(int), M_DEVBUF, M_WAITOK); if (sc->debounce_counters == NULL) return (ENOMEM); mtx_init(&sc->mutex, device_get_nameunit(dev), NULL, MTX_SPIN); sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if 
(!sc->mem_res) { mtx_destroy(&sc->mutex); device_printf(dev, "could not allocate memory window\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->mem_res); sc->bsh = rman_get_bushandle(sc->mem_res); for (i = 0; i < sc->irq_num; i++) { sc->irq_rid[i] = i; sc->irq_res[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid[i], RF_ACTIVE); if (!sc->irq_res[i]) { mtx_destroy(&sc->mutex); device_printf(dev, "could not allocate gpio%d interrupt\n", i+1); return (ENXIO); } } /* Disable all interrupts */ bus_space_write_4(sc->bst, sc->bsh, GPIO_INT_EDGE_MASK, 0); bus_space_write_4(sc->bst, sc->bsh, GPIO_INT_LEV_MASK, 0); for (i = 0; i < sc->irq_num; i++) { if (bus_setup_intr(dev, sc->irq_res[i], INTR_TYPE_MISC, (driver_filter_t *)mv_gpio_intr, NULL, sc, &sc->ih_cookie[i]) != 0) { mtx_destroy(&sc->mutex); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[i], sc->irq_res[i]); device_printf(dev, "could not set up intr %d\n", i); return (ENXIO); } } /* Clear interrupt status. */ bus_space_write_4(sc->bst, sc->bsh, GPIO_INT_CAUSE, 0); sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) { mtx_destroy(&sc->mutex); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[i], sc->irq_res[i]); return (ENXIO); } return (0); } static int mv_gpio_intr(device_t dev, void *arg) { uint32_t int_cause, gpio_val; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_LOCK(); /* * According to documentation, edge sensitive interrupts are asserted * when unmasked GPIO_INT_CAUSE register bits are set. */ int_cause = mv_gpio_reg_read(dev, GPIO_INT_CAUSE); int_cause &= mv_gpio_reg_read(dev, GPIO_INT_EDGE_MASK); /* * Level sensitive interrupts are asserted when unmasked GPIO_DATA_IN * register bits are set. 
*/ gpio_val = mv_gpio_reg_read(dev, GPIO_DATA_IN); gpio_val &= mv_gpio_reg_read(dev, GPIO_INT_LEV_MASK); mv_gpio_exec_intr_handlers(dev, int_cause | gpio_val, 0); MV_GPIO_UNLOCK(); return (FILTER_HANDLED); } /* * GPIO interrupt handling */ void mv_gpio_finish_intrhandler(struct mv_gpio_pindev *s) { /* When we acheive full interrupt support * This function will be opposite to * mv_gpio_setup_intrhandler */ /* Now it exists only to remind that * there should be place to free mv_gpio_pindev * allocated by mv_gpio_setup_intrhandler */ free(s, M_DEVBUF); } int mv_gpio_setup_intrhandler(device_t dev, const char *name, driver_filter_t *filt, void (*hand)(void *), void *arg, int pin, int flags, void **cookiep) { struct intr_event *event; int error; struct mv_gpio_pindev *s; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); s = malloc(sizeof(struct mv_gpio_pindev), M_DEVBUF, M_NOWAIT | M_ZERO); if (pin < 0 || pin >= sc->pin_num) return (ENXIO); event = sc->gpio_events[pin]; if (event == NULL) { MV_GPIO_LOCK(); if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_DEBOUNCE) { error = mv_gpio_debounce_init(dev, pin); if (error != 0) { MV_GPIO_UNLOCK(); return (error); } } else if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_IRQ_DOUBLE_EDGE) mv_gpio_double_edge_init(dev, pin); MV_GPIO_UNLOCK(); error = intr_event_create(&event, (void *)s, 0, pin, (void (*)(void *))mv_gpio_intr_mask, (void (*)(void *))mv_gpio_intr_unmask, (void (*)(void *))mv_gpio_int_ack, NULL, "gpio%d:", pin); if (error != 0) return (error); sc->gpio_events[pin] = event; } intr_event_add_handler(event, name, filt, hand, arg, intr_priority(flags), flags, cookiep); return (0); } static void mv_gpio_intr_mask(struct mv_gpio_pindev *s) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(s->dev); if (s->pin >= sc->pin_num) return; MV_GPIO_LOCK(); if (sc->gpio_setup[s->pin].gp_flags & (MV_GPIO_IN_IRQ_EDGE | MV_GPIO_IN_IRQ_DOUBLE_EDGE)) mv_gpio_edge(s->dev, s->pin, 0); else 
mv_gpio_level(s->dev, s->pin, 0); /* * The interrupt has to be acknowledged before scheduling an interrupt * thread. This way we allow for interrupt source to trigger again * (which can happen with shared IRQs e.g. PCI) while processing the * current event. */ mv_gpio_int_ack(s); MV_GPIO_UNLOCK(); return; } static void mv_gpio_intr_unmask(struct mv_gpio_pindev *s) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(s->dev); if (s->pin >= sc->pin_num) return; MV_GPIO_LOCK(); if (sc->gpio_setup[s->pin].gp_flags & (MV_GPIO_IN_IRQ_EDGE | MV_GPIO_IN_IRQ_DOUBLE_EDGE)) mv_gpio_edge(s->dev, s->pin, 1); else mv_gpio_level(s->dev, s->pin, 1); MV_GPIO_UNLOCK(); return; } static void mv_gpio_exec_intr_handlers(device_t dev, uint32_t status, int high) { int i, pin; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); i = 0; while (status != 0) { if (status & 1) { pin = (high ? (i + GPIO_PINS_PER_REG) : i); if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_DEBOUNCE) mv_gpio_debounce_start(dev, pin); else if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_IRQ_DOUBLE_EDGE) { mv_gpio_polarity(dev, pin, 0, 1); mv_gpio_intr_handler(dev, pin); } else mv_gpio_intr_handler(dev, pin); } status >>= 1; i++; } } static void mv_gpio_intr_handler(device_t dev, int pin) { #ifdef INTRNG struct intr_irqsrc isrc; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); #ifdef INTR_SOLO isrc.isrc_filter = NULL; #endif isrc.isrc_event = sc->gpio_events[pin]; - if (isrc.isrc_event == NULL || TAILQ_EMPTY(&isrc.isrc_event->ie_handlers)) + if (isrc.isrc_event == NULL || + CK_SLIST_EMPTY(&isrc.isrc_event->ie_handlers)) return; intr_isrc_dispatch(&isrc, NULL); #endif } int mv_gpio_configure(device_t dev, uint32_t pin, uint32_t flags, uint32_t mask) { int error; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); error = 0; if (pin >= sc->pin_num) return (EINVAL); /* check 
flags consistency */ if (((flags & mask) & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) == (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) return (EINVAL); if (mask & MV_GPIO_IN_DEBOUNCE) { error = mv_gpio_debounce_prepare(dev, pin); if (error != 0) return (error); } MV_GPIO_LOCK(); if ((mask & flags) & GPIO_PIN_INPUT) mv_gpio_out_en(dev, pin, 0); if ((mask & flags) & GPIO_PIN_OUTPUT) { if ((flags & mask) & GPIO_PIN_OPENDRAIN) mv_gpio_value_set(dev, pin, 0); else mv_gpio_value_set(dev, pin, 1); mv_gpio_out_en(dev, pin, 1); } if (mask & MV_GPIO_OUT_BLINK) mv_gpio_blink(dev, pin, flags & MV_GPIO_OUT_BLINK); if (mask & MV_GPIO_IN_POL_LOW) mv_gpio_polarity(dev, pin, flags & MV_GPIO_IN_POL_LOW, 0); if (mask & MV_GPIO_IN_DEBOUNCE) { error = mv_gpio_debounce_setup(dev, pin); if (error) { MV_GPIO_UNLOCK(); return (error); } } sc->gpio_setup[pin].gp_flags &= ~(mask); sc->gpio_setup[pin].gp_flags |= (flags & mask); MV_GPIO_UNLOCK(); return (0); } static void mv_gpio_double_edge_init(device_t dev, int pin) { uint8_t raw_read; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); raw_read = (mv_gpio_value_get(dev, pin, 1) ? 
1 : 0); if (raw_read) mv_gpio_polarity(dev, pin, 1, 0); else mv_gpio_polarity(dev, pin, 0, 0); } static int mv_gpio_debounce_setup(device_t dev, int pin) { struct callout *c; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); c = sc->debounce_callouts[pin]; if (c == NULL) return (ENXIO); if (callout_active(c)) callout_deactivate(c); callout_stop(c); return (0); } static int mv_gpio_debounce_prepare(device_t dev, int pin) { struct callout *c; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); c = sc->debounce_callouts[pin]; if (c == NULL) { c = (struct callout *)malloc(sizeof(struct callout), M_DEVBUF, M_WAITOK); sc->debounce_callouts[pin] = c; if (c == NULL) return (ENOMEM); callout_init(c, 1); } return (0); } static int mv_gpio_debounce_init(device_t dev, int pin) { uint8_t raw_read; int *cnt; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); cnt = &sc->debounce_counters[pin]; raw_read = (mv_gpio_value_get(dev, pin, 1) ? 
1 : 0); if (raw_read) { mv_gpio_polarity(dev, pin, 1, 0); *cnt = DEBOUNCE_HI_LO_MS / DEBOUNCE_CHECK_MS; } else { mv_gpio_polarity(dev, pin, 0, 0); *cnt = DEBOUNCE_LO_HI_MS / DEBOUNCE_CHECK_MS; } mv_gpio_debounced_state_set(dev, pin, raw_read); return (0); } static void mv_gpio_debounce_start(device_t dev, int pin) { struct callout *c; struct mv_gpio_pindev s = {dev, pin}; struct mv_gpio_pindev *sd; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); c = sc->debounce_callouts[pin]; if (c == NULL) { mv_gpio_int_ack(&s); return; } if (callout_pending(c) || callout_active(c)) { mv_gpio_int_ack(&s); return; } sd = (struct mv_gpio_pindev *)malloc(sizeof(struct mv_gpio_pindev), M_DEVBUF, M_WAITOK); if (sd == NULL) { mv_gpio_int_ack(&s); return; } sd->pin = pin; sd->dev = dev; callout_reset(c, DEBOUNCE_CHECK_TICKS, mv_gpio_debounce, sd); } static void mv_gpio_debounce(void *arg) { uint8_t raw_read, last_state; int pin; device_t dev; int *debounce_counter; struct mv_gpio_softc *sc; struct mv_gpio_pindev *s; s = (struct mv_gpio_pindev *)arg; dev = s->dev; pin = s->pin; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_LOCK(); raw_read = (mv_gpio_value_get(dev, pin, 1) ? 1 : 0); last_state = (mv_gpio_debounced_state_get(dev, pin) ? 
1 : 0); debounce_counter = &sc->debounce_counters[pin]; if (raw_read == last_state) { if (last_state) *debounce_counter = DEBOUNCE_HI_LO_MS / DEBOUNCE_CHECK_MS; else *debounce_counter = DEBOUNCE_LO_HI_MS / DEBOUNCE_CHECK_MS; callout_reset(sc->debounce_callouts[pin], DEBOUNCE_CHECK_TICKS, mv_gpio_debounce, arg); } else { *debounce_counter = *debounce_counter - 1; if (*debounce_counter != 0) callout_reset(sc->debounce_callouts[pin], DEBOUNCE_CHECK_TICKS, mv_gpio_debounce, arg); else { mv_gpio_debounced_state_set(dev, pin, raw_read); if (last_state) *debounce_counter = DEBOUNCE_HI_LO_MS / DEBOUNCE_CHECK_MS; else *debounce_counter = DEBOUNCE_LO_HI_MS / DEBOUNCE_CHECK_MS; if (((sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) && (raw_read == 0)) || (((sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) == 0) && raw_read) || (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_IRQ_DOUBLE_EDGE)) mv_gpio_intr_handler(dev, pin); /* Toggle polarity for next edge. */ mv_gpio_polarity(dev, pin, 0, 1); free(arg, M_DEVBUF); callout_deactivate(sc->debounce_callouts[pin]); } } MV_GPIO_UNLOCK(); } static void mv_gpio_debounced_state_set(device_t dev, int pin, uint8_t new_state) { uint32_t *old_state; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); if (pin >= GPIO_PINS_PER_REG) { old_state = &sc->debounced_state_hi; pin -= GPIO_PINS_PER_REG; } else old_state = &sc->debounced_state_lo; if (new_state) *old_state |= (1 << pin); else *old_state &= ~(1 << pin); } static uint32_t mv_gpio_debounced_state_get(device_t dev, int pin) { uint32_t *state; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); if (pin >= GPIO_PINS_PER_REG) { state = &sc->debounced_state_hi; pin -= GPIO_PINS_PER_REG; } else state = &sc->debounced_state_lo; return (*state & (1 << pin)); } void mv_gpio_out(device_t dev, uint32_t pin, uint8_t val, uint8_t enable) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc 
*)device_get_softc(dev); MV_GPIO_LOCK(); mv_gpio_value_set(dev, pin, val); mv_gpio_out_en(dev, pin, enable); MV_GPIO_UNLOCK(); } uint8_t mv_gpio_in(device_t dev, uint32_t pin) { uint8_t state; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_DEBOUNCE) { if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) state = (mv_gpio_debounced_state_get(dev, pin) ? 0 : 1); else state = (mv_gpio_debounced_state_get(dev, pin) ? 1 : 0); } else if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_IRQ_DOUBLE_EDGE) { if (sc->gpio_setup[pin].gp_flags & MV_GPIO_IN_POL_LOW) state = (mv_gpio_value_get(dev, pin, 1) ? 0 : 1); else state = (mv_gpio_value_get(dev, pin, 1) ? 1 : 0); } else state = (mv_gpio_value_get(dev, pin, 0) ? 1 : 0); return (state); } static uint32_t mv_gpio_reg_read(device_t dev, uint32_t reg) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); return (bus_space_read_4(sc->bst, sc->bsh, reg)); } static void mv_gpio_reg_write(device_t dev, uint32_t reg, uint32_t val) { struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); bus_space_write_4(sc->bst, sc->bsh, reg, val); } static void mv_gpio_reg_set(device_t dev, uint32_t reg, uint32_t pin) { uint32_t reg_val; reg_val = mv_gpio_reg_read(dev, reg); reg_val |= GPIO(pin); mv_gpio_reg_write(dev, reg, reg_val); } static void mv_gpio_reg_clear(device_t dev, uint32_t reg, uint32_t pin) { uint32_t reg_val; reg_val = mv_gpio_reg_read(dev, reg); reg_val &= ~(GPIO(pin)); mv_gpio_reg_write(dev, reg, reg_val); } static void mv_gpio_out_en(device_t dev, uint32_t pin, uint8_t enable) { uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_DATA_OUT_EN_CTRL; if (enable) mv_gpio_reg_clear(dev, reg, pin); else mv_gpio_reg_set(dev, reg, pin); } static void mv_gpio_blink(device_t dev, uint32_t pin, uint8_t enable) { 
uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_BLINK_EN; if (enable) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } static void mv_gpio_polarity(device_t dev, uint32_t pin, uint8_t enable, uint8_t toggle) { uint32_t reg, reg_val; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_DATA_IN_POLAR; if (toggle) { reg_val = mv_gpio_reg_read(dev, reg) & GPIO(pin); if (reg_val) mv_gpio_reg_clear(dev, reg, pin); else mv_gpio_reg_set(dev, reg, pin); } else if (enable) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } static void mv_gpio_level(device_t dev, uint32_t pin, uint8_t enable) { uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_INT_LEV_MASK; if (enable) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } static void mv_gpio_edge(device_t dev, uint32_t pin, uint8_t enable) { uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return; reg = GPIO_INT_EDGE_MASK; if (enable) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } static void mv_gpio_int_ack(struct mv_gpio_pindev *s) { uint32_t reg, pin; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(s->dev); pin = s->pin; if (pin >= sc->pin_num) return; reg = GPIO_INT_CAUSE; mv_gpio_reg_clear(s->dev, reg, pin); } static uint32_t mv_gpio_value_get(device_t dev, uint32_t pin, uint8_t exclude_polar) { uint32_t reg, polar_reg, reg_val, polar_reg_val; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); if (pin >= sc->pin_num) return (0); reg = GPIO_DATA_IN; polar_reg = GPIO_DATA_IN_POLAR; reg_val = mv_gpio_reg_read(dev, reg); if (exclude_polar) { polar_reg_val = mv_gpio_reg_read(dev, 
polar_reg); return ((reg_val & GPIO(pin)) ^ (polar_reg_val & GPIO(pin))); } else return (reg_val & GPIO(pin)); } static void mv_gpio_value_set(device_t dev, uint32_t pin, uint8_t val) { uint32_t reg; struct mv_gpio_softc *sc; sc = (struct mv_gpio_softc *)device_get_softc(dev); MV_GPIO_ASSERT_LOCKED(); if (pin >= sc->pin_num) return; reg = GPIO_DATA_OUT; if (val) mv_gpio_reg_set(dev, reg, pin); else mv_gpio_reg_clear(dev, reg, pin); } /* * GPIO interface methods */ static int mv_gpio_pin_max(device_t dev, int *maxpin) { struct mv_gpio_softc *sc; if (maxpin == NULL) return (EINVAL); sc = device_get_softc(dev); *maxpin = sc->pin_num; return (0); } static int mv_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { struct mv_gpio_softc *sc = device_get_softc(dev); if (caps == NULL) return (EINVAL); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); *caps = sc->gpio_setup[pin].gp_caps; MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct mv_gpio_softc *sc = device_get_softc(dev); if (flags == NULL) return (EINVAL); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); *flags = sc->gpio_setup[pin].gp_flags; MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct mv_gpio_softc *sc = device_get_softc(dev); if (name == NULL) return (EINVAL); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); memcpy(name, sc->gpio_setup[pin].gp_name, GPIOMAXNAME); MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { int ret; struct mv_gpio_softc *sc = device_get_softc(dev); if (pin >= sc->pin_num) return (EINVAL); /* Check for unwanted flags. 
*/ if ((flags & sc->gpio_setup[pin].gp_caps) != flags) return (EINVAL); ret = mv_gpio_configure(dev, pin, flags, ~0); return (ret); } static int mv_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { struct mv_gpio_softc *sc = device_get_softc(dev); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); mv_gpio_value_set(dev, pin, value); MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *value) { struct mv_gpio_softc *sc = device_get_softc(dev); if (value == NULL) return (EINVAL); if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); *value = mv_gpio_in(dev, pin); MV_GPIO_UNLOCK(); return (0); } static int mv_gpio_pin_toggle(device_t dev, uint32_t pin) { struct mv_gpio_softc *sc = device_get_softc(dev); uint32_t value; if (pin >= sc->pin_num) return (EINVAL); MV_GPIO_LOCK(); value = mv_gpio_in(dev, pin); value = (~value) & 1; mv_gpio_value_set(dev, pin, value); MV_GPIO_UNLOCK(); return (0); } static device_t mv_gpio_get_bus(device_t dev) { struct mv_gpio_softc *sc = device_get_softc(dev); return (sc->sc_busdev); } static int mv_gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells, pcell_t *gpios, uint32_t *pin, uint32_t *flags) { struct mv_gpio_softc *sc = device_get_softc(bus); if (gpios[0] >= sc->pin_num) return (EINVAL); *pin = gpios[0]; *flags = gpios[1]; mv_gpio_configure(bus, *pin, *flags, ~0); return (0); } diff --git a/sys/mips/atheros/apb.c b/sys/mips/atheros/apb.c index 5098af3abbbb..37819fac2e9e 100644 --- a/sys/mips/atheros/apb.c +++ b/sys/mips/atheros/apb.c @@ -1,539 +1,539 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define APB_INTR_PMC 5 #undef APB_DEBUG #ifdef APB_DEBUG #define dprintf printf #else #define dprintf(x, arg...) 
#endif /* APB_DEBUG */ #define DEVTOAPB(dev) ((struct apb_ivar *) device_get_ivars(dev)) static int apb_activate_resource(device_t, device_t, int, int, struct resource *); static device_t apb_add_child(device_t, u_int, const char *, int); static struct resource * apb_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int apb_attach(device_t); static int apb_deactivate_resource(device_t, device_t, int, int, struct resource *); static struct resource_list * apb_get_resource_list(device_t, device_t); static void apb_hinted_child(device_t, const char *, int); static int apb_filter(void *); static int apb_probe(device_t); static int apb_release_resource(device_t, device_t, int, int, struct resource *); static int apb_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int apb_teardown_intr(device_t, device_t, struct resource *, void *); static void apb_mask_irq(void *source) { unsigned int irq = (unsigned int)source; uint32_t reg; reg = ATH_READ_REG(AR71XX_MISC_INTR_MASK); ATH_WRITE_REG(AR71XX_MISC_INTR_MASK, reg & ~(1 << irq)); } static void apb_unmask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; reg = ATH_READ_REG(AR71XX_MISC_INTR_MASK); ATH_WRITE_REG(AR71XX_MISC_INTR_MASK, reg | (1 << irq)); } static int apb_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int apb_attach(device_t dev) { struct apb_softc *sc = device_get_softc(dev); int rid = 0; device_set_desc(dev, "APB Bus bridge"); sc->apb_mem_rman.rm_type = RMAN_ARRAY; sc->apb_mem_rman.rm_descr = "APB memory window"; if (rman_init(&sc->apb_mem_rman) != 0 || rman_manage_region(&sc->apb_mem_rman, AR71XX_APB_BASE, AR71XX_APB_BASE + AR71XX_APB_SIZE - 1) != 0) panic("apb_attach: failed to set up memory rman"); sc->apb_irq_rman.rm_type = RMAN_ARRAY; sc->apb_irq_rman.rm_descr = "APB IRQ"; if (rman_init(&sc->apb_irq_rman) != 0 || rman_manage_region(&sc->apb_irq_rman, 
APB_IRQ_BASE, APB_IRQ_END) != 0) panic("apb_attach: failed to set up IRQ rman"); if ((sc->sc_misc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return (ENXIO); } if ((bus_setup_intr(dev, sc->sc_misc_irq, INTR_TYPE_MISC, apb_filter, NULL, sc, &sc->sc_misc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return (ENXIO); } bus_generic_probe(dev); bus_enumerate_hinted_children(dev); bus_generic_attach(dev); /* * Unmask performance counter IRQ */ apb_unmask_irq((void*)APB_INTR_PMC); sc->sc_intr_counter[APB_INTR_PMC] = mips_intrcnt_create("apb irq5: pmc"); return (0); } static struct resource * apb_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct apb_softc *sc = device_get_softc(bus); struct apb_ivar *ivar = device_get_ivars(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int isdefault, needactivate, passthrough; isdefault = (RMAN_IS_DEFAULT_RANGE(start, end)); needactivate = flags & RF_ACTIVE; /* * Pass memory requests to nexus device */ passthrough = (device_get_parent(child) != bus); rle = NULL; dprintf("%s: entry (%p, %p, %d, %d, %p, %p, %jd, %d)\n", __func__, bus, child, type, *rid, (void *)(intptr_t)start, (void *)(intptr_t)end, count, flags); if (passthrough) return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags)); /* * If this is an allocation of the "default" range for a given RID, * and we know what the resources for this device are (ie. they aren't * maintained by a child bus), then work out the start/end values. 
*/ if (isdefault) { rle = resource_list_find(&ivar->resources, type, *rid); if (rle == NULL) { return (NULL); } if (rle->res != NULL) { panic("%s: resource entry is busy", __func__); } start = rle->start; end = rle->end; count = rle->count; dprintf("%s: default resource (%p, %p, %ld)\n", __func__, (void *)(intptr_t)start, (void *)(intptr_t)end, count); } switch (type) { case SYS_RES_IRQ: rm = &sc->apb_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->apb_mem_rman; break; default: printf("%s: unknown resource type %d\n", __func__, type); return (0); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) { printf("%s: could not reserve resource\n", __func__); return (0); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { printf("%s: could not activate resource\n", __func__); rman_release_resource(rv); return (0); } } return (rv); } static int apb_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* XXX: should we mask/unmask IRQ here? */ return (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); } static int apb_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* XXX: should we mask/unmask IRQ here? 
*/ return (BUS_DEACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); } static int apb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_list *rl; struct resource_list_entry *rle; rl = apb_get_resource_list(dev, child); if (rl == NULL) return (EINVAL); rle = resource_list_find(rl, type, rid); if (rle == NULL) return (EINVAL); rman_release_resource(r); rle->res = NULL; return (0); } static int apb_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct apb_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq > APB_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, apb_mask_irq, apb_unmask_irq, NULL, NULL, "apb intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return (error); } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); apb_unmask_irq((void*)irq); return (0); } static int apb_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct apb_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > APB_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); apb_mask_irq((void*)irq); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irq] = NULL; return (result); } static int apb_filter(void *arg) { struct apb_softc *sc = arg; struct intr_event *event; uint32_t reg, irq; struct thread *td; struct trapframe *tf; reg = ATH_READ_REG(AR71XX_MISC_INTR_STATUS); for (irq = 
0; irq < APB_NIRQS; irq++) { if (reg & (1 << irq)) { switch (ar71xx_soc) { case AR71XX_SOC_AR7240: case AR71XX_SOC_AR7241: case AR71XX_SOC_AR7242: case AR71XX_SOC_AR9330: case AR71XX_SOC_AR9331: case AR71XX_SOC_AR9341: case AR71XX_SOC_AR9342: case AR71XX_SOC_AR9344: case AR71XX_SOC_QCA9533: case AR71XX_SOC_QCA9533_V2: case AR71XX_SOC_QCA9556: case AR71XX_SOC_QCA9558: /* ACK/clear the given interrupt */ ATH_WRITE_REG(AR71XX_MISC_INTR_STATUS, (1 << irq)); break; default: /* fallthrough */ break; } event = sc->sc_eventstab[irq]; /* always count interrupts; spurious or otherwise */ mips_intrcnt_inc(sc->sc_intr_counter[irq]); - if (!event || TAILQ_EMPTY(&event->ie_handlers)) { + if (!event || CK_SLIST_EMPTY(&event->ie_handlers)) { if (irq == APB_INTR_PMC) { td = PCPU_GET(curthread); tf = td->td_intr_frame; if (pmc_intr) (*pmc_intr)(tf); continue; } /* Ignore timer interrupts */ if (irq != 0 && irq != 8 && irq != 9 && irq != 10) printf("Stray APB IRQ %d\n", irq); continue; } intr_event_handle(event, PCPU_GET(curthread)->td_intr_frame); } } return (FILTER_HANDLED); } static void apb_hinted_child(device_t bus, const char *dname, int dunit) { device_t child; long maddr; int msize; int irq; int result; int mem_hints_count; child = BUS_ADD_CHILD(bus, 0, dname, dunit); /* * Set hard-wired resources for hinted child using * specific RIDs. 
*/ mem_hints_count = 0; if (resource_long_value(dname, dunit, "maddr", &maddr) == 0) mem_hints_count++; if (resource_int_value(dname, dunit, "msize", &msize) == 0) mem_hints_count++; /* check if all info for mem resource has been provided */ if ((mem_hints_count > 0) && (mem_hints_count < 2)) { printf("Either maddr or msize hint is missing for %s%d\n", dname, dunit); } else if (mem_hints_count) { result = bus_set_resource(child, SYS_RES_MEMORY, 0, maddr, msize); if (result != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } if (resource_int_value(dname, dunit, "irq", &irq) == 0) { result = bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1); if (result != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } } static device_t apb_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct apb_ivar *ivar; ivar = malloc(sizeof(struct apb_ivar), M_DEVBUF, M_WAITOK | M_ZERO); resource_list_init(&ivar->resources); child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) { printf("Can't add child %s%d ordered\n", name, unit); return (0); } device_set_ivars(child, ivar); return (child); } /* * Helper routine for bus_generic_rl_get_resource/bus_generic_rl_set_resource * Provides pointer to resource_list for these routines */ static struct resource_list * apb_get_resource_list(device_t dev, device_t child) { struct apb_ivar *ivar; ivar = device_get_ivars(child); return (&(ivar->resources)); } static int apb_print_all_resources(device_t dev) { struct apb_ivar *ndev = DEVTOAPB(dev); struct resource_list *rl = &ndev->resources; int retval = 0; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); return (retval); } static int apb_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += apb_print_all_resources(child); 
if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf(" on %s\n", device_get_nameunit(bus)); return (retval); } static device_method_t apb_methods[] = { DEVMETHOD(bus_activate_resource, apb_activate_resource), DEVMETHOD(bus_add_child, apb_add_child), DEVMETHOD(bus_alloc_resource, apb_alloc_resource), DEVMETHOD(bus_deactivate_resource, apb_deactivate_resource), DEVMETHOD(bus_get_resource_list, apb_get_resource_list), DEVMETHOD(bus_hinted_child, apb_hinted_child), DEVMETHOD(bus_release_resource, apb_release_resource), DEVMETHOD(bus_setup_intr, apb_setup_intr), DEVMETHOD(bus_teardown_intr, apb_teardown_intr), DEVMETHOD(device_attach, apb_attach), DEVMETHOD(device_probe, apb_probe), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_print_child, apb_print_child), DEVMETHOD_END }; static driver_t apb_driver = { "apb", apb_methods, sizeof(struct apb_softc), }; static devclass_t apb_devclass; DRIVER_MODULE(apb, nexus, apb_driver, apb_devclass, 0, 0); diff --git a/sys/mips/atheros/ar531x/apb.c b/sys/mips/atheros/ar531x/apb.c index cbf6776c6408..6e224a5e9305 100644 --- a/sys/mips/atheros/ar531x/apb.c +++ b/sys/mips/atheros/ar531x/apb.c @@ -1,756 +1,756 @@ /*- * Copyright (c) 2016, Hiroki Mori * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include "opt_ar531x.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INTRNG #include #else #include #endif #ifdef INTRNG #include "pic_if.h" #define PIC_INTR_ISRC(sc, irq) (&(sc)->pic_irqs[(irq)].isrc) #endif #include #include #include #include #ifdef AR531X_APB_DEBUG #define dprintf printf #else #define dprintf(x, arg...) 
#endif /* AR531X_APB_DEBUG */ static int apb_activate_resource(device_t, device_t, int, int, struct resource *); static device_t apb_add_child(device_t, u_int, const char *, int); static struct resource * apb_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int apb_attach(device_t); static int apb_deactivate_resource(device_t, device_t, int, int, struct resource *); static struct resource_list * apb_get_resource_list(device_t, device_t); static void apb_hinted_child(device_t, const char *, int); static int apb_filter(void *); static int apb_probe(device_t); static int apb_release_resource(device_t, device_t, int, int, struct resource *); #ifndef INTRNG static int apb_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int apb_teardown_intr(device_t, device_t, struct resource *, void *); #endif static void apb_mask_irq(void *source) { unsigned int irq = (unsigned int)source; uint32_t reg; if(ar531x_soc >= AR531X_SOC_AR5315) { reg = ATH_READ_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTMASK); ATH_WRITE_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTMASK, reg & ~(1 << irq)); } else { reg = ATH_READ_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTMASK); ATH_WRITE_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTMASK, reg & ~(1 << irq)); } } static void apb_unmask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; if(ar531x_soc >= AR531X_SOC_AR5315) { reg = ATH_READ_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTMASK); ATH_WRITE_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTMASK, reg | (1 << irq)); } else { reg = ATH_READ_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTMASK); ATH_WRITE_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTMASK, reg | (1 << irq)); } } #ifdef INTRNG static int apb_pic_register_isrcs(struct apb_softc *sc) { int error; uint32_t irq; struct intr_irqsrc *isrc; const char *name; name = 
device_get_nameunit(sc->apb_dev); for (irq = 0; irq < APB_NIRQS; irq++) { sc->pic_irqs[irq].irq = irq; isrc = PIC_INTR_ISRC(sc, irq); error = intr_isrc_register(isrc, sc->apb_dev, 0, "%s", name); if (error != 0) { /* XXX call intr_isrc_deregister */ device_printf(sc->apb_dev, "%s failed", __func__); return (error); } } return (0); } static inline intptr_t pic_xref(device_t dev) { return (0); } #endif static int apb_probe(device_t dev) { #ifdef INTRNG device_set_desc(dev, "APB Bus bridge INTRNG"); #else device_set_desc(dev, "APB Bus bridge"); #endif return (0); } static int apb_attach(device_t dev) { struct apb_softc *sc = device_get_softc(dev); #ifdef INTRNG intptr_t xref = pic_xref(dev); int miscirq; #else int rid = 0; #endif sc->apb_dev = dev; sc->apb_mem_rman.rm_type = RMAN_ARRAY; sc->apb_mem_rman.rm_descr = "APB memory window"; if(ar531x_soc >= AR531X_SOC_AR5315) { if (rman_init(&sc->apb_mem_rman) != 0 || rman_manage_region(&sc->apb_mem_rman, AR5315_APB_BASE, AR5315_APB_BASE + AR5315_APB_SIZE - 1) != 0) panic("apb_attach: failed to set up memory rman"); } else { if (rman_init(&sc->apb_mem_rman) != 0 || rman_manage_region(&sc->apb_mem_rman, AR5312_APB_BASE, AR5312_APB_BASE + AR5312_APB_SIZE - 1) != 0) panic("apb_attach: failed to set up memory rman"); } sc->apb_irq_rman.rm_type = RMAN_ARRAY; sc->apb_irq_rman.rm_descr = "APB IRQ"; if (rman_init(&sc->apb_irq_rman) != 0 || rman_manage_region(&sc->apb_irq_rman, APB_IRQ_BASE, APB_IRQ_END) != 0) panic("apb_attach: failed to set up IRQ rman"); #ifndef INTRNG if ((sc->sc_misc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return (ENXIO); } if ((bus_setup_intr(dev, sc->sc_misc_irq, INTR_TYPE_MISC, apb_filter, NULL, sc, &sc->sc_misc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return (ENXIO); } #else /* Register the interrupts */ if (apb_pic_register_isrcs(sc) != 0) { device_printf(dev, 
"could not register PIC ISRCs\n"); return (ENXIO); } /* * Now, when everything is initialized, it's right time to * register interrupt controller to interrupt framefork. */ if (intr_pic_register(dev, xref) == NULL) { device_printf(dev, "could not register PIC\n"); return (ENXIO); } if(ar531x_soc >= AR531X_SOC_AR5315) { miscirq = AR5315_CPU_IRQ_MISC; } else { miscirq = AR5312_IRQ_MISC; } cpu_establish_hardintr("aric", apb_filter, NULL, sc, miscirq, INTR_TYPE_MISC, NULL); #endif /* mask all misc interrupt */ if(ar531x_soc >= AR531X_SOC_AR5315) { ATH_WRITE_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTMASK, 0); } else { ATH_WRITE_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTMASK, 0); } bus_generic_probe(dev); bus_enumerate_hinted_children(dev); bus_generic_attach(dev); return (0); } static struct resource * apb_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct apb_softc *sc = device_get_softc(bus); struct apb_ivar *ivar = device_get_ivars(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int isdefault, needactivate, passthrough; isdefault = (RMAN_IS_DEFAULT_RANGE(start, end)); needactivate = flags & RF_ACTIVE; /* * Pass memory requests to nexus device */ passthrough = (device_get_parent(child) != bus); rle = NULL; dprintf("%s: entry (%p, %p, %d, %d, %p, %p, %jd, %d)\n", __func__, bus, child, type, *rid, (void *)(intptr_t)start, (void *)(intptr_t)end, count, flags); if (passthrough) return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags)); /* * If this is an allocation of the "default" range for a given RID, * and we know what the resources for this device are (ie. they aren't * maintained by a child bus), then work out the start/end values. 
*/ if (isdefault) { rle = resource_list_find(&ivar->resources, type, *rid); if (rle == NULL) { return (NULL); } if (rle->res != NULL) { panic("%s: resource entry is busy", __func__); } start = rle->start; end = rle->end; count = rle->count; dprintf("%s: default resource (%p, %p, %jd)\n", __func__, (void *)(intptr_t)start, (void *)(intptr_t)end, count); } switch (type) { case SYS_RES_IRQ: rm = &sc->apb_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->apb_mem_rman; break; default: printf("%s: unknown resource type %d\n", __func__, type); return (0); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) { printf("%s: could not reserve resource %d\n", __func__, type); return (0); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { printf("%s: could not activate resource\n", __func__); rman_release_resource(rv); return (0); } } return (rv); } static int apb_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* XXX: should we mask/unmask IRQ here? */ return (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); } static int apb_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* XXX: should we mask/unmask IRQ here? 
*/ return (BUS_DEACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); } static int apb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_list *rl; struct resource_list_entry *rle; rl = apb_get_resource_list(dev, child); if (rl == NULL) return (EINVAL); rle = resource_list_find(rl, type, rid); if (rle == NULL) return (EINVAL); rman_release_resource(r); rle->res = NULL; return (0); } static int apb_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct apb_softc *sc = device_get_softc(bus); int error; int irq; #ifndef INTRNG struct intr_event *event; #endif #ifdef INTRNG struct intr_irqsrc *isrc; const char *name; if ((rman_get_flags(ires) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; irq = rman_get_start(ires); isrc = PIC_INTR_ISRC(sc, irq); if(isrc->isrc_event == 0) { error = intr_event_create(&isrc->isrc_event, (void *)irq, 0, irq, apb_mask_irq, apb_unmask_irq, NULL, NULL, "apb intr%d:", irq); if(error != 0) return(error); } name = device_get_nameunit(child); error = intr_event_add_handler(isrc->isrc_event, name, filt, handler, arg, intr_priority(flags), flags, cookiep); return(error); #else irq = rman_get_start(ires); if (irq > APB_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, apb_mask_irq, apb_unmask_irq, NULL, NULL, "apb intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return (error); } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); apb_unmask_irq((void*)irq); return (0); #endif } #ifndef INTRNG static int apb_teardown_intr(device_t dev, device_t child, struct resource 
*ires, void *cookie) { #ifdef INTRNG return (intr_teardown_irq(child, ires, cookie)); #else struct apb_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > APB_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); apb_mask_irq((void*)irq); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irq] = NULL; return (result); #endif } static int apb_filter(void *arg) { struct apb_softc *sc = arg; struct intr_event *event; uint32_t reg, irq; if(ar531x_soc >= AR531X_SOC_AR5315) reg = ATH_READ_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTSTAT); else reg = ATH_READ_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTSTAT); for (irq = 0; irq < APB_NIRQS; irq++) { if (reg & (1 << irq)) { if(ar531x_soc >= AR531X_SOC_AR5315) { ATH_WRITE_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTSTAT, reg & ~(1 << irq)); } else { ATH_WRITE_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTSTAT, reg & ~(1 << irq)); } event = sc->sc_eventstab[irq]; - if (!event || TAILQ_EMPTY(&event->ie_handlers)) { + if (!event || CK_SLIST_EMPTY(&event->ie_handlers)) { if(irq == 1 && ar531x_soc < AR531X_SOC_AR5315) { ATH_READ_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_AHBPERR); ATH_READ_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_AHBDMAE); } /* Ignore non handle interrupts */ if (irq != 0 && irq != 6) printf("Stray APB IRQ %d\n", irq); continue; } intr_event_handle(event, PCPU_GET(curthread)->td_intr_frame); mips_intrcnt_inc(sc->sc_intr_counter[irq]); } } return (FILTER_HANDLED); } #else static int apb_filter(void *arg) { struct apb_softc *sc = arg; struct thread *td; uint32_t i, intr; td = curthread; /* Workaround: do not inflate intr nesting level */ td->td_intr_nesting_level--; if(ar531x_soc >= AR531X_SOC_AR5315) intr = ATH_READ_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTSTAT); else intr = ATH_READ_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTSTAT); while ((i = fls(intr)) != 0) { i--; 
intr &= ~(1u << i); if(i == 1 && ar531x_soc < AR531X_SOC_AR5315) { ATH_READ_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_AHBPERR); ATH_READ_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_AHBDMAE); } if (intr_isrc_dispatch(PIC_INTR_ISRC(sc, i), curthread->td_intr_frame) != 0) { device_printf(sc->apb_dev, "Stray interrupt %u detected\n", i); apb_mask_irq((void*)i); continue; } } KASSERT(i == 0, ("all interrupts handled")); td->td_intr_nesting_level++; return (FILTER_HANDLED); } #endif static void apb_hinted_child(device_t bus, const char *dname, int dunit) { device_t child; long maddr; int msize; int irq; int result; int mem_hints_count; child = BUS_ADD_CHILD(bus, 0, dname, dunit); /* * Set hard-wired resources for hinted child using * specific RIDs. */ mem_hints_count = 0; if (resource_long_value(dname, dunit, "maddr", &maddr) == 0) mem_hints_count++; if (resource_int_value(dname, dunit, "msize", &msize) == 0) mem_hints_count++; /* check if all info for mem resource has been provided */ if ((mem_hints_count > 0) && (mem_hints_count < 2)) { printf("Either maddr or msize hint is missing for %s%d\n", dname, dunit); } else if (mem_hints_count) { result = bus_set_resource(child, SYS_RES_MEMORY, 0, maddr, msize); if (result != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } if (resource_int_value(dname, dunit, "irq", &irq) == 0) { result = bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1); if (result != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } } static device_t apb_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct apb_ivar *ivar; ivar = malloc(sizeof(struct apb_ivar), M_DEVBUF, M_WAITOK | M_ZERO); if (ivar == NULL) { printf("Failed to allocate ivar\n"); return (0); } resource_list_init(&ivar->resources); child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) { printf("Can't add child %s%d ordered\n", name, unit); return (0); } device_set_ivars(child, ivar); return (child); } /* * Helper 
routine for bus_generic_rl_get_resource/bus_generic_rl_set_resource * Provides pointer to resource_list for these routines */ static struct resource_list * apb_get_resource_list(device_t dev, device_t child) { struct apb_ivar *ivar; ivar = device_get_ivars(child); return (&(ivar->resources)); } #ifdef INTRNG static void apb_pic_enable_intr(device_t dev, struct intr_irqsrc *isrc) { u_int irq; irq = ((struct apb_pic_irqsrc *)isrc)->irq; apb_unmask_irq((void*)irq); } static void apb_pic_disable_intr(device_t dev, struct intr_irqsrc *isrc) { u_int irq; irq = ((struct apb_pic_irqsrc *)isrc)->irq; apb_mask_irq((void*)irq); } static void apb_pic_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { apb_pic_disable_intr(dev, isrc); } static void apb_pic_post_ithread(device_t dev, struct intr_irqsrc *isrc) { apb_pic_enable_intr(dev, isrc); } static void apb_pic_post_filter(device_t dev, struct intr_irqsrc *isrc) { uint32_t reg, irq; irq = ((struct apb_pic_irqsrc *)isrc)->irq; if(ar531x_soc >= AR531X_SOC_AR5315) { reg = ATH_READ_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTSTAT); ATH_WRITE_REG(AR5315_SYSREG_BASE + AR5315_SYSREG_MISC_INTSTAT, reg & ~(1 << irq)); } else { reg = ATH_READ_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTSTAT); ATH_WRITE_REG(AR5312_SYSREG_BASE + AR5312_SYSREG_MISC_INTSTAT, reg & ~(1 << irq)); } } static int apb_pic_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { return (ENOTSUP); } #endif static device_method_t apb_methods[] = { DEVMETHOD(bus_activate_resource, apb_activate_resource), DEVMETHOD(bus_add_child, apb_add_child), DEVMETHOD(bus_alloc_resource, apb_alloc_resource), DEVMETHOD(bus_deactivate_resource, apb_deactivate_resource), DEVMETHOD(bus_get_resource_list, apb_get_resource_list), DEVMETHOD(bus_hinted_child, apb_hinted_child), DEVMETHOD(bus_release_resource, apb_release_resource), DEVMETHOD(device_attach, apb_attach), DEVMETHOD(device_probe, apb_probe), DEVMETHOD(bus_get_resource, 
bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), #ifdef INTRNG DEVMETHOD(pic_disable_intr, apb_pic_disable_intr), DEVMETHOD(pic_enable_intr, apb_pic_enable_intr), DEVMETHOD(pic_map_intr, apb_pic_map_intr), DEVMETHOD(pic_post_filter, apb_pic_post_filter), DEVMETHOD(pic_post_ithread, apb_pic_post_ithread), DEVMETHOD(pic_pre_ithread, apb_pic_pre_ithread), // DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), #else DEVMETHOD(bus_teardown_intr, apb_teardown_intr), #endif DEVMETHOD(bus_setup_intr, apb_setup_intr), DEVMETHOD_END }; static driver_t apb_driver = { "apb", apb_methods, sizeof(struct apb_softc), }; static devclass_t apb_devclass; EARLY_DRIVER_MODULE(apb, nexus, apb_driver, apb_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/mips/atheros/ar71xx_pci.c b/sys/mips/atheros/ar71xx_pci.c index 88e0f60093a8..9ff605acb971 100644 --- a/sys/mips/atheros/ar71xx_pci.c +++ b/sys/mips/atheros/ar71xx_pci.c @@ -1,707 +1,707 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #include #include #ifdef AR71XX_ATH_EEPROM #include #endif /* AR71XX_ATH_EEPROM */ #undef AR71XX_PCI_DEBUG #ifdef AR71XX_PCI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif struct mtx ar71xx_pci_mtx; MTX_SYSINIT(ar71xx_pci_mtx, &ar71xx_pci_mtx, "ar71xx PCI space mutex", MTX_SPIN); struct ar71xx_pci_softc { device_t sc_dev; int sc_busno; int sc_baseslot; struct rman sc_mem_rman; struct rman sc_irq_rman; struct intr_event *sc_eventstab[AR71XX_PCI_NIRQS]; mips_intrcnt_t sc_intr_counter[AR71XX_PCI_NIRQS]; struct resource *sc_irq; void *sc_ih; }; static int ar71xx_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int ar71xx_pci_teardown_intr(device_t, device_t, struct resource *, void *); static int ar71xx_pci_intr(void *); static void ar71xx_pci_mask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; /* XXX is the PCI lock required here? 
/*
 * Build the byte-enable bitmask for a config-space access: a 0 bit marks a
 * byte of interest, a 1 bit marks a byte to ignore.  E.g. reading 1 byte
 * from register 7 yields 0111.  Only 1-, 2- and 4-byte (word-multiple)
 * widths are legal; anything else panics.
 */
static uint32_t
ar71xx_get_bytes_to_read(int reg, int bytes)
{
	uint32_t ignore_mask;

	switch (bytes % 4) {
	case 0:
		/* Whole-word access: every byte is of interest. */
		ignore_mask = 0;
		break;
	case 1:
		ignore_mask = (~(1 << (reg % 4))) & 0xf;
		break;
	case 2:
		ignore_mask = (~(3 << (reg % 4))) & 0xf;
		break;
	default:
		panic("%s: wrong combination", __func__);
	}

	return (ignore_mask);
}
/*
 * Read a PCI configuration register through the AR71xx controller.
 *
 * The hardware only performs 32-bit aligned accesses, so the full word is
 * fetched under the PCI spin mutex and the requested 1/2/4 bytes are then
 * extracted by shifting and masking.  Returns all-ones (-1) on a bus error,
 * matching standard PCI "no device" semantics.
 */
static uint32_t
ar71xx_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, int bytes)
{
	uint32_t word, lane_shift, width_mask;

	/* Accesses are 32-bit aligned; pick the byte lane after the read. */
	lane_shift = (reg & 3) * 8;

	/* Width mask applied after the shift. */
	switch (bytes) {
	case 1:
		width_mask = 0xff;
		break;
	case 2:
		width_mask = 0xffff;
		break;
	default:
		width_mask = 0xffffffff;
		break;
	}

	dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot,
	    func, reg, bytes);

	/* Serialize against all other config-space accessors. */
	mtx_lock_spin(&ar71xx_pci_mtx);
	if (ar71xx_pci_conf_setup(bus, slot, func, reg, bytes,
	    PCI_CONF_CMD_READ) == 0)
		word = ATH_READ_REG(AR71XX_PCI_CONF_READ_DATA);
	else
		word = -1;
	mtx_unlock_spin(&ar71xx_pci_mtx);

	/* Extract the requested bytes from the 32-bit word. */
	word = (word >> lane_shift) & width_mask;

	dprintf("%s: read 0x%x\n", __func__, word);

	return (word);
}
bytes, PCI_CONF_CMD_WRITE) == 0) ATH_WRITE_REG(AR71XX_PCI_CONF_WRITE_DATA, data); mtx_unlock_spin(&ar71xx_pci_mtx); } #ifdef AR71XX_ATH_EEPROM /* * Some embedded boards (eg AP94) have the MAC attached via PCI but they * don't have the MAC-attached EEPROM. The register initialisation * values and calibration data are stored in the on-board flash. * This routine initialises the NIC via the EEPROM register contents * before the probe/attach routines get a go at things. */ static void ar71xx_pci_fixup(device_t dev, u_int bus, u_int slot, u_int func, long flash_addr, int len) { uint16_t *cal_data = (uint16_t *) MIPS_PHYS_TO_KSEG1(flash_addr); uint32_t reg, val, bar0; if (bootverbose) device_printf(dev, "%s: flash_addr=%lx, cal_data=%p\n", __func__, flash_addr, cal_data); /* XXX check 0xa55a */ /* Save bar(0) address - just to flush bar(0) (SoC WAR) ? */ bar0 = ar71xx_pci_read_config(dev, bus, slot, func, PCIR_BAR(0), 4); ar71xx_pci_write_config(dev, bus, slot, func, PCIR_BAR(0), AR71XX_PCI_MEM_BASE, 4); val = ar71xx_pci_read_config(dev, bus, slot, func, PCIR_COMMAND, 2); val |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar71xx_pci_write_config(dev, bus, slot, func, PCIR_COMMAND, val, 2); cal_data += 3; while (*cal_data != 0xffff) { reg = *cal_data++; val = *cal_data++; val |= (*cal_data++) << 16; if (bootverbose) printf(" reg: %x, val=%x\n", reg, val); /* Write eeprom fixup data to device memory */ ATH_WRITE_REG(AR71XX_PCI_MEM_BASE + reg, val); DELAY(100); } val = ar71xx_pci_read_config(dev, bus, slot, func, PCIR_COMMAND, 2); val &= ~(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar71xx_pci_write_config(dev, bus, slot, func, PCIR_COMMAND, val, 2); /* Write the saved bar(0) address */ ar71xx_pci_write_config(dev, bus, slot, func, PCIR_BAR(0), bar0, 4); } static void ar71xx_pci_slot_fixup(device_t dev, u_int bus, u_int slot, u_int func) { long int flash_addr; char buf[64]; int size; /* * Check whether the given slot has a hint to poke. 
*/ if (bootverbose) device_printf(dev, "%s: checking dev %s, %d/%d/%d\n", __func__, device_get_nameunit(dev), bus, slot, func); snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_addr", bus, slot, func); if (resource_long_value(device_get_name(dev), device_get_unit(dev), buf, &flash_addr) == 0) { snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_size", bus, slot, func); if (resource_int_value(device_get_name(dev), device_get_unit(dev), buf, &size) != 0) { device_printf(dev, "%s: missing hint '%s', aborting EEPROM\n", __func__, buf); return; } device_printf(dev, "found EEPROM at 0x%lx on %d.%d.%d\n", flash_addr, bus, slot, func); ar71xx_pci_fixup(dev, bus, slot, func, flash_addr, size); ar71xx_pci_slot_create_eeprom_firmware(dev, bus, slot, func, flash_addr, size); } } #endif /* AR71XX_ATH_EEPROM */ static int ar71xx_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int ar71xx_pci_attach(device_t dev) { int rid = 0; struct ar71xx_pci_softc *sc = device_get_softc(dev); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "ar71xx PCI memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, AR71XX_PCI_MEM_BASE, AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1) != 0) { panic("ar71xx_pci_attach: failed to set up I/O rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "ar71xx PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, AR71XX_PCI_IRQ_START, AR71XX_PCI_IRQ_END) != 0) panic("ar71xx_pci_attach: failed to set up IRQ rman"); /* * Check if there is a base slot hint. Otherwise use default value. 
*/ if (resource_int_value(device_get_name(dev), device_get_unit(dev), "baseslot", &sc->sc_baseslot) != 0) { device_printf(dev, "%s: missing hint '%s', default to AR71XX_PCI_BASE_SLOT\n", __func__, "baseslot"); sc->sc_baseslot = AR71XX_PCI_BASE_SLOT; } ATH_WRITE_REG(AR71XX_PCI_INTR_STATUS, 0); ATH_WRITE_REG(AR71XX_PCI_INTR_MASK, 0); /* Hook up our interrupt handler. */ if ((sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return ENXIO; } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, ar71xx_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return ENXIO; } /* reset PCI core and PCI bus */ ar71xx_device_stop(RST_RESET_PCI_CORE | RST_RESET_PCI_BUS); DELAY(100000); ar71xx_device_start(RST_RESET_PCI_CORE | RST_RESET_PCI_BUS); DELAY(100000); /* Init PCI windows */ ATH_WRITE_REG(AR71XX_PCI_WINDOW0, PCI_WINDOW0_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW1, PCI_WINDOW1_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW2, PCI_WINDOW2_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW3, PCI_WINDOW3_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW4, PCI_WINDOW4_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW5, PCI_WINDOW5_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW6, PCI_WINDOW6_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW7, PCI_WINDOW7_CONF_ADDR); DELAY(100000); mtx_lock_spin(&ar71xx_pci_mtx); ar71xx_pci_check_bus_error(); mtx_unlock_spin(&ar71xx_pci_mtx); /* Fixup internal PCI bridge */ ar71xx_pci_local_write(dev, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN, 4); #ifdef AR71XX_ATH_EEPROM /* * Hard-code a check for slot 17 and 18 - these are * the two PCI slots which may have a PCI device that * requires "fixing". 
*/ ar71xx_pci_slot_fixup(dev, 0, 17, 0); ar71xx_pci_slot_fixup(dev, 0, 18, 0); #endif /* AR71XX_ATH_EEPROM */ device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ar71xx_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ar71xx_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int ar71xx_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct ar71xx_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * ar71xx_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int ar71xx_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); if (!res) { switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rman_set_bustag(r, ar71xx_bus_space_pcimem); break; } } return (res); } static int ar71xx_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if 
/*
 * Filter-level dispatcher for the shared ar71xx PCI interrupt.
 *
 * Reads the controller status register, discards sources that are not
 * currently enabled in the mask register, and hands every remaining
 * pending IRQ to its registered intr_event.  Runs in filter (primary
 * interrupt) context, so only filter-safe work is done here.
 */
static int
ar71xx_pci_intr(void *arg)
{
	struct ar71xx_pci_softc *sc = arg;
	struct intr_event *event;
	uint32_t reg, irq, mask;

	reg = ATH_READ_REG(AR71XX_PCI_INTR_STATUS);
	mask = ATH_READ_REG(AR71XX_PCI_INTR_MASK);
	/*
	 * Handle only unmasked interrupts
	 */
	reg &= mask;
	for (irq = AR71XX_PCI_IRQ_START; irq <= AR71XX_PCI_IRQ_END; irq++) {
		if (reg & (1 << irq)) {
			event = sc->sc_eventstab[irq];
			/*
			 * No event, or an event with no handlers attached,
			 * means nobody claimed this source: report it as
			 * stray (except for IRQ 0 -- see below).
			 */
			if (!event || CK_SLIST_EMPTY(&event->ie_handlers)) {
				/* Ignore timer interrupts */
				if (irq != 0)
					printf("Stray IRQ %d\n", irq);
				continue;
			}

			/* Flush DDR FIFO for PCI/PCIe */
			ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_PCIE);

			/* TODO: frame instead of NULL? */
			intr_event_handle(event, NULL);
			mips_intrcnt_inc(sc->sc_intr_counter[irq]);
		}
	}

	return (FILTER_HANDLED);
}
b/sys/mips/atheros/ar724x_pci.c @@ -1,674 +1,674 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009, Oleksandr Tymoshenko * Copyright (c) 2011, Luiz Otavio O Souza. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #include #include #include #include #ifdef AR71XX_ATH_EEPROM #include #endif /* AR71XX_ATH_EEPROM */ #undef AR724X_PCI_DEBUG #ifdef AR724X_PCI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) 
/*
 * Write 1, 2 or 4 bytes into a controller register window.
 *
 * The hardware is addressed in aligned 32-bit words, so this performs a
 * read-modify-write of the containing word: the target byte lane is
 * cleared and the new field merged in.  rmb()/wmb() bracket the access to
 * keep it ordered with respect to surrounding device I/O.
 */
static void
ar724x_pci_write(uint32_t reg, uint32_t offset, uint32_t data, int bytes)
{
	uint32_t aligned, lane_shift, field_mask, word;

	/* Register access is 32-bit aligned. */
	aligned = reg + (offset & ~3);
	lane_shift = (offset & 3) * 8;
	field_mask = (bytes % 4) ? ((1 << (bytes * 8)) - 1) : 0xffffffff;

	rmb();
	word = ATH_READ_REG(aligned);
	word = (word & ~(field_mask << lane_shift)) |
	    ((data & field_mask) << lane_shift);
	ATH_WRITE_REG(aligned, word);
	wmb();

	dprintf("%s: %#x/%#x addr=%#x, data=%#x(%#x), bytes=%d\n", __func__,
	    reg, aligned, offset, data, word, bytes);
}
/*
 * Mask (disable) the single PCIe device interrupt, then acknowledge any
 * interrupt already pending so a stale status bit cannot fire again the
 * next time the source is unmasked.  Used as the intr_event pre-ithread /
 * disable hook.
 */
static void
ar724x_pci_mask_irq(void *source)
{
	uint32_t reg;
	/* The IRQ number is smuggled through the cookie pointer. */
	unsigned int irq = (unsigned int)source;

	/* XXX - Only one interrupt ? Only one device ? */
	if (irq != AR71XX_PCI_IRQ_START)
		return;

	/* Update the interrupt mask reg */
	reg = ATH_READ_REG(AR724X_PCI_INTR_MASK);
	ATH_WRITE_REG(AR724X_PCI_INTR_MASK, reg & ~AR724X_PCI_INTR_DEV0);

	/* Clear any pending interrupt */
	reg = ATH_READ_REG(AR724X_PCI_INTR_STATUS);
	ATH_WRITE_REG(AR724X_PCI_INTR_STATUS, reg | AR724X_PCI_INTR_DEV0);
}
ATH_WRITE_REG(AR724X_PCI_RESET, 0); DELAY(100); ATH_WRITE_REG(AR724X_PCI_RESET, 4); DELAY(100000); } if (ar71xx_soc == AR71XX_SOC_AR7240) reg = AR724X_PCI_APP_LTSSM_ENABLE; else reg = 0x1ffc1; ATH_WRITE_REG(AR724X_PCI_APP, reg); /* Flush write */ (void) ATH_READ_REG(AR724X_PCI_APP); DELAY(1000); reg = ATH_READ_REG(AR724X_PCI_RESET); if ((reg & AR724X_PCI_RESET_LINK_UP) == 0) { device_printf(dev, "no PCIe controller found\n"); return (ENXIO); } if (ar71xx_soc == AR71XX_SOC_AR7241 || ar71xx_soc == AR71XX_SOC_AR7242) { reg = ATH_READ_REG(AR724X_PCI_APP); reg |= (1 << 16); ATH_WRITE_REG(AR724X_PCI_APP, reg); } return (0); } #ifdef AR71XX_ATH_EEPROM #define AR5416_EEPROM_MAGIC 0xa55a /* * XXX - This should not be here ! And this looks like Atheros (if_ath) only. */ static void ar724x_pci_fixup(device_t dev, long flash_addr, int len) { uint32_t bar0, reg, val; uint16_t *cal_data = (uint16_t *) MIPS_PHYS_TO_KSEG1(flash_addr); #if 0 if (cal_data[0] != AR5416_EEPROM_MAGIC) { device_printf(dev, "%s: Invalid calibration data from 0x%x\n", __func__, (uintptr_t) flash_addr); return; } #endif /* Save bar(0) address - just to flush bar(0) (SoC WAR) ? 
*/ bar0 = ar724x_pci_read_config(dev, 0, 0, 0, PCIR_BAR(0), 4); /* Write temporary BAR0 to map the NIC into a fixed location */ /* XXX AR7240: 0xffff; 7241/7242/9344: 0x1000ffff */ ar724x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), AR71XX_PCI_MEM_BASE, 4); val = ar724x_pci_read_config(dev, 0, 0, 0, PCIR_COMMAND, 2); val |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar724x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, val, 2); /* set pointer to first reg address */ cal_data += 3; while (*cal_data != 0xffff) { reg = *cal_data++; val = *cal_data++; val |= (*cal_data++) << 16; if (bootverbose) printf(" 0x%08x=0x%08x\n", reg, val); /* Write eeprom fixup data to device memory */ ATH_WRITE_REG(AR71XX_PCI_MEM_BASE + reg, val); DELAY(100); } val = ar724x_pci_read_config(dev, 0, 0, 0, PCIR_COMMAND, 2); val &= ~(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar724x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, val, 2); /* Write the saved bar(0) address */ ar724x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), bar0, 4); } #undef AR5416_EEPROM_MAGIC /* * XXX This is (mostly) duplicated with ar71xx_pci.c. * It should at some point be fixed. */ static void ar724x_pci_slot_fixup(device_t dev) { long int flash_addr; char buf[64]; int size; /* * Check whether the given slot has a hint to poke. 
*/ if (bootverbose) device_printf(dev, "%s: checking dev %s, %d/%d/%d\n", __func__, device_get_nameunit(dev), 0, 0, 0); snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_addr", 0, 0, 0); if (resource_long_value(device_get_name(dev), device_get_unit(dev), buf, &flash_addr) == 0) { snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_size", 0, 0, 0); if (resource_int_value(device_get_name(dev), device_get_unit(dev), buf, &size) != 0) { device_printf(dev, "%s: missing hint '%s', aborting EEPROM\n", __func__, buf); return; } device_printf(dev, "found EEPROM at 0x%lx on %d.%d.%d\n", flash_addr, 0, 0, 0); ar724x_pci_fixup(dev, flash_addr, size); ar71xx_pci_slot_create_eeprom_firmware(dev, 0, 0, 0, flash_addr, size); } } #endif /* AR71XX_ATH_EEPROM */ static int ar724x_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int ar724x_pci_attach(device_t dev) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int rid = 0; sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "ar724x PCI memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, AR71XX_PCI_MEM_BASE, AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1) != 0) { panic("ar724x_pci_attach: failed to set up I/O rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "ar724x PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, AR71XX_PCI_IRQ_START, AR71XX_PCI_IRQ_END) != 0) panic("ar724x_pci_attach: failed to set up IRQ rman"); /* Disable interrupts */ ATH_WRITE_REG(AR724X_PCI_INTR_STATUS, 0); ATH_WRITE_REG(AR724X_PCI_INTR_MASK, 0); /* Hook up our interrupt handler. 
*/ if ((sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return (ENXIO); } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, ar724x_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return (ENXIO); } /* Reset PCIe core and PCIe PHY */ ar71xx_device_stop(AR724X_RESET_PCIE); ar71xx_device_stop(AR724X_RESET_PCIE_PHY); ar71xx_device_stop(AR724X_RESET_PCIE_PHY_SERIAL); DELAY(100); ar71xx_device_start(AR724X_RESET_PCIE_PHY_SERIAL); DELAY(100); ar71xx_device_start(AR724X_RESET_PCIE_PHY); ar71xx_device_start(AR724X_RESET_PCIE); if (ar724x_pci_setup(dev)) return (ENXIO); #ifdef AR71XX_ATH_EEPROM ar724x_pci_slot_fixup(dev); #endif /* AR71XX_ATH_EEPROM */ /* Fixup internal PCI bridge */ ar724x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN, 2); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ar724x_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ar71xx_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int ar724x_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct ar71xx_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * ar724x_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; 
break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int ar724x_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); if (!res) { switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rman_set_bustag(r, ar71xx_bus_space_pcimem); break; } } return (res); } static int ar724x_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, ar724x_pci_mask_irq, ar724x_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return error; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); ar724x_pci_unmask_irq((void*)irq); return (0); } static int ar724x_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); ar724x_pci_mask_irq((void*)irq); result = intr_event_remove_handler(cookie); if (!result) 
sc->sc_eventstab[irq] = NULL; return (result); } static int ar724x_pci_intr(void *arg) { struct ar71xx_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, mask; reg = ATH_READ_REG(AR724X_PCI_INTR_STATUS); mask = ATH_READ_REG(AR724X_PCI_INTR_MASK); /* * Handle only unmasked interrupts */ reg &= mask; if (reg & AR724X_PCI_INTR_DEV0) { irq = AR71XX_PCI_IRQ_START; event = sc->sc_eventstab[irq]; - if (!event || TAILQ_EMPTY(&event->ie_handlers)) { + if (!event || CK_SLIST_EMPTY(&event->ie_handlers)) { printf("Stray IRQ %d\n", irq); return (FILTER_STRAY); } /* Flush pending memory transactions */ ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_PCIE); /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); mips_intrcnt_inc(sc->sc_intr_counter[irq]); } return (FILTER_HANDLED); } static int ar724x_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int ar724x_pci_route_interrupt(device_t pcib, device_t device, int pin) { return (pci_get_slot(device)); } static device_method_t ar724x_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ar724x_pci_probe), DEVMETHOD(device_attach, ar724x_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, ar724x_pci_read_ivar), DEVMETHOD(bus_write_ivar, ar724x_pci_write_ivar), DEVMETHOD(bus_alloc_resource, ar724x_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, ar724x_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ar724x_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ar724x_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, ar724x_pci_maxslots), DEVMETHOD(pcib_read_config, ar724x_pci_read_config), DEVMETHOD(pcib_write_config, ar724x_pci_write_config), DEVMETHOD(pcib_route_interrupt, 
ar724x_pci_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; static driver_t ar724x_pci_driver = { "pcib", ar724x_pci_methods, sizeof(struct ar71xx_pci_softc), }; static devclass_t ar724x_pci_devclass; DRIVER_MODULE(ar724x_pci, nexus, ar724x_pci_driver, ar724x_pci_devclass, 0, 0); diff --git a/sys/mips/atheros/qca955x_pci.c b/sys/mips/atheros/qca955x_pci.c index e736942b0e84..48edec9cc5c2 100644 --- a/sys/mips/atheros/qca955x_pci.c +++ b/sys/mips/atheros/qca955x_pci.c @@ -1,606 +1,606 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * Copyright (c) 2011, Luiz Otavio O Souza. * Copyright (c) 2015, Adrian Chadd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include /* XXX aim to eliminate this! */ #include #include #include #include #undef AR724X_PCI_DEBUG //#define AR724X_PCI_DEBUG #ifdef AR724X_PCI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif /* * This is a PCI controller for the QCA955x and later SoCs. * It needs to be aware of >1 PCIe host endpoints. * * XXX TODO; it may be nice to merge this with ar724x_pci.c; * they're very similar. */ struct ar71xx_pci_irq { struct ar71xx_pci_softc *sc; int irq; }; struct ar71xx_pci_softc { device_t sc_dev; int sc_busno; struct rman sc_mem_rman; struct rman sc_irq_rman; uint32_t sc_pci_reg_base; /* XXX until bus stuff is done */ uint32_t sc_pci_crp_base; /* XXX until bus stuff is done */ uint32_t sc_pci_ctrl_base; /* XXX until bus stuff is done */ uint32_t sc_pci_mem_base; /* XXX until bus stuff is done */ uint32_t sc_pci_membase_limit; struct intr_event *sc_eventstab[AR71XX_PCI_NIRQS]; mips_intrcnt_t sc_intr_counter[AR71XX_PCI_NIRQS]; struct ar71xx_pci_irq sc_pci_irq[AR71XX_PCI_NIRQS]; struct resource *sc_irq; void *sc_ih; }; static int qca955x_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int qca955x_pci_teardown_intr(device_t, device_t, struct resource *, void *); static int qca955x_pci_intr(void *); static void qca955x_pci_write(uint32_t reg, uint32_t offset, uint32_t data, int bytes) { uint32_t val, mask, shift; /* Register access is 32-bit aligned */ shift = (offset & 3) * 8; if (bytes % 4) mask = (1 << (bytes * 8)) - 1; else mask = 0xffffffff; val = ATH_READ_REG(reg + (offset & ~3)); val &= ~(mask << shift); val |= ((data & mask) << shift); ATH_WRITE_REG(reg + (offset & ~3), val); dprintf("%s: %#x/%#x addr=%#x, data=%#x(%#x), bytes=%d\n", 
__func__, reg, reg + (offset & ~3), offset, data, val, bytes); } static uint32_t qca955x_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct ar71xx_pci_softc *sc = device_get_softc(dev); uint32_t data, shift, mask; /* Register access is 32-bit aligned */ shift = (reg & 3) * 8; /* Create a mask based on the width, post-shift */ if (bytes == 2) mask = 0xffff; else if (bytes == 1) mask = 0xff; else mask = 0xffffffff; dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); if ((bus == 0) && (slot == 0) && (func == 0)) data = ATH_READ_REG(sc->sc_pci_reg_base + (reg & ~3)); else data = -1; /* Get request bytes from 32-bit word */ data = (data >> shift) & mask; dprintf("%s: read 0x%x\n", __func__, data); return (data); } static void qca955x_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct ar71xx_pci_softc *sc = device_get_softc(dev); dprintf("%s: tag (%x, %x, %x) reg %d(%d): %x\n", __func__, bus, slot, func, reg, bytes, data); if ((bus != 0) || (slot != 0) || (func != 0)) return; qca955x_pci_write(sc->sc_pci_reg_base, reg, data, bytes); } static void qca955x_pci_mask_irq(void *source) { uint32_t reg; struct ar71xx_pci_irq *pirq = source; struct ar71xx_pci_softc *sc = pirq->sc; /* XXX - Only one interrupt ? Only one device ? 
*/ if (pirq->irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK, reg & ~QCA955X_PCI_INTR_DEV0); /* Clear any pending interrupt */ reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS, reg | QCA955X_PCI_INTR_DEV0); } static void qca955x_pci_unmask_irq(void *source) { uint32_t reg; struct ar71xx_pci_irq *pirq = source; struct ar71xx_pci_softc *sc = pirq->sc; if (pirq->irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK, reg | QCA955X_PCI_INTR_DEV0); } static int qca955x_pci_setup(device_t dev) { struct ar71xx_pci_softc *sc = device_get_softc(dev); uint32_t reg; /* setup COMMAND register */ reg = PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN; qca955x_pci_write(sc->sc_pci_crp_base, PCIR_COMMAND, reg, 2); /* These are the memory/prefetch base/limit parameters */ qca955x_pci_write(sc->sc_pci_crp_base, 0x20, sc->sc_pci_membase_limit, 4); qca955x_pci_write(sc->sc_pci_crp_base, 0x24, sc->sc_pci_membase_limit, 4); reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); if (reg != 0x7) { DELAY(100000); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET, 0); ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); DELAY(100); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET, 4); ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); DELAY(100000); } ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_APP, 0x1ffc1); /* Flush write */ (void) ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_APP); DELAY(1000); reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); if ((reg & QCA955X_PCI_RESET_LINK_UP) == 0) { 
device_printf(dev, "no PCIe controller found\n"); return (ENXIO); } return (0); } static int qca955x_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int qca955x_pci_attach(device_t dev) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int unit = device_get_unit(dev); int rid = 0; /* Dirty; maybe these could all just be hints */ if (unit == 0) { sc->sc_pci_reg_base = QCA955X_PCI_CFG_BASE0; sc->sc_pci_crp_base = QCA955X_PCI_CRP_BASE0; sc->sc_pci_ctrl_base = QCA955X_PCI_CTRL_BASE0; sc->sc_pci_mem_base = QCA955X_PCI_MEM_BASE0; /* XXX verify */ sc->sc_pci_membase_limit = 0x11f01000; } else if (unit == 1) { sc->sc_pci_reg_base = QCA955X_PCI_CFG_BASE1; sc->sc_pci_crp_base = QCA955X_PCI_CRP_BASE1; sc->sc_pci_ctrl_base = QCA955X_PCI_CTRL_BASE1; sc->sc_pci_mem_base = QCA955X_PCI_MEM_BASE1; /* XXX verify */ sc->sc_pci_membase_limit = 0x12f01200; } else { device_printf(dev, "%s: invalid unit (%d)\n", __func__, unit); return (ENXIO); } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "qca955x PCI memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_pci_mem_base, sc->sc_pci_mem_base + QCA955X_PCI_MEM_SIZE - 1) != 0) { panic("qca955x_pci_attach: failed to set up I/O rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "qca955x PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, AR71XX_PCI_IRQ_START, AR71XX_PCI_IRQ_END) != 0) panic("qca955x_pci_attach: failed to set up IRQ rman"); /* Disable interrupts */ ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS, 0); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK, 0); /* Hook up our interrupt handler. 
*/ if ((sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return (ENXIO); } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, qca955x_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return (ENXIO); } /* Reset PCIe core and PCIe PHY */ ar71xx_device_stop(QCA955X_RESET_PCIE); ar71xx_device_stop(QCA955X_RESET_PCIE_PHY); DELAY(100); ar71xx_device_start(QCA955X_RESET_PCIE_PHY); ar71xx_device_start(QCA955X_RESET_PCIE); if (qca955x_pci_setup(dev)) return (ENXIO); /* * Write initial base address. * * I'm not yet sure why this is required and/or why it isn't * initialised like this. The AR71xx PCI code initialises * the PCI windows for each device, but neither it or the * 724x PCI bridge modules explicitly initialise the BAR. * * So before this gets committed, have a chat with jhb@ or * someone else who knows PCI well and figure out whether * the initial BAR is supposed to be determined by /other/ * means. 
*/ qca955x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), sc->sc_pci_mem_base, 4); /* Fixup internal PCI bridge */ qca955x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN, 2); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int qca955x_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ar71xx_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int qca955x_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct ar71xx_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * qca955x_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int qca955x_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); if (!res) { switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rman_set_bustag(r, ar71xx_bus_space_pcimem); break; } } return (res); } static int qca955x_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t 
*filt, driver_intr_t *handler, void *arg, void **cookiep) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { sc->sc_pci_irq[irq].sc = sc; sc->sc_pci_irq[irq].irq = irq; error = intr_event_create(&event, (void *)&sc->sc_pci_irq[irq], 0, irq, qca955x_pci_mask_irq, qca955x_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return error; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); qca955x_pci_unmask_irq(&sc->sc_pci_irq[irq]); return (0); } static int qca955x_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); qca955x_pci_mask_irq(&sc->sc_pci_irq[irq]); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irq] = NULL; return (result); } static int qca955x_pci_intr(void *arg) { struct ar71xx_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, mask; /* There's only one PCIe DDR flush for both PCIe EPs */ ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_PCIE); reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS); mask = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK); /* * Handle only unmasked interrupts */ reg &= mask; /* * XXX TODO: handle >1 PCIe end point! 
*/ if (reg & QCA955X_PCI_INTR_DEV0) { irq = AR71XX_PCI_IRQ_START; event = sc->sc_eventstab[irq]; - if (!event || TAILQ_EMPTY(&event->ie_handlers)) { + if (!event || CK_SLIST_EMPTY(&event->ie_handlers)) { printf("Stray IRQ %d\n", irq); return (FILTER_STRAY); } /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); mips_intrcnt_inc(sc->sc_intr_counter[irq]); } return (FILTER_HANDLED); } static int qca955x_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int qca955x_pci_route_interrupt(device_t pcib, device_t device, int pin) { return (pci_get_slot(device)); } static device_method_t qca955x_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qca955x_pci_probe), DEVMETHOD(device_attach, qca955x_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, qca955x_pci_read_ivar), DEVMETHOD(bus_write_ivar, qca955x_pci_write_ivar), DEVMETHOD(bus_alloc_resource, qca955x_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, qca955x_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, qca955x_pci_setup_intr), DEVMETHOD(bus_teardown_intr, qca955x_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, qca955x_pci_maxslots), DEVMETHOD(pcib_read_config, qca955x_pci_read_config), DEVMETHOD(pcib_write_config, qca955x_pci_write_config), DEVMETHOD(pcib_route_interrupt, qca955x_pci_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; static driver_t qca955x_pci_driver = { "pcib", qca955x_pci_methods, sizeof(struct ar71xx_pci_softc), }; static devclass_t qca955x_pci_devclass; DRIVER_MODULE(qca955x_pci, nexus, qca955x_pci_driver, qca955x_pci_devclass, 0, 0); DRIVER_MODULE(qca955x_pci, apb, qca955x_pci_driver, qca955x_pci_devclass, 0, 
0); diff --git a/sys/mips/malta/gt_pci.c b/sys/mips/malta/gt_pci.c index a164be4941e2..a294661943b8 100644 --- a/sys/mips/malta/gt_pci.c +++ b/sys/mips/malta/gt_pci.c @@ -1,776 +1,776 @@ /* $NetBSD: gt_pci.c,v 1.4 2003/07/15 00:24:54 lukem Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * PCI configuration support for gt I/O Processor chip. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #define ICU_LEN 16 /* number of ISA IRQs */ /* * XXX: These defines are from NetBSD's . Respective file * from FreeBSD src tree lacks some definitions. */ #define PIC_OCW1 1 #define PIC_OCW2 0 #define PIC_OCW3 0 #define OCW2_SELECT 0 #define OCW2_ILS(x) ((x) << 0) /* interrupt level select */ #define OCW3_POLL_IRQ(x) ((x) & 0x7f) #define OCW3_POLL_PENDING (1U << 7) /* * Galileo controller's registers are LE so convert to then * to/from native byte order. 
We rely on boot loader or emulator * to set "swap bytes" configuration correctly for us */ #define GT_PCI_DATA(v) htole32((v)) #define GT_HOST_DATA(v) le32toh((v)) struct gt_pci_softc; struct gt_pci_intr_cookie { int irq; struct gt_pci_softc *sc; }; struct gt_pci_softc { device_t sc_dev; bus_space_tag_t sc_st; bus_space_handle_t sc_ioh_icu1; bus_space_handle_t sc_ioh_icu2; bus_space_handle_t sc_ioh_elcr; int sc_busno; struct rman sc_mem_rman; struct rman sc_io_rman; struct rman sc_irq_rman; unsigned long sc_mem; bus_space_handle_t sc_io; struct resource *sc_irq; struct intr_event *sc_eventstab[ICU_LEN]; struct gt_pci_intr_cookie sc_intr_cookies[ICU_LEN]; uint16_t sc_imask; uint16_t sc_elcr; uint16_t sc_reserved; void *sc_ih; }; static void gt_pci_set_icus(struct gt_pci_softc *); static int gt_pci_intr(void *v); static int gt_pci_probe(device_t); static int gt_pci_attach(device_t); static int gt_pci_activate_resource(device_t, device_t, int, int, struct resource *); static int gt_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int gt_pci_teardown_intr(device_t, device_t, struct resource *, void*); static int gt_pci_maxslots(device_t ); static int gt_pci_conf_setup(struct gt_pci_softc *, int, int, int, int, uint32_t *); static uint32_t gt_pci_read_config(device_t, u_int, u_int, u_int, u_int, int); static void gt_pci_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int gt_pci_route_interrupt(device_t pcib, device_t dev, int pin); static struct resource * gt_pci_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static void gt_pci_mask_irq(void *source) { struct gt_pci_intr_cookie *cookie = source; struct gt_pci_softc *sc = cookie->sc; int irq = cookie->irq; sc->sc_imask |= (1 << irq); sc->sc_elcr |= (1 << irq); gt_pci_set_icus(sc); } static void gt_pci_unmask_irq(void *source) { struct gt_pci_intr_cookie *cookie = source; struct 
gt_pci_softc *sc = cookie->sc; int irq = cookie->irq; /* Enable it, set trigger mode. */ sc->sc_imask &= ~(1 << irq); sc->sc_elcr &= ~(1 << irq); gt_pci_set_icus(sc); } static void gt_pci_set_icus(struct gt_pci_softc *sc) { /* Enable the cascade IRQ (2) if 8-15 is enabled. */ if ((sc->sc_imask & 0xff00) != 0xff00) sc->sc_imask &= ~(1U << 2); else sc->sc_imask |= (1U << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW1, sc->sc_imask & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW1, (sc->sc_imask >> 8) & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 0, sc->sc_elcr & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 1, (sc->sc_elcr >> 8) & 0xff); } static int gt_pci_intr(void *v) { struct gt_pci_softc *sc = v; struct intr_event *event; int irq; for (;;) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW3, OCW3_SEL | OCW3_P); irq = bus_space_read_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW3); if ((irq & OCW3_POLL_PENDING) == 0) { return FILTER_HANDLED; } irq = OCW3_POLL_IRQ(irq); if (irq == 2) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW3, OCW3_SEL | OCW3_P); irq = bus_space_read_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW3); if (irq & OCW3_POLL_PENDING) irq = OCW3_POLL_IRQ(irq) + 8; else irq = 2; } event = sc->sc_eventstab[irq]; - if (!event || TAILQ_EMPTY(&event->ie_handlers)) + if (!event || CK_SLIST_EMPTY(&event->ie_handlers)) continue; /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); /* XXX: Log stray IRQs */ /* Send a specific EOI to the 8259. 
*/ if (irq > 7) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW2, OCW2_SELECT | OCW2_EOI | OCW2_SL | OCW2_ILS(irq & 7)); irq = 2; } bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW2, OCW2_SELECT | OCW2_EOI | OCW2_SL | OCW2_ILS(irq)); } return FILTER_HANDLED; } static int gt_pci_probe(device_t dev) { device_set_desc(dev, "GT64120 PCI bridge"); return (0); } static int gt_pci_attach(device_t dev) { uint32_t busno; struct gt_pci_softc *sc = device_get_softc(dev); int rid; busno = 0; sc->sc_dev = dev; sc->sc_busno = busno; sc->sc_st = mips_bus_space_generic; /* Use KSEG1 to access IO ports for it is uncached */ sc->sc_io = MALTA_PCI0_IO_BASE; sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "GT64120 PCI I/O Ports"; /* * First 256 bytes are ISA's registers: e.g. i8259's * So do not use them for general purpose PCI I/O window */ if (rman_init(&sc->sc_io_rman) != 0 || rman_manage_region(&sc->sc_io_rman, 0x100, 0xffff) != 0) { panic("gt_pci_attach: failed to set up I/O rman"); } /* Use KSEG1 to access PCI memory for it is uncached */ sc->sc_mem = MALTA_PCIMEM1_BASE; sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "GT64120 PCI Memory"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_mem, sc->sc_mem + MALTA_PCIMEM1_SIZE) != 0) { panic("gt_pci_attach: failed to set up memory rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "GT64120 PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, 1, 31) != 0) panic("gt_pci_attach: failed to set up IRQ rman"); /* * Map the PIC/ELCR registers. 
*/ #if 0 if (bus_space_map(sc->sc_st, 0x4d0, 2, 0, &sc->sc_ioh_elcr) != 0) device_printf(dev, "unable to map ELCR registers\n"); if (bus_space_map(sc->sc_st, IO_ICU1, 2, 0, &sc->sc_ioh_icu1) != 0) device_printf(dev, "unable to map ICU1 registers\n"); if (bus_space_map(sc->sc_st, IO_ICU2, 2, 0, &sc->sc_ioh_icu2) != 0) device_printf(dev, "unable to map ICU2 registers\n"); #else sc->sc_ioh_elcr = MIPS_PHYS_TO_KSEG1(sc->sc_io + 0x4d0); sc->sc_ioh_icu1 = MIPS_PHYS_TO_KSEG1(sc->sc_io + IO_ICU1); sc->sc_ioh_icu2 = MIPS_PHYS_TO_KSEG1(sc->sc_io + IO_ICU2); #endif /* All interrupts default to "masked off". */ sc->sc_imask = 0xffff; /* All interrupts default to edge-triggered. */ sc->sc_elcr = 0; /* * Initialize the 8259s. */ /* reset, program device, 4 bytes */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, ICW1_RESET | ICW1_IC4); /* * XXX: values from NetBSD's */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, 0/*XXX*/); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, 1 << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, ICW4_8086); /* mask all interrupts */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, sc->sc_imask & 0xff); /* enable special mask mode */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, OCW3_SEL | OCW3_ESMM | OCW3_SMM); /* read IRR by default */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, OCW3_SEL | OCW3_RR); /* reset, program device, 4 bytes */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, ICW1_RESET | ICW1_IC4); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, 0/*XXX*/); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, 1 << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, ICW4_8086); /* mask all interrupts */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, sc->sc_imask & 0xff); /* enable special mask mode */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, OCW3_SEL | OCW3_ESMM | OCW3_SMM); /* read IRR by default */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, OCW3_SEL | OCW3_RR); /* * Default all interrupts to edge-triggered. 
*/ bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 0, sc->sc_elcr & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 1, (sc->sc_elcr >> 8) & 0xff); /* * Some ISA interrupts are reserved for devices that * we know are hard-wired to certain IRQs. */ sc->sc_reserved = (1U << 0) | /* timer */ (1U << 1) | /* keyboard controller (keyboard) */ (1U << 2) | /* PIC cascade */ (1U << 3) | /* COM 2 */ (1U << 4) | /* COM 1 */ (1U << 6) | /* floppy */ (1U << 7) | /* centronics */ (1U << 8) | /* RTC */ (1U << 9) | /* I2C */ (1U << 12) | /* keyboard controller (mouse) */ (1U << 14) | /* IDE primary */ (1U << 15); /* IDE secondary */ /* Hook up our interrupt handler. */ if ((sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, MALTA_SOUTHBRIDGE_INTR, MALTA_SOUTHBRIDGE_INTR, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return ENXIO; } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, gt_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return ENXIO; } /* Initialize memory and i/o rmans. */ device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int gt_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int gt_pci_conf_setup(struct gt_pci_softc *sc, int bus, int slot, int func, int reg, uint32_t *addr) { *addr = (bus << 16) | (slot << 11) | (func << 8) | reg; return (0); } static uint32_t gt_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct gt_pci_softc *sc = device_get_softc(dev); uint32_t data; uint32_t addr; uint32_t shift, mask; if (gt_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return (uint32_t)(-1); /* Clear cause register bits. 
*/ GT_REGVAL(GT_INTR_CAUSE) = GT_PCI_DATA(0); GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1U << 31) | addr); /* * Galileo system controller is special */ if ((bus == 0) && (slot == 0)) data = GT_PCI_DATA(GT_REGVAL(GT_PCI0_CFG_DATA)); else data = GT_REGVAL(GT_PCI0_CFG_DATA); /* Check for master abort. */ if (GT_HOST_DATA(GT_REGVAL(GT_INTR_CAUSE)) & (GTIC_MASABORT0 | GTIC_TARABORT0)) data = (uint32_t) -1; switch(reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch(bytes) { case 1: mask = 0xff; data = (data >> shift) & mask; break; case 2: mask = 0xffff; if(reg % 4 == 0) data = data & mask; else data = (data >> 16) & mask; break; case 4: break; default: panic("gt_pci_readconfig: wrong bytes count"); break; } #if 0 printf("PCICONF_READ(%02x:%02x.%02x[%04x] -> %02x(%d)\n", bus, slot, func, reg, data, bytes); #endif return (data); } static void gt_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct gt_pci_softc *sc = device_get_softc(dev); uint32_t addr; uint32_t reg_data; uint32_t shift, mask; if(bytes != 4) { reg_data = gt_pci_read_config(dev, bus, slot, func, reg, 4); shift = 8 * (reg & 3); switch(bytes) { case 1: mask = 0xff; data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 2: mask = 0xffff; if(reg % 4 == 0) data = (reg_data & ~mask) | data; else data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 4: break; default: panic("gt_pci_readconfig: wrong bytes count"); break; } } if (gt_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return; /* The galileo has problems accessing device 31. */ if (bus == 0 && slot == 31) return; /* XXX: no support for bus > 0 yet */ if (bus > 0) return; /* Clear cause register bits. 
*/ GT_REGVAL(GT_INTR_CAUSE) = GT_PCI_DATA(0); GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1U << 31) | addr); /* * Galileo system controller is special */ if ((bus == 0) && (slot == 0)) GT_REGVAL(GT_PCI0_CFG_DATA) = GT_PCI_DATA(data); else GT_REGVAL(GT_PCI0_CFG_DATA) = data; #if 0 printf("PCICONF_WRITE(%02x:%02x.%02x[%04x] -> %02x(%d)\n", bus, slot, func, reg, data, bytes); #endif } static int gt_pci_route_interrupt(device_t pcib, device_t dev, int pin) { int bus; int device; int func; /* struct gt_pci_softc *sc = device_get_softc(pcib); */ bus = pci_get_bus(dev); device = pci_get_slot(dev); func = pci_get_function(dev); /* * XXXMIPS: We need routing logic. This is just a stub . */ switch (device) { case 9: /* * PIIX4 IDE adapter. HW IRQ0 */ return 0; case 11: /* Ethernet */ return 10; default: device_printf(pcib, "no IRQ mapping for %d/%d/%d/%d\n", bus, device, func, pin); } return (0); } static int gt_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct gt_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int gt_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct gt_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * gt_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct gt_pci_softc *sc = device_get_softc(bus); struct resource *rv = NULL; struct rman *rm; bus_space_handle_t bh = 0; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; bh = sc->sc_mem; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; bh = sc->sc_io; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) 
return (NULL); rman_set_rid(rv, *rid); if (type != SYS_RES_IRQ) { bh += (rman_get_start(rv)); rman_set_bustag(rv, gt_pci_bus_space); rman_set_bushandle(rv, bh); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } } return (rv); } static int gt_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_space_handle_t p; int error; if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { error = bus_space_map(rman_get_bustag(r), rman_get_bushandle(r), rman_get_size(r), 0, &p); if (error) return (error); rman_set_bushandle(r, p); } return (rman_activate_resource(r)); } static int gt_pci_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct gt_pci_softc *sc = device_get_softc(dev); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq >= ICU_LEN || irq == 2) panic("%s: bad irq or type", __func__); event = sc->sc_eventstab[irq]; sc->sc_intr_cookies[irq].irq = irq; sc->sc_intr_cookies[irq].sc = sc; if (event == NULL) { error = intr_event_create(&event, (void *)&sc->sc_intr_cookies[irq], 0, irq, gt_pci_mask_irq, gt_pci_unmask_irq, NULL, NULL, "gt_pci intr%d:", irq); if (error) return 0; sc->sc_eventstab[irq] = event; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); gt_pci_unmask_irq((void *)&sc->sc_intr_cookies[irq]); return 0; } static int gt_pci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { struct gt_pci_softc *sc = device_get_softc(dev); int irq; irq = rman_get_start(res); gt_pci_mask_irq((void *)&sc->sc_intr_cookies[irq]); return (intr_event_remove_handler(cookie)); } static device_method_t gt_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gt_pci_probe), DEVMETHOD(device_attach, gt_pci_attach), 
DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, gt_read_ivar), DEVMETHOD(bus_write_ivar, gt_write_ivar), DEVMETHOD(bus_alloc_resource, gt_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, gt_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, gt_pci_setup_intr), DEVMETHOD(bus_teardown_intr, gt_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, gt_pci_maxslots), DEVMETHOD(pcib_read_config, gt_pci_read_config), DEVMETHOD(pcib_write_config, gt_pci_write_config), DEVMETHOD(pcib_route_interrupt, gt_pci_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; static driver_t gt_pci_driver = { "pcib", gt_pci_methods, sizeof(struct gt_pci_softc), }; static devclass_t gt_pci_devclass; DRIVER_MODULE(gt_pci, gt, gt_pci_driver, gt_pci_devclass, 0, 0); diff --git a/sys/mips/mediatek/mtk_pcie.c b/sys/mips/mediatek/mtk_pcie.c index 635fa78d87b1..6d3aa3e2550c 100644 --- a/sys/mips/mediatek/mtk_pcie.c +++ b/sys/mips/mediatek/mtk_pcie.c @@ -1,1268 +1,1268 @@ /*- * Copyright (c) 2016 Stanislav Galabov. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "pcib_if.h" #include "pic_if.h" /* * Note: We only support PCIe at the moment. * Most SoCs in the Ralink/Mediatek family that we target actually don't * support PCI anyway, with the notable exceptions being RT3662/RT3883, which * support both PCI and PCIe. If there exists a board based on one of them * which is of interest in the future it shouldn't be too hard to enable PCI * support for it. 
*/ /* Chip specific function declarations */ static int mtk_pcie_phy_init(device_t); static int mtk_pcie_phy_start(device_t); static int mtk_pcie_phy_stop(device_t); static int mtk_pcie_phy_mt7621_init(device_t); static int mtk_pcie_phy_mt7628_init(device_t); static int mtk_pcie_phy_mt7620_init(device_t); static int mtk_pcie_phy_rt3883_init(device_t); static void mtk_pcie_phy_setup_slots(device_t); /* Generic declarations */ struct mtx mtk_pci_mtx; MTX_SYSINIT(mtk_pci_mtx, &mtk_pci_mtx, "MTK PCIe mutex", MTX_SPIN); static int mtk_pci_intr(void *); static struct mtk_pci_softc *mt_sc = NULL; struct mtk_pci_range { u_long base; u_long len; }; #define FDT_RANGES_CELLS ((1 + 2 + 3) * 2) static void mtk_pci_range_dump(struct mtk_pci_range *range) { #ifdef DEBUG printf("\n"); printf(" base = 0x%08lx\n", range->base); printf(" len = 0x%08lx\n", range->len); #endif } static int mtk_pci_ranges_decode(phandle_t node, struct mtk_pci_range *io_space, struct mtk_pci_range *mem_space) { struct mtk_pci_range *pci_space; pcell_t ranges[FDT_RANGES_CELLS]; pcell_t addr_cells, size_cells, par_addr_cells; pcell_t *rangesptr; pcell_t cell0, cell1, cell2; int tuple_size, tuples, i, rv, len; /* * Retrieve 'ranges' property. */ if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (EINVAL); if (addr_cells != 3 || size_cells != 2) return (ERANGE); par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells != 1) return (ERANGE); len = OF_getproplen(node, "ranges"); if (len > sizeof(ranges)) return (ENOMEM); if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0) return (EINVAL); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); tuples = len / tuple_size; /* * Initialize the ranges so that we don't have to worry about * having them all defined in the FDT. In particular, it is * perfectly fine not to want I/O space on PCI busses. 
*/ bzero(io_space, sizeof(*io_space)); bzero(mem_space, sizeof(*mem_space)); rangesptr = &ranges[0]; for (i = 0; i < tuples; i++) { cell0 = fdt_data_get((void *)rangesptr, 1); rangesptr++; cell1 = fdt_data_get((void *)rangesptr, 1); rangesptr++; cell2 = fdt_data_get((void *)rangesptr, 1); rangesptr++; if (cell0 & 0x02000000) { pci_space = mem_space; } else if (cell0 & 0x01000000) { pci_space = io_space; } else { rv = ERANGE; goto out; } pci_space->base = fdt_data_get((void *)rangesptr, par_addr_cells); rangesptr += par_addr_cells; pci_space->len = fdt_data_get((void *)rangesptr, size_cells); rangesptr += size_cells; } rv = 0; out: return (rv); } static int mtk_pci_ranges(phandle_t node, struct mtk_pci_range *io_space, struct mtk_pci_range *mem_space) { int err; if ((err = mtk_pci_ranges_decode(node, io_space, mem_space)) != 0) { return (err); } mtk_pci_range_dump(io_space); mtk_pci_range_dump(mem_space); return (0); } static struct ofw_compat_data compat_data[] = { { "ralink,rt3883-pci", MTK_SOC_RT3883 }, { "mediatek,mt7620-pci", MTK_SOC_MT7620A }, { "mediatek,mt7628-pci", MTK_SOC_MT7628 }, { "mediatek,mt7621-pci", MTK_SOC_MT7621 }, { NULL, MTK_SOC_UNKNOWN } }; static int mtk_pci_probe(device_t dev) { struct mtk_pci_softc *sc = device_get_softc(dev); if (!ofw_bus_status_okay(dev)) return (ENXIO); sc->socid = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (sc->socid == MTK_SOC_UNKNOWN) return (ENXIO); device_set_desc(dev, "MTK PCIe Controller"); return (0); } static int mtk_pci_attach(device_t dev) { struct mtk_pci_softc *sc = device_get_softc(dev); struct mtk_pci_range io_space, mem_space; phandle_t node; intptr_t xref; int i, rid; sc->sc_dev = dev; mt_sc = sc; sc->addr_mask = 0xffffffff; /* Request our memory */ rid = 0; sc->pci_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->pci_res[0] == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } /* See how many interrupts we need */ if 
(sc->socid == MTK_SOC_MT7621) sc->sc_num_irq = 3; else { sc->sc_num_irq = 1; sc->pci_res[2] = sc->pci_res[3] = NULL; sc->pci_intrhand[1] = sc->pci_intrhand[2] = NULL; } /* Request our interrupts */ for (i = 1; i <= sc->sc_num_irq ; i++) { rid = i - 1; sc->pci_res[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->pci_res[i] == NULL) { device_printf(dev, "could not allocate interrupt " "resource %d\n", rid); goto cleanup_res; } } /* Parse our PCI 'ranges' property */ node = ofw_bus_get_node(dev); xref = OF_xref_from_node(node); if (mtk_pci_ranges(node, &io_space, &mem_space)) { device_printf(dev, "could not retrieve 'ranges' data\n"); goto cleanup_res; } /* Memory, I/O and IRQ resource limits */ sc->sc_io_base = io_space.base; sc->sc_io_size = io_space.len; sc->sc_mem_base = mem_space.base; sc->sc_mem_size = mem_space.len; sc->sc_irq_start = MTK_PCIE0_IRQ; sc->sc_irq_end = MTK_PCIE2_IRQ; /* Init resource managers for memory, I/O and IRQ */ sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "mtk pcie memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base, sc->sc_mem_base + sc->sc_mem_size - 1) != 0) { device_printf(dev, "failed to setup memory rman\n"); goto cleanup_res; } sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "mtk pcie io window"; if (rman_init(&sc->sc_io_rman) != 0 || rman_manage_region(&sc->sc_io_rman, sc->sc_io_base, sc->sc_io_base + sc->sc_io_size - 1) != 0) { device_printf(dev, "failed to setup io rman\n"); goto cleanup_res; } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "mtk pcie irqs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, sc->sc_irq_start, sc->sc_irq_end) != 0) { device_printf(dev, "failed to setup irq rman\n"); goto cleanup_res; } /* Do SoC-specific PCIe initialization */ if (mtk_pcie_phy_init(dev)) { device_printf(dev, "pcie phy init failed\n"); goto cleanup_rman; } /* Register 
ourselves as an interrupt controller */ if (intr_pic_register(dev, xref) == NULL) { device_printf(dev, "could not register PIC\n"); goto cleanup_rman; } /* Set up our interrupt handler */ for (i = 1; i <= sc->sc_num_irq; i++) { sc->pci_intrhand[i - 1] = NULL; if (bus_setup_intr(dev, sc->pci_res[i], INTR_TYPE_MISC, mtk_pci_intr, NULL, sc, &sc->pci_intrhand[i - 1])) { device_printf(dev, "could not setup intr handler %d\n", i); goto cleanup; } } /* Attach our PCI child so bus enumeration can start */ if (device_add_child(dev, "pci", -1) == NULL) { device_printf(dev, "could not attach pci bus\n"); goto cleanup; } /* And finally, attach ourselves to the bus */ if (bus_generic_attach(dev)) { device_printf(dev, "could not attach to bus\n"); goto cleanup; } return (0); cleanup: #ifdef notyet intr_pic_unregister(dev, xref); #endif for (i = 1; i <= sc->sc_num_irq; i++) { if (sc->pci_intrhand[i - 1] != NULL) bus_teardown_intr(dev, sc->pci_res[i], sc->pci_intrhand[i - 1]); } cleanup_rman: mtk_pcie_phy_stop(dev); rman_fini(&sc->sc_irq_rman); rman_fini(&sc->sc_io_rman); rman_fini(&sc->sc_mem_rman); cleanup_res: mt_sc = NULL; if (sc->pci_res[0] != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->pci_res[0]); if (sc->pci_res[1] != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pci_res[1]); if (sc->pci_res[2] != NULL) bus_release_resource(dev, SYS_RES_IRQ, 1, sc->pci_res[2]); if (sc->pci_res[3] != NULL) bus_release_resource(dev, SYS_RES_IRQ, 2, sc->pci_res[3]); return (ENXIO); } static int mtk_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct mtk_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = device_get_unit(dev); return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int mtk_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct mtk_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = 
result; return (0); } return (ENOENT); } static struct resource * mtk_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mtk_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case PCI_RES_BUS: return pci_domain_alloc_bus(0, child, rid, start, end, count, flags); case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if ((flags & RF_ACTIVE) && type != SYS_RES_IRQ) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int mtk_pci_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { if (type == PCI_RES_BUS) return (pci_domain_release_bus(0, child, rid, res)); return (bus_generic_release_resource(bus, child, type, rid, res)); } static int mtk_pci_adjust_resource(device_t bus, device_t child, int type, struct resource *res, rman_res_t start, rman_res_t end) { struct mtk_pci_softc *sc = device_get_softc(bus); struct rman *rm; switch (type) { case PCI_RES_BUS: return pci_domain_adjust_bus(0, child, res, start, end); case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: rm = NULL; break; } if (rm != NULL) return (rman_adjust_resource(res, start, end)); return (bus_generic_adjust_resource(bus, child, type, res, start, end)); } static inline int mtk_idx_to_irq(int idx) { return ((idx == 0) ? MTK_PCIE0_IRQ : (idx == 1) ? MTK_PCIE1_IRQ : (idx == 2) ? MTK_PCIE2_IRQ : -1); } static inline int mtk_irq_to_idx(int irq) { return ((irq == MTK_PCIE0_IRQ) ? 0 : (irq == MTK_PCIE1_IRQ) ? 
1 : (irq == MTK_PCIE2_IRQ) ? 2 : -1); } static void mtk_pci_mask_irq(void *source) { MT_WRITE32(mt_sc, MTK_PCI_PCIENA, MT_READ32(mt_sc, MTK_PCI_PCIENA) & ~(1<<((int)source))); } static void mtk_pci_unmask_irq(void *source) { MT_WRITE32(mt_sc, MTK_PCI_PCIENA, MT_READ32(mt_sc, MTK_PCI_PCIENA) | (1<<((int)source))); } static int mtk_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct mtk_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error, irqidx; irq = rman_get_start(ires); if (irq < sc->sc_irq_start || irq > sc->sc_irq_end) return (EINVAL); irqidx = irq - sc->sc_irq_start; event = sc->sc_eventstab[irqidx]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, mtk_pci_mask_irq, mtk_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irqidx] = event; } else { return (error); } } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mtk_pci_unmask_irq((void*)irq); return (0); } static int mtk_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct mtk_pci_softc *sc = device_get_softc(dev); int irq, result, irqidx; irq = rman_get_start(ires); if (irq < sc->sc_irq_start || irq > sc->sc_irq_end) return (EINVAL); irqidx = irq - sc->sc_irq_start; if (sc->sc_eventstab[irqidx] == NULL) panic("Trying to teardown unoccupied IRQ"); mtk_pci_mask_irq((void*)irq); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irqidx] = NULL; return (result); } static inline uint32_t mtk_pci_make_addr(int bus, int slot, int func, int reg) { uint32_t addr; addr = ((((reg & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) | (func << 8) | (reg & 0xfc) | (1 << 31)); return (addr); } static int mtk_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static inline int 
mtk_pci_slot_has_link(device_t dev, int slot) { struct mtk_pci_softc *sc = device_get_softc(dev); return !!(sc->pcie_link_status & (1<addr_mask; MT_WRITE32(sc, MTK_PCI_CFGADDR, addr); switch (bytes % 4) { case 0: data = MT_READ32(sc, MTK_PCI_CFGDATA); break; case 1: data = MT_READ8(sc, MTK_PCI_CFGDATA + (reg & 0x3)); break; case 2: data = MT_READ16(sc, MTK_PCI_CFGDATA + (reg & 0x3)); break; default: panic("%s(): Wrong number of bytes (%d) requested!\n", __FUNCTION__, bytes % 4); } mtx_unlock_spin(&mtk_pci_mtx); return (data); } static void mtk_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct mtk_pci_softc *sc = device_get_softc(dev); uint32_t addr = 0, data = val; /* Do not write if slot has no link */ if (bus == 0 && mtk_pci_slot_has_link(dev, slot) == 0) return; mtx_lock_spin(&mtk_pci_mtx); addr = mtk_pci_make_addr(bus, slot, func, (reg & ~3)) & sc->addr_mask; MT_WRITE32(sc, MTK_PCI_CFGADDR, addr); switch (bytes % 4) { case 0: MT_WRITE32(sc, MTK_PCI_CFGDATA, data); break; case 1: MT_WRITE8(sc, MTK_PCI_CFGDATA + (reg & 0x3), data); break; case 2: MT_WRITE16(sc, MTK_PCI_CFGDATA + (reg & 0x3), data); break; default: panic("%s(): Wrong number of bytes (%d) requested!\n", __FUNCTION__, bytes % 4); } mtx_unlock_spin(&mtk_pci_mtx); } static int mtk_pci_route_interrupt(device_t pcib, device_t device, int pin) { int bus, sl, dev; bus = pci_get_bus(device); sl = pci_get_slot(device); dev = pci_get_device(device); if (bus != 0) panic("Unexpected bus number %d\n", bus); /* PCIe only */ switch (sl) { case 0: return MTK_PCIE0_IRQ; case 1: return MTK_PCIE0_IRQ + 1; case 2: return MTK_PCIE0_IRQ + 2; default: return (-1); } return (-1); } static device_method_t mtk_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mtk_pci_probe), DEVMETHOD(device_attach, mtk_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, 
bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, mtk_pci_read_ivar), DEVMETHOD(bus_write_ivar, mtk_pci_write_ivar), DEVMETHOD(bus_alloc_resource, mtk_pci_alloc_resource), DEVMETHOD(bus_release_resource, mtk_pci_release_resource), DEVMETHOD(bus_adjust_resource, mtk_pci_adjust_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, mtk_pci_setup_intr), DEVMETHOD(bus_teardown_intr, mtk_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, mtk_pci_maxslots), DEVMETHOD(pcib_read_config, mtk_pci_read_config), DEVMETHOD(pcib_write_config, mtk_pci_write_config), DEVMETHOD(pcib_route_interrupt, mtk_pci_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t mtk_pci_driver = { "pcib", mtk_pci_methods, sizeof(struct mtk_pci_softc), }; static devclass_t mtk_pci_devclass; DRIVER_MODULE(mtk_pci, simplebus, mtk_pci_driver, mtk_pci_devclass, 0, 0); /* Our interrupt handler */ static int mtk_pci_intr(void *arg) { struct mtk_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, irqidx; reg = MT_READ32(sc, MTK_PCI_PCIINT); for (irq = sc->sc_irq_start; irq <= sc->sc_irq_end; irq++) { if (reg & (1u<sc_irq_start; event = sc->sc_eventstab[irqidx]; - if (!event || TAILQ_EMPTY(&event->ie_handlers)) { + if (!event || CK_SLIST_EMPTY(&event->ie_handlers)) { if (irq != 0) printf("Stray PCI IRQ %d\n", irq); continue; } intr_event_handle(event, NULL); } } return (FILTER_HANDLED); } /* PCIe SoC-specific initialization */ static int mtk_pcie_phy_init(device_t dev) { struct mtk_pci_softc *sc; /* Get our softc 
*/ sc = device_get_softc(dev); /* We don't know how many slots we have yet */ sc->num_slots = 0; /* Handle SoC specific PCIe init */ switch (sc->socid) { case MTK_SOC_MT7628: /* Fallthrough */ case MTK_SOC_MT7688: if (mtk_pcie_phy_mt7628_init(dev)) return (ENXIO); break; case MTK_SOC_MT7621: if (mtk_pcie_phy_mt7621_init(dev)) return (ENXIO); break; case MTK_SOC_MT7620A: if (mtk_pcie_phy_mt7620_init(dev)) return (ENXIO); break; case MTK_SOC_RT3662: /* Fallthrough */ case MTK_SOC_RT3883: if (mtk_pcie_phy_rt3883_init(dev)) return (ENXIO); break; default: device_printf(dev, "unsupported device %x\n", sc->socid); return (ENXIO); } /* * If we were successful so far go and set up the PCIe slots, so we * may allocate mem/io/irq resources and enumerate busses later. */ mtk_pcie_phy_setup_slots(dev); return (0); } static int mtk_pcie_phy_start(device_t dev) { struct mtk_pci_softc *sc = device_get_softc(dev); if (sc->socid == MTK_SOC_MT7621 && (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) != SYSCTL_MT7621_REV_E) { if (fdt_reset_assert_all(dev)) return (ENXIO); } else { if (fdt_reset_deassert_all(dev)) return (ENXIO); } if (fdt_clock_enable_all(dev)) return (ENXIO); return (0); } static int mtk_pcie_phy_stop(device_t dev) { struct mtk_pci_softc *sc = device_get_softc(dev); if (sc->socid == MTK_SOC_MT7621 && (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) != SYSCTL_MT7621_REV_E) { if (fdt_reset_deassert_all(dev)) return (ENXIO); } else { if (fdt_reset_assert_all(dev)) return (ENXIO); } if (fdt_clock_disable_all(dev)) return (ENXIO); return (0); } #define mtk_pcie_phy_set(_sc, _reg, _s, _n, _v) \ MT_WRITE32((_sc), (_reg), ((MT_READ32((_sc), (_reg)) & \ (~(((1ull << (_n)) - 1) << (_s)))) | ((_v) << (_s)))) static void mtk_pcie_phy_mt7621_bypass_pipe_rst(struct mtk_pci_softc *sc, uint32_t off) { mtk_pcie_phy_set(sc, off + 0x002c, 12, 1, 1); mtk_pcie_phy_set(sc, off + 0x002c, 4, 1, 1); mtk_pcie_phy_set(sc, off + 0x012c, 12, 1, 1); mtk_pcie_phy_set(sc, off + 0x012c, 4, 1, 
1); mtk_pcie_phy_set(sc, off + 0x102c, 12, 1, 1); mtk_pcie_phy_set(sc, off + 0x102c, 4, 1, 1); } static void mtk_pcie_phy_mt7621_setup_ssc(struct mtk_pci_softc *sc, uint32_t off) { uint32_t xtal_sel; xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6; xtal_sel &= 0x7; mtk_pcie_phy_set(sc, off + 0x400, 8, 1, 1); mtk_pcie_phy_set(sc, off + 0x400, 9, 2, 0); mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 1); mtk_pcie_phy_set(sc, off + 0x100, 4, 1, 1); mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 0); mtk_pcie_phy_set(sc, off + 0x100, 5, 1, 0); if (xtal_sel <= 5 && xtal_sel >= 3) { mtk_pcie_phy_set(sc, off + 0x490, 6, 2, 1); mtk_pcie_phy_set(sc, off + 0x4a8, 0, 12, 0x1a); mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a); } else { mtk_pcie_phy_set(sc, off + 0x490, 6, 2, 0); if (xtal_sel >= 6) { mtk_pcie_phy_set(sc, off + 0x4bc, 4, 2, 0x01); mtk_pcie_phy_set(sc, off + 0x49c, 0, 31, 0x18000000); mtk_pcie_phy_set(sc, off + 0x4a4, 0, 16, 0x18d); mtk_pcie_phy_set(sc, off + 0x4a8, 0, 12, 0x4a); mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x4a); mtk_pcie_phy_set(sc, off + 0x4a8, 0, 12, 0x11); mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x11); } else { mtk_pcie_phy_set(sc, off + 0x4a8, 0, 12, 0x1a); mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a); } } mtk_pcie_phy_set(sc, off + 0x4a0, 5, 1, 1); mtk_pcie_phy_set(sc, off + 0x490, 22, 2, 2); mtk_pcie_phy_set(sc, off + 0x490, 18, 4, 6); mtk_pcie_phy_set(sc, off + 0x490, 12, 4, 2); mtk_pcie_phy_set(sc, off + 0x490, 8, 4, 1); mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 0); mtk_pcie_phy_set(sc, off + 0x490, 1, 3, 2); if (xtal_sel <= 5 && xtal_sel >= 3) { mtk_pcie_phy_set(sc, off + 0x414, 6, 2, 1); mtk_pcie_phy_set(sc, off + 0x414, 5, 1, 1); } mtk_pcie_phy_set(sc, off + 0x414, 28, 2, 1); mtk_pcie_phy_set(sc, off + 0x040, 17, 4, 7); mtk_pcie_phy_set(sc, off + 0x040, 16, 1, 1); mtk_pcie_phy_set(sc, off + 0x140, 17, 4, 7); mtk_pcie_phy_set(sc, off + 0x140, 16, 1, 1); mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 1); mtk_pcie_phy_set(sc, off + 0x100, 5, 1, 1); 
mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 0); mtk_pcie_phy_set(sc, off + 0x100, 4, 1, 0); } /* XXX: ugly, we need to fix this at some point */ #define MT7621_GPIO_CTRL0 *((volatile uint32_t *)0xbe000600) #define MT7621_GPIO_DATA0 *((volatile uint32_t *)0xbe000620) #define mtk_gpio_clr_set(_reg, _clr, _set) \ do { \ (_reg) = ((_reg) & (_clr)) | (_set); \ } while (0) static int mtk_pcie_phy_mt7621_init(device_t dev) { struct mtk_pci_softc *sc = device_get_softc(dev); /* First off, stop the PHY */ if (mtk_pcie_phy_stop(dev)) return (ENXIO); /* PCIe resets are GPIO pins */ mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7621_PERST_GPIO_MODE | MT7621_UARTL3_GPIO_MODE, MT7621_PERST_GPIO | MT7621_UARTL3_GPIO); /* Set GPIO pins as outputs */ mtk_gpio_clr_set(MT7621_GPIO_CTRL0, 0, MT7621_PCIE_RST); /* Assert resets to PCIe devices */ mtk_gpio_clr_set(MT7621_GPIO_DATA0, MT7621_PCIE_RST, 0); /* Give everything a chance to sink in */ DELAY(100000); /* Now start the PHY again */ if (mtk_pcie_phy_start(dev)) return (ENXIO); /* Wait for things to settle */ DELAY(100000); /* Only apply below to REV-E hardware */ if ((mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) == SYSCTL_MT7621_REV_E) mtk_pcie_phy_mt7621_bypass_pipe_rst(sc, 0x9000); /* Setup PCIe ports 0 and 1 */ mtk_pcie_phy_mt7621_setup_ssc(sc, 0x9000); /* Setup PCIe port 2 */ mtk_pcie_phy_mt7621_setup_ssc(sc, 0xa000); /* Deassert resets to PCIe devices */ mtk_gpio_clr_set(MT7621_GPIO_DATA0, 0, MT7621_PCIE_RST); /* Set number of slots supported */ sc->num_slots = 3; /* Give it a chance to sink in */ DELAY(100000); return (0); } static void mtk_pcie_phy_mt7628_setup(struct mtk_pci_softc *sc, uint32_t off) { uint32_t xtal_sel; xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6; xtal_sel &= 0x1; mtk_pcie_phy_set(sc, off + 0x400, 8, 1, 1); mtk_pcie_phy_set(sc, off + 0x400, 9, 2, 0); mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 1); mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 0); mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 3); if (xtal_sel == 1) { 
mtk_pcie_phy_set(sc, off + 0x4bc, 24, 8, 0x7d); mtk_pcie_phy_set(sc, off + 0x490, 12, 4, 0x08); mtk_pcie_phy_set(sc, off + 0x490, 6, 2, 0x01); mtk_pcie_phy_set(sc, off + 0x4c0, 0, 32, 0x1f400000); mtk_pcie_phy_set(sc, off + 0x4a4, 0, 16, 0x013d); mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x74); mtk_pcie_phy_set(sc, off + 0x4a8, 0, 16, 0x74); } else { mtk_pcie_phy_set(sc, off + 0x4bc, 24, 8, 0x64); mtk_pcie_phy_set(sc, off + 0x490, 12, 4, 0x0a); mtk_pcie_phy_set(sc, off + 0x490, 6, 2, 0x00); mtk_pcie_phy_set(sc, off + 0x4c0, 0, 32, 0x19000000); mtk_pcie_phy_set(sc, off + 0x4a4, 0, 16, 0x018d); mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x4a); mtk_pcie_phy_set(sc, off + 0x4a8, 0, 16, 0x4a); } mtk_pcie_phy_set(sc, off + 0x498, 0, 8, 5); mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 1); mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 0); } static int mtk_pcie_phy_mt7628_init(device_t dev) { struct mtk_pci_softc *sc = device_get_softc(dev); /* Set PCIe reset to normal mode */ mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7628_PERST_GPIO_MODE, MT7628_PERST); /* Start the PHY */ if (mtk_pcie_phy_start(dev)) return (ENXIO); /* Give it a chance to sink in */ DELAY(100000); /* Setup the PHY */ mtk_pcie_phy_mt7628_setup(sc, 0x9000); /* Deassert PCIe device reset */ MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0); /* Set number of slots supported */ sc->num_slots = 1; return (0); } static int mtk_pcie_phy_mt7620_wait_busy(struct mtk_pci_softc *sc) { uint32_t reg_value, retry; reg_value = retry = 0; while (retry++ < MT7620_MAX_RETRIES) { reg_value = MT_READ32(sc, MT7620_PCIE_PHY_CFG); if (reg_value & PHY_BUSY) DELAY(100000); else break; } if (retry >= MT7620_MAX_RETRIES) return (ENXIO); return (0); } static int mtk_pcie_phy_mt7620_set(struct mtk_pci_softc *sc, uint32_t reg, uint32_t val) { uint32_t reg_val; if (mtk_pcie_phy_mt7620_wait_busy(sc)) return (ENXIO); reg_val = PHY_MODE_WRITE | ((reg & 0xff) << PHY_ADDR_OFFSET) | (val & 0xff); MT_WRITE32(sc, MT7620_PCIE_PHY_CFG, reg_val); DELAY(1000); if 
(mtk_pcie_phy_mt7620_wait_busy(sc)) return (ENXIO); return (0); } static int mtk_pcie_phy_mt7620_init(device_t dev) { struct mtk_pci_softc *sc = device_get_softc(dev); /* * The below sets the PCIe PHY to bypass the PCIe DLL and enables * "elastic buffer control", whatever that may be... */ if (mtk_pcie_phy_mt7620_set(sc, 0x00, 0x80) || mtk_pcie_phy_mt7620_set(sc, 0x01, 0x04) || mtk_pcie_phy_mt7620_set(sc, 0x68, 0x84)) return (ENXIO); /* Stop PCIe */ if (mtk_pcie_phy_stop(dev)) return (ENXIO); /* Restore PPLL to a sane state before going on */ mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVPD, PDRV_SW_SET); /* No PCIe on the MT7620N */ if (!(mtk_sysctl_get(SYSCTL_REVID) & MT7620_PKG_BGA)) { device_printf(dev, "PCIe disabled for MT7620N\n"); mtk_sysctl_clr_set(MT7620_PPLL_CFG0, 0, PPLL_SW_SET); mtk_sysctl_clr_set(MT7620_PPLL_CFG1, 0, PPLL_PD); return (ENXIO); } /* PCIe device reset pin is in normal mode */ mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7620_PERST_GPIO_MODE, MT7620_PERST); /* Enable PCIe now */ if (mtk_pcie_phy_start(dev)) return (ENXIO); /* Give it a chance to sink in */ DELAY(100000); /* If PLL is not locked - bail */ if (!(mtk_sysctl_get(MT7620_PPLL_CFG1) & PPLL_LOCKED)) { device_printf(dev, "no PPLL not lock\n"); mtk_pcie_phy_stop(dev); return (ENXIO); } /* Configure PCIe PLL */ mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVOHZ | LC_CKDRVHZ, LC_CKDRVPD | PDRV_SW_SET); /* and give it a chance to settle */ DELAY(100000); /* Deassert PCIe device reset */ MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0); /* MT7620 supports one PCIe slot */ sc->num_slots = 1; return (0); } static int mtk_pcie_phy_rt3883_init(device_t dev) { struct mtk_pci_softc *sc = device_get_softc(dev); /* Enable PCI host mode and PCIe RC mode */ mtk_sysctl_clr_set(SYSCTL_SYSCFG1, 0, RT3883_PCI_HOST_MODE | RT3883_PCIE_RC_MODE); /* Enable PCIe PHY */ if (mtk_pcie_phy_start(dev)) return (ENXIO); /* Disable PCI, we only support PCIe for now */ mtk_sysctl_clr_set(SYSCTL_RSTCTRL, 0, RT3883_PCI_RST); 
mtk_sysctl_clr_set(SYSCTL_CLKCFG1, RT3883_PCI_CLK, 0); /* Give things a chance to sink in */ DELAY(500000); /* Set PCIe port number to 0 and lift PCIe reset */ MT_WRITE32(sc, MTK_PCI_PCICFG, 0); /* Configure PCI Arbiter */ MT_WRITE32(sc, MTK_PCI_ARBCTL, 0x79); /* We have a single PCIe slot */ sc->num_slots = 1; return (0); } static void mtk_pcie_phy_setup_slots(device_t dev) { struct mtk_pci_softc *sc = device_get_softc(dev); uint32_t bar0_val, val; int i; /* Disable all PCIe interrupts */ MT_WRITE32(sc, MTK_PCI_PCIENA, 0); /* Default bar0_val is 64M, enabled */ bar0_val = 0x03FF0001; /* But we override it to 2G, enabled for some SoCs */ if (sc->socid == MTK_SOC_MT7620A || sc->socid == MTK_SOC_MT7628 || sc->socid == MTK_SOC_MT7688 || sc->socid == MTK_SOC_MT7621) bar0_val = 0x7FFF0001; /* We still don't know which slots have linked up */ sc->pcie_link_status = 0; /* XXX: I am not sure if this delay is really necessary */ DELAY(500000); /* * See which slots have links and mark them. * Set up all slots' BARs and make them look like PCIe bridges. */ for (i = 0; i < sc->num_slots; i++) { /* If slot has link - mark it */ if (MT_READ32(sc, MTK_PCIE_STATUS(i)) & 1) sc->pcie_link_status |= (1< * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct intr_event *hardintr_events[NHARD_IRQS]; static struct intr_event *softintr_events[NSOFT_IRQS]; static mips_intrcnt_t mips_intr_counters[NSOFT_IRQS + NHARD_IRQS]; static int intrcnt_index; static cpu_intr_mask_t hardintr_mask_func; static cpu_intr_unmask_t hardintr_unmask_func; mips_intrcnt_t mips_intrcnt_create(const char* name) { mips_intrcnt_t counter = &intrcnt[intrcnt_index++]; mips_intrcnt_setname(counter, name); return counter; } void mips_intrcnt_setname(mips_intrcnt_t counter, const char *name) { int idx = counter - intrcnt; KASSERT(counter != NULL, ("mips_intrcnt_setname: NULL counter")); snprintf(intrnames + (MAXCOMLEN + 1) * idx, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } static void mips_mask_hard_irq(void *source) { uintptr_t irq = (uintptr_t)source; mips_wr_status(mips_rd_status() & ~(((1 << irq) << 8) << 2)); } static void mips_unmask_hard_irq(void *source) { uintptr_t irq = (uintptr_t)source; mips_wr_status(mips_rd_status() | (((1 << irq) << 8) << 2)); } static void mips_mask_soft_irq(void *source) { uintptr_t irq = (uintptr_t)source; mips_wr_status(mips_rd_status() & ~((1 << irq) << 8)); } static void mips_unmask_soft_irq(void *source) { uintptr_t irq = (uintptr_t)source; mips_wr_status(mips_rd_status() | ((1 << irq) << 8)); } 
/* * Perform initialization of interrupts prior to setting * handlings */ void cpu_init_interrupts() { int i; char name[MAXCOMLEN + 1]; /* * Initialize all available vectors so spare IRQ * would show up in systat output */ for (i = 0; i < NSOFT_IRQS; i++) { snprintf(name, MAXCOMLEN + 1, "sint%d:", i); mips_intr_counters[i] = mips_intrcnt_create(name); } for (i = 0; i < NHARD_IRQS; i++) { snprintf(name, MAXCOMLEN + 1, "int%d:", i); mips_intr_counters[NSOFT_IRQS + i] = mips_intrcnt_create(name); } } void cpu_set_hardintr_mask_func(cpu_intr_mask_t func) { hardintr_mask_func = func; } void cpu_set_hardintr_unmask_func(cpu_intr_unmask_t func) { hardintr_unmask_func = func; } void cpu_establish_hardintr(const char *name, driver_filter_t *filt, void (*handler)(void*), void *arg, int irq, int flags, void **cookiep) { struct intr_event *event; int error; /* * We have 6 levels, but thats 0 - 5 (not including 6) */ if (irq < 0 || irq >= NHARD_IRQS) panic("%s called for unknown hard intr %d", __func__, irq); if (hardintr_mask_func == NULL) hardintr_mask_func = mips_mask_hard_irq; if (hardintr_unmask_func == NULL) hardintr_unmask_func = mips_unmask_hard_irq; event = hardintr_events[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)(uintptr_t)irq, 0, irq, hardintr_mask_func, hardintr_unmask_func, NULL, NULL, "int%d", irq); if (error) return; hardintr_events[irq] = event; mips_unmask_hard_irq((void*)(uintptr_t)irq); } intr_event_add_handler(event, name, filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(mips_intr_counters[NSOFT_IRQS + irq], event->ie_fullname); } void cpu_establish_softintr(const char *name, driver_filter_t *filt, void (*handler)(void*), void *arg, int irq, int flags, void **cookiep) { struct intr_event *event; int error; #if 0 printf("Establish SOFT IRQ %d: filt %p handler %p arg %p\n", irq, filt, handler, arg); #endif if (irq < 0 || irq > NSOFT_IRQS) panic("%s called for unknown hard intr %d", __func__, irq); 
        event = softintr_events[irq];
        if (event == NULL) {
                error = intr_event_create(&event, (void *)(uintptr_t)irq, 0,
                    irq, mips_mask_soft_irq, mips_unmask_soft_irq,
                    NULL, NULL, "sint%d:", irq);
                if (error)
                        return;
                softintr_events[irq] = event;

                mips_unmask_soft_irq((void*)(uintptr_t)irq);
        }

        intr_event_add_handler(event, name, filt, handler, arg,
            intr_priority(flags), flags, cookiep);

        mips_intrcnt_setname(mips_intr_counters[irq], event->ie_fullname);
}

void
cpu_intr(struct trapframe *tf)
{
        struct intr_event *event;
        register_t cause, status;
        int hard, i, intr;

        critical_enter();

        cause = mips_rd_cause();
        status = mips_rd_status();
        intr = (cause & MIPS_INT_MASK) >> 8;
        /*
         * Do not handle masked interrupts. They were masked by
         * pre_ithread function (mips_mask_XXX_intr) and will be
         * unmasked once ithread is through with handler
         */
        intr &= (status & MIPS_INT_MASK) >> 8;
        while ((i = fls(intr)) != 0) {
                intr &= ~(1 << (i - 1));
                switch (i) {
                case 1: case 2:
                        /* Software interrupt. */
                        i--; /* Get a 0-offset interrupt. */
                        hard = 0;
                        event = softintr_events[i];
                        mips_intrcnt_inc(mips_intr_counters[i]);
                        break;
                default:
                        /* Hardware interrupt. */
                        i -= 2; /* Trim software interrupt bits. */
                        i--; /* Get a 0-offset interrupt. */
                        hard = 1;
                        event = hardintr_events[i];
                        mips_intrcnt_inc(mips_intr_counters[NSOFT_IRQS + i]);
                        break;
                }

-               if (!event || TAILQ_EMPTY(&event->ie_handlers)) {
+               if (!event || CK_SLIST_EMPTY(&event->ie_handlers)) {
                        printf("stray %s interrupt %d\n",
                            hard ? "hard" : "soft", i);
                        continue;
                }

                if (intr_event_handle(event, tf) != 0) {
                        printf("stray %s interrupt %d\n", hard ?
                            "hard" : "soft", i);
                }
        }

        KASSERT(i == 0, ("all interrupts handled"));

        critical_exit();

#ifdef HWPMC_HOOKS
        if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
                pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
#endif
}
diff --git a/sys/sparc64/sparc64/intr_machdep.c b/sys/sparc64/sparc64/intr_machdep.c
index a3efdc3b6bd0..45196331af37 100644
--- a/sys/sparc64/sparc64/intr_machdep.c
+++ b/sys/sparc64/sparc64/intr_machdep.c
@@ -1,574 +1,574 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)isa.c 7.2 (Berkeley) 5/13/91
 * form: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define MAX_STRAY_LOG   5

CTASSERT((1 << IV_SHIFT) == sizeof(struct intr_vector));

ih_func_t *intr_handlers[PIL_MAX];
uint16_t pil_countp[PIL_MAX];
static uint16_t pil_stray_count[PIL_MAX];

struct intr_vector intr_vectors[IV_MAX];
uint16_t intr_countp[IV_MAX];
static uint16_t intr_stray_count[IV_MAX];

static const char *const pil_names[] = {
        "stray",
        "low",          /* PIL_LOW */
        "preempt",      /* PIL_PREEMPT */
        "ithrd",        /* PIL_ITHREAD */
        "rndzvs",       /* PIL_RENDEZVOUS */
        "ast",          /* PIL_AST */
        "hardclock",    /* PIL_HARDCLOCK */
        "stray", "stray", "stray", "stray",
        "filter",       /* PIL_FILTER */
        "bridge",       /* PIL_BRIDGE */
        "stop",         /* PIL_STOP */
        "tick",         /* PIL_TICK */
};

/* protect the intr_vectors table */
static struct sx intr_table_lock;
/* protect intrcnt_index */
static struct mtx intrcnt_lock;

#ifdef SMP
static int assign_cpu;

static void intr_assign_next_cpu(struct intr_vector *iv);
static void intr_shuffle_irqs(void *arg __unused);
#endif

static int intr_assign_cpu(void *arg, int cpu);
static void intr_execute_handlers(void *);
static void intr_stray_level(struct trapframe *);
static void intr_stray_vector(void *);
static int intrcnt_setname(const char *, int);
static void intrcnt_updatename(int, const char *, int);

void counter_intr_inc(void);

static void
intrcnt_updatename(int vec, const char *name, int ispil)
{
        static int intrcnt_index, stray_pil_index, stray_vec_index;
        int name_index;

        mtx_lock_spin(&intrcnt_lock);
        if (intrnames[0] == '\0') {
                /* for bitbucket */
                if (bootverbose)
                        printf("initalizing intr_countp\n");
                intrcnt_setname("???", intrcnt_index++);
                stray_vec_index = intrcnt_index++;
                intrcnt_setname("stray", stray_vec_index);
                for (name_index = 0; name_index < IV_MAX; name_index++)
                        intr_countp[name_index] =
                            stray_vec_index;
                stray_pil_index = intrcnt_index++;
                intrcnt_setname("pil", stray_pil_index);
                for (name_index = 0; name_index < PIL_MAX; name_index++)
                        pil_countp[name_index] = stray_pil_index;
        }
        if (name == NULL)
                name = "???";
        if (!ispil && intr_countp[vec] != stray_vec_index)
                name_index = intr_countp[vec];
        else if (ispil && pil_countp[vec] != stray_pil_index)
                name_index = pil_countp[vec];
        else
                name_index = intrcnt_index++;
        if (intrcnt_setname(name, name_index))
                name_index = 0;
        if (!ispil)
                intr_countp[vec] = name_index;
        else
                pil_countp[vec] = name_index;
        mtx_unlock_spin(&intrcnt_lock);
}

static int
intrcnt_setname(const char *name, int index)
{

        if ((MAXCOMLEN + 1) * index >= sintrnames)
                return (E2BIG);
        snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
            MAXCOMLEN, name);
        return (0);
}

void
intr_setup(int pri, ih_func_t *ihf, int vec, iv_func_t *ivf, void *iva)
{
        char pilname[MAXCOMLEN + 1];
        register_t s;

        s = intr_disable();
        if (vec != -1) {
                intr_vectors[vec].iv_func = ivf;
                intr_vectors[vec].iv_arg = iva;
                intr_vectors[vec].iv_pri = pri;
                intr_vectors[vec].iv_vec = vec;
        }
        intr_handlers[pri] = ihf;
        intr_restore(s);
        snprintf(pilname, MAXCOMLEN + 1, "pil%d: %s", pri, pil_names[pri]);
        intrcnt_updatename(pri, pilname, 1);
}

static void
intr_stray_level(struct trapframe *tf)
{
        uint64_t level;

        level = tf->tf_level;
        if (pil_stray_count[level] < MAX_STRAY_LOG) {
                printf("stray level interrupt %ld\n", level);
                pil_stray_count[level]++;
                if (pil_stray_count[level] >= MAX_STRAY_LOG)
                        printf("got %d stray level interrupt %ld's: not "
                            "logging anymore\n", MAX_STRAY_LOG, level);
        }
}

static void
intr_stray_vector(void *cookie)
{
        struct intr_vector *iv;
        u_int vec;

        iv = cookie;
        vec = iv->iv_vec;
        if (intr_stray_count[vec] < MAX_STRAY_LOG) {
                printf("stray vector interrupt %d\n", vec);
                intr_stray_count[vec]++;
                if (intr_stray_count[vec] >= MAX_STRAY_LOG)
                        printf("got %d stray vector interrupt %d's: not "
                            "logging anymore\n", MAX_STRAY_LOG, vec);
        }
}

void
intr_init1()
{
        int i;

        /*
Mark all interrupts as being stray. */
        for (i = 0; i < PIL_MAX; i++)
                intr_handlers[i] = intr_stray_level;
        for (i = 0; i < IV_MAX; i++) {
                intr_vectors[i].iv_func = intr_stray_vector;
                intr_vectors[i].iv_arg = &intr_vectors[i];
                intr_vectors[i].iv_pri = PIL_LOW;
                intr_vectors[i].iv_vec = i;
                intr_vectors[i].iv_refcnt = 0;
        }
        intr_handlers[PIL_LOW] = intr_fast;
}

void
intr_init2()
{

        sx_init(&intr_table_lock, "intr sources");
        mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
}

static int
intr_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
        struct pcpu *pc;
        struct intr_vector *iv;

        /*
         * Don't do anything during early boot. We will pick up the
         * assignment once the APs are started.
         */
        if (assign_cpu && cpu != NOCPU) {
                pc = pcpu_find(cpu);
                if (pc == NULL)
                        return (EINVAL);
                iv = arg;
                sx_xlock(&intr_table_lock);
                iv->iv_mid = pc->pc_mid;
                iv->iv_ic->ic_assign(iv);
                sx_xunlock(&intr_table_lock);
        }
        return (0);
#else
        return (EOPNOTSUPP);
#endif
}

static void
intr_execute_handlers(void *cookie)
{
        struct intr_vector *iv;

        iv = cookie;
        if (__predict_false(intr_event_handle(iv->iv_event, NULL) != 0))
                intr_stray_vector(iv);
}

int
intr_controller_register(int vec, const struct intr_controller *ic,
    void *icarg)
{
        struct intr_event *ie;
        struct intr_vector *iv;
        int error;

        if (vec < 0 || vec >= IV_MAX)
                return (EINVAL);
        sx_xlock(&intr_table_lock);
        iv = &intr_vectors[vec];
        ie = iv->iv_event;
        sx_xunlock(&intr_table_lock);
        if (ie != NULL)
                return (EEXIST);
        error = intr_event_create(&ie, iv, 0, vec, NULL, ic->ic_clear,
            ic->ic_clear, intr_assign_cpu, "vec%d:", vec);
        if (error != 0)
                return (error);
        sx_xlock(&intr_table_lock);
        if (iv->iv_event != NULL) {
                sx_xunlock(&intr_table_lock);
                intr_event_destroy(ie);
                return (EEXIST);
        }
        iv->iv_ic = ic;
        iv->iv_icarg = icarg;
        iv->iv_event = ie;
        iv->iv_mid = PCPU_GET(mid);
        sx_xunlock(&intr_table_lock);
        return (0);
}

int
inthand_add(const char *name, int vec, driver_filter_t *filt,
    driver_intr_t *handler, void *arg, int flags, void **cookiep)
{
        const struct intr_controller
            *ic;
        struct intr_event *ie;
        struct intr_handler *ih;
        struct intr_vector *iv;
        int error, filter;

        if (vec < 0 || vec >= IV_MAX)
                return (EINVAL);
        /*
         * INTR_BRIDGE filters/handlers are special purpose only, allowing
         * them to be shared just would complicate things unnecessarily.
         */
        if ((flags & INTR_BRIDGE) != 0 && (flags & INTR_EXCL) == 0)
                return (EINVAL);
        sx_xlock(&intr_table_lock);
        iv = &intr_vectors[vec];
        ic = iv->iv_ic;
        ie = iv->iv_event;
        sx_xunlock(&intr_table_lock);
        if (ic == NULL || ie == NULL)
                return (EINVAL);
        error = intr_event_add_handler(ie, name, filt, handler, arg,
            intr_priority(flags), flags, cookiep);
        if (error != 0)
                return (error);
        sx_xlock(&intr_table_lock);
        /* Disable the interrupt while we fiddle with it. */
        ic->ic_disable(iv);
        iv->iv_refcnt++;
        if (iv->iv_refcnt == 1)
                intr_setup((flags & INTR_BRIDGE) != 0 ? PIL_BRIDGE :
                    filt != NULL ? PIL_FILTER : PIL_ITHREAD, intr_fast,
                    vec, intr_execute_handlers, iv);
        else if (filt != NULL) {
                /*
                 * Check if we need to upgrade from PIL_ITHREAD to PIL_FILTER.
                 * Given that apart from the on-board SCCs and UARTs shared
                 * interrupts are rather uncommon on sparc64 this should be
                 * pretty rare in practice.
                 */
                filter = 0;
-               TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
+               CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                        if (ih->ih_filter != NULL && ih->ih_filter != filt) {
                                filter = 1;
                                break;
                        }
                }
                if (filter == 0)
                        intr_setup(PIL_FILTER, intr_fast, vec,
                            intr_execute_handlers, iv);
        }
        intr_stray_count[vec] = 0;
        intrcnt_updatename(vec, ie->ie_fullname, 0);
#ifdef SMP
        if (assign_cpu)
                intr_assign_next_cpu(iv);
#endif
        ic->ic_enable(iv);
        /* Ensure the interrupt is cleared, it might have triggered before.
         */
        if (ic->ic_clear != NULL)
                ic->ic_clear(iv);
        sx_xunlock(&intr_table_lock);
        return (0);
}

int
inthand_remove(int vec, void *cookie)
{
        struct intr_vector *iv;
        int error;

        if (vec < 0 || vec >= IV_MAX)
                return (EINVAL);
        error = intr_event_remove_handler(cookie);
        if (error == 0) {
                /*
                 * XXX: maybe this should be done regardless of whether
                 * intr_event_remove_handler() succeeded?
                 */
                sx_xlock(&intr_table_lock);
                iv = &intr_vectors[vec];
                iv->iv_refcnt--;
                if (iv->iv_refcnt == 0) {
                        /*
                         * Don't disable the interrupt for now, so that
                         * stray interrupts get detected...
                         */
                        intr_setup(PIL_LOW, intr_fast, vec,
                            intr_stray_vector, iv);
                }
                sx_xunlock(&intr_table_lock);
        }
        return (error);
}

/* Add a description to an active interrupt handler. */
int
intr_describe(int vec, void *ih, const char *descr)
{
        struct intr_vector *iv;
        int error;

        if (vec < 0 || vec >= IV_MAX)
                return (EINVAL);
        sx_xlock(&intr_table_lock);
        iv = &intr_vectors[vec];
        if (iv == NULL) {
                sx_xunlock(&intr_table_lock);
                return (EINVAL);
        }
        error = intr_event_describe_handler(iv->iv_event, ih, descr);
        if (error) {
                sx_xunlock(&intr_table_lock);
                return (error);
        }
        intrcnt_updatename(vec, iv->iv_event->ie_fullname, 0);
        sx_xunlock(&intr_table_lock);
        return (error);
}

/*
 * Do VM_CNT_INC(intr), being in the interrupt context already. This is
 * called from assembly.
 * To avoid counter_enter() and appropriate assertion, unwrap VM_CNT_INC()
 * and hardcode the actual increment.
 */
void
counter_intr_inc(void)
{

        *(uint64_t *)zpcpu_get(vm_cnt.v_intr) += 1;
}

#ifdef SMP
/*
 * Support for balancing interrupt sources across CPUs. For now we just
 * allocate CPUs round-robin.
 */

static cpuset_t intr_cpus = CPUSET_T_INITIALIZER(0x1);
static int current_cpu;

static void
intr_assign_next_cpu(struct intr_vector *iv)
{
        struct pcpu *pc;

        sx_assert(&intr_table_lock, SA_XLOCKED);

        /*
         * Assign this source to a CPU in a round-robin fashion.
         */
        pc = pcpu_find(current_cpu);
        if (pc == NULL)
                return;
        iv->iv_mid = pc->pc_mid;
        iv->iv_ic->ic_assign(iv);
        do {
                current_cpu++;
                if (current_cpu > mp_maxid)
                        current_cpu = 0;
        } while (!CPU_ISSET(current_cpu, &intr_cpus));
}

/* Attempt to bind the specified IRQ to the specified CPU. */
int
intr_bind(int vec, u_char cpu)
{
        struct intr_vector *iv;
        int error;

        if (vec < 0 || vec >= IV_MAX)
                return (EINVAL);
        sx_xlock(&intr_table_lock);
        iv = &intr_vectors[vec];
        if (iv == NULL) {
                sx_xunlock(&intr_table_lock);
                return (EINVAL);
        }
        error = intr_event_bind(iv->iv_event, cpu);
        sx_xunlock(&intr_table_lock);
        return (error);
}

/*
 * Add a CPU to our mask of valid CPUs that can be destinations of
 * interrupts.
 */
void
intr_add_cpu(u_int cpu)
{

        if (cpu >= MAXCPU)
                panic("%s: Invalid CPU ID", __func__);
        if (bootverbose)
                printf("INTR: Adding CPU %d as a target\n", cpu);

        CPU_SET(cpu, &intr_cpus);
}

/*
 * Distribute all the interrupt sources among the available CPUs once the
 * APs have been launched.
 */
static void
intr_shuffle_irqs(void *arg __unused)
{
        struct pcpu *pc;
        struct intr_vector *iv;
        int i;

        /* Don't bother on UP. */
        if (mp_ncpus == 1)
                return;

        sx_xlock(&intr_table_lock);
        assign_cpu = 1;
        for (i = 0; i < IV_MAX; i++) {
                iv = &intr_vectors[i];
                if (iv != NULL && iv->iv_refcnt > 0) {
                        /*
                         * If this event is already bound to a CPU,
                         * then assign the source to that CPU instead
                         * of picking one via round-robin.
                         */
                        if (iv->iv_event->ie_cpu != NOCPU &&
                            (pc = pcpu_find(iv->iv_event->ie_cpu)) != NULL) {
                                iv->iv_mid = pc->pc_mid;
                                iv->iv_ic->ic_assign(iv);
                        } else
                                intr_assign_next_cpu(iv);
                }
        }
        sx_xunlock(&intr_table_lock);
}
SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
    NULL);
#endif