Index: stable/11/sys/arm/arm/gic.c =================================================================== --- stable/11/sys/arm/arm/gic.c (revision 308381) +++ stable/11/sys/arm/arm/gic.c (revision 308382) @@ -1,1806 +1,1806 @@ /*- * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * Developed by Damjan Marion * * Based on OMAP4 GIC code by Ben Gray * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INTRNG #include #endif #include #include #include #include #include #include #include #include #include #ifdef INTRNG #include "pic_if.h" #include "msi_if.h" #endif #define GIC_DEBUG_SPURIOUS /* We are using GICv2 register naming */ /* Distributor Registers */ #define GICD_CTLR 0x000 /* v1 ICDDCR */ #define GICD_TYPER 0x004 /* v1 ICDICTR */ #define GICD_IIDR 0x008 /* v1 ICDIIDR */ #define GICD_IGROUPR(n) (0x0080 + ((n) * 4)) /* v1 ICDISER */ #define GICD_ISENABLER(n) (0x0100 + ((n) * 4)) /* v1 ICDISER */ #define GICD_ICENABLER(n) (0x0180 + ((n) * 4)) /* v1 ICDICER */ #define GICD_ISPENDR(n) (0x0200 + ((n) * 4)) /* v1 ICDISPR */ #define GICD_ICPENDR(n) (0x0280 + ((n) * 4)) /* v1 ICDICPR */ #define GICD_ICACTIVER(n) (0x0380 + ((n) * 4)) /* v1 ICDABR */ #define GICD_IPRIORITYR(n) (0x0400 + ((n) * 4)) /* v1 ICDIPR */ #define GICD_ITARGETSR(n) (0x0800 + ((n) * 4)) /* v1 ICDIPTR */ #define GICD_ICFGR(n) (0x0C00 + ((n) * 4)) /* v1 ICDICFR */ #define GICD_SGIR(n) (0x0F00 + ((n) * 4)) /* v1 ICDSGIR */ #define GICD_SGI_TARGET_SHIFT 16 /* CPU Registers */ #define GICC_CTLR 0x0000 /* v1 ICCICR */ #define GICC_PMR 0x0004 /* v1 ICCPMR */ #define GICC_BPR 0x0008 /* v1 ICCBPR */ #define GICC_IAR 0x000C /* v1 ICCIAR */ #define GICC_EOIR 0x0010 /* v1 ICCEOIR */ #define GICC_RPR 0x0014 /* v1 ICCRPR */ #define GICC_HPPIR 0x0018 /* v1 ICCHPIR */ #define GICC_ABPR 0x001C /* v1 ICCABPR */ #define GICC_IIDR 0x00FC /* v1 ICCIIDR*/ #define GIC_FIRST_SGI 0 /* Irqs 0-15 are SGIs/IPIs. 
*/ #define GIC_LAST_SGI 15 #define GIC_FIRST_PPI 16 /* Irqs 16-31 are private (per */ #define GIC_LAST_PPI 31 /* core) peripheral interrupts. */ #define GIC_FIRST_SPI 32 /* Irqs 32+ are shared peripherals. */ /* TYPER Registers */ #define GICD_TYPER_SECURITYEXT 0x400 #define GIC_SUPPORT_SECEXT(_sc) \ ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT) /* First bit is a polarity bit (0 - low, 1 - high) */ #define GICD_ICFGR_POL_LOW (0 << 0) #define GICD_ICFGR_POL_HIGH (1 << 0) #define GICD_ICFGR_POL_MASK 0x1 /* Second bit is a trigger bit (0 - level, 1 - edge) */ #define GICD_ICFGR_TRIG_LVL (0 << 1) #define GICD_ICFGR_TRIG_EDGE (1 << 1) #define GICD_ICFGR_TRIG_MASK 0x2 #ifndef GIC_DEFAULT_ICFGR_INIT #define GIC_DEFAULT_ICFGR_INIT 0x00000000 #endif #ifdef INTRNG struct gic_irqsrc { struct intr_irqsrc gi_isrc; uint32_t gi_irq; enum intr_polarity gi_pol; enum intr_trigger gi_trig; #define GI_FLAG_EARLY_EOI (1 << 0) #define GI_FLAG_MSI (1 << 1) /* This interrupt source should only */ /* be used for MSI/MSI-X interrupts */ #define GI_FLAG_MSI_USED (1 << 2) /* This irq is already allocated */ /* for a MSI/MSI-X interrupt */ u_int gi_flags; }; static u_int gic_irq_cpu; static int arm_gic_intr(void *); static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc); #ifdef SMP static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1]; static u_int sgi_first_unused = GIC_FIRST_SGI; #endif #endif #ifdef INTRNG struct arm_gic_range { uint64_t bus; uint64_t host; uint64_t size; }; struct arm_gic_devinfo { struct ofw_bus_devinfo obdinfo; struct resource_list rl; }; #endif struct arm_gic_softc { device_t gic_dev; #ifdef INTRNG void * gic_intrhand; struct gic_irqsrc * gic_irqs; #endif struct resource * gic_res[3]; bus_space_tag_t gic_c_bst; bus_space_tag_t gic_d_bst; bus_space_handle_t gic_c_bsh; bus_space_handle_t gic_d_bsh; uint8_t ver; struct mtx mutex; uint32_t nirqs; uint32_t typer; #ifdef GIC_DEBUG_SPURIOUS uint32_t last_irq[MAXCPU]; #endif #ifdef INTRNG /* FDT child data */ pcell_t addr_cells; pcell_t size_cells; int nranges; struct arm_gic_range * ranges; #endif }; #ifdef INTRNG #define GIC_INTR_ISRC(sc, irq) (&sc->gic_irqs[irq].gi_isrc) #endif static struct resource_spec arm_gic_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */ #ifdef INTRNG { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */ #endif { -1, 0 } }; static u_int arm_gic_map[MAXCPU]; static struct arm_gic_softc *gic_sc = NULL; #define gic_c_read_4(_sc, _reg) \ bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg)) #define gic_c_write_4(_sc, _reg, _val) \ bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val)) #define gic_d_read_4(_sc, _reg) \ bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg)) #define gic_d_write_1(_sc, _reg, _val) \ bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val)) #define gic_d_write_4(_sc, _reg, _val) \ bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val)) #ifndef INTRNG static int gic_config_irq(int irq, enum intr_trigger trig, enum intr_polarity pol); static void gic_post_filter(void *); #endif static struct ofw_compat_data compat_data[] = { {"arm,gic", true}, /* Non-standard, used in FreeBSD dts. 
*/ {"arm,gic-400", true}, {"arm,cortex-a15-gic", true}, {"arm,cortex-a9-gic", true}, {"arm,cortex-a7-gic", true}, {"arm,arm11mp-gic", true}, {"brcm,brahma-b15-gic", true}, {"qcom,msm-qgic2", true}, {NULL, false} }; static int arm_gic_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "ARM Generic Interrupt Controller"); return (BUS_PROBE_DEFAULT); } #ifdef INTRNG static inline void gic_irq_unmask(struct arm_gic_softc *sc, u_int irq) { gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F))); } static inline void gic_irq_mask(struct arm_gic_softc *sc, u_int irq) { gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F))); } #endif static uint8_t gic_cpu_mask(struct arm_gic_softc *sc) { uint32_t mask; int i; /* Read the current cpuid mask by reading ITARGETSR{0..7} */ for (i = 0; i < 8; i++) { mask = gic_d_read_4(sc, GICD_ITARGETSR(i)); if (mask != 0) break; } /* No mask found, assume we are on CPU interface 0 */ if (mask == 0) return (1); /* Collect the mask in the lower byte */ mask |= mask >> 16; mask |= mask >> 8; return (mask); } #ifdef SMP #ifdef INTRNG static void arm_gic_init_secondary(device_t dev) { struct arm_gic_softc *sc = device_get_softc(dev); u_int irq, cpu; /* Set the mask so we can find this CPU to send it IPIs */ cpu = PCPU_GET(cpuid); arm_gic_map[cpu] = gic_cpu_mask(sc); for (irq = 0; irq < sc->nirqs; irq += 4) gic_d_write_4(sc, GICD_IPRIORITYR(irq >> 2), 0); /* Set all the interrupts to be in Group 0 (secure) */ for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) { gic_d_write_4(sc, GICD_IGROUPR(irq >> 5), 0); } /* Enable CPU interface */ gic_c_write_4(sc, GICC_CTLR, 1); /* Set priority mask register. */ gic_c_write_4(sc, GICC_PMR, 0xff); /* Enable interrupt distribution */ gic_d_write_4(sc, GICD_CTLR, 0x01); /* Unmask attached SGI interrupts. */ for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu)) gic_irq_unmask(sc, irq); /* Unmask attached PPI interrupts. */ for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu)) gic_irq_unmask(sc, irq); } #else static void arm_gic_init_secondary(device_t dev) { struct arm_gic_softc *sc = device_get_softc(dev); int i; /* Set the mask so we can find this CPU to send it IPIs */ arm_gic_map[PCPU_GET(cpuid)] = gic_cpu_mask(sc); for (i = 0; i < sc->nirqs; i += 4) gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0); /* Set all the interrupts to be in Group 0 (secure) */ for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) { gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0); } /* Enable CPU interface */ gic_c_write_4(sc, GICC_CTLR, 1); /* Set priority mask register. */ gic_c_write_4(sc, GICC_PMR, 0xff); /* Enable interrupt distribution */ gic_d_write_4(sc, GICD_CTLR, 0x01); /* * Activate the timer interrupts: virtual, secure, and non-secure. 
*/ gic_d_write_4(sc, GICD_ISENABLER(27 >> 5), (1UL << (27 & 0x1F))); gic_d_write_4(sc, GICD_ISENABLER(29 >> 5), (1UL << (29 & 0x1F))); gic_d_write_4(sc, GICD_ISENABLER(30 >> 5), (1UL << (30 & 0x1F))); } #endif /* INTRNG */ #endif /* SMP */ #ifndef INTRNG int gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt, int *trig, int *pol) { static u_int num_intr_cells; static phandle_t self; struct ofw_compat_data *ocd; if (self == 0) { for (ocd = compat_data; ocd->ocd_str != NULL; ocd++) { if (fdt_is_compatible(iparent, ocd->ocd_str)) { self = iparent; break; } } } if (self != iparent) return (ENXIO); if (num_intr_cells == 0) { if (OF_searchencprop(OF_node_from_xref(iparent), "#interrupt-cells", &num_intr_cells, sizeof(num_intr_cells)) == -1) { num_intr_cells = 1; } } if (num_intr_cells == 1) { *interrupt = fdt32_to_cpu(intr[0]); *trig = INTR_TRIGGER_CONFORM; *pol = INTR_POLARITY_CONFORM; } else { if (fdt32_to_cpu(intr[0]) == 0) *interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_SPI; else *interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_PPI; /* * In intr[2], bits[3:0] are trigger type and level flags. * 1 = low-to-high edge triggered * 2 = high-to-low edge triggered * 4 = active high level-sensitive * 8 = active low level-sensitive * The hardware only supports active-high-level or rising-edge * for SPIs */ if (*interrupt >= GIC_FIRST_SPI && fdt32_to_cpu(intr[2]) & 0x0a) { printf("unsupported trigger/polarity configuration " "0x%02x\n", fdt32_to_cpu(intr[2]) & 0x0f); } *pol = INTR_POLARITY_CONFORM; if (fdt32_to_cpu(intr[2]) & 0x03) *trig = INTR_TRIGGER_EDGE; else *trig = INTR_TRIGGER_LEVEL; } return (0); } #endif #ifdef INTRNG static inline intptr_t gic_xref(device_t dev) { #ifdef FDT return (OF_xref_from_node(ofw_bus_get_node(dev))); #else return (0); #endif } static int arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num) { int error; uint32_t irq; struct gic_irqsrc *irqs; struct intr_irqsrc *isrc; const char *name; irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF, M_WAITOK | M_ZERO); name = device_get_nameunit(sc->gic_dev); for (irq = 0; irq < num; irq++) { irqs[irq].gi_irq = irq; irqs[irq].gi_pol = INTR_POLARITY_CONFORM; irqs[irq].gi_trig = INTR_TRIGGER_CONFORM; isrc = &irqs[irq].gi_isrc; if (irq <= GIC_LAST_SGI) { error = intr_isrc_register(isrc, sc->gic_dev, INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI); } else if (irq <= GIC_LAST_PPI) { error = intr_isrc_register(isrc, sc->gic_dev, INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI); } else { error = intr_isrc_register(isrc, sc->gic_dev, 0, "%s,s%u", name, irq - GIC_FIRST_SPI); } if (error != 0) { /* XXX call intr_isrc_deregister() */ free(irqs, M_DEVBUF); return (error); } } sc->gic_irqs = irqs; sc->nirqs = num; return (0); } static int arm_gic_fill_ranges(phandle_t node, struct arm_gic_softc *sc) { pcell_t host_cells; cell_t *base_ranges; ssize_t nbase_ranges; int i, j, k; host_cells = 1; OF_getencprop(OF_parent(node), "#address-cells", &host_cells, sizeof(host_cells)); sc->addr_cells = 2; OF_getencprop(node, "#address-cells", &sc->addr_cells, sizeof(sc->addr_cells)); sc->size_cells = 2; OF_getencprop(node, "#size-cells", &sc->size_cells, sizeof(sc->size_cells)); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges < 0) return (-1); sc->nranges = nbase_ranges / sizeof(cell_t) / (sc->addr_cells + host_cells + sc->size_cells); if (sc->nranges == 0) return (0); sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]), M_DEVBUF, M_WAITOK); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); 
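	/*
	 * Illustrative sketch, assuming hypothetical cell counts: with
	 * #address-cells = <2> and #size-cells = <2> on this node and
	 * #address-cells = <1> on the parent, each "ranges" entry spans
	 * 2 + 1 + 2 = 5 cells, so an entry such as
	 *
	 *	ranges = <0x0 0x1000  0x2c000000  0x0 0x2000>;
	 *
	 * decodes below to bus = 0x1000, host = 0x2c000000 and size = 0x2000;
	 * each 32-bit cell is shifted into the 64-bit accumulator from the
	 * most- to the least-significant word.
	 */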
OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { sc->ranges[i].bus = 0; for (k = 0; k < sc->addr_cells; k++) { sc->ranges[i].bus <<= 32; sc->ranges[i].bus |= base_ranges[j++]; } sc->ranges[i].host = 0; for (k = 0; k < host_cells; k++) { sc->ranges[i].host <<= 32; sc->ranges[i].host |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < sc->size_cells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (sc->nranges); } static bool arm_gic_add_children(device_t dev) { struct arm_gic_softc *sc; struct arm_gic_devinfo *dinfo; phandle_t child, node; device_t cdev; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); /* If we have no children don't probe for them */ child = OF_child(node); if (child == 0) return (false); if (arm_gic_fill_ranges(node, sc) < 0) { device_printf(dev, "Have a child, but no ranges\n"); return (false); } for (; child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->obdinfo, child) != 0) { free(dinfo, M_DEVBUF); continue; } resource_list_init(&dinfo->rl); ofw_bus_reg_to_rl(dev, child, sc->addr_cells, sc->size_cells, &dinfo->rl); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->obdinfo.obd_name); resource_list_free(&dinfo->rl); ofw_bus_gen_destroy_devinfo(&dinfo->obdinfo); free(dinfo, M_DEVBUF); continue; } device_set_ivars(cdev, dinfo); } return (true); } static void arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count) { struct arm_gic_softc *sc; int i; sc = device_get_softc(dev); KASSERT((start + count) < sc->nirqs, ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__, start, count, sc->nirqs)); for (i = 0; i < count; i++) { KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0, ("%s: MSI interrupt %d already has a handler", __func__, count + i)); KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM, ("%s: MSI interrupt %d already has a polarity", __func__, count + i)); KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM, ("%s: MSI interrupt %d already has a trigger", __func__, count + i)); sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH; sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE; sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI; } } #endif static int arm_gic_attach(device_t dev) { struct arm_gic_softc *sc; int i; uint32_t icciidr, mask, nirqs; #ifdef INTRNG phandle_t pxref; intptr_t xref = gic_xref(dev); #endif if (gic_sc) return (ENXIO); sc = device_get_softc(dev); if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->gic_dev = dev; gic_sc = sc; /* Initialize mutex */ mtx_init(&sc->mutex, "GIC lock", "", MTX_SPIN); /* Distributor Interface */ sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]); sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]); /* CPU Interface */ sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]); sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]); /* Disable interrupt forwarding to the CPU interface */ gic_d_write_4(sc, GICD_CTLR, 0x00); /* Get the number of interrupts */ sc->typer = gic_d_read_4(sc, GICD_TYPER); nirqs = 32 * ((sc->typer & 0x1f) + 1); #ifdef INTRNG if (arm_gic_register_isrcs(sc, nirqs)) { device_printf(dev, "could not register irqs\n"); goto cleanup; } #else sc->nirqs = nirqs; /* Set up function pointers */ 
arm_post_filter = gic_post_filter; arm_config_irq = gic_config_irq; #endif icciidr = gic_c_read_4(sc, GICC_IIDR); device_printf(dev,"pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n", icciidr>>20, (icciidr>>16) & 0xF, (icciidr>>12) & 0xf, (icciidr & 0xfff), sc->nirqs); /* Set all global interrupts to be level triggered, active low. */ for (i = 32; i < sc->nirqs; i += 16) { gic_d_write_4(sc, GICD_ICFGR(i >> 4), GIC_DEFAULT_ICFGR_INIT); } /* Disable all interrupts. */ for (i = 32; i < sc->nirqs; i += 32) { gic_d_write_4(sc, GICD_ICENABLER(i >> 5), 0xFFFFFFFF); } /* Find the current cpu mask */ mask = gic_cpu_mask(sc); /* Set the mask so we can find this CPU to send it IPIs */ arm_gic_map[PCPU_GET(cpuid)] = mask; /* Set all four targets to this cpu */ mask |= mask << 8; mask |= mask << 16; for (i = 0; i < sc->nirqs; i += 4) { gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0); if (i > 32) { gic_d_write_4(sc, GICD_ITARGETSR(i >> 2), mask); } } /* Set all the interrupts to be in Group 0 (secure) */ for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) { gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0); } /* Enable CPU interface */ gic_c_write_4(sc, GICC_CTLR, 1); /* Set priority mask register. */ gic_c_write_4(sc, GICC_PMR, 0xff); /* Enable interrupt distribution */ gic_d_write_4(sc, GICD_CTLR, 0x01); #ifndef INTRNG return (0); #else /* * Now, when everything is initialized, it's right time to * register interrupt controller to interrupt framefork. */ if (intr_pic_register(dev, xref) == NULL) { device_printf(dev, "could not register PIC\n"); goto cleanup; } /* * Controller is root if: * - doesn't have interrupt parent * - his interrupt parent is this controller */ pxref = ofw_bus_find_iparent(ofw_bus_get_node(dev)); if (pxref == 0 || xref == pxref) { if (intr_pic_claim_root(dev, xref, arm_gic_intr, sc, GIC_LAST_SGI - GIC_FIRST_SGI + 1) != 0) { device_printf(dev, "could not set PIC as a root\n"); intr_pic_deregister(dev, xref); goto cleanup; } } else { if (sc->gic_res[2] == NULL) { device_printf(dev, "not root PIC must have defined interrupt\n"); intr_pic_deregister(dev, xref); goto cleanup; } if (bus_setup_intr(dev, sc->gic_res[2], INTR_TYPE_CLK, arm_gic_intr, NULL, sc, &sc->gic_intrhand)) { device_printf(dev, "could not setup irq handler\n"); intr_pic_deregister(dev, xref); goto cleanup; } } OF_device_register_xref(xref, dev); /* If we have children probe and attach them */ if (arm_gic_add_children(dev)) { bus_generic_probe(dev); return (bus_generic_attach(dev)); } return (0); cleanup: /* * XXX - not implemented arm_gic_detach() should be called ! */ if (sc->gic_irqs != NULL) free(sc->gic_irqs, M_DEVBUF); bus_release_resources(dev, arm_gic_spec, sc->gic_res); return(ENXIO); #endif } #ifdef INTRNG static struct resource * arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct arm_gic_softc *sc; struct arm_gic_devinfo *di; struct resource_list_entry *rle; int j; KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type)); sc = device_get_softc(bus); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. 
*/ if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; rle = resource_list_find(&di->rl, type, *rid); if (rle == NULL) { if (bootverbose) device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; end = rle->end; count = rle->count; } /* Remap through ranges property */ for (j = 0; j < sc->nranges; j++) { if (start >= sc->ranges[j].bus && end < sc->ranges[j].bus + sc->ranges[j].size) { start -= sc->ranges[j].bus; start += sc->ranges[j].host; end -= sc->ranges[j].bus; end += sc->ranges[j].host; break; } } if (j == sc->nranges && sc->nranges != 0) { if (bootverbose) device_printf(bus, "Could not map resource " "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end); return (NULL); } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static const struct ofw_bus_devinfo * arm_gic_ofw_get_devinfo(device_t bus __unused, device_t child) { struct arm_gic_devinfo *di; di = device_get_ivars(child); return (&di->obdinfo); } static int arm_gic_intr(void *arg) { struct arm_gic_softc *sc = arg; struct gic_irqsrc *gi; uint32_t irq_active_reg, irq; struct trapframe *tf; irq_active_reg = gic_c_read_4(sc, GICC_IAR); irq = irq_active_reg & 0x3FF; /* * 1. We do EOI here because recent read value from active interrupt * register must be used for it. Another approach is to save this * value into associated interrupt source. * 2. EOI must be done on same CPU where interrupt has fired. Thus * we must ensure that interrupted thread does not migrate to * another CPU. * 3. EOI cannot be delayed by any preemption which could happen on * critical_exit() used in MI intr code, when interrupt thread is * scheduled. See next point. * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during * an action and any use of critical_exit() could break this * assumption. See comments within smp_rendezvous_action(). * 5. We always return FILTER_HANDLED as this is an interrupt * controller dispatch function. Otherwise, in cascaded interrupt * case, the whole interrupt subtree would be masked. */ if (irq >= sc->nirqs) { #ifdef GIC_DEBUG_SPURIOUS device_printf(sc->gic_dev, "Spurious interrupt detected: last irq: %d on CPU%d\n", sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid)); #endif return (FILTER_HANDLED); } tf = curthread->td_intr_frame; dispatch_irq: gi = sc->gic_irqs + irq; /* * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement * as compiler complains that comparing u_int >= 0 is always true. */ if (irq <= GIC_LAST_SGI) { #ifdef SMP /* Call EOI for all IPI before dispatch. 
*/ gic_c_write_4(sc, GICC_EOIR, irq_active_reg); intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf); goto next_irq; #else device_printf(sc->gic_dev, "SGI %u on UP system detected\n", irq - GIC_FIRST_SGI); gic_c_write_4(sc, GICC_EOIR, irq_active_reg); goto next_irq; #endif } #ifdef GIC_DEBUG_SPURIOUS sc->last_irq[PCPU_GET(cpuid)] = irq; #endif if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI) gic_c_write_4(sc, GICC_EOIR, irq_active_reg); if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) { gic_irq_mask(sc, irq); if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI) gic_c_write_4(sc, GICC_EOIR, irq_active_reg); device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq); } next_irq: arm_irq_memory_barrier(irq); irq_active_reg = gic_c_read_4(sc, GICC_IAR); irq = irq_active_reg & 0x3FF; if (irq < sc->nirqs) goto dispatch_irq; return (FILTER_HANDLED); } static void gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig, enum intr_polarity pol) { uint32_t reg; uint32_t mask; if (irq < GIC_FIRST_SPI) return; mtx_lock_spin(&sc->mutex); reg = gic_d_read_4(sc, GICD_ICFGR(irq >> 4)); mask = (reg >> 2*(irq % 16)) & 0x3; if (pol == INTR_POLARITY_LOW) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_LOW; } else if (pol == INTR_POLARITY_HIGH) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_HIGH; } if (trig == INTR_TRIGGER_LEVEL) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_LVL; } else if (trig == INTR_TRIGGER_EDGE) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_EDGE; } /* Set mask */ reg = reg & ~(0x3 << 2*(irq % 16)); reg = reg | (mask << 2*(irq % 16)); gic_d_write_4(sc, GICD_ICFGR(irq >> 4), reg); mtx_unlock_spin(&sc->mutex); } static int gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus) { uint32_t cpu, end, mask; end = min(mp_ncpus, 8); for (cpu = end; cpu < MAXCPU; cpu++) if (CPU_ISSET(cpu, cpus)) return (EINVAL); for (mask = 0, cpu = 0; cpu < end; cpu++) if (CPU_ISSET(cpu, cpus)) mask |= arm_gic_map[cpu]; gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask); return (0); } #ifdef FDT static int gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { if (ncells == 1) { *irqp = cells[0]; *polp = INTR_POLARITY_CONFORM; *trigp = INTR_TRIGGER_CONFORM; return (0); } if (ncells == 3) { u_int irq, tripol; /* * The 1st cell is the interrupt type: * 0 = SPI * 1 = PPI * The 2nd cell contains the interrupt number: * [0 - 987] for SPI * [0 - 15] for PPI * The 3rd cell is the flags, encoded as follows: * bits[3:0] trigger type and level flags * 1 = low-to-high edge triggered * 2 = high-to-low edge triggered * 4 = active high level-sensitive * 8 = active low level-sensitive * bits[15:8] PPI interrupt cpu mask * Each bit corresponds to each of the 8 possible cpus * attached to the GIC. A bit set to '1' indicated * the interrupt is wired to that CPU. */ switch (cells[0]) { case 0: irq = GIC_FIRST_SPI + cells[1]; /* SPI irq is checked later. */ break; case 1: irq = GIC_FIRST_PPI + cells[1]; if (irq > GIC_LAST_PPI) { device_printf(dev, "unsupported PPI interrupt " "number %u\n", cells[1]); return (EINVAL); } break; default: device_printf(dev, "unsupported interrupt type " "configuration %u\n", cells[0]); return (EINVAL); } tripol = cells[2] & 0xff; if (tripol & 0xf0 || (tripol & 0x0a && cells[0] == 0)) device_printf(dev, "unsupported trigger/polarity " "configuration 0x%02x\n", tripol); *irqp = irq; *polp = INTR_POLARITY_CONFORM; *trigp = tripol & 0x03 ? 
INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL; return (0); } return (EINVAL); } #endif static int gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { u_int irq; enum intr_polarity pol; enum intr_trigger trig; struct arm_gic_softc *sc; #ifdef FDT struct intr_map_data_fdt *daf; #endif sc = device_get_softc(dev); switch (data->type) { #ifdef FDT case INTR_MAP_DATA_FDT: daf = (struct intr_map_data_fdt *)data; if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol, &trig) != 0) return (EINVAL); KASSERT(irq >= sc->nirqs || (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0, ("%s: Attempting to map a MSI interrupt from FDT", __func__)); break; #endif default: return (ENOTSUP); } if (irq >= sc->nirqs) return (EINVAL); if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW && pol != INTR_POLARITY_HIGH) return (EINVAL); if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE && trig != INTR_TRIGGER_LEVEL) return (EINVAL); *irqp = irq; if (polp != NULL) *polp = pol; if (trigp != NULL) *trigp = trig; return (0); } static int arm_gic_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { int error; u_int irq; struct arm_gic_softc *sc; error = gic_map_intr(dev, data, &irq, NULL, NULL); if (error == 0) { sc = device_get_softc(dev); *isrcp = GIC_INTR_ISRC(sc, irq); } return (error); } static int arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; enum intr_trigger trig; enum intr_polarity pol; if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) { pol = gi->gi_pol; trig = gi->gi_trig; KASSERT(pol == INTR_POLARITY_HIGH, ("%s: MSI interrupts must be active-high", __func__)); KASSERT(trig == INTR_TRIGGER_EDGE, ("%s: MSI interrupts must be edge triggered", __func__)); } else if (data != NULL) { u_int irq; /* Get config for resource. */ if (gic_map_intr(dev, data, &irq, &pol, &trig) || gi->gi_irq != irq) return (EINVAL); } else { pol = INTR_POLARITY_CONFORM; trig = INTR_TRIGGER_CONFORM; } /* Compare config if this is not first setup. */ if (isrc->isrc_handlers != 0) { if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) || (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig)) return (EINVAL); else return (0); } /* For MSI/MSI-X we should have already configured these */ if ((gi->gi_flags & GI_FLAG_MSI) == 0) { if (pol == INTR_POLARITY_CONFORM) pol = INTR_POLARITY_LOW; /* just pick some */ if (trig == INTR_TRIGGER_CONFORM) trig = INTR_TRIGGER_EDGE; /* just pick some */ gi->gi_pol = pol; gi->gi_trig = trig; /* Edge triggered interrupts need an early EOI sent */ if (gi->gi_pol == INTR_TRIGGER_EDGE) gi->gi_flags |= GI_FLAG_EARLY_EOI; } /* * XXX - In case that per CPU interrupt is going to be enabled in time * when SMP is already started, we need some IPI call which * enables it on others CPUs. Further, it's more complicated as * pic_enable_source() and pic_disable_source() should act on * per CPU basis only. Thus, it should be solved here somehow. 
*/ if (isrc->isrc_flags & INTR_ISRCF_PPI) CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol); arm_gic_bind_intr(dev, isrc); return (0); } static int arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) { gi->gi_pol = INTR_POLARITY_CONFORM; gi->gi_trig = INTR_TRIGGER_CONFORM; } return (0); } static void arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; arm_irq_memory_barrier(gi->gi_irq); gic_irq_unmask(sc, gi->gi_irq); } static void arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; gic_irq_mask(sc, gi->gi_irq); } static void arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; arm_gic_disable_intr(dev, isrc); gic_c_write_4(sc, GICC_EOIR, gi->gi_irq); } static void arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc) { arm_irq_memory_barrier(0); arm_gic_enable_intr(dev, isrc); } static void arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; /* EOI for edge-triggered done earlier. */ if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI) return; arm_irq_memory_barrier(0); gic_c_write_4(sc, GICC_EOIR, gi->gi_irq); } static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; if (gi->gi_irq < GIC_FIRST_SPI) return (EINVAL); if (CPU_EMPTY(&isrc->isrc_cpu)) { gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus); CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu); } return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu)); } #ifdef SMP static void arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus, u_int ipi) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; uint32_t val = 0, i; for (i = 0; i < MAXCPU; i++) if (CPU_ISSET(i, &cpus)) val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT; gic_d_write_4(sc, GICD_SGIR(0), val | gi->gi_irq); } static int arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp) { struct intr_irqsrc *isrc; struct arm_gic_softc *sc = device_get_softc(dev); if (sgi_first_unused > GIC_LAST_SGI) return (ENOSPC); isrc = GIC_INTR_ISRC(sc, sgi_first_unused); sgi_to_ipi[sgi_first_unused++] = ipi; CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); *isrcp = isrc; return (0); } #endif #else static int arm_gic_next_irq(struct arm_gic_softc *sc, int last_irq) { uint32_t active_irq; active_irq = gic_c_read_4(sc, GICC_IAR); /* * Immediately EOIR the SGIs, because doing so requires the other * bits (ie CPU number), not just the IRQ number, and we do not * have this information later. 
*/ if ((active_irq & 0x3ff) <= GIC_LAST_SGI) gic_c_write_4(sc, GICC_EOIR, active_irq); active_irq &= 0x3FF; if (active_irq == 0x3FF) { if (last_irq == -1) device_printf(sc->gic_dev, "Spurious interrupt detected\n"); return -1; } return active_irq; } static int arm_gic_config(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { struct arm_gic_softc *sc = device_get_softc(dev); uint32_t reg; uint32_t mask; /* Function is public-accessible, so validate input arguments */ if ((irq < 0) || (irq >= sc->nirqs)) goto invalid_args; if ((trig != INTR_TRIGGER_EDGE) && (trig != INTR_TRIGGER_LEVEL) && (trig != INTR_TRIGGER_CONFORM)) goto invalid_args; if ((pol != INTR_POLARITY_HIGH) && (pol != INTR_POLARITY_LOW) && (pol != INTR_POLARITY_CONFORM)) goto invalid_args; mtx_lock_spin(&sc->mutex); reg = gic_d_read_4(sc, GICD_ICFGR(irq >> 4)); mask = (reg >> 2*(irq % 16)) & 0x3; if (pol == INTR_POLARITY_LOW) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_LOW; } else if (pol == INTR_POLARITY_HIGH) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_HIGH; } if (trig == INTR_TRIGGER_LEVEL) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_LVL; } else if (trig == INTR_TRIGGER_EDGE) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_EDGE; } /* Set mask */ reg = reg & ~(0x3 << 2*(irq % 16)); reg = reg | (mask << 2*(irq % 16)); gic_d_write_4(sc, GICD_ICFGR(irq >> 4), reg); mtx_unlock_spin(&sc->mutex); return (0); invalid_args: device_printf(dev, "gic_config_irg, invalid parameters\n"); return (EINVAL); } static void arm_gic_mask(device_t dev, int irq) { struct arm_gic_softc *sc = device_get_softc(dev); gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F))); gic_c_write_4(sc, GICC_EOIR, irq); /* XXX - not allowed */ } static void arm_gic_unmask(device_t dev, int irq) { struct arm_gic_softc *sc = device_get_softc(dev); if (irq > GIC_LAST_SGI) arm_irq_memory_barrier(irq); gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F))); } #ifdef SMP static void arm_gic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi) { struct arm_gic_softc *sc = device_get_softc(dev); uint32_t val = 0, i; for (i = 0; i < MAXCPU; i++) if (CPU_ISSET(i, &cpus)) val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT; gic_d_write_4(sc, GICD_SGIR(0), val | ipi); } static int arm_gic_ipi_read(device_t dev, int i) { if (i != -1) { /* * The intr code will automagically give the frame pointer * if the interrupt argument is 0. 
*/ if ((unsigned int)i > 16) return (0); return (i); } return (0x3ff); } static void arm_gic_ipi_clear(device_t dev, int ipi) { /* no-op */ } #endif static void gic_post_filter(void *arg) { struct arm_gic_softc *sc = gic_sc; uintptr_t irq = (uintptr_t) arg; if (irq > GIC_LAST_SGI) arm_irq_memory_barrier(irq); gic_c_write_4(sc, GICC_EOIR, irq); } static int gic_config_irq(int irq, enum intr_trigger trig, enum intr_polarity pol) { return (arm_gic_config(gic_sc->gic_dev, irq, trig, pol)); } void arm_mask_irq(uintptr_t nb) { arm_gic_mask(gic_sc->gic_dev, nb); } void arm_unmask_irq(uintptr_t nb) { arm_gic_unmask(gic_sc->gic_dev, nb); } int arm_get_next_irq(int last_irq) { return (arm_gic_next_irq(gic_sc, last_irq)); } #ifdef SMP void intr_pic_init_secondary(void) { arm_gic_init_secondary(gic_sc->gic_dev); } void pic_ipi_send(cpuset_t cpus, u_int ipi) { arm_gic_ipi_send(gic_sc->gic_dev, cpus, ipi); } int pic_ipi_read(int i) { return (arm_gic_ipi_read(gic_sc->gic_dev, i)); } void pic_ipi_clear(int ipi) { arm_gic_ipi_clear(gic_sc->gic_dev, ipi); } #endif #endif /* INTRNG */ static device_method_t arm_gic_methods[] = { /* Device interface */ DEVMETHOD(device_probe, arm_gic_probe), DEVMETHOD(device_attach, arm_gic_attach), #ifdef INTRNG /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_alloc_resource, arm_gic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource,bus_generic_activate_resource), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, arm_gic_ofw_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, arm_gic_disable_intr), DEVMETHOD(pic_enable_intr, arm_gic_enable_intr), DEVMETHOD(pic_map_intr, arm_gic_map_intr), DEVMETHOD(pic_setup_intr, arm_gic_setup_intr), DEVMETHOD(pic_teardown_intr, arm_gic_teardown_intr), DEVMETHOD(pic_post_filter, arm_gic_post_filter), DEVMETHOD(pic_post_ithread, arm_gic_post_ithread), DEVMETHOD(pic_pre_ithread, arm_gic_pre_ithread), #ifdef SMP DEVMETHOD(pic_bind_intr, arm_gic_bind_intr), DEVMETHOD(pic_init_secondary, arm_gic_init_secondary), DEVMETHOD(pic_ipi_send, arm_gic_ipi_send), DEVMETHOD(pic_ipi_setup, arm_gic_ipi_setup), #endif #endif { 0, 0 } }; static driver_t arm_gic_driver = { "gic", arm_gic_methods, sizeof(struct arm_gic_softc), }; static devclass_t arm_gic_devclass; EARLY_DRIVER_MODULE(gic, simplebus, arm_gic_driver, arm_gic_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(gic, ofwbus, arm_gic_driver, arm_gic_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); #ifdef INTRNG /* * GICv2m support -- the GICv2 MSI/MSI-X controller. 
*/ #define GICV2M_MSI_TYPER 0x008 #define MSI_TYPER_SPI_BASE(x) (((x) >> 16) & 0x3ff) #define MSI_TYPER_SPI_COUNT(x) (((x) >> 0) & 0x3ff) #define GICv2M_MSI_SETSPI_NS 0x040 #define GICV2M_MSI_IIDR 0xFCC struct arm_gicv2m_softc { struct resource *sc_mem; struct mtx sc_mutex; u_int sc_spi_start; u_int sc_spi_end; u_int sc_spi_count; }; static struct ofw_compat_data gicv2m_compat_data[] = { {"arm,gic-v2m-frame", true}, {NULL, false} }; static int arm_gicv2m_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, gicv2m_compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "ARM Generic Interrupt Controller MSI/MSIX"); return (BUS_PROBE_DEFAULT); } static int arm_gicv2m_attach(device_t dev) { struct arm_gicv2m_softc *sc; struct arm_gic_softc *psc; uint32_t typer; int rid; psc = device_get_softc(device_get_parent(dev)); sc = device_get_softc(dev); rid = 0; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { device_printf(dev, "Unable to allocate resources\n"); return (ENXIO); } typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER); sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer); sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer); sc->sc_spi_end = sc->sc_spi_start + sc->sc_spi_count; /* Reserve these interrupts for MSI/MSI-X use */ arm_gic_reserve_msi_range(device_get_parent(dev), sc->sc_spi_start, sc->sc_spi_count); mtx_init(&sc->sc_mutex, "GICv2m lock", "", MTX_DEF); intr_msi_register(dev, gic_xref(dev)); if (bootverbose) device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start, sc->sc_spi_start + sc->sc_spi_count - 1); return (0); } static int arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount, device_t *pic, struct intr_irqsrc **srcs) { struct arm_gic_softc *psc; struct arm_gicv2m_softc *sc; int i, irq, end_irq; bool found; KASSERT(powerof2(count), ("%s: bad count", __func__)); KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__)); psc = device_get_softc(device_get_parent(dev)); sc = device_get_softc(dev); mtx_lock(&sc->sc_mutex); found = false; for (irq = sc->sc_spi_start; irq < sc->sc_spi_end && !found; irq++) { /* Start on an aligned interrupt */ if ((irq & (maxcount - 1)) != 0) continue; /* Assume we found a valid range until shown otherwise */ found = true; /* Check this range is valid */ for (end_irq = irq; end_irq != irq + count - 1; end_irq++) { /* No free interrupts */ if (end_irq == sc->sc_spi_end) { found = false; break; } KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI)!= 0, ("%s: Non-MSI interrupt found", __func__)); /* This is already used */ if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED) { found = false; break; } } } /* Not enough interrupts were found */ if (!found || irq == sc->sc_spi_end) { mtx_unlock(&sc->sc_mutex); return (ENXIO); } for (i = 0; i < count; i++) { /* Mark the interrupt as used */ psc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED; } mtx_unlock(&sc->sc_mutex); for (i = 0; i < count; i++) srcs[i] = (struct intr_irqsrc *)&psc->gic_irqs[irq + i]; *pic = device_get_parent(dev); return (0); } static int arm_gicv2m_release_msi(device_t dev, device_t child, int count, struct intr_irqsrc **isrc) { struct arm_gicv2m_softc *sc; struct gic_irqsrc *gi; int i; sc = device_get_softc(dev); mtx_lock(&sc->sc_mutex); for (i = 0; i < count; i++) { - gi = (struct gic_irqsrc *)isrc; + gi = (struct gic_irqsrc *)isrc[i]; KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED, ("%s: Trying to release an unused MSI-X 
interrupt", __func__)); gi->gi_flags &= ~GI_FLAG_MSI_USED; - mtx_unlock(&sc->sc_mutex); } + mtx_unlock(&sc->sc_mutex); return (0); } static int arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic, struct intr_irqsrc **isrcp) { struct arm_gicv2m_softc *sc; struct arm_gic_softc *psc; int irq; psc = device_get_softc(device_get_parent(dev)); sc = device_get_softc(dev); mtx_lock(&sc->sc_mutex); /* Find an unused interrupt */ for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) { KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0, ("%s: Non-MSI interrupt found", __func__)); if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0) break; } /* No free interrupt was found */ if (irq == sc->sc_spi_end) { mtx_unlock(&sc->sc_mutex); return (ENXIO); } /* Mark the interrupt as used */ psc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED; mtx_unlock(&sc->sc_mutex); *isrcp = (struct intr_irqsrc *)&psc->gic_irqs[irq]; *pic = device_get_parent(dev); return (0); } static int arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc) { struct arm_gicv2m_softc *sc; struct gic_irqsrc *gi; sc = device_get_softc(dev); gi = (struct gic_irqsrc *)isrc; KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED, ("%s: Trying to release an unused MSI-X interrupt", __func__)); mtx_lock(&sc->sc_mutex); gi->gi_flags &= ~GI_FLAG_MSI_USED; mtx_unlock(&sc->sc_mutex); return (0); } static int arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc, uint64_t *addr, uint32_t *data) { struct arm_gicv2m_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; *addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS; *data = gi->gi_irq; return (0); } static device_method_t arm_gicv2m_methods[] = { /* Device interface */ DEVMETHOD(device_probe, arm_gicv2m_probe), DEVMETHOD(device_attach, arm_gicv2m_attach), /* MSI/MSI-X */ DEVMETHOD(msi_alloc_msi, arm_gicv2m_alloc_msi), DEVMETHOD(msi_release_msi, arm_gicv2m_release_msi), DEVMETHOD(msi_alloc_msix, arm_gicv2m_alloc_msix), DEVMETHOD(msi_release_msix, arm_gicv2m_release_msix), DEVMETHOD(msi_map_msi, arm_gicv2m_map_msi), /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods, sizeof(struct arm_gicv2m_softc)); static devclass_t arm_gicv2m_devclass; EARLY_DRIVER_MODULE(gicv2m, gic, arm_gicv2m_driver, arm_gicv2m_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); #endif Index: stable/11/sys/arm/arm/nexus.c =================================================================== --- stable/11/sys/arm/arm/nexus.c (revision 308381) +++ stable/11/sys/arm/arm/nexus.c (revision 308382) @@ -1,444 +1,448 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. 
DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for Arm Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests, DMA requests (which rightfully should be a part of the * ISA code but it's easier to do it here for now), I/O port addresses, * and I/O memory address space. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include "ofw_bus_if.h" #endif static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static int nexus_probe(device_t); static int nexus_attach(device_t); static int nexus_print_child(device_t, device_t); static device_t nexus_add_child(device_t, u_int, const char *, int); static struct resource *nexus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); static bus_space_tag_t nexus_get_bus_tag(device_t, device_t); #ifdef INTRNG #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif #endif static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol); #ifdef INTRNG static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); #endif static int nexus_deactivate_resource(device_t, device_t, int, int, struct resource *); static int nexus_release_resource(device_t, device_t, int, int, struct resource *); static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr); #endif static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), /* Bus interface */ DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_release_resource, nexus_release_resource), 
DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), #ifdef INTRNG DEVMETHOD(bus_describe_intr, nexus_describe_intr), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif #endif #ifdef FDT DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), #endif { 0, 0 } }; static devclass_t nexus_devclass; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; EARLY_DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_EARLY); static int nexus_probe(device_t dev) { device_quiet(dev); /* suppress attach message for neatness */ return (BUS_PROBE_DEFAULT); } static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_probe mem_rman"); /* * First, deal with the children we know about already */ bus_generic_probe(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return (child); } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. * (Exceptions include footbridge.) 
*/ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *rv; struct rman *rm; int needactivate = flags & RF_ACTIVE; flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rm = &mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (0); } } return (rv); } static int nexus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { int error; if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error) return (error); } return (rman_release_resource(res)); } static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { #ifdef FDT return(fdtbus_bs_tag); #else return((void *)1); #endif } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { int ret = ENODEV; #ifdef INTRNG device_printf(dev, "bus_config_intr is obsolete and not supported!\n"); ret = EOPNOTSUPP; #else if (arm_config_irq) ret = (*arm_config_irq)(irq, trig, pol); #endif return (ret); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { #ifndef INTRNG int irq; #endif if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; #ifdef INTRNG return(intr_setup_irq(child, res, filt, intr, arg, flags, cookiep)); #else for (irq = rman_get_start(res); irq <= rman_get_end(res); irq++) { arm_setup_irqhandler(device_get_nameunit(child), filt, intr, arg, irq, flags, cookiep); arm_unmask_irq(irq); } return (0); #endif } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { #ifdef INTRNG return (intr_teardown_irq(child, r, ih)); #else return (arm_remove_irqhandler(rman_get_start(r), ih)); #endif } #ifdef INTRNG static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe_irq(child, irq, cookie, descr)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_bind_irq(child, irq, cpu)); } #endif #endif static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int err; bus_addr_t paddr; bus_size_t psize; bus_space_handle_t vaddr; if ((err = rman_activate_resource(r)) != 0) return (err); /* * If this is a memory resource, map it into the kernel. 
*/ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { paddr = (bus_addr_t)rman_get_start(r); psize = (bus_size_t)rman_get_size(r); #ifdef FDT err = bus_space_map(fdtbus_bs_tag, paddr, psize, 0, &vaddr); if (err != 0) { rman_deactivate_resource(r); return (err); } rman_set_bustag(r, fdtbus_bs_tag); #else vaddr = (bus_space_handle_t)pmap_mapdev((vm_offset_t)paddr, (vm_size_t)psize); if (vaddr == 0) { rman_deactivate_resource(r); return (ENOMEM); } rman_set_bustag(r, (void *)1); #endif rman_set_virtual(r, (void *)vaddr); rman_set_bushandle(r, vaddr); return (0); } else if (type == SYS_RES_IRQ) { #ifdef INTRNG - intr_activate_irq(child, r); + err = intr_activate_irq(child, r); + if (err != 0) { + rman_deactivate_resource(r); + return (err); + } #endif } return (0); } static int nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_size_t psize; bus_space_handle_t vaddr; if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { psize = (bus_size_t)rman_get_size(r); vaddr = rman_get_bushandle(r); if (vaddr != 0) { #ifdef FDT bus_space_unmap(fdtbus_bs_tag, vaddr, psize); #else pmap_unmapdev((vm_offset_t)vaddr, (vm_size_t)psize); #endif rman_set_virtual(r, NULL); rman_set_bushandle(r, 0); } } else if (type == SYS_RES_IRQ) { #ifdef INTRNG intr_deactivate_irq(child, r); #endif } return (rman_deactivate_resource(r)); } #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { #ifndef INTRNG return (intr_fdt_map_irq(iparent, intr, icells)); #else u_int irq; struct intr_map_data_fdt *fdt_data; size_t len; len = sizeof(*fdt_data) + icells * sizeof(pcell_t); fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data( INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO); fdt_data->iparent = iparent; fdt_data->ncells = icells; memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t)); irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data); return (irq); #endif /* INTRNG */ } #endif /* FDT */ Index: stable/11/sys/arm64/arm64/nexus.c =================================================================== --- stable/11/sys/arm64/arm64/nexus.c (revision 308381) +++ stable/11/sys/arm64/arm64/nexus.c (revision 308382) @@ -1,489 +1,493 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. 
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for Arm Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests, DMA requests (which rightfully should be a part of the * ISA code but it's easier to do it here for now), I/O port addresses, * and I/O memory address space. */ #include "opt_acpi.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include "ofw_bus_if.h" #endif #ifdef DEV_ACPI #include #include #endif extern struct bus_space memmap_bus; static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static struct rman irq_rman; static int nexus_attach(device_t); #ifdef FDT static device_probe_t nexus_fdt_probe; static device_attach_t nexus_fdt_attach; #endif #ifdef DEV_ACPI static device_probe_t nexus_acpi_probe; static device_attach_t nexus_acpi_attach; #endif static int nexus_print_child(device_t, device_t); static device_t nexus_add_child(device_t, u_int, const char *, int); static struct resource *nexus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol); static struct resource_list *nexus_get_reslist(device_t, device_t); static int nexus_set_resource(device_t, device_t, int, int, rman_res_t, rman_res_t); static int nexus_deactivate_resource(device_t, device_t, int, int, struct resource *); static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); static bus_space_tag_t nexus_get_bus_tag(device_t, device_t); #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr); #endif static device_method_t nexus_methods[] = { /* Bus interface */ DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_set_resource, nexus_set_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, 
nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif { 0, 0 } }; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_attach mem_rman"); irq_rman.rm_start = 0; irq_rman.rm_end = ~0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0)) panic("nexus_attach irq_rman"); bus_generic_probe(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return (child); } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. * (Exceptions include footbridge.) */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int needactivate = flags & RF_ACTIVE; /* * If this is an allocation of the "default" range for a given * RID, and we know what the resources for this device are * (ie. they aren't maintained by a child bus), then work out * the start/end values. */ if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return(NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return(NULL); start = rle->start; end = rle->end; count = rle->count; } switch (type) { case SYS_RES_IRQ: rm = &irq_rman; break; case SYS_RES_MEMORY: case SYS_RES_IOPORT: rm = &mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); rman_set_bushandle(rv, rman_get_start(rv)); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* TODO: This is wrong, it's needed for ACPI */ device_printf(dev, "bus_config_intr is obsolete and not supported!\n"); return (EOPNOTSUPP); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int error; if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* We depend here on rman_activate_resource() being idempotent. 
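 *
 * [Editorial illustration, not part of this change] A typical consumer
 * reaches this function through the bus_setup_intr(9) path.  A minimal,
 * hypothetical driver sketch (device, softc and handler names are
 * placeholders):
 *
 *	int rid = 0;
 *	struct resource *irq_res;
 *	void *cookie;
 *
 *	irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *	    RF_ACTIVE | RF_SHAREABLE);
 *	if (irq_res == NULL)
 *		return (ENXIO);
 *	error = bus_setup_intr(dev, irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
 *	    NULL, my_intr_handler, sc, &cookie);
 *
 * Because such a resource is usually already activated via RF_ACTIVE,
 * the rman_activate_resource() call below relies on being idempotent.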
*/ error = rman_activate_resource(res); if (error) return (error); error = intr_setup_irq(child, res, filt, intr, arg, flags, cookiep); return (error); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { return (intr_teardown_irq(child, r, ih)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_bind_irq(child, irq, cpu)); } #endif static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { return(&memmap_bus); } static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int err; bus_addr_t paddr; bus_size_t psize; bus_space_handle_t vaddr; if ((err = rman_activate_resource(r)) != 0) return (err); /* * If this is a memory resource, map it into the kernel. */ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { paddr = (bus_addr_t)rman_get_start(r); psize = (bus_size_t)rman_get_size(r); err = bus_space_map(&memmap_bus, paddr, psize, 0, &vaddr); if (err != 0) { rman_deactivate_resource(r); return (err); } rman_set_bustag(r, &memmap_bus); rman_set_virtual(r, (void *)vaddr); rman_set_bushandle(r, vaddr); } else if (type == SYS_RES_IRQ) { - intr_activate_irq(child, r); + err = intr_activate_irq(child, r); + if (err != 0) { + rman_deactivate_resource(r); + return (err); + } } return (0); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int nexus_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; /* XXX this should return a success/failure indicator */ resource_list_add(rl, type, rid, start, start + count - 1, count); return(0); } static int nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_size_t psize; bus_space_handle_t vaddr; if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { psize = (bus_size_t)rman_get_size(r); vaddr = rman_get_bushandle(r); if (vaddr != 0) { bus_space_unmap(&memmap_bus, vaddr, psize); rman_set_virtual(r, NULL); rman_set_bushandle(r, 0); } } else if (type == SYS_RES_IRQ) { intr_deactivate_irq(child, r); } return (rman_deactivate_resource(r)); } #ifdef FDT static device_method_t nexus_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_fdt_probe), DEVMETHOD(device_attach, nexus_fdt_attach), /* OFW interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), }; #define nexus_baseclasses nexus_fdt_baseclasses DEFINE_CLASS_1(nexus, nexus_fdt_driver, nexus_fdt_methods, 1, nexus_driver); #undef nexus_baseclasses static devclass_t nexus_fdt_devclass; EARLY_DRIVER_MODULE(nexus_fdt, root, nexus_fdt_driver, nexus_fdt_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_fdt_probe(device_t dev) { if (OF_peer(0) == 0) return (ENXIO); device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int nexus_fdt_attach(device_t dev) { nexus_add_child(dev, 10, "ofwbus", 0); return (nexus_attach(dev)); } static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { u_int irq; struct intr_map_data_fdt *fdt_data; size_t len; len = sizeof(*fdt_data) + icells * sizeof(pcell_t); fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data( INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO); fdt_data->iparent = iparent; fdt_data->ncells = 
icells; memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t)); irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data); return (irq); } #endif #ifdef DEV_ACPI static device_method_t nexus_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_acpi_probe), DEVMETHOD(device_attach, nexus_acpi_attach), }; #define nexus_baseclasses nexus_acpi_baseclasses DEFINE_CLASS_1(nexus, nexus_acpi_driver, nexus_acpi_methods, 1, nexus_driver); #undef nexus_baseclasses static devclass_t nexus_acpi_devclass; EARLY_DRIVER_MODULE(nexus_acpi, root, nexus_acpi_driver, nexus_acpi_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_acpi_probe(device_t dev) { if (acpi_identify() != 0) return (ENXIO); device_quiet(dev); return (BUS_PROBE_LOW_PRIORITY); } static int nexus_acpi_attach(device_t dev) { nexus_add_child(dev, 10, "acpi", 0); return (nexus_attach(dev)); } #endif Index: stable/11/sys/kern/subr_intr.c =================================================================== --- stable/11/sys/kern/subr_intr.c (revision 308381) +++ stable/11/sys/kern/subr_intr.c (revision 308382) @@ -1,1622 +1,1654 @@ /*- * Copyright (c) 2015-2016 Svatopluk Kraus * Copyright (c) 2015-2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * New-style Interrupt Framework * * TODO: - add support for disconnected PICs. * - to support IPI (PPI) enabling on other CPUs if already started. * - to complete things for removable PICs. */ #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include #include #include #include #include #ifdef DDB #include #endif #include "pic_if.h" #include "msi_if.h" #define INTRNAME_LEN (2*MAXCOMLEN + 1) #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif MALLOC_DECLARE(M_INTRNG); MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling"); /* Main interrupt handler called from assembler -> 'hidden' for C code. */ void intr_irq_handler(struct trapframe *tf); /* Root interrupt controller stuff. 
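 *
 * [Editorial illustration] A root PIC driver registers itself with this
 * framework and then claims the root position; a condensed, hypothetical
 * attach-time sketch (filter, softc and IPI count are placeholders):
 *
 *	if (intr_pic_register(dev, xref) == NULL)
 *		return (ENXIO);
 *	if (intr_pic_claim_root(dev, xref, my_pic_filter, sc,
 *	    MY_IPI_COUNT) != 0)
 *		return (ENXIO);
 *
 * The registered filter is what intr_irq_handler() below invokes for
 * every CPU interrupt taken on the root controller.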
*/ device_t intr_irq_root_dev; static intr_irq_filter_t *irq_root_filter; static void *irq_root_arg; static u_int irq_root_ipicount; struct intr_pic_child { SLIST_ENTRY(intr_pic_child) pc_next; struct intr_pic *pc_pic; intr_child_irq_filter_t *pc_filter; void *pc_filter_arg; uintptr_t pc_start; uintptr_t pc_length; }; /* Interrupt controller definition. */ struct intr_pic { SLIST_ENTRY(intr_pic) pic_next; intptr_t pic_xref; /* hardware identification */ device_t pic_dev; #define FLAG_PIC (1 << 0) #define FLAG_MSI (1 << 1) u_int pic_flags; struct mtx pic_child_lock; SLIST_HEAD(, intr_pic_child) pic_children; }; static struct mtx pic_list_lock; static SLIST_HEAD(, intr_pic) pic_list; static struct intr_pic *pic_lookup(device_t dev, intptr_t xref); /* Interrupt source definition. */ static struct mtx isrc_table_lock; static struct intr_irqsrc *irq_sources[NIRQ]; u_int irq_next_free; #ifdef SMP static boolean_t irq_assign_cpu = FALSE; #endif /* * - 2 counters for each I/O interrupt. * - MAXCPU counters for each IPI counters for SMP. */ #ifdef SMP #define INTRCNT_COUNT (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU) #else #define INTRCNT_COUNT (NIRQ * 2) #endif /* Data for MI statistics reporting. */ u_long intrcnt[INTRCNT_COUNT]; char intrnames[INTRCNT_COUNT * INTRNAME_LEN]; size_t sintrcnt = sizeof(intrcnt); size_t sintrnames = sizeof(intrnames); static u_int intrcnt_index; static struct intr_irqsrc *intr_map_get_isrc(u_int res_id); static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc); +static struct intr_map_data * intr_map_get_map_data(u_int res_id); static void intr_map_copy_map_data(u_int res_id, device_t *dev, intptr_t *xref, struct intr_map_data **data); /* * Interrupt framework initialization routine. */ static void intr_irq_init(void *dummy __unused) { SLIST_INIT(&pic_list); mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF); mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF); } SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL); static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s", INTRNAME_LEN - 1, name); } /* * Update name for interrupt source with interrupt event. */ static void intrcnt_updatename(struct intr_irqsrc *isrc) { /* QQQ: What about stray counter name? */ mtx_assert(&isrc_table_lock, MA_OWNED); intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index); } /* * Virtualization for interrupt source interrupt counter increment. */ static inline void isrc_increment_count(struct intr_irqsrc *isrc) { if (isrc->isrc_flags & INTR_ISRCF_PPI) atomic_add_long(&isrc->isrc_count[0], 1); else isrc->isrc_count[0]++; } /* * Virtualization for interrupt source interrupt stray counter increment. */ static inline void isrc_increment_straycount(struct intr_irqsrc *isrc) { isrc->isrc_count[1]++; } /* * Virtualization for interrupt source interrupt name update. 
*/ static void isrc_update_name(struct intr_irqsrc *isrc, const char *name) { char str[INTRNAME_LEN]; mtx_assert(&isrc_table_lock, MA_OWNED); if (name != NULL) { snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name); intrcnt_setname(str, isrc->isrc_index); snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name, name); intrcnt_setname(str, isrc->isrc_index + 1); } else { snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name); intrcnt_setname(str, isrc->isrc_index); snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name); intrcnt_setname(str, isrc->isrc_index + 1); } } /* * Virtualization for interrupt source interrupt counters setup. */ static void isrc_setup_counters(struct intr_irqsrc *isrc) { u_int index; /* * XXX - it does not work well with removable controllers and * interrupt sources !!! */ index = atomic_fetchadd_int(&intrcnt_index, 2); isrc->isrc_index = index; isrc->isrc_count = &intrcnt[index]; isrc_update_name(isrc, NULL); } /* * Virtualization for interrupt source interrupt counters release. */ static void isrc_release_counters(struct intr_irqsrc *isrc) { panic("%s: not implemented", __func__); } #ifdef SMP /* * Virtualization for interrupt source IPI counters setup. */ u_long * intr_ipi_setup_counters(const char *name) { u_int index, i; char str[INTRNAME_LEN]; index = atomic_fetchadd_int(&intrcnt_index, MAXCPU); for (i = 0; i < MAXCPU; i++) { snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name); intrcnt_setname(str, index + i); } return (&intrcnt[index]); } #endif /* * Main interrupt dispatch handler. It's called straight * from the assembler, where CPU interrupt is served. */ void intr_irq_handler(struct trapframe *tf) { struct trapframe * oldframe; struct thread * td; KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__)); PCPU_INC(cnt.v_intr); critical_enter(); td = curthread; oldframe = td->td_intr_frame; td->td_intr_frame = tf; irq_root_filter(irq_root_arg); td->td_intr_frame = oldframe; critical_exit(); #ifdef HWPMC_HOOKS if (pmc_hook && TRAPF_USERMODE(tf) && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN)) pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf); #endif } int intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq) { struct intr_pic_child *child; bool found; found = false; mtx_lock_spin(&parent->pic_child_lock); SLIST_FOREACH(child, &parent->pic_children, pc_next) { if (child->pc_start <= irq && irq < (child->pc_start + child->pc_length)) { found = true; break; } } mtx_unlock_spin(&parent->pic_child_lock); if (found) return (child->pc_filter(child->pc_filter_arg, irq)); return (FILTER_STRAY); } /* * interrupt controller dispatch function for interrupts. It should * be called straight from the interrupt controller, when associated interrupt * source is learned. */ int intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf) { KASSERT(isrc != NULL, ("%s: no source", __func__)); isrc_increment_count(isrc); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) { int error; error = isrc->isrc_filter(isrc->isrc_arg, tf); PIC_POST_FILTER(isrc->isrc_dev, isrc); if (error == FILTER_HANDLED) return (0); } else #endif if (isrc->isrc_event != NULL) { if (intr_event_handle(isrc->isrc_event, tf) == 0) return (0); } isrc_increment_straycount(isrc); return (EINVAL); } /* * Alloc unique interrupt number (resource handle) for interrupt source. * * There could be various strategies how to allocate free interrupt number * (resource handle) for new interrupt source. * * 1. 
Handles are always allocated forward, so handles are not recycled * immediately. However, if only one free handle left which is reused * constantly... */ static inline int isrc_alloc_irq(struct intr_irqsrc *isrc) { u_int maxirqs, irq; mtx_assert(&isrc_table_lock, MA_OWNED); maxirqs = nitems(irq_sources); if (irq_next_free >= maxirqs) return (ENOSPC); for (irq = irq_next_free; irq < maxirqs; irq++) { if (irq_sources[irq] == NULL) goto found; } for (irq = 0; irq < irq_next_free; irq++) { if (irq_sources[irq] == NULL) goto found; } irq_next_free = maxirqs; return (ENOSPC); found: isrc->isrc_irq = irq; irq_sources[irq] = isrc; irq_next_free = irq + 1; if (irq_next_free >= maxirqs) irq_next_free = 0; return (0); } /* * Free unique interrupt number (resource handle) from interrupt source. */ static inline int isrc_free_irq(struct intr_irqsrc *isrc) { mtx_assert(&isrc_table_lock, MA_OWNED); if (isrc->isrc_irq >= nitems(irq_sources)) return (EINVAL); if (irq_sources[isrc->isrc_irq] != isrc) return (EINVAL); irq_sources[isrc->isrc_irq] = NULL; isrc->isrc_irq = INTR_IRQ_INVALID; /* just to be safe */ return (0); } /* * Initialize interrupt source and register it into global interrupt table. */ int intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags, const char *fmt, ...) { int error; va_list ap; bzero(isrc, sizeof(struct intr_irqsrc)); isrc->isrc_dev = dev; isrc->isrc_irq = INTR_IRQ_INVALID; /* just to be safe */ isrc->isrc_flags = flags; va_start(ap, fmt); vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap); va_end(ap); mtx_lock(&isrc_table_lock); error = isrc_alloc_irq(isrc); if (error != 0) { mtx_unlock(&isrc_table_lock); return (error); } /* * Setup interrupt counters, but not for IPI sources. Those are setup * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust * our counter pool. */ if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) isrc_setup_counters(isrc); mtx_unlock(&isrc_table_lock); return (0); } /* * Deregister interrupt source from global interrupt table. */ int intr_isrc_deregister(struct intr_irqsrc *isrc) { int error; mtx_lock(&isrc_table_lock); if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) isrc_release_counters(isrc); error = isrc_free_irq(isrc); mtx_unlock(&isrc_table_lock); return (error); } #ifdef SMP /* * A support function for a PIC to decide if provided ISRC should be inited * on given cpu. The logic of INTR_ISRCF_BOUND flag and isrc_cpu member of * struct intr_irqsrc is the following: * * If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus * set in isrc_cpu. If not, the ISRC should be inited on every cpu and * isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct. */ bool intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu) { if (isrc->isrc_handlers == 0) return (false); if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0) return (false); if (isrc->isrc_flags & INTR_ISRCF_BOUND) return (CPU_ISSET(cpu, &isrc->isrc_cpu)); CPU_SET(cpu, &isrc->isrc_cpu); return (true); } #endif #ifdef INTR_SOLO /* * Setup filter into interrupt source. */ static int iscr_setup_filter(struct intr_irqsrc *isrc, const char *name, intr_irq_filter_t *filter, void *arg, void **cookiep) { if (filter == NULL) return (EINVAL); mtx_lock(&isrc_table_lock); /* * Make sure that we do not mix the two ways * how we handle interrupt sources. 
*/ if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) { mtx_unlock(&isrc_table_lock); return (EBUSY); } isrc->isrc_filter = filter; isrc->isrc_arg = arg; isrc_update_name(isrc, name); mtx_unlock(&isrc_table_lock); *cookiep = isrc; return (0); } #endif /* * Interrupt source pre_ithread method for MI interrupt framework. */ static void intr_isrc_pre_ithread(void *arg) { struct intr_irqsrc *isrc = arg; PIC_PRE_ITHREAD(isrc->isrc_dev, isrc); } /* * Interrupt source post_ithread method for MI interrupt framework. */ static void intr_isrc_post_ithread(void *arg) { struct intr_irqsrc *isrc = arg; PIC_POST_ITHREAD(isrc->isrc_dev, isrc); } /* * Interrupt source post_filter method for MI interrupt framework. */ static void intr_isrc_post_filter(void *arg) { struct intr_irqsrc *isrc = arg; PIC_POST_FILTER(isrc->isrc_dev, isrc); } /* * Interrupt source assign_cpu method for MI interrupt framework. */ static int intr_isrc_assign_cpu(void *arg, int cpu) { #ifdef SMP struct intr_irqsrc *isrc = arg; int error; if (isrc->isrc_dev != intr_irq_root_dev) return (EINVAL); mtx_lock(&isrc_table_lock); if (cpu == NOCPU) { CPU_ZERO(&isrc->isrc_cpu); isrc->isrc_flags &= ~INTR_ISRCF_BOUND; } else { CPU_SETOF(cpu, &isrc->isrc_cpu); isrc->isrc_flags |= INTR_ISRCF_BOUND; } /* * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or * re-balance it to another CPU or enable it on more CPUs. However, * PIC is expected to change isrc_cpu appropriately to keep us well * informed if the call is successful. */ if (irq_assign_cpu) { error = PIC_BIND_INTR(isrc->isrc_dev, isrc); if (error) { CPU_ZERO(&isrc->isrc_cpu); mtx_unlock(&isrc_table_lock); return (error); } } mtx_unlock(&isrc_table_lock); return (0); #else return (EOPNOTSUPP); #endif } /* * Create interrupt event for interrupt source. */ static int isrc_event_create(struct intr_irqsrc *isrc) { struct intr_event *ie; int error; error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq, intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter, intr_isrc_assign_cpu, "%s:", isrc->isrc_name); if (error) return (error); mtx_lock(&isrc_table_lock); /* * Make sure that we do not mix the two ways * how we handle interrupt sources. Let contested event wins. */ #ifdef INTR_SOLO if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) { #else if (isrc->isrc_event != NULL) { #endif mtx_unlock(&isrc_table_lock); intr_event_destroy(ie); return (isrc->isrc_event != NULL ? EBUSY : 0); } isrc->isrc_event = ie; mtx_unlock(&isrc_table_lock); return (0); } #ifdef notyet /* * Destroy interrupt event for interrupt source. */ static void isrc_event_destroy(struct intr_irqsrc *isrc) { struct intr_event *ie; mtx_lock(&isrc_table_lock); ie = isrc->isrc_event; isrc->isrc_event = NULL; mtx_unlock(&isrc_table_lock); if (ie != NULL) intr_event_destroy(ie); } #endif /* * Add handler to interrupt source. */ static int isrc_add_handler(struct intr_irqsrc *isrc, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { int error; if (isrc->isrc_event == NULL) { error = isrc_event_create(isrc); if (error) return (error); } error = intr_event_add_handler(isrc->isrc_event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (error == 0) { mtx_lock(&isrc_table_lock); intrcnt_updatename(isrc); mtx_unlock(&isrc_table_lock); } return (error); } /* * Lookup interrupt controller locked. 
*/ static inline struct intr_pic * pic_lookup_locked(device_t dev, intptr_t xref) { struct intr_pic *pic; mtx_assert(&pic_list_lock, MA_OWNED); if (dev == NULL && xref == 0) return (NULL); /* Note that pic->pic_dev is never NULL on registered PIC. */ SLIST_FOREACH(pic, &pic_list, pic_next) { if (dev == NULL) { if (xref == pic->pic_xref) return (pic); } else if (xref == 0 || pic->pic_xref == 0) { if (dev == pic->pic_dev) return (pic); } else if (xref == pic->pic_xref && dev == pic->pic_dev) return (pic); } return (NULL); } /* * Lookup interrupt controller. */ static struct intr_pic * pic_lookup(device_t dev, intptr_t xref) { struct intr_pic *pic; mtx_lock(&pic_list_lock); pic = pic_lookup_locked(dev, xref); mtx_unlock(&pic_list_lock); return (pic); } /* * Create interrupt controller. */ static struct intr_pic * pic_create(device_t dev, intptr_t xref) { struct intr_pic *pic; mtx_lock(&pic_list_lock); pic = pic_lookup_locked(dev, xref); if (pic != NULL) { mtx_unlock(&pic_list_lock); return (pic); } pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO); if (pic == NULL) { mtx_unlock(&pic_list_lock); return (NULL); } pic->pic_xref = xref; pic->pic_dev = dev; mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN); SLIST_INSERT_HEAD(&pic_list, pic, pic_next); mtx_unlock(&pic_list_lock); return (pic); } #ifdef notyet /* * Destroy interrupt controller. */ static void pic_destroy(device_t dev, intptr_t xref) { struct intr_pic *pic; mtx_lock(&pic_list_lock); pic = pic_lookup_locked(dev, xref); if (pic == NULL) { mtx_unlock(&pic_list_lock); return; } SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next); mtx_unlock(&pic_list_lock); free(pic, M_INTRNG); } #endif /* * Register interrupt controller. */ struct intr_pic * intr_pic_register(device_t dev, intptr_t xref) { struct intr_pic *pic; if (dev == NULL) return (NULL); pic = pic_create(dev, xref); if (pic == NULL) return (NULL); pic->pic_flags |= FLAG_PIC; debugf("PIC %p registered for %s \n", pic, device_get_nameunit(dev), dev, xref); return (pic); } /* * Unregister interrupt controller. */ int intr_pic_deregister(device_t dev, intptr_t xref) { panic("%s: not implemented", __func__); } /* * Mark interrupt controller (itself) as a root one. * * Note that only an interrupt controller can really know its position * in interrupt controller's tree. So root PIC must claim itself as a root. * * In FDT case, according to ePAPR approved version 1.1 from 08 April 2011, * page 30: * "The root of the interrupt tree is determined when traversal * of the interrupt tree reaches an interrupt controller node without * an interrupts property and thus no explicit interrupt parent." */ int intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter, void *arg, u_int ipicount) { struct intr_pic *pic; pic = pic_lookup(dev, xref); if (pic == NULL) { device_printf(dev, "not registered\n"); return (EINVAL); } KASSERT((pic->pic_flags & FLAG_PIC) != 0, ("%s: Found a non-PIC controller: %s", __func__, device_get_name(pic->pic_dev))); if (filter == NULL) { device_printf(dev, "filter missing\n"); return (EINVAL); } /* * Only one interrupt controllers could be on the root for now. * Note that we further suppose that there is not threaded interrupt * routine (handler) on the root. See intr_irq_handler(). 
*/ if (intr_irq_root_dev != NULL) { device_printf(dev, "another root already set\n"); return (EBUSY); } intr_irq_root_dev = dev; irq_root_filter = filter; irq_root_arg = arg; irq_root_ipicount = ipicount; debugf("irq root set to %s\n", device_get_nameunit(dev)); return (0); } /* * Add a handler to manage a sub range of a parents interrupts. */ struct intr_pic * intr_pic_add_handler(device_t parent, struct intr_pic *pic, intr_child_irq_filter_t *filter, void *arg, uintptr_t start, uintptr_t length) { struct intr_pic *parent_pic; struct intr_pic_child *newchild; #ifdef INVARIANTS struct intr_pic_child *child; #endif parent_pic = pic_lookup(parent, 0); if (parent_pic == NULL) return (NULL); newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO); newchild->pc_pic = pic; newchild->pc_filter = filter; newchild->pc_filter_arg = arg; newchild->pc_start = start; newchild->pc_length = length; mtx_lock_spin(&parent_pic->pic_child_lock); #ifdef INVARIANTS SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) { KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice", __func__)); } #endif SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next); mtx_unlock_spin(&parent_pic->pic_child_lock); return (pic); } static int intr_resolve_irq(device_t dev, intptr_t xref, struct intr_map_data *data, struct intr_irqsrc **isrc) { struct intr_pic *pic; struct intr_map_data_msi *msi; if (data == NULL) return (EINVAL); pic = pic_lookup(dev, xref); if (pic == NULL) return (ESRCH); switch (data->type) { case INTR_MAP_DATA_MSI: KASSERT((pic->pic_flags & FLAG_MSI) != 0, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); msi = (struct intr_map_data_msi *)data; *isrc = msi->isrc; return (0); default: KASSERT((pic->pic_flags & FLAG_PIC) != 0, ("%s: Found a non-PIC controller: %s", __func__, device_get_name(pic->pic_dev))); return (PIC_MAP_INTR(pic->pic_dev, data, isrc)); } } int intr_activate_irq(device_t dev, struct resource *res) { device_t map_dev; intptr_t map_xref; struct intr_map_data *data; struct intr_irqsrc *isrc; u_int res_id; int error; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); if (intr_map_get_isrc(res_id) != NULL) panic("Attempt to double activation of resource id: %u\n", res_id); intr_map_copy_map_data(res_id, &map_dev, &map_xref, &data); error = intr_resolve_irq(map_dev, map_xref, data, &isrc); if (error != 0) { free(data, M_INTRNG); /* XXX TODO DISCONECTED PICs */ /* if (error == EINVAL) return(0); */ return (error); } intr_map_set_isrc(res_id, isrc); rman_set_virtual(res, data); return (PIC_ACTIVATE_INTR(isrc->isrc_dev, isrc, res, data)); } int intr_deactivate_irq(device_t dev, struct resource *res) { struct intr_map_data *data; struct intr_irqsrc *isrc; u_int res_id; int error; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL) panic("Attempt to deactivate non-active resource id: %u\n", res_id); data = rman_get_virtual(res); error = PIC_DEACTIVATE_INTR(isrc->isrc_dev, isrc, res, data); intr_map_set_isrc(res_id, NULL); rman_set_virtual(res, NULL); free(data, M_INTRNG); return (error); } int intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt, driver_intr_t hand, void *arg, int flags, void **cookiep) { int error; struct intr_map_data *data; struct intr_irqsrc *isrc; const char *name; u_int res_id; 
KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL) { /* XXX TODO DISCONECTED PICs */ return (EINVAL); } data = rman_get_virtual(res); name = device_get_nameunit(dev); #ifdef INTR_SOLO /* * Standard handling is done through MI interrupt framework. However, * some interrupts could request solely own special handling. This * non standard handling can be used for interrupt controllers without * handler (filter only), so in case that interrupt controllers are * chained, MI interrupt framework is called only in leaf controller. * * Note that root interrupt controller routine is served as well, * however in intr_irq_handler(), i.e. main system dispatch routine. */ if (flags & INTR_SOLO && hand != NULL) { debugf("irq %u cannot solo on %s\n", irq, name); return (EINVAL); } if (flags & INTR_SOLO) { error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt, arg, cookiep); debugf("irq %u setup filter error %d on %s\n", irq, error, name); } else #endif { error = isrc_add_handler(isrc, name, filt, hand, arg, flags, cookiep); debugf("irq %u add handler error %d on %s\n", irq, error, name); } if (error != 0) return (error); mtx_lock(&isrc_table_lock); error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data); if (error == 0) { isrc->isrc_handlers++; if (isrc->isrc_handlers == 1) PIC_ENABLE_INTR(isrc->isrc_dev, isrc); } mtx_unlock(&isrc_table_lock); if (error != 0) intr_event_remove_handler(*cookiep); return (error); } int intr_teardown_irq(device_t dev, struct resource *res, void *cookie) { int error; struct intr_map_data *data; struct intr_irqsrc *isrc; u_int res_id; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL || isrc->isrc_handlers == 0) return (EINVAL); data = rman_get_virtual(res); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) { if (isrc != cookie) return (EINVAL); mtx_lock(&isrc_table_lock); isrc->isrc_filter = NULL; isrc->isrc_arg = NULL; isrc->isrc_handlers = 0; PIC_DISABLE_INTR(isrc->isrc_dev, isrc); PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data); isrc_update_name(isrc, NULL); mtx_unlock(&isrc_table_lock); return (0); } #endif if (isrc != intr_handler_source(cookie)) return (EINVAL); error = intr_event_remove_handler(cookie); if (error == 0) { mtx_lock(&isrc_table_lock); isrc->isrc_handlers--; if (isrc->isrc_handlers == 0) PIC_DISABLE_INTR(isrc->isrc_dev, isrc); PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data); intrcnt_updatename(isrc); mtx_unlock(&isrc_table_lock); } return (error); } int intr_describe_irq(device_t dev, struct resource *res, void *cookie, const char *descr) { int error; struct intr_irqsrc *isrc; u_int res_id; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL || isrc->isrc_handlers == 0) return (EINVAL); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) { if (isrc != cookie) return (EINVAL); mtx_lock(&isrc_table_lock); isrc_update_name(isrc, descr); mtx_unlock(&isrc_table_lock); return (0); } #endif error = intr_event_describe_handler(isrc->isrc_event, cookie, descr); if (error == 0) { mtx_lock(&isrc_table_lock); intrcnt_updatename(isrc); mtx_unlock(&isrc_table_lock); } return (error); } #ifdef SMP int intr_bind_irq(device_t dev, struct resource *res, int cpu) { 
struct intr_irqsrc *isrc; u_int res_id; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL || isrc->isrc_handlers == 0) return (EINVAL); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) return (intr_isrc_assign_cpu(isrc, cpu)); #endif return (intr_event_bind(isrc->isrc_event, cpu)); } /* * Return the CPU that the next interrupt source should use. * For now just returns the next CPU according to round-robin. */ u_int intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask) { if (!irq_assign_cpu || mp_ncpus == 1) return (PCPU_GET(cpuid)); do { last_cpu++; if (last_cpu > mp_maxid) last_cpu = 0; } while (!CPU_ISSET(last_cpu, cpumask)); return (last_cpu); } /* * Distribute all the interrupt sources among the available * CPUs once the AP's have been launched. */ static void intr_irq_shuffle(void *arg __unused) { struct intr_irqsrc *isrc; u_int i; if (mp_ncpus == 1) return; mtx_lock(&isrc_table_lock); irq_assign_cpu = TRUE; for (i = 0; i < NIRQ; i++) { isrc = irq_sources[i]; if (isrc == NULL || isrc->isrc_handlers == 0 || isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) continue; if (isrc->isrc_event != NULL && isrc->isrc_flags & INTR_ISRCF_BOUND && isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1) panic("%s: CPU inconsistency", __func__); if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0) CPU_ZERO(&isrc->isrc_cpu); /* start again */ /* * We are in wicked position here if the following call fails * for bound ISRC. The best thing we can do is to clear * isrc_cpu so inconsistency with ie_cpu will be detectable. */ if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0) CPU_ZERO(&isrc->isrc_cpu); } mtx_unlock(&isrc_table_lock); } SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL); #else u_int intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask) { return (PCPU_GET(cpuid)); } #endif /* * Allocate memory for new intr_map_data structure. * Initialize common fields. 
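 *
 * [Editorial note] Ownership of the allocated buffer normally passes to
 * the interrupt mapping table: callers such as the nexus
 * ofw_bus_map_intr() methods above hand it straight to intr_map_irq(),
 * and it is eventually released through intr_unmap_irq() ->
 * intr_free_intr_map_data().  A condensed sketch:
 *
 *	data = intr_alloc_map_data(INTR_MAP_DATA_FDT, len,
 *	    M_WAITOK | M_ZERO);
 *	... fill in the type specific fields ...
 *	irq = intr_map_irq(NULL, iparent, data);
 *	...
 *	intr_unmap_irq(irq);	-- frees 'data' as well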
*/ struct intr_map_data * intr_alloc_map_data(enum intr_map_data_type type, size_t len, int flags) { struct intr_map_data *data; data = malloc(len, M_INTRNG, flags); data->type = type; data->len = len; return (data); } void intr_free_intr_map_data(struct intr_map_data *data) { free(data, M_INTRNG); } /* * Register a MSI/MSI-X interrupt controller */ int intr_msi_register(device_t dev, intptr_t xref) { struct intr_pic *pic; if (dev == NULL) return (EINVAL); pic = pic_create(dev, xref); if (pic == NULL) return (ENOMEM); pic->pic_flags |= FLAG_MSI; debugf("PIC %p registered for %s \n", pic, device_get_nameunit(dev), dev, (uintmax_t)xref); return (0); } int intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count, int maxcount, int *irqs) { struct intr_irqsrc **isrc; struct intr_pic *pic; device_t pdev; struct intr_map_data_msi *msi; int err, i; pic = pic_lookup(NULL, xref); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_MSI) != 0, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK); err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc); if (err != 0) { free(isrc, M_INTRNG); return (err); } for (i = 0; i < count; i++) { msi = (struct intr_map_data_msi *)intr_alloc_map_data( INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO); msi-> isrc = isrc[i]; irqs[i] = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi); } free(isrc, M_INTRNG); return (err); } int intr_release_msi(device_t pci, device_t child, intptr_t xref, int count, int *irqs) { struct intr_irqsrc **isrc; struct intr_pic *pic; + struct intr_map_data_msi *msi; int i, err; pic = pic_lookup(NULL, xref); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_MSI) != 0, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK); - for (i = 0; i < count; i++) - isrc[i] = intr_map_get_isrc(irqs[i]); + for (i = 0; i < count; i++) { + msi = (struct intr_map_data_msi *) + intr_map_get_map_data(irqs[i]); + KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI, + ("%s: irq %d map data is not MSI", __func__, + irqs[i])); + isrc[i] = msi->isrc; + } err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc); for (i = 0; i < count; i++) { if (isrc[i] != NULL) intr_unmap_irq(irqs[i]); } free(isrc, M_INTRNG); return (err); } int intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq) { struct intr_irqsrc *isrc; struct intr_pic *pic; device_t pdev; struct intr_map_data_msi *msi; int err; pic = pic_lookup(NULL, xref); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_MSI) != 0, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc); if (err != 0) return (err); msi = (struct intr_map_data_msi *)intr_alloc_map_data( INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO); msi->isrc = isrc; *irq = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi); return (0); } int intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq) { struct intr_irqsrc *isrc; struct intr_pic *pic; + struct intr_map_data_msi *msi; int err; pic = pic_lookup(NULL, xref); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_MSI) != 0, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); - isrc = intr_map_get_isrc(irq); + msi = (struct intr_map_data_msi *) + intr_map_get_map_data(irq); + 
KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI, + ("%s: irq %d map data is not MSI", __func__, + irq)); + isrc = msi->isrc; if (isrc == NULL) { intr_unmap_irq(irq); return (EINVAL); } err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc); intr_unmap_irq(irq); return (err); } int intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq, uint64_t *addr, uint32_t *data) { struct intr_irqsrc *isrc; struct intr_pic *pic; int err; pic = pic_lookup(NULL, xref); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_MSI) != 0, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); isrc = intr_map_get_isrc(irq); if (isrc == NULL) return (EINVAL); err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data); return (err); } void dosoftints(void); void dosoftints(void) { } #ifdef SMP /* * Init interrupt controller on another CPU. */ void intr_pic_init_secondary(void) { /* * QQQ: Only root PIC is aware of other CPUs ??? */ KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); //mtx_lock(&isrc_table_lock); PIC_INIT_SECONDARY(intr_irq_root_dev); //mtx_unlock(&isrc_table_lock); } #endif #ifdef DDB DB_SHOW_COMMAND(irqs, db_show_irqs) { u_int i, irqsum; u_long num; struct intr_irqsrc *isrc; for (irqsum = 0, i = 0; i < NIRQ; i++) { isrc = irq_sources[i]; if (isrc == NULL) continue; num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0; db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i, isrc->isrc_name, isrc->isrc_cpu.__bits[0], isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num); irqsum += num; } db_printf("irq total %u\n", irqsum); } #endif /* * Interrupt mapping table functions. * * Please, keep this part separately, it can be transformed to * extension of standard resources. */ struct intr_map_entry { device_t dev; intptr_t xref; struct intr_map_data *map_data; struct intr_irqsrc *isrc; /* XXX TODO DISCONECTED PICs */ /*int flags */ }; /* XXX Convert irq_map[] to dynamicaly expandable one. 
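 *
 * [Editorial note] The index into irq_map[] is the value returned by
 * intr_map_irq() and becomes the start/end of the consumer's
 * SYS_RES_IRQ resource, which is why the framework entry points above
 * recover the entry with:
 *
 *	res_id = (u_int)rman_get_start(res);
 *	isrc = intr_map_get_isrc(res_id);
 *
 * and why the intr_map_get_map_data() accessor added in this revision
 * lets intr_release_msi() and intr_release_msix() fetch the stored MSI
 * map data by IRQ number.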
*/ static struct intr_map_entry *irq_map[2 * NIRQ]; static int irq_map_count = nitems(irq_map); static int irq_map_first_free_idx; static struct mtx irq_map_lock; static struct intr_irqsrc * intr_map_get_isrc(u_int res_id) { struct intr_irqsrc *isrc; mtx_lock(&irq_map_lock); if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) { mtx_unlock(&irq_map_lock); return (NULL); } isrc = irq_map[res_id]->isrc; mtx_unlock(&irq_map_lock); return (isrc); } static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc) { mtx_lock(&irq_map_lock); if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) { mtx_unlock(&irq_map_lock); return; } irq_map[res_id]->isrc = isrc; mtx_unlock(&irq_map_lock); +} + +/* + * Get a copy of intr_map_entry data + */ +static struct intr_map_data * +intr_map_get_map_data(u_int res_id) +{ + struct intr_map_data *data; + + data = NULL; + mtx_lock(&irq_map_lock); + if (res_id >= irq_map_count || irq_map[res_id] == NULL) + panic("Attempt to copy invalid resource id: %u\n", res_id); + data = irq_map[res_id]->map_data; + mtx_unlock(&irq_map_lock); + + return (data); } /* * Get a copy of intr_map_entry data */ static void intr_map_copy_map_data(u_int res_id, device_t *map_dev, intptr_t *map_xref, struct intr_map_data **data) { size_t len; len = 0; mtx_lock(&irq_map_lock); if (res_id >= irq_map_count || irq_map[res_id] == NULL) panic("Attempt to copy invalid resource id: %u\n", res_id); if (irq_map[res_id]->map_data != NULL) len = irq_map[res_id]->map_data->len; mtx_unlock(&irq_map_lock); if (len == 0) *data = NULL; else *data = malloc(len, M_INTRNG, M_WAITOK | M_ZERO); mtx_lock(&irq_map_lock); if (irq_map[res_id] == NULL) panic("Attempt to copy invalid resource id: %u\n", res_id); if (len != 0) { if (len != irq_map[res_id]->map_data->len) panic("Resource id: %u has changed.\n", res_id); memcpy(*data, irq_map[res_id]->map_data, len); } *map_dev = irq_map[res_id]->dev; *map_xref = irq_map[res_id]->xref; mtx_unlock(&irq_map_lock); } /* * Allocate and fill new entry in irq_map table. */ u_int intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data) { u_int i; struct intr_map_entry *entry; /* Prepare new entry first. */ entry = malloc(sizeof(*entry), M_INTRNG, M_WAITOK | M_ZERO); entry->dev = dev; entry->xref = xref; entry->map_data = data; entry->isrc = NULL; mtx_lock(&irq_map_lock); for (i = irq_map_first_free_idx; i < irq_map_count; i++) { if (irq_map[i] == NULL) { irq_map[i] = entry; irq_map_first_free_idx = i + 1; mtx_unlock(&irq_map_lock); return (i); } } mtx_unlock(&irq_map_lock); /* XXX Expand irq_map table */ panic("IRQ mapping table is full."); } /* * Remove and free mapping entry. */ void intr_unmap_irq(u_int res_id) { struct intr_map_entry *entry; mtx_lock(&irq_map_lock); if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) panic("Attempt to unmap invalid resource id: %u\n", res_id); entry = irq_map[res_id]; irq_map[res_id] = NULL; irq_map_first_free_idx = res_id; mtx_unlock(&irq_map_lock); intr_free_intr_map_data(entry->map_data); free(entry, M_INTRNG); } /* * Clone mapping entry. 
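 *
 * [Editorial note] Cloning copies the stored map data and allocates a
 * fresh slot in irq_map[], so the caller gets an independent resource
 * id that describes the same interrupt; a hypothetical use:
 *
 *	new_id = intr_map_clone_irq(old_id);
 *	resource_list_add(rl, SYS_RES_IRQ, rid, new_id, new_id, 1);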
*/ u_int intr_map_clone_irq(u_int old_res_id) { device_t map_dev; intptr_t map_xref; struct intr_map_data *data; intr_map_copy_map_data(old_res_id, &map_dev, &map_xref, &data); return (intr_map_irq(map_dev, map_xref, data)); } static void intr_map_init(void *dummy __unused) { mtx_init(&irq_map_lock, "intr map table", NULL, MTX_DEF); } SYSINIT(intr_map_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_map_init, NULL); Index: stable/11/sys/mips/mips/nexus.c =================================================================== --- stable/11/sys/mips/mips/nexus.c (revision 308381) +++ stable/11/sys/mips/mips/nexus.c (revision 308382) @@ -1,618 +1,622 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for MIPS Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests and memory address space. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INTRNG #include #else #include #endif #ifdef FDT #include #include #include "ofw_bus_if.h" #endif #undef NEXUS_DEBUG #ifdef NEXUS_DEBUG #define dprintf printf #else #define dprintf(x, arg...) 
#endif /* NEXUS_DEBUG */ #define NUM_MIPS_IRQS 6 static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman irq_rman; static struct rman mem_rman; static struct resource * nexus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static device_t nexus_add_child(device_t, u_int, const char *, int); static int nexus_attach(device_t); static void nexus_delete_resource(device_t, device_t, int, int); static struct resource_list * nexus_get_reslist(device_t, device_t); static int nexus_get_resource(device_t, device_t, int, int, rman_res_t *, rman_res_t *); static int nexus_print_child(device_t, device_t); static int nexus_print_all_resources(device_t dev); static int nexus_probe(device_t); static int nexus_release_resource(device_t, device_t, int, int, struct resource *); static int nexus_set_resource(device_t, device_t, int, int, rman_res_t, rman_res_t); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); static int nexus_deactivate_resource(device_t, device_t, int, int, struct resource *); static void nexus_hinted_child(device_t, const char *, int); static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); #ifdef INTRNG #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr); #endif static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol); #endif static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), /* Bus interface */ DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_delete_resource, nexus_delete_resource), DEVMETHOD(bus_get_resource, nexus_get_resource), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_release_resource, nexus_release_resource), DEVMETHOD(bus_set_resource, nexus_set_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_activate_resource,nexus_activate_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_hinted_child, nexus_hinted_child), #ifdef INTRNG DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif #ifdef FDT DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), #endif #endif { 0, 0 } }; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; static devclass_t nexus_devclass; static int nexus_probe(device_t dev) { device_set_desc(dev, "MIPS32 root nexus"); irq_rman.rm_start = 0; irq_rman.rm_end = NUM_MIPS_IRQS - 1; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Hardware IRQs"; if (rman_init(&irq_rman) != 0 || rman_manage_region(&irq_rman, 0, NUM_MIPS_IRQS - 1) != 0) { panic("%s: irq_rman", __func__); } mem_rman.rm_start = 0; 
mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "Memory addresses"; if (rman_init(&mem_rman) != 0 || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR) != 0) { panic("%s: mem_rman", __func__); } return (0); } static int nexus_attach(device_t dev) { bus_generic_probe(dev); bus_enumerate_hinted_children(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += nexus_print_all_resources(child); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf(" on %s\n", device_get_nameunit(bus)); return (retval); } static int nexus_print_all_resources(device_t dev) { struct nexus_device *ndev = DEVTONX(dev); struct resource_list *rl = &ndev->nx_resources; int retval = 0; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) { device_printf(bus, "failed to add child: %s%d\n", name, unit); return (0); } /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return (child); } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. * (Exceptions include footbridge.) */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int isdefault, needactivate, passthrough; dprintf("%s: entry (%p, %p, %d, %p, %p, %p, %jd, %d)\n", __func__, bus, child, type, rid, (void *)(intptr_t)start, (void *)(intptr_t)end, count, flags); dprintf("%s: requested rid is %d\n", __func__, *rid); isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); needactivate = flags & RF_ACTIVE; passthrough = (device_get_parent(child) != bus); rle = NULL; /* * If this is an allocation of the "default" range for a given RID, * and we know what the resources for this device are (ie. they aren't * maintained by a child bus), then work out the start/end values. 
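 *
 * [Editorial illustration] Such a "default" allocation is normally
 * paired with an earlier bus_set_resource() call, as done by
 * nexus_hinted_child() below; the driver then allocates without naming
 * a range (hypothetical child device):
 *
 *	bus_set_resource(child, SYS_RES_MEMORY, 0, maddr, msize);
 *	...
 *	res = bus_alloc_resource_any(child, SYS_RES_MEMORY, &rid,
 *	    RF_ACTIVE);
 *
 * bus_alloc_resource_any() requests the default range with a count of
 * one, so start, end and count are taken from the resource list entry
 * here.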
*/ if (isdefault) { rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) { panic("%s: resource entry is busy", __func__); } start = rle->start; end = rle->end; count = rle->count; } switch (type) { case SYS_RES_IRQ: rm = &irq_rman; break; case SYS_RES_MEMORY: rm = &mem_rman; break; default: printf("%s: unknown resource type %d\n", __func__, type); return (0); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) { printf("%s: could not reserve resource for %s\n", __func__, device_get_nameunit(child)); return (0); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { printf("%s: could not activate resource\n", __func__); rman_release_resource(rv); return (0); } } return (rv); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int nexus_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; struct resource_list_entry *rle; dprintf("%s: entry (%p, %p, %d, %d, %p, %jd)\n", __func__, dev, child, type, rid, (void *)(intptr_t)start, count); rle = resource_list_add(rl, type, rid, start, start + count - 1, count); if (rle == NULL) return (ENXIO); return (0); } static int nexus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) return(ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } static void nexus_delete_resource(device_t dev, device_t child, int type, int rid) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; dprintf("%s: entry\n", __func__); resource_list_delete(rl, type, rid); } static int nexus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { dprintf("%s: entry\n", __func__); if (rman_get_flags(r) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, r); if (error) return error; } return (rman_release_resource(r)); } static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { void *vaddr; vm_paddr_t paddr; vm_size_t psize; int err; /* * If this is a memory resource, use pmap_mapdev to map it. */ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { paddr = rman_get_start(r); psize = rman_get_size(r); rman_set_bustag(r, mips_bus_space_generic); err = bus_space_map(rman_get_bustag(r), paddr, psize, 0, (bus_space_handle_t *)&vaddr); if (err != 0) { rman_deactivate_resource(r); return (err); } rman_set_virtual(r, vaddr); rman_set_bushandle(r, (bus_space_handle_t)(uintptr_t)vaddr); } else if (type == SYS_RES_IRQ) { #ifdef INTRNG #ifdef FDT - intr_activate_irq(child, r); + err = intr_activate_irq(child, r); + if (err != 0) { + rman_deactivate_resource(r); + return (err); + } #else /* * INTRNG without FDT needs to have the interrupt properly * mapped first. cpu_create_intr_map() will do that and * call intr_activate_irq() at the end. 
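 *
 * [Editorial note] With this revision intr_activate_irq() reports
 * failures instead of returning void, so the FDT branch above now uses
 * the same pattern as the other nexus drivers in this commit:
 *
 *	err = intr_activate_irq(child, r);
 *	if (err != 0) {
 *		rman_deactivate_resource(r);
 *		return (err);
 *	}
 *
 * The non-FDT branch keeps delegating the mapping and activation to
 * cpu_create_intr_map(), as described above.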
static int
nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	bus_space_handle_t vaddr;
	bus_size_t psize;

	vaddr = rman_get_bushandle(r);

	if (type == SYS_RES_MEMORY && vaddr != 0) {
		psize = (bus_size_t)rman_get_size(r);
		bus_space_unmap(rman_get_bustag(r), vaddr, psize);
		rman_set_virtual(r, NULL);
		rman_set_bushandle(r, 0);
	} else if (type == SYS_RES_IRQ) {
#ifdef INTRNG
		intr_deactivate_irq(child, r);
#endif
	}

	return (rman_deactivate_resource(r));
}

static int
nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
    driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep)
{
#ifdef INTRNG
	struct resource *r = res;

#ifndef FDT
	r = cpu_get_irq_resource(rman_get_start(r));
#endif

	return (intr_setup_irq(child, r, filt, intr, arg, flags, cookiep));
#else
	int irq;
	register_t s;

	s = intr_disable();
	irq = rman_get_start(res);
	if (irq >= NUM_MIPS_IRQS) {
		intr_restore(s);
		return (0);
	}

	cpu_establish_hardintr(device_get_nameunit(child), filt, intr, arg,
	    irq, flags, cookiep);
	intr_restore(s);

	return (0);
#endif
}

static int
nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih)
{
#ifdef INTRNG
	return (intr_teardown_irq(child, r, ih));
#else
	printf("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__);
	return (0);
#endif
}

#ifdef INTRNG
static int
nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
    enum intr_polarity pol)
{

	device_printf(dev, "bus_config_intr is obsolete and not supported!\n");
	return (EOPNOTSUPP);
}

static int
nexus_describe_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie, const char *descr)
{

	return (intr_describe_irq(child, irq, cookie, descr));
}

#ifdef SMP
static int
nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
{

	return (intr_bind_irq(child, irq, cpu));
}
#endif

#ifdef FDT
static int
nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells,
    pcell_t *intr)
{
	u_int irq;
	struct intr_map_data_fdt *fdt_data;
	size_t len;

	len = sizeof(*fdt_data) + icells * sizeof(pcell_t);
	fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data(
	    INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO);
	fdt_data->iparent = iparent;
	fdt_data->ncells = icells;
	memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t));

	irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data);
	return (irq);
}
#endif
#endif /* INTRNG */
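/*
 * Editor's note (illustrative sketch, not part of nexus.c or of this
 * commit): once an IRQ resource has been allocated and activated, a child
 * driver hands it to nexus_setup_intr() via bus_setup_intr().  The handler
 * name and the flags chosen here are invented for the example.
 */
static void
myuart_intr(void *arg)
{

	/* ... acknowledge and service the hypothetical device ... */
}

static int
myuart_setup_intr(device_t dev, struct resource *irq, void **cookiep)
{

	return (bus_setup_intr(dev, irq, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, myuart_intr, NULL, cookiep));
}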
static void
nexus_hinted_child(device_t bus, const char *dname, int dunit)
{
	device_t child;
	long	maddr;
	int	msize;
	int	order;
	int	result;
	int	irq;
	int	mem_hints_count;

	if ((resource_int_value(dname, dunit, "order", &order)) != 0)
		order = 1000;
	child = BUS_ADD_CHILD(bus, order, dname, dunit);
	if (child == NULL)
		return;

	/*
	 * Set hard-wired resources for hinted child using
	 * specific RIDs.
	 */
	mem_hints_count = 0;
	if (resource_long_value(dname, dunit, "maddr", &maddr) == 0)
		mem_hints_count++;
	if (resource_int_value(dname, dunit, "msize", &msize) == 0)
		mem_hints_count++;

	/* check if all info for mem resource has been provided */
	if ((mem_hints_count > 0) && (mem_hints_count < 2)) {
		printf("Either maddr or msize hint is missing for %s%d\n",
		    dname, dunit);
	} else if (mem_hints_count) {
		dprintf("%s: discovered hinted child %s at maddr %p(%d)\n",
		    __func__, device_get_nameunit(child),
		    (void *)(intptr_t)maddr, msize);

		result = bus_set_resource(child, SYS_RES_MEMORY, 0,
		    (u_long)maddr, msize);
		if (result != 0) {
			device_printf(bus,
			    "warning: bus_set_resource() failed\n");
		}
	}

	if (resource_int_value(dname, dunit, "irq", &irq) == 0) {
		result = bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1);
		if (result != 0)
			device_printf(bus,
			    "warning: bus_set_resource() failed\n");
	}
}

EARLY_DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_EARLY);

Index: stable/11
===================================================================
--- stable/11	(revision 308381)
+++ stable/11	(revision 308382)

Property changes on: stable/11
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r306899,307059,307151
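Editor's note (illustrative, not part of this change): nexus_hinted_child()
above consumes standard device.hints(5) entries of the following shape; the
"myuart" name and the values are invented for the example:

	hint.myuart.0.at="nexus0"
	hint.myuart.0.order=10
	hint.myuart.0.maddr=0x1f000900
	hint.myuart.0.msize=0x100
	hint.myuart.0.irq=9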