Index: head/sys/arm/arm/gic.c =================================================================== --- head/sys/arm/arm/gic.c (revision 297229) +++ head/sys/arm/arm/gic.c (revision 297230) @@ -1,1165 +1,1185 @@ /*- * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * Developed by Damjan Marion * * Based on OMAP4 GIC code by Ben Gray * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ARM_INTRNG #include #endif #include #include #include #include #include #include #include #ifdef ARM_INTRNG #include "pic_if.h" #endif #define GIC_DEBUG_SPURIOUS /* We are using GICv2 register naming */ /* Distributor Registers */ #define GICD_CTLR 0x000 /* v1 ICDDCR */ #define GICD_TYPER 0x004 /* v1 ICDICTR */ #define GICD_IIDR 0x008 /* v1 ICDIIDR */ #define GICD_IGROUPR(n) (0x0080 + ((n) * 4)) /* v1 ICDISER */ #define GICD_ISENABLER(n) (0x0100 + ((n) * 4)) /* v1 ICDISER */ #define GICD_ICENABLER(n) (0x0180 + ((n) * 4)) /* v1 ICDICER */ #define GICD_ISPENDR(n) (0x0200 + ((n) * 4)) /* v1 ICDISPR */ #define GICD_ICPENDR(n) (0x0280 + ((n) * 4)) /* v1 ICDICPR */ #define GICD_ICACTIVER(n) (0x0380 + ((n) * 4)) /* v1 ICDABR */ #define GICD_IPRIORITYR(n) (0x0400 + ((n) * 4)) /* v1 ICDIPR */ #define GICD_ITARGETSR(n) (0x0800 + ((n) * 4)) /* v1 ICDIPTR */ #define GICD_ICFGR(n) (0x0C00 + ((n) * 4)) /* v1 ICDICFR */ #define GICD_SGIR(n) (0x0F00 + ((n) * 4)) /* v1 ICDSGIR */ /* CPU Registers */ #define GICC_CTLR 0x0000 /* v1 ICCICR */ #define GICC_PMR 0x0004 /* v1 ICCPMR */ #define GICC_BPR 0x0008 /* v1 ICCBPR */ #define GICC_IAR 0x000C /* v1 ICCIAR */ #define GICC_EOIR 0x0010 /* v1 ICCEOIR */ #define GICC_RPR 0x0014 /* v1 ICCRPR */ #define GICC_HPPIR 0x0018 /* v1 ICCHPIR */ #define GICC_ABPR 0x001C /* v1 ICCABPR */ #define GICC_IIDR 0x00FC /* v1 ICCIIDR*/ #define GIC_FIRST_SGI 0 /* Irqs 0-15 are SGIs/IPIs. 
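Most distributor registers above are banked arrays: one bit per interrupt (ISENABLER/ICENABLER/ISPENDR), one byte per interrupt (IPRIORITYR/ITARGETSR), or two bits per interrupt (ICFGR). A minimal sketch, not part of this commit, of the bit-banked case that the driver's mask/unmask helpers below rely on (sc and irq assumed in scope):

	/*
	 * Each GICD_ISENABLER word covers 32 IRQs: irq / 32 selects the
	 * register, irq % 32 selects the bit within it.
	 */
	gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), 1UL << (irq & 0x1F));
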
*/ #define GIC_LAST_SGI 15 #define GIC_FIRST_PPI 16 /* Irqs 16-31 are private (per */ #define GIC_LAST_PPI 31 /* core) peripheral interrupts. */ #define GIC_FIRST_SPI 32 /* Irqs 32+ are shared peripherals. */ /* First bit is a polarity bit (0 - low, 1 - high) */ #define GICD_ICFGR_POL_LOW (0 << 0) #define GICD_ICFGR_POL_HIGH (1 << 0) #define GICD_ICFGR_POL_MASK 0x1 /* Second bit is a trigger bit (0 - level, 1 - edge) */ #define GICD_ICFGR_TRIG_LVL (0 << 1) #define GICD_ICFGR_TRIG_EDGE (1 << 1) #define GICD_ICFGR_TRIG_MASK 0x2 #ifndef GIC_DEFAULT_ICFGR_INIT #define GIC_DEFAULT_ICFGR_INIT 0x00000000 #endif #ifdef ARM_INTRNG static u_int gic_irq_cpu; static int arm_gic_intr(void *); static int arm_gic_bind(device_t dev, struct intr_irqsrc *isrc); + +#ifdef SMP +u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1]; +#define ISRC_IPI(isrc) sgi_to_ipi[isrc->isrc_data - GIC_FIRST_SGI] #endif +#endif struct arm_gic_softc { device_t gic_dev; #ifdef ARM_INTRNG void * gic_intrhand; struct intr_irqsrc ** gic_irqs; #endif struct resource * gic_res[3]; bus_space_tag_t gic_c_bst; bus_space_tag_t gic_d_bst; bus_space_handle_t gic_c_bsh; bus_space_handle_t gic_d_bsh; uint8_t ver; struct mtx mutex; uint32_t nirqs; #ifdef GIC_DEBUG_SPURIOUS uint32_t last_irq[MAXCPU]; #endif }; static struct resource_spec arm_gic_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */ #ifdef ARM_INTRNG { SYS_RES_IRQ, 0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */ #endif { -1, 0 } }; static struct arm_gic_softc *gic_sc = NULL; #define gic_c_read_4(_sc, _reg) \ bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg)) #define gic_c_write_4(_sc, _reg, _val) \ bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val)) #define gic_d_read_4(_sc, _reg) \ bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg)) #define gic_d_write_1(_sc, _reg, _val) \ bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val)) #define gic_d_write_4(_sc, _reg, _val) \ bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val)) #ifndef ARM_INTRNG static int gic_config_irq(int irq, enum intr_trigger trig, enum intr_polarity pol); static void gic_post_filter(void *); #endif static struct ofw_compat_data compat_data[] = { {"arm,gic", true}, /* Non-standard, used in FreeBSD dts. 
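The newly added sgi_to_ipi[] table records, for every hardware SGI, the machine-independent IPI number it was set up for; the ISRC_IPI() macro is the reverse lookup used on the dispatch path, relying on isrc_data holding the hardware SGI number (gic_attach_isrc() below arranges that). An illustrative sketch, not part of the diff:

static void
example_dispatch_sgi(struct intr_irqsrc *isrc, struct trapframe *tf)
{
	/* Translate the hardware SGI back to the MI IPI number. */
	intr_ipi_dispatch(sgi_to_ipi[isrc->isrc_data - GIC_FIRST_SGI], tf);
}
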
*/ {"arm,gic-400", true}, {"arm,cortex-a15-gic", true}, {"arm,cortex-a9-gic", true}, {"arm,cortex-a7-gic", true}, {"arm,arm11mp-gic", true}, {"brcm,brahma-b15-gic", true}, {NULL, false} }; static int arm_gic_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "ARM Generic Interrupt Controller"); return (BUS_PROBE_DEFAULT); } #ifdef ARM_INTRNG static inline void gic_irq_unmask(struct arm_gic_softc *sc, u_int irq) { gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F))); } static inline void gic_irq_mask(struct arm_gic_softc *sc, u_int irq) { gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F))); } #endif #ifdef SMP #ifdef ARM_INTRNG static void arm_gic_init_secondary(device_t dev) { struct arm_gic_softc *sc = device_get_softc(dev); struct intr_irqsrc *isrc; u_int irq; for (irq = 0; irq < sc->nirqs; irq += 4) gic_d_write_4(sc, GICD_IPRIORITYR(irq >> 2), 0); /* Set all the interrupts to be in Group 0 (secure) */ for (irq = 0; irq < sc->nirqs; irq += 32) { gic_d_write_4(sc, GICD_IGROUPR(irq >> 5), 0); } /* Enable CPU interface */ gic_c_write_4(sc, GICC_CTLR, 1); /* Set priority mask register. */ gic_c_write_4(sc, GICC_PMR, 0xff); /* Enable interrupt distribution */ gic_d_write_4(sc, GICD_CTLR, 0x01); /* Unmask attached SGI interrupts. */ for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) { isrc = sc->gic_irqs[irq]; if (isrc != NULL && isrc->isrc_handlers != 0) { CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); gic_irq_unmask(sc, irq); } } /* Unmask attached PPI interrupts. */ for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) { isrc = sc->gic_irqs[irq]; if (isrc == NULL || isrc->isrc_handlers == 0) continue; if (isrc->isrc_flags & INTR_ISRCF_BOUND) { if (CPU_ISSET(PCPU_GET(cpuid), &isrc->isrc_cpu)) gic_irq_unmask(sc, irq); } else { CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); gic_irq_unmask(sc, irq); } } } #else static void arm_gic_init_secondary(device_t dev) { struct arm_gic_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->nirqs; i += 4) gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0); /* Set all the interrupts to be in Group 0 (secure) */ for (i = 0; i < sc->nirqs; i += 32) { gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0); } /* Enable CPU interface */ gic_c_write_4(sc, GICC_CTLR, 1); /* Set priority mask register. */ gic_c_write_4(sc, GICC_PMR, 0xff); /* Enable interrupt distribution */ gic_d_write_4(sc, GICD_CTLR, 0x01); /* * Activate the timer interrupts: virtual, secure, and non-secure. 
*/ gic_d_write_4(sc, GICD_ISENABLER(27 >> 5), (1UL << (27 & 0x1F))); gic_d_write_4(sc, GICD_ISENABLER(29 >> 5), (1UL << (29 & 0x1F))); gic_d_write_4(sc, GICD_ISENABLER(30 >> 5), (1UL << (30 & 0x1F))); } #endif /* ARM_INTRNG */ #endif /* SMP */ #ifndef ARM_INTRNG int gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt, int *trig, int *pol) { static u_int num_intr_cells; static phandle_t self; struct ofw_compat_data *ocd; if (self == 0) { for (ocd = compat_data; ocd->ocd_str != NULL; ocd++) { if (fdt_is_compatible(iparent, ocd->ocd_str)) { self = iparent; break; } } } if (self != iparent) return (ENXIO); if (num_intr_cells == 0) { if (OF_searchencprop(OF_node_from_xref(iparent), "#interrupt-cells", &num_intr_cells, sizeof(num_intr_cells)) == -1) { num_intr_cells = 1; } } if (num_intr_cells == 1) { *interrupt = fdt32_to_cpu(intr[0]); *trig = INTR_TRIGGER_CONFORM; *pol = INTR_POLARITY_CONFORM; } else { if (fdt32_to_cpu(intr[0]) == 0) *interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_SPI; else *interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_PPI; /* * In intr[2], bits[3:0] are trigger type and level flags. * 1 = low-to-high edge triggered * 2 = high-to-low edge triggered * 4 = active high level-sensitive * 8 = active low level-sensitive * The hardware only supports active-high-level or rising-edge * for SPIs */ if (*interrupt >= GIC_FIRST_SPI && fdt32_to_cpu(intr[2]) & 0x0a) { printf("unsupported trigger/polarity configuration " "0x%02x\n", fdt32_to_cpu(intr[2]) & 0x0f); } *pol = INTR_POLARITY_CONFORM; if (fdt32_to_cpu(intr[2]) & 0x03) *trig = INTR_TRIGGER_EDGE; else *trig = INTR_TRIGGER_LEVEL; } return (0); } #endif #ifdef ARM_INTRNG static inline intptr_t gic_xref(device_t dev) { #ifdef FDT return (OF_xref_from_node(ofw_bus_get_node(dev))); #else return (0); #endif } #endif static int arm_gic_attach(device_t dev) { struct arm_gic_softc *sc; int i; uint32_t icciidr; #ifdef ARM_INTRNG phandle_t pxref; intptr_t xref = gic_xref(dev); #endif if (gic_sc) return (ENXIO); sc = device_get_softc(dev); if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->gic_dev = dev; gic_sc = sc; /* Initialize mutex */ mtx_init(&sc->mutex, "GIC lock", "", MTX_SPIN); /* Distributor Interface */ sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]); sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]); /* CPU Interface */ sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]); sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]); /* Disable interrupt forwarding to the CPU interface */ gic_d_write_4(sc, GICD_CTLR, 0x00); /* Get the number of interrupts */ sc->nirqs = gic_d_read_4(sc, GICD_TYPER); sc->nirqs = 32 * ((sc->nirqs & 0x1f) + 1); #ifdef ARM_INTRNG sc->gic_irqs = malloc(sc->nirqs * sizeof (*sc->gic_irqs), M_DEVBUF, M_WAITOK | M_ZERO); #else /* Set up function pointers */ arm_post_filter = gic_post_filter; arm_config_irq = gic_config_irq; #endif icciidr = gic_c_read_4(sc, GICC_IIDR); device_printf(dev,"pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n", icciidr>>20, (icciidr>>16) & 0xF, (icciidr>>12) & 0xf, (icciidr & 0xfff), sc->nirqs); /* Set all global interrupts to be level triggered, active low. */ for (i = 32; i < sc->nirqs; i += 16) { gic_d_write_4(sc, GICD_ICFGR(i >> 4), GIC_DEFAULT_ICFGR_INIT); } /* Disable all interrupts. 
*/
	for (i = 32; i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_ICENABLER(i >> 5), 0xFFFFFFFF);
	}

	for (i = 0; i < sc->nirqs; i += 4) {
		gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0);
		gic_d_write_4(sc, GICD_ITARGETSR(i >> 2),
		    1 << 0 | 1 << 8 | 1 << 16 | 1 << 24);
	}

	/* Set all the interrupts to be in Group 0 (secure) */
	for (i = 0; i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);

#ifndef ARM_INTRNG
	return (0);
#else
	/*
	 * Now that everything is initialized, it is the right time to
	 * register the interrupt controller with the interrupt framework.
	 */
	if (intr_pic_register(dev, xref) != 0) {
		device_printf(dev, "could not register PIC\n");
		goto cleanup;
	}

	/*
	 * The controller is root if:
	 * - it has no interrupt parent, or
	 * - its interrupt parent is this controller.
	 */
	pxref = ofw_bus_find_iparent(ofw_bus_get_node(dev));
	if (pxref == 0 || xref == pxref) {
		if (intr_pic_claim_root(dev, xref, arm_gic_intr, sc,
		    GIC_LAST_SGI - GIC_FIRST_SGI + 1) != 0) {
			device_printf(dev, "could not set PIC as a root\n");
			intr_pic_unregister(dev, xref);
			goto cleanup;
		}
	} else {
		if (sc->gic_res[2] == NULL) {
			device_printf(dev,
			    "non-root PIC must have a parent interrupt defined\n");
			intr_pic_unregister(dev, xref);
			goto cleanup;
		}
		if (bus_setup_intr(dev, sc->gic_res[2], INTR_TYPE_CLK,
		    arm_gic_intr, NULL, sc, &sc->gic_intrhand)) {
			device_printf(dev, "could not setup irq handler\n");
			intr_pic_unregister(dev, xref);
			goto cleanup;
		}
	}

	OF_device_register_xref(xref, dev);
	return (0);

cleanup:
	/*
	 * XXX - arm_gic_detach() is not implemented yet and should be
	 * called here!
	 */
	if (sc->gic_irqs != NULL)
		free(sc->gic_irqs, M_DEVBUF);
	bus_release_resources(dev, arm_gic_spec, sc->gic_res);
	return (ENXIO);
#endif
}

#ifdef ARM_INTRNG
static int
arm_gic_intr(void *arg)
{
	struct arm_gic_softc *sc = arg;
	struct intr_irqsrc *isrc;
	uint32_t irq_active_reg, irq;
	struct trapframe *tf;

	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;

	/*
	 * 1. We do the EOI here because the value most recently read from
	 *    the active interrupt register must be used for it. Another
	 *    approach is to save this value into the associated interrupt
	 *    source.
	 * 2. The EOI must be done on the same CPU where the interrupt
	 *    fired. Thus we must ensure that the interrupted thread does
	 *    not migrate to another CPU.
	 * 3. The EOI cannot be delayed by any preemption which could happen
	 *    on critical_exit() used in MI intr code, when the interrupt
	 *    thread is scheduled. See the next point.
	 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
	 *    an action and any use of critical_exit() could break this
	 *    assumption. See the comments within smp_rendezvous_action().
	 * 5. We always return FILTER_HANDLED as this is an interrupt
	 *    controller dispatch function. Otherwise, in the cascaded
	 *    interrupt case, the whole interrupt subtree would be masked.
*/ if (irq >= sc->nirqs) { #ifdef GIC_DEBUG_SPURIOUS device_printf(sc->gic_dev, "Spurious interrupt detected: last irq: %d on CPU%d\n", sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid)); #endif return (FILTER_HANDLED); } tf = curthread->td_intr_frame; dispatch_irq: isrc = sc->gic_irqs[irq]; if (isrc == NULL) { device_printf(sc->gic_dev, "Stray interrupt %u detected\n", irq); gic_irq_mask(sc, irq); gic_c_write_4(sc, GICC_EOIR, irq_active_reg); goto next_irq; } /* * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement * as compiler complains that comparing u_int >= 0 is always true. */ if (irq <= GIC_LAST_SGI) { #ifdef SMP /* Call EOI for all IPI before dispatch. */ gic_c_write_4(sc, GICC_EOIR, irq_active_reg); - intr_ipi_dispatch(isrc, tf); + intr_ipi_dispatch(ISRC_IPI(isrc), tf); goto next_irq; #else device_printf(sc->gic_dev, "SGI %u on UP system detected\n", irq - GIC_FIRST_SGI); gic_c_write_4(sc, GICC_EOIR, irq_active_reg); goto next_irq; #endif } #ifdef GIC_DEBUG_SPURIOUS sc->last_irq[PCPU_GET(cpuid)] = irq; #endif if (isrc->isrc_trig == INTR_TRIGGER_EDGE) gic_c_write_4(sc, GICC_EOIR, irq_active_reg); intr_irq_dispatch(isrc, tf); next_irq: arm_irq_memory_barrier(irq); irq_active_reg = gic_c_read_4(sc, GICC_IAR); irq = irq_active_reg & 0x3FF; if (irq < sc->nirqs) goto dispatch_irq; return (FILTER_HANDLED); } static int gic_attach_isrc(struct arm_gic_softc *sc, struct intr_irqsrc *isrc, u_int irq) { const char *name; /* * 1. The link between ISRC and controller must be set atomically. * 2. Just do things only once in rare case when consumers * of shared interrupt came here at the same moment. */ mtx_lock_spin(&sc->mutex); if (sc->gic_irqs[irq] != NULL) { mtx_unlock_spin(&sc->mutex); return (sc->gic_irqs[irq] == isrc ? 0 : EEXIST); } sc->gic_irqs[irq] = isrc; isrc->isrc_data = irq; mtx_unlock_spin(&sc->mutex); name = device_get_nameunit(sc->gic_dev); if (irq <= GIC_LAST_SGI) intr_irq_set_name(isrc, "%s,i%u", name, irq - GIC_FIRST_SGI); else if (irq <= GIC_LAST_PPI) intr_irq_set_name(isrc, "%s,p%u", name, irq - GIC_FIRST_PPI); else intr_irq_set_name(isrc, "%s,s%u", name, irq - GIC_FIRST_SPI); return (0); } static int gic_detach_isrc(struct arm_gic_softc *sc, struct intr_irqsrc *isrc, u_int irq) { mtx_lock_spin(&sc->mutex); if (sc->gic_irqs[irq] != isrc) { mtx_unlock_spin(&sc->mutex); return (sc->gic_irqs[irq] == NULL ? 
0 : EINVAL); } sc->gic_irqs[irq] = NULL; isrc->isrc_data = 0; mtx_unlock_spin(&sc->mutex); intr_irq_set_name(isrc, ""); return (0); } static void gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig, enum intr_polarity pol) { uint32_t reg; uint32_t mask; if (irq < GIC_FIRST_SPI) return; mtx_lock_spin(&sc->mutex); reg = gic_d_read_4(sc, GICD_ICFGR(irq >> 4)); mask = (reg >> 2*(irq % 16)) & 0x3; if (pol == INTR_POLARITY_LOW) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_LOW; } else if (pol == INTR_POLARITY_HIGH) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_HIGH; } if (trig == INTR_TRIGGER_LEVEL) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_LVL; } else if (trig == INTR_TRIGGER_EDGE) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_EDGE; } /* Set mask */ reg = reg & ~(0x3 << 2*(irq % 16)); reg = reg | (mask << 2*(irq % 16)); gic_d_write_4(sc, GICD_ICFGR(irq >> 4), reg); mtx_unlock_spin(&sc->mutex); } static int gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus) { uint32_t cpu, end, mask; end = min(mp_ncpus, 8); for (cpu = end; cpu < MAXCPU; cpu++) if (CPU_ISSET(cpu, cpus)) return (EINVAL); for (mask = 0, cpu = 0; cpu < end; cpu++) if (CPU_ISSET(cpu, cpus)) mask |= 1 << cpu; gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask); return (0); } static int gic_irq_from_nspc(struct arm_gic_softc *sc, u_int type, u_int num, u_int *irqp) { switch (type) { case INTR_IRQ_NSPC_PLAIN: *irqp = num; return (*irqp < sc->nirqs ? 0 : EINVAL); case INTR_IRQ_NSPC_IRQ: *irqp = num + GIC_FIRST_PPI; return (*irqp < sc->nirqs ? 0 : EINVAL); case INTR_IRQ_NSPC_IPI: *irqp = num + GIC_FIRST_SGI; return (*irqp < GIC_LAST_SGI ? 0 : EINVAL); default: return (EINVAL); } } static int gic_map_nspc(struct arm_gic_softc *sc, struct intr_irqsrc *isrc, u_int *irqp) { int error; error = gic_irq_from_nspc(sc, isrc->isrc_nspc_type, isrc->isrc_nspc_num, irqp); if (error != 0) return (error); return (gic_attach_isrc(sc, isrc, *irqp)); } #ifdef FDT static int gic_map_fdt(struct arm_gic_softc *sc, struct intr_irqsrc *isrc, u_int *irqp) { u_int irq, tripol; enum intr_trigger trig; enum intr_polarity pol; int error; if (isrc->isrc_ncells == 1) { irq = isrc->isrc_cells[0]; pol = INTR_POLARITY_CONFORM; trig = INTR_TRIGGER_CONFORM; } else if (isrc->isrc_ncells == 3) { if (isrc->isrc_cells[0] == 0) irq = isrc->isrc_cells[1] + GIC_FIRST_SPI; else irq = isrc->isrc_cells[1] + GIC_FIRST_PPI; /* * In intr[2], bits[3:0] are trigger type and level flags. * 1 = low-to-high edge triggered * 2 = high-to-low edge triggered * 4 = active high level-sensitive * 8 = active low level-sensitive * The hardware only supports active-high-level or rising-edge. 
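gic_config() above packs each interrupt's polarity and trigger into a 2-bit field of GICD_ICFGR, sixteen fields per 32-bit register. A small sketch, not in the diff, of reading one field back, mirroring the shift arithmetic used there:

static uint32_t
example_icfgr_field(struct arm_gic_softc *sc, u_int irq)
{
	/* irq >> 4 selects the register, 2 * (irq % 16) the field. */
	return ((gic_d_read_4(sc, GICD_ICFGR(irq >> 4)) >>
	    (2 * (irq % 16))) & 0x3);
}
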
*/ tripol = isrc->isrc_cells[2]; if (tripol & 0x0a && irq >= GIC_FIRST_SPI) { device_printf(sc->gic_dev, "unsupported trigger/polarity configuration " "0x%02x\n", tripol & 0x0f); } pol = INTR_POLARITY_CONFORM; if (tripol & 0x03) trig = INTR_TRIGGER_EDGE; else trig = INTR_TRIGGER_LEVEL; } else return (EINVAL); if (irq >= sc->nirqs) return (EINVAL); error = gic_attach_isrc(sc, isrc, irq); if (error != 0) return (error); isrc->isrc_nspc_type = INTR_IRQ_NSPC_PLAIN; isrc->isrc_nspc_num = irq; isrc->isrc_trig = trig; isrc->isrc_pol = pol; *irqp = irq; return (0); } #endif static int arm_gic_register(device_t dev, struct intr_irqsrc *isrc, boolean_t *is_percpu) { struct arm_gic_softc *sc = device_get_softc(dev); u_int irq; int error; if (isrc->isrc_type == INTR_ISRCT_NAMESPACE) error = gic_map_nspc(sc, isrc, &irq); #ifdef FDT else if (isrc->isrc_type == INTR_ISRCT_FDT) error = gic_map_fdt(sc, isrc, &irq); #endif else return (EINVAL); if (error == 0) *is_percpu = irq < GIC_FIRST_SPI ? TRUE : FALSE; return (error); } static void arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); u_int irq = isrc->isrc_data; if (isrc->isrc_trig == INTR_TRIGGER_CONFORM) isrc->isrc_trig = INTR_TRIGGER_LEVEL; /* * XXX - In case that per CPU interrupt is going to be enabled in time * when SMP is already started, we need some IPI call which * enables it on others CPUs. Further, it's more complicated as * pic_enable_source() and pic_disable_source() should act on * per CPU basis only. Thus, it should be solved here somehow. */ if (isrc->isrc_flags & INTR_ISRCF_PERCPU) CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); gic_config(sc, irq, isrc->isrc_trig, isrc->isrc_pol); arm_gic_bind(dev, isrc); } static void arm_gic_enable_source(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); u_int irq = isrc->isrc_data; arm_irq_memory_barrier(irq); gic_irq_unmask(sc, irq); } static void arm_gic_disable_source(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); u_int irq = isrc->isrc_data; gic_irq_mask(sc, irq); } static int arm_gic_unregister(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); u_int irq = isrc->isrc_data; return (gic_detach_isrc(sc, isrc, irq)); } static void arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); arm_gic_disable_source(dev, isrc); gic_c_write_4(sc, GICC_EOIR, isrc->isrc_data); } static void arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc) { arm_irq_memory_barrier(0); arm_gic_enable_source(dev, isrc); } static void arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); /* EOI for edge-triggered done earlier. 
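gic_map_fdt() above decodes the standard three-cell GIC binding: cell 0 distinguishes SPI (0) from PPI (1), cell 1 gives the number within that range, and cell 2 carries the trigger/polarity flags enumerated in the comment. A worked example with invented values: the specifier <0 5 4> names SPI 5, i.e. hardware IRQ 5 + GIC_FIRST_SPI = 37, active-high level-sensitive. In sketch form:

	/* cells = <0 5 4>: SPI, number 5, flags 0x4 (active-high level) */
	u_int irq = 5 + GIC_FIRST_SPI;			/* == 37 */
	enum intr_trigger trig = INTR_TRIGGER_LEVEL;	/* (0x4 & 0x03) == 0 */
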
*/ if (isrc->isrc_trig == INTR_TRIGGER_EDGE) return; arm_irq_memory_barrier(0); gic_c_write_4(sc, GICC_EOIR, isrc->isrc_data); } static int arm_gic_bind(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); uint32_t irq = isrc->isrc_data; if (irq < GIC_FIRST_SPI) return (EINVAL); if (CPU_EMPTY(&isrc->isrc_cpu)) { gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus); CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu); } return (gic_bind(sc, irq, &isrc->isrc_cpu)); } #ifdef SMP static void arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus) { struct arm_gic_softc *sc = device_get_softc(dev); uint32_t irq, val = 0, i; irq = isrc->isrc_data; for (i = 0; i < MAXCPU; i++) if (CPU_ISSET(i, &cpus)) val |= 1 << (16 + i); gic_d_write_4(sc, GICD_SGIR(0), val | irq); } + +static int +arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc *isrc) +{ + struct arm_gic_softc *sc = device_get_softc(dev); + u_int irq; + int error; + + error = gic_map_nspc(sc, isrc, &irq); + if (error != 0) + return (error); + sgi_to_ipi[irq - GIC_FIRST_SGI] = ipi; + return (0); +} #endif #else static int arm_gic_next_irq(struct arm_gic_softc *sc, int last_irq) { uint32_t active_irq; active_irq = gic_c_read_4(sc, GICC_IAR); /* * Immediatly EOIR the SGIs, because doing so requires the other * bits (ie CPU number), not just the IRQ number, and we do not * have this information later. */ if ((active_irq & 0x3ff) <= GIC_LAST_SGI) gic_c_write_4(sc, GICC_EOIR, active_irq); active_irq &= 0x3FF; if (active_irq == 0x3FF) { if (last_irq == -1) device_printf(sc->gic_dev, "Spurious interrupt detected\n"); return -1; } return active_irq; } static int arm_gic_config(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { struct arm_gic_softc *sc = device_get_softc(dev); uint32_t reg; uint32_t mask; /* Function is public-accessible, so validate input arguments */ if ((irq < 0) || (irq >= sc->nirqs)) goto invalid_args; if ((trig != INTR_TRIGGER_EDGE) && (trig != INTR_TRIGGER_LEVEL) && (trig != INTR_TRIGGER_CONFORM)) goto invalid_args; if ((pol != INTR_POLARITY_HIGH) && (pol != INTR_POLARITY_LOW) && (pol != INTR_POLARITY_CONFORM)) goto invalid_args; mtx_lock_spin(&sc->mutex); reg = gic_d_read_4(sc, GICD_ICFGR(irq >> 4)); mask = (reg >> 2*(irq % 16)) & 0x3; if (pol == INTR_POLARITY_LOW) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_LOW; } else if (pol == INTR_POLARITY_HIGH) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_HIGH; } if (trig == INTR_TRIGGER_LEVEL) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_LVL; } else if (trig == INTR_TRIGGER_EDGE) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_EDGE; } /* Set mask */ reg = reg & ~(0x3 << 2*(irq % 16)); reg = reg | (mask << 2*(irq % 16)); gic_d_write_4(sc, GICD_ICFGR(irq >> 4), reg); mtx_unlock_spin(&sc->mutex); return (0); invalid_args: device_printf(dev, "gic_config_irg, invalid parameters\n"); return (EINVAL); } static void arm_gic_mask(device_t dev, int irq) { struct arm_gic_softc *sc = device_get_softc(dev); gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F))); gic_c_write_4(sc, GICC_EOIR, irq); /* XXX - not allowed */ } static void arm_gic_unmask(device_t dev, int irq) { struct arm_gic_softc *sc = device_get_softc(dev); if (irq > GIC_LAST_SGI) arm_irq_memory_barrier(irq); gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F))); } #ifdef SMP static void arm_gic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi) { struct arm_gic_softc *sc = 
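Both arm_gic_ipi_send() variants encode the same GICD_SGIR format: bits [23:16] form the CPU target list, bits [3:0] the SGI number. For instance (a sketch, not part of the diff), sending SGI 2 to CPUs 0 and 1:

	gic_d_write_4(sc, GICD_SGIR(0), (0x3 << 16) | 2);
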
device_get_softc(dev); uint32_t val = 0, i; for (i = 0; i < MAXCPU; i++) if (CPU_ISSET(i, &cpus)) val |= 1 << (16 + i); gic_d_write_4(sc, GICD_SGIR(0), val | ipi); } static int arm_gic_ipi_read(device_t dev, int i) { if (i != -1) { /* * The intr code will automagically give the frame pointer * if the interrupt argument is 0. */ if ((unsigned int)i > 16) return (0); return (i); } return (0x3ff); } static void arm_gic_ipi_clear(device_t dev, int ipi) { /* no-op */ } #endif static void gic_post_filter(void *arg) { struct arm_gic_softc *sc = gic_sc; uintptr_t irq = (uintptr_t) arg; if (irq > GIC_LAST_SGI) arm_irq_memory_barrier(irq); gic_c_write_4(sc, GICC_EOIR, irq); } static int gic_config_irq(int irq, enum intr_trigger trig, enum intr_polarity pol) { return (arm_gic_config(gic_sc->gic_dev, irq, trig, pol)); } void arm_mask_irq(uintptr_t nb) { arm_gic_mask(gic_sc->gic_dev, nb); } void arm_unmask_irq(uintptr_t nb) { arm_gic_unmask(gic_sc->gic_dev, nb); } int arm_get_next_irq(int last_irq) { return (arm_gic_next_irq(gic_sc, last_irq)); } #ifdef SMP void intr_pic_init_secondary(void) { arm_gic_init_secondary(gic_sc->gic_dev); } void pic_ipi_send(cpuset_t cpus, u_int ipi) { arm_gic_ipi_send(gic_sc->gic_dev, cpus, ipi); } int pic_ipi_read(int i) { return (arm_gic_ipi_read(gic_sc->gic_dev, i)); } void pic_ipi_clear(int ipi) { arm_gic_ipi_clear(gic_sc->gic_dev, ipi); } #endif #endif /* ARM_INTRNG */ static device_method_t arm_gic_methods[] = { /* Device interface */ DEVMETHOD(device_probe, arm_gic_probe), DEVMETHOD(device_attach, arm_gic_attach), #ifdef ARM_INTRNG /* Interrupt controller interface */ DEVMETHOD(pic_disable_source, arm_gic_disable_source), DEVMETHOD(pic_enable_intr, arm_gic_enable_intr), DEVMETHOD(pic_enable_source, arm_gic_enable_source), DEVMETHOD(pic_post_filter, arm_gic_post_filter), DEVMETHOD(pic_post_ithread, arm_gic_post_ithread), DEVMETHOD(pic_pre_ithread, arm_gic_pre_ithread), DEVMETHOD(pic_register, arm_gic_register), DEVMETHOD(pic_unregister, arm_gic_unregister), #ifdef SMP DEVMETHOD(pic_bind, arm_gic_bind), DEVMETHOD(pic_init_secondary, arm_gic_init_secondary), DEVMETHOD(pic_ipi_send, arm_gic_ipi_send), + DEVMETHOD(pic_ipi_setup, arm_gic_ipi_setup), #endif #endif { 0, 0 } }; static driver_t arm_gic_driver = { "gic", arm_gic_methods, sizeof(struct arm_gic_softc), }; static devclass_t arm_gic_devclass; EARLY_DRIVER_MODULE(gic, simplebus, arm_gic_driver, arm_gic_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(gic, ofwbus, arm_gic_driver, arm_gic_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); Index: head/sys/arm/arm/machdep_intr.c =================================================================== --- head/sys/arm/arm/machdep_intr.c (revision 297229) +++ head/sys/arm/arm/machdep_intr.c (revision 297230) @@ -1,258 +1,251 @@ /* $NetBSD: intr.c,v 1.12 2003/07/15 00:24:41 lukem Exp $ */ /*- * Copyright (c) 2004 Olivier Houchard. * Copyright (c) 1994-1998 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Soft interrupt and other generic interrupt functions. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ARM_INTRNG #include "pic_if.h" #ifdef SMP -static struct intr_irqsrc ipi_sources[INTR_IPI_COUNT]; -static u_int ipi_next_num; +#define INTR_IPI_NAMELEN (MAXCOMLEN + 1) + +struct intr_ipi { + intr_ipi_handler_t * ii_handler; + void * ii_handler_arg; + intr_ipi_send_t * ii_send; + void * ii_send_arg; + char ii_name[INTR_IPI_NAMELEN]; + u_long * ii_count; +}; + +static struct intr_ipi ipi_sources[INTR_IPI_COUNT]; +u_int ipi_next_num; #endif #endif /* * arm_irq_memory_barrier() * * Ensure all writes to device memory have reached devices before proceeding. * * This is intended to be called from the post-filter and post-thread routines * of an interrupt controller implementation. A peripheral device driver should * use bus_space_barrier() if it needs to ensure a write has reached the * hardware for some reason other than clearing interrupt conditions. * * The need for this function arises from the ARM weak memory ordering model. * Writes to locations mapped with the Device attribute bypass any caches, but * are buffered. Multiple writes to the same device will be observed by that * device in the order issued by the cpu. Writes to different devices may * appear at those devices in a different order than issued by the cpu. That * is, if the cpu writes to device A then device B, the write to device B could * complete before the write to device A. * * Consider a typical device interrupt handler which services the interrupt and * writes to a device status-acknowledge register to clear the interrupt before * returning. That write is posted to the L2 controller which "immediately" * places it in a store buffer and automatically drains that buffer. This can * be less immediate than you'd think... There may be no free slots in the store * buffers, so an existing buffer has to be drained first to make room. The * target bus may be busy with other traffic (such as DMA for various devices), * delaying the drain of the store buffer for some indeterminate time. 
While * all this delay is happening, execution proceeds on the CPU, unwinding its way * out of the interrupt call stack to the point where the interrupt driver code * is ready to EOI and unmask the interrupt. The interrupt controller may be * accessed via a faster bus than the hardware whose handler just ran; the write * to unmask and EOI the interrupt may complete quickly while the device write * to ack and clear the interrupt source is still lingering in a store buffer * waiting for access to a slower bus. With the interrupt unmasked at the * interrupt controller but still active at the device, as soon as interrupts * are enabled on the core the device re-interrupts immediately: now you've got * a spurious interrupt on your hands. * * The right way to fix this problem is for every device driver to use the * proper bus_space_barrier() calls in its interrupt handler. For ARM a single * barrier call at the end of the handler would work. This would have to be * done to every driver in the system, not just arm-specific drivers. * * Another potential fix is to map all device memory as Strongly-Ordered rather * than Device memory, which takes the store buffers out of the picture. This * has a pretty big impact on overall system performance, because each strongly * ordered memory access causes all L2 store buffers to be drained. * * A compromise solution is to have the interrupt controller implementation call * this function to establish a barrier between writes to the interrupt-source * device and writes to the interrupt controller device. * * This takes the interrupt number as an argument, and currently doesn't use it. * The plan is that maybe some day there is a way to flag certain interrupts as * "memory barrier safe" and we can avoid this overhead with them. */ void arm_irq_memory_barrier(uintptr_t irq) { dsb(); cpu_l2cache_drain_writebuf(); } #ifdef ARM_INTRNG #ifdef SMP -/* - * Lookup IPI source. - */ -static struct intr_irqsrc * +static inline struct intr_ipi * intr_ipi_lookup(u_int ipi) { if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); return (&ipi_sources[ipi]); } -/* - * interrupt controller dispatch function for IPIs. It should - * be called straight from the interrupt controller, when associated - * interrupt source is learned. Or from anybody who has an interrupt - * source mapped. - */ void -intr_ipi_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf) +intr_ipi_dispatch(u_int ipi, struct trapframe *tf) { void *arg; + struct intr_ipi *ii; - KASSERT(isrc != NULL, ("%s: no source", __func__)); + ii = intr_ipi_lookup(ipi); + if (ii->ii_count == NULL) + panic("%s: not setup IPI %u", __func__, ipi); - intr_ipi_increment_count(isrc->isrc_count, PCPU_GET(cpuid)); + intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid)); /* * Supply ipi filter with trapframe argument * if none is registered. */ - arg = isrc->isrc_arg != NULL ? isrc->isrc_arg : tf; - isrc->isrc_ipifilter(arg); + arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf; + ii->ii_handler(arg); } -/* - * Map IPI into interrupt controller. - * - * Not SMP coherent. 
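As the comment above notes, the per-driver alternative to this controller-level barrier is an explicit bus_space_barrier() at the end of the interrupt handler. A hedged sketch for a hypothetical device (bst/bsh are the device's bus space tag and handle; FOO_INTR_ACK is an invented register offset):

	/* Ack the interrupt condition in the device... */
	bus_space_write_4(bst, bsh, FOO_INTR_ACK, 1);
	/* ...and force the write out of the store buffers before the
	 * interrupt is EOIed and unmasked at the controller. */
	bus_space_barrier(bst, bsh, FOO_INTR_ACK, 4,
	    BUS_SPACE_BARRIER_WRITE);
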
- */ -static int -ipi_map(struct intr_irqsrc *isrc, u_int ipi) +void +intr_ipi_send(cpuset_t cpus, u_int ipi) { - boolean_t is_percpu; - int error; + struct intr_ipi *ii; - if (ipi >= INTR_IPI_COUNT) - panic("%s: no such IPI %u", __func__, ipi); + ii = intr_ipi_lookup(ipi); + if (ii->ii_count == NULL) + panic("%s: not setup IPI %u", __func__, ipi); - KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); + ii->ii_send(ii->ii_send_arg, cpus); +} - isrc->isrc_type = INTR_ISRCT_NAMESPACE; - isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI; - isrc->isrc_nspc_num = ipi_next_num; +void +intr_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, + void *h_arg, intr_ipi_send_t *send, void *s_arg) +{ + struct intr_ipi *ii; - error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu); - if (error == 0) { - isrc->isrc_dev = intr_irq_root_dev; - ipi_next_num++; - } - return (error); + ii = intr_ipi_lookup(ipi); + + KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi)); + KASSERT(send != NULL, ("%s: ipi %u no sender", __func__, ipi)); + KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi)); + + ii->ii_handler = hand; + ii->ii_handler_arg = h_arg; + ii->ii_send = send; + ii->ii_send_arg = s_arg; + strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN); + ii->ii_count = intr_ipi_setup_counters(name); } /* - * Setup IPI handler to interrupt source. + * Send IPI thru interrupt controller. + */ +static void +pic_ipi_send(void *arg, cpuset_t cpus) +{ + + KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); + PIC_IPI_SEND(intr_irq_root_dev, arg, cpus); +} + +/* + * Setup IPI handler on interrupt controller. * - * Note that there could be more ways how to send and receive IPIs - * on a platform like fast interrupts for example. In that case, - * one can call this function with ASIF_NOALLOC flag set and then - * call intr_ipi_dispatch() when appropriate. - * * Not SMP coherent. */ int -intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter, - void *arg, u_int flags) +intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, + void *arg) { - struct intr_irqsrc *isrc; int error; + struct intr_irqsrc *isrc; - if (filter == NULL) - return(EINVAL); + KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); - isrc = intr_ipi_lookup(ipi); - if (isrc->isrc_ipifilter != NULL) - return (EEXIST); + isrc = intr_isrc_alloc(INTR_ISRCT_NAMESPACE, 0); + isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI; + isrc->isrc_nspc_num = ipi_next_num; - if ((flags & AISHF_NOALLOC) == 0) { - error = ipi_map(isrc, ipi); - if (error != 0) - return (error); - } + error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, isrc); + if (error != 0) + return (error); - isrc->isrc_ipifilter = filter; - isrc->isrc_arg = arg; - isrc->isrc_handlers = 1; - isrc->isrc_count = intr_ipi_setup_counters(name); - isrc->isrc_index = 0; /* it should not be used in IPI case */ + ipi_next_num++; - if (isrc->isrc_dev != NULL) { - PIC_ENABLE_INTR(isrc->isrc_dev, isrc); - PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc); - } + isrc->isrc_dev = intr_irq_root_dev; + isrc->isrc_handlers = 1; + intr_ipi_setup(ipi, name, hand, arg, pic_ipi_send, isrc); return (0); -} - -/* - * Send IPI thru interrupt controller. 
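intr_pic_ipi_setup() covers the common case of PIC-delivered IPIs, but intr_ipi_setup() deliberately takes the send method as a parameter, so a platform with a private fast-IPI mechanism could register its own sender, the flexibility the removed AISHF_NOALLOC flag used to hint at. A hypothetical sketch (example_doorbell_send() and its doorbell are invented; ipi_hardclock is the handler from mp_machdep.c):

static void
example_doorbell_send(void *arg, cpuset_t cpus)
{
	/* Ring a platform-private per-CPU doorbell for each CPU set. */
}

static void
example_register_fast_ipi(void)
{
	intr_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL,
	    example_doorbell_send, NULL);
}
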
- */ -void -pic_ipi_send(cpuset_t cpus, u_int ipi) -{ - struct intr_irqsrc *isrc; - - isrc = intr_ipi_lookup(ipi); - - KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); - PIC_IPI_SEND(intr_irq_root_dev, isrc, cpus); } #endif #endif Index: head/sys/arm/arm/mp_machdep.c =================================================================== --- head/sys/arm/arm/mp_machdep.c (revision 297229) +++ head/sys/arm/arm/mp_machdep.c (revision 297230) @@ -1,527 +1,537 @@ /*- * Copyright (c) 2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef CPU_MV_PJ4B #include #include #endif #include "opt_smp.h" extern struct pcpu __pcpu[]; /* used to hold the AP's until we are ready to release them */ struct mtx ap_boot_mtx; struct pcb stoppcbs[MAXCPU]; /* # of Applications processors */ volatile int mp_naps; /* Set to 1 once we're ready to let the APs out of the pen. 
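The net effect of this hunk is visible in the two pic_ipi_send() signatures: the removed global looked the interrupt source up on every send, while the replacement above is a private helper that receives the pre-bound source as its send argument:

	/* old, removed here: */
	void pic_ipi_send(cpuset_t cpus, u_int ipi);
	/* new, static in machdep_intr.c: */
	static void pic_ipi_send(void *arg, cpuset_t cpus);
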
*/ volatile int aps_ready = 0; #ifndef ARM_INTRNG static int ipi_handler(void *arg); #endif void set_stackptrs(int cpu); /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; /* Determine if we running MP machine */ int cpu_mp_probe(void) { KASSERT(mp_ncpus != 0, ("cpu_mp_probe: mp_ncpus is unset")); CPU_SETOF(0, &all_cpus); return (mp_ncpus > 1); } /* Start Application Processor via platform specific function */ static int check_ap(void) { uint32_t ms; for (ms = 0; ms < 2000; ++ms) { if ((mp_naps + 1) == mp_ncpus) return (0); /* success */ else DELAY(1000); } return (-2); } extern unsigned char _end[]; /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { int error, i; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); /* Reserve memory for application processors */ for(i = 0; i < (mp_ncpus - 1); i++) dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO); dcache_wbinv_poc_all(); /* Initialize boot code and start up processors */ platform_mp_start_ap(); /* Check if ap's started properly */ error = check_ap(); if (error) printf("WARNING: Some AP's failed to start\n"); else for (i = 1; i < mp_ncpus; i++) CPU_SET(i, &all_cpus); } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { } extern vm_paddr_t pmap_pa; void init_secondary(int cpu) { struct pcpu *pc; uint32_t loop_counter; #ifndef ARM_INTRNG int start = 0, end = 0; #endif uint32_t actlr_mask, actlr_set; pmap_set_tex(); cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set); reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set); cpu_setup(); /* Provide stack pointers for other processor modes. */ set_stackptrs(cpu); enable_interrupts(PSR_A); pc = &__pcpu[cpu]; /* * pcpu_init() updates queue, so it should not be executed in parallel * on several cores */ while(mp_naps < (cpu - 1)) ; pcpu_init(pc, cpu, sizeof(struct pcpu)); dpcpu_init(dpcpu[cpu - 1], cpu); /* Signal our startup to BSP */ atomic_add_rel_32(&mp_naps, 1); /* Spin until the BSP releases the APs */ while (!atomic_load_acq_int(&aps_ready)) { #if __ARM_ARCH >= 7 __asm __volatile("wfe"); #endif } /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pc->pc_curthread = pc->pc_idlethread; pc->pc_curpcb = pc->pc_idlethread->td_pcb; set_curthread(pc->pc_idlethread); #ifdef VFP vfp_init(); #endif mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPI's, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); #ifndef ARM_INTRNG /* Enable ipi */ #ifdef IPI_IRQ_START start = IPI_IRQ_START; #ifdef IPI_IRQ_END end = IPI_IRQ_END; #else end = IPI_IRQ_START; #endif #endif for (int i = start; i <= end; i++) arm_unmask_irq(i); #endif /* INTRNG */ enable_interrupts(PSR_I); loop_counter = 0; while (smp_started == 0) { DELAY(100); loop_counter++; if (loop_counter == 1000) CTR0(KTR_SMP, "AP still wait for smp_started"); } /* Start per-CPU event timers. */ cpu_initclocks_ap(); CTR0(KTR_SMP, "go into scheduler"); intr_pic_init_secondary(); /* Enter the scheduler */ sched_throw(NULL); panic("scheduler returned us to %s", __func__); /* NOTREACHED */ } #ifdef ARM_INTRNG static void ipi_rendezvous(void *dummy __unused) { CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); } static void ipi_ast(void *dummy __unused) { CTR0(KTR_SMP, "IPI_AST"); } static void ipi_stop(void *dummy __unused) { u_int cpu; /* * IPI_STOP_HARD is mapped to IPI_STOP. 
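Condensed from init_secondary() above and release_aps() below, the BSP/AP release handshake on ARMv7 is simply:

	/* AP side, in init_secondary(): */
	while (!atomic_load_acq_int(&aps_ready))
		__asm __volatile("wfe");

	/* BSP side, in release_aps(): */
	atomic_store_rel_int(&aps_ready, 1);
	armv7_sev();		/* wake the APs parked in WFE */
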
*/ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD"); cpu = PCPU_GET(cpuid); savectx(&stoppcbs[cpu]); /* * CPUs are stopped when entering the debugger and at * system shutdown, both events which can precede a * panic dump. For the dump to be correct, all caches * must be flushed and invalidated, but on ARM there's * no way to broadcast a wbinv_all to other cores. * Instead, we have each core do the local wbinv_all as * part of stopping the core. The core requesting the * stop will do the l2 cache flush after all other cores * have done their l1 flushes and stopped. */ dcache_wbinv_poc_all(); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); #ifdef DDB dbg_resume_dbreg(); #endif CTR0(KTR_SMP, "IPI_STOP (restart)"); } static void ipi_preempt(void *arg) { struct trapframe *oldframe; struct thread *td; critical_enter(); td = curthread; td->td_intr_nesting_level++; oldframe = td->td_intr_frame; td->td_intr_frame = (struct trapframe *)arg; CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(td); td->td_intr_frame = oldframe; td->td_intr_nesting_level--; critical_exit(); } static void ipi_hardclock(void *arg) { struct trapframe *oldframe; struct thread *td; critical_enter(); td = curthread; td->td_intr_nesting_level++; oldframe = td->td_intr_frame; td->td_intr_frame = (struct trapframe *)arg; CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); td->td_intr_frame = oldframe; td->td_intr_nesting_level--; critical_exit(); } #else static int ipi_handler(void *arg) { u_int cpu, ipi; cpu = PCPU_GET(cpuid); ipi = pic_ipi_read((int)arg); while ((ipi != 0x3ff)) { switch (ipi) { case IPI_RENDEZVOUS: CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); break; case IPI_AST: CTR0(KTR_SMP, "IPI_AST"); break; case IPI_STOP: /* * IPI_STOP_HARD is mapped to IPI_STOP so it is not * necessary to add it in the switch. */ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD"); savectx(&stoppcbs[cpu]); /* * CPUs are stopped when entering the debugger and at * system shutdown, both events which can precede a * panic dump. For the dump to be correct, all caches * must be flushed and invalidated, but on ARM there's * no way to broadcast a wbinv_all to other cores. * Instead, we have each core do the local wbinv_all as * part of stopping the core. The core requesting the * stop will do the l2 cache flush after all other cores * have done their l1 flushes and stopped. 
*/ dcache_wbinv_poc_all(); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); #ifdef DDB dbg_resume_dbreg(); #endif CTR0(KTR_SMP, "IPI_STOP (restart)"); break; case IPI_PREEMPT: CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); break; case IPI_HARDCLOCK: CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); break; default: panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu); } pic_ipi_clear(ipi); ipi = pic_ipi_read(-1); } return (FILTER_HANDLED); } #endif static void release_aps(void *dummy __unused) { uint32_t loop_counter; #ifndef ARM_INTRNG int start = 0, end = 0; #endif if (mp_ncpus == 1) return; #ifdef ARM_INTRNG - intr_ipi_set_handler(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL, 0); - intr_ipi_set_handler(IPI_AST, "ast", ipi_ast, NULL, 0); - intr_ipi_set_handler(IPI_STOP, "stop", ipi_stop, NULL, 0); - intr_ipi_set_handler(IPI_PREEMPT, "preempt", ipi_preempt, NULL, 0); - intr_ipi_set_handler(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL, 0); - + intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL); + intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL); + intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL); + intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL); + intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL); #else #ifdef IPI_IRQ_START start = IPI_IRQ_START; #ifdef IPI_IRQ_END end = IPI_IRQ_END; #else end = IPI_IRQ_START; #endif #endif for (int i = start; i <= end; i++) { /* * IPI handler */ /* * Use 0xdeadbeef as the argument value for irq 0, * if we used 0, the intr code will give the trap frame * pointer instead. */ arm_setup_irqhandler("ipi", ipi_handler, NULL, (void *)i, i, INTR_TYPE_MISC | INTR_EXCL, NULL); /* Enable ipi */ arm_unmask_irq(i); } #endif atomic_store_rel_int(&aps_ready, 1); /* Wake the other threads up */ #if __ARM_ARCH >= 7 armv7_sev(); #endif printf("Release APs\n"); for (loop_counter = 0; loop_counter < 2000; loop_counter++) { if (smp_started) return; DELAY(1000); } printf("AP's not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); struct cpu_group * cpu_topo(void) { return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0)); } void cpu_mp_setmaxid(void) { platform_mp_setmaxid(); } /* Sending IPI */ void ipi_all_but_self(u_int ipi) { cpuset_t other_cpus; other_cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &other_cpus); CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); +#ifdef ARM_INTRNG + intr_ipi_send(other_cpus, ipi); +#else pic_ipi_send(other_cpus, ipi); +#endif } void ipi_cpu(int cpu, u_int ipi) { cpuset_t cpus; CPU_ZERO(&cpus); CPU_SET(cpu, &cpus); CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi); +#ifdef ARM_INTRNG + intr_ipi_send(cpus, ipi); +#else pic_ipi_send(cpus, ipi); +#endif } void ipi_selected(cpuset_t cpus, u_int ipi) { CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); +#ifdef ARM_INTRNG + intr_ipi_send(cpus, ipi); +#else pic_ipi_send(cpus, ipi); +#endif } - Index: head/sys/arm/include/intr.h =================================================================== --- head/sys/arm/include/intr.h (revision 297229) +++ head/sys/arm/include/intr.h (revision 297230) @@ -1,115 +1,118 @@ /* $NetBSD: intr.h,v 1.7 2003/06/16 20:01:00 thorpej Exp $ */ /*- * Copyright (c) 1997 Mark Brinicombe. * All rights reserved. 
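With these changes, an MI request such as ipi_cpu(1, IPI_PREEMPT) resolves, under ARM_INTRNG, through the layers introduced above; a compressed sketch of the chain:

	ipi_cpu(1, IPI_PREEMPT);
	/* -> intr_ipi_send(cpus, IPI_PREEMPT)
	 * -> ii->ii_send(ii->ii_send_arg, cpus), i.e. pic_ipi_send()
	 * -> PIC_IPI_SEND(intr_irq_root_dev, isrc, cpus)
	 * -> arm_gic_ipi_send() writes GICD_SGIR */
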
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _MACHINE_INTR_H_ #define _MACHINE_INTR_H_ #ifdef FDT #include #endif #ifdef ARM_INTRNG #ifndef NIRQ #define NIRQ 1024 /* XXX - It should be an option. */ #endif #include #ifdef SMP -void intr_ipi_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf); +typedef void intr_ipi_send_t(void *, cpuset_t); +typedef void intr_ipi_handler_t(void *); -#define AISHF_NOALLOC 0x0001 +void intr_ipi_dispatch(u_int, struct trapframe *); +void intr_ipi_send(cpuset_t, u_int); -int intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter, - void *arg, u_int flags); -#endif +void intr_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *, + intr_ipi_send_t *, void *); +int intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *); +#endif #else /* ARM_INTRNG */ /* XXX move to std.* files? 
*/ #ifdef CPU_XSCALE_81342 #define NIRQ 128 #elif defined(CPU_XSCALE_PXA2X0) #include #define NIRQ IRQ_GPIO_MAX #elif defined(SOC_MV_DISCOVERY) #define NIRQ 96 #elif defined(CPU_ARM9) || defined(SOC_MV_KIRKWOOD) || \ defined(CPU_XSCALE_IXP435) #define NIRQ 64 #elif defined(CPU_CORTEXA) #define NIRQ 1020 #elif defined(CPU_KRAIT) #define NIRQ 288 #elif defined(CPU_ARM1176) #define NIRQ 128 #elif defined(SOC_MV_ARMADAXP) #define MAIN_IRQ_NUM 116 #define ERR_IRQ_NUM 32 #define ERR_IRQ (MAIN_IRQ_NUM) #define MSI_IRQ_NUM 32 #define MSI_IRQ (ERR_IRQ + ERR_IRQ_NUM) #define NIRQ (MAIN_IRQ_NUM + ERR_IRQ_NUM + MSI_IRQ_NUM) #else #define NIRQ 32 #endif int arm_get_next_irq(int); void arm_mask_irq(uintptr_t); void arm_unmask_irq(uintptr_t); void arm_intrnames_init(void); void arm_setup_irqhandler(const char *, int (*)(void*), void (*)(void*), void *, int, int, void **); int arm_remove_irqhandler(int, void *); extern void (*arm_post_filter)(void *); extern int (*arm_config_irq)(int irq, enum intr_trigger trig, enum intr_polarity pol); void intr_pic_init_secondary(void); #ifdef FDT int gic_decode_fdt(phandle_t, pcell_t *, int *, int *, int *); int intr_fdt_map_irq(phandle_t, pcell_t *, int); #endif #endif /* ARM_INTRNG */ void arm_irq_memory_barrier(uintptr_t); #endif /* _MACHINE_INTR_H */ Index: head/sys/arm/include/smp.h =================================================================== --- head/sys/arm/include/smp.h (revision 297229) +++ head/sys/arm/include/smp.h (revision 297230) @@ -1,53 +1,53 @@ /* $FreeBSD$ */ #ifndef _MACHINE_SMP_H_ #define _MACHINE_SMP_H_ #include #include #ifdef ARM_INTRNG enum { IPI_AST, IPI_PREEMPT, IPI_RENDEZVOUS, IPI_STOP, IPI_STOP_HARD = IPI_STOP, /* These are synonyms on arm. */ IPI_HARDCLOCK, IPI_TLB, /* Not used now, but keep it reserved. */ IPI_CACHE, /* Not used now, but keep it reserved. */ INTR_IPI_COUNT }; #else #define IPI_AST 0 #define IPI_PREEMPT 2 #define IPI_RENDEZVOUS 3 #define IPI_STOP 4 #define IPI_STOP_HARD 4 #define IPI_HARDCLOCK 6 #define IPI_TLB 7 /* Not used now, but keep it reserved. */ #define IPI_CACHE 8 /* Not used now, but keep it reserved. */ #endif /* INTRNG */ void init_secondary(int cpu); void mpentry(void); void ipi_all_but_self(u_int ipi); void ipi_cpu(int cpu, u_int ipi); void ipi_selected(cpuset_t cpus, u_int ipi); /* PIC interface */ -void pic_ipi_send(cpuset_t cpus, u_int ipi); #ifndef ARM_INTRNG +void pic_ipi_send(cpuset_t cpus, u_int ipi); void pic_ipi_clear(int ipi); int pic_ipi_read(int arg); #endif /* Platform interface */ void platform_mp_setmaxid(void); void platform_mp_start_ap(void); /* global data in mp_machdep.c */ extern struct pcb stoppcbs[]; #endif /* !_MACHINE_SMP_H_ */ Index: head/sys/kern/pic_if.m =================================================================== --- head/sys/kern/pic_if.m (revision 297229) +++ head/sys/kern/pic_if.m (revision 297230) @@ -1,124 +1,137 @@ #- # Copyright (c) 2012 Jakub Wojciech Klama # Copyright (c) 2015 Svatopluk Kraus # Copyright (c) 2015 Michal Meloun # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # $FreeBSD$ # #include #include #include #include INTERFACE pic; CODE { static int null_pic_bind(device_t dev, struct intr_irqsrc *isrc) { return (EOPNOTSUPP); } static void null_pic_disable_intr(device_t dev, struct intr_irqsrc *isrc) { return; } static void null_pic_enable_intr(device_t dev, struct intr_irqsrc *isrc) { return; } static void null_pic_init_secondary(device_t dev) { return; } static void null_pic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi) { return; } + + static int + dflt_pic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc *isrc) + { + + return (EOPNOTSUPP); + } }; METHOD int register { device_t dev; struct intr_irqsrc *isrc; boolean_t *is_percpu; }; METHOD int unregister { device_t dev; struct intr_irqsrc *isrc; }; METHOD void disable_intr { device_t dev; struct intr_irqsrc *isrc; } DEFAULT null_pic_disable_intr; METHOD void disable_source { device_t dev; struct intr_irqsrc *isrc; }; METHOD void enable_source { device_t dev; struct intr_irqsrc *isrc; }; METHOD void enable_intr { device_t dev; struct intr_irqsrc *isrc; } DEFAULT null_pic_enable_intr; METHOD void pre_ithread { device_t dev; struct intr_irqsrc *isrc; }; METHOD void post_ithread { device_t dev; struct intr_irqsrc *isrc; }; METHOD void post_filter { device_t dev; struct intr_irqsrc *isrc; }; METHOD int bind { device_t dev; struct intr_irqsrc *isrc; } DEFAULT null_pic_bind; METHOD void init_secondary { device_t dev; } DEFAULT null_pic_init_secondary; METHOD void ipi_send { device_t dev; struct intr_irqsrc *isrc; cpuset_t cpus; } DEFAULT null_pic_ipi_send; + +METHOD int ipi_setup { + device_t dev; + u_int ipi; + struct intr_irqsrc *isrc; +} DEFAULT dflt_pic_ipi_setup; Index: head/sys/kern/subr_intr.c =================================================================== --- head/sys/kern/subr_intr.c (revision 297229) +++ head/sys/kern/subr_intr.c (revision 297230) @@ -1,1214 +1,1214 @@ /*- * Copyright (c) 2012-2014 Jakub Wojciech Klama . * Copyright (c) 2015 Svatopluk Kraus * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); /* * New-style Interrupt Framework * * TODO: - to support IPI (PPI) enabling on other CPUs if already started * - to complete things for removable PICs */ #include "opt_ddb.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif #ifdef DDB #include #endif #include "pic_if.h" #define INTRNAME_LEN (2*MAXCOMLEN + 1) #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif MALLOC_DECLARE(M_INTRNG); MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling"); /* Main interrupt handler called from assembler -> 'hidden' for C code. */ void intr_irq_handler(struct trapframe *tf); /* Root interrupt controller stuff. */ device_t intr_irq_root_dev; static intr_irq_filter_t *irq_root_filter; static void *irq_root_arg; static u_int irq_root_ipicount; /* Interrupt controller definition. */ struct intr_pic { SLIST_ENTRY(intr_pic) pic_next; intptr_t pic_xref; /* hardware identification */ device_t pic_dev; }; static struct mtx pic_list_lock; static SLIST_HEAD(, intr_pic) pic_list; static struct intr_pic *pic_lookup(device_t dev, intptr_t xref); /* Interrupt source definition. */ static struct mtx isrc_table_lock; static struct intr_irqsrc *irq_sources[NIRQ]; u_int irq_next_free; #define IRQ_INVALID nitems(irq_sources) #ifdef SMP static boolean_t irq_assign_cpu = FALSE; #endif /* * - 2 counters for each I/O interrupt. * - MAXCPU counters for each IPI counters for SMP. */ #ifdef SMP #define INTRCNT_COUNT (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU) #else #define INTRCNT_COUNT (NIRQ * 2) #endif /* Data for MI statistics reporting. */ u_long intrcnt[INTRCNT_COUNT]; char intrnames[INTRCNT_COUNT * INTRNAME_LEN]; size_t sintrcnt = sizeof(intrcnt); size_t sintrnames = sizeof(intrnames); static u_int intrcnt_index; /* * Interrupt framework initialization routine. */ static void intr_irq_init(void *dummy __unused) { SLIST_INIT(&pic_list); mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF); mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF); } SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL); static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s", INTRNAME_LEN - 1, name); } /* * Update name for interrupt source with interrupt event. */ static void intrcnt_updatename(struct intr_irqsrc *isrc) { /* QQQ: What about stray counter name? */ mtx_assert(&isrc_table_lock, MA_OWNED); intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index); } /* * Virtualization for interrupt source interrupt counter increment. */ static inline void isrc_increment_count(struct intr_irqsrc *isrc) { /* * XXX - It should be atomic for PPI interrupts. 
It was proven that * the loss is easily measurable for timer PPI interrupts. */ isrc->isrc_count[0]++; /*atomic_add_long(&isrc->isrc_count[0], 1);*/ } /* * Virtualization for interrupt source interrupt stray counter increment. */ static inline void isrc_increment_straycount(struct intr_irqsrc *isrc) { isrc->isrc_count[1]++; } /* * Virtualization for interrupt source interrupt name update. */ static void isrc_update_name(struct intr_irqsrc *isrc, const char *name) { char str[INTRNAME_LEN]; mtx_assert(&isrc_table_lock, MA_OWNED); if (name != NULL) { snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name); intrcnt_setname(str, isrc->isrc_index); snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name, name); intrcnt_setname(str, isrc->isrc_index + 1); } else { snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name); intrcnt_setname(str, isrc->isrc_index); snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name); intrcnt_setname(str, isrc->isrc_index + 1); } } /* * Virtualization for interrupt source interrupt counters setup. */ static void isrc_setup_counters(struct intr_irqsrc *isrc) { u_int index; /* * XXX - it does not work well with removable controllers and * interrupt sources !!! */ index = atomic_fetchadd_int(&intrcnt_index, 2); isrc->isrc_index = index; isrc->isrc_count = &intrcnt[index]; isrc_update_name(isrc, NULL); } #ifdef SMP /* * Virtualization for interrupt source IPI counters setup. */ u_long * intr_ipi_setup_counters(const char *name) { u_int index, i; char str[INTRNAME_LEN]; index = atomic_fetchadd_int(&intrcnt_index, MAXCPU); for (i = 0; i < MAXCPU; i++) { snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name); intrcnt_setname(str, index + i); } return (&intrcnt[index]); } #endif /* * Main interrupt dispatch handler. It is called straight * from the assembler, where the CPU interrupt is serviced. */ void intr_irq_handler(struct trapframe *tf) { struct trapframe * oldframe; struct thread * td; KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__)); PCPU_INC(cnt.v_intr); critical_enter(); td = curthread; oldframe = td->td_intr_frame; td->td_intr_frame = tf; irq_root_filter(irq_root_arg); td->td_intr_frame = oldframe; critical_exit(); } /* * Interrupt controller dispatch function. It should * be called straight from the interrupt controller, once the associated * interrupt source is known. */ void intr_irq_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf) { KASSERT(isrc != NULL, ("%s: no source", __func__)); isrc_increment_count(isrc); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) { int error; error = isrc->isrc_filter(isrc->isrc_arg, tf); PIC_POST_FILTER(isrc->isrc_dev, isrc); if (error == FILTER_HANDLED) return; } else #endif if (isrc->isrc_event != NULL) { if (intr_event_handle(isrc->isrc_event, tf) == 0) return; } isrc_increment_straycount(isrc); PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc); device_printf(isrc->isrc_dev, "stray irq <%s> disabled", isrc->isrc_name); } /* * Allocate interrupt source. */ -static struct intr_irqsrc * -isrc_alloc(u_int type, u_int extsize) +struct intr_irqsrc * +intr_isrc_alloc(u_int type, u_int extsize) { struct intr_irqsrc *isrc; isrc = malloc(sizeof(*isrc) + extsize, M_INTRNG, M_WAITOK | M_ZERO); isrc->isrc_irq = IRQ_INVALID; /* just to be safe */ isrc->isrc_type = type; isrc->isrc_nspc_type = INTR_IRQ_NSPC_NONE; isrc->isrc_trig = INTR_TRIGGER_CONFORM; isrc->isrc_pol = INTR_POLARITY_CONFORM; CPU_ZERO(&isrc->isrc_cpu); return (isrc); } /* * Free interrupt source.
*/ -static void -isrc_free(struct intr_irqsrc *isrc) +void +intr_isrc_free(struct intr_irqsrc *isrc) { free(isrc, M_INTRNG); } void intr_irq_set_name(struct intr_irqsrc *isrc, const char *fmt, ...) { va_list ap; va_start(ap, fmt); vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap); va_end(ap); } /* * Alloc unique interrupt number (resource handle) for interrupt source. * * There could be various strategies how to allocate free interrupt number * (resource handle) for new interrupt source. * * 1. Handles are always allocated forward, so handles are not recycled * immediately. However, if only one free handle left which is reused * constantly... */ static int isrc_alloc_irq_locked(struct intr_irqsrc *isrc) { u_int maxirqs, irq; mtx_assert(&isrc_table_lock, MA_OWNED); maxirqs = nitems(irq_sources); if (irq_next_free >= maxirqs) return (ENOSPC); for (irq = irq_next_free; irq < maxirqs; irq++) { if (irq_sources[irq] == NULL) goto found; } for (irq = 0; irq < irq_next_free; irq++) { if (irq_sources[irq] == NULL) goto found; } irq_next_free = maxirqs; return (ENOSPC); found: isrc->isrc_irq = irq; irq_sources[irq] = isrc; intr_irq_set_name(isrc, "irq%u", irq); isrc_setup_counters(isrc); irq_next_free = irq + 1; if (irq_next_free >= maxirqs) irq_next_free = 0; return (0); } #ifdef notyet /* * Free unique interrupt number (resource handle) from interrupt source. */ static int isrc_free_irq(struct intr_irqsrc *isrc) { u_int maxirqs; mtx_assert(&isrc_table_lock, MA_NOTOWNED); maxirqs = nitems(irq_sources); if (isrc->isrc_irq >= maxirqs) return (EINVAL); mtx_lock(&isrc_table_lock); if (irq_sources[isrc->isrc_irq] != isrc) { mtx_unlock(&isrc_table_lock); return (EINVAL); } irq_sources[isrc->isrc_irq] = NULL; isrc->isrc_irq = IRQ_INVALID; /* just to be safe */ mtx_unlock(&isrc_table_lock); return (0); } #endif /* * Lookup interrupt source by interrupt number (resource handle). */ static struct intr_irqsrc * isrc_lookup(u_int irq) { if (irq < nitems(irq_sources)) return (irq_sources[irq]); return (NULL); } /* * Lookup interrupt source by namespace description. */ static struct intr_irqsrc * isrc_namespace_lookup(device_t dev, uint16_t type, uint16_t num) { u_int irq; struct intr_irqsrc *isrc; mtx_assert(&isrc_table_lock, MA_OWNED); for (irq = 0; irq < nitems(irq_sources); irq++) { isrc = irq_sources[irq]; if (isrc != NULL && isrc->isrc_dev == dev && isrc->isrc_nspc_type == type && isrc->isrc_nspc_num == num) return (isrc); } return (NULL); } /* * Map interrupt source according to namespace into framework. If such mapping * does not exist, create it. Return unique interrupt number (resource handle) * associated with mapped interrupt source. */ u_int intr_namespace_map_irq(device_t dev, uint16_t type, uint16_t num) { struct intr_irqsrc *isrc, *new_isrc; int error; - new_isrc = isrc_alloc(INTR_ISRCT_NAMESPACE, 0); + new_isrc = intr_isrc_alloc(INTR_ISRCT_NAMESPACE, 0); mtx_lock(&isrc_table_lock); isrc = isrc_namespace_lookup(dev, type, num); if (isrc != NULL) { mtx_unlock(&isrc_table_lock); - isrc_free(new_isrc); + intr_isrc_free(new_isrc); return (isrc->isrc_irq); /* already mapped */ } error = isrc_alloc_irq_locked(new_isrc); if (error != 0) { mtx_unlock(&isrc_table_lock); - isrc_free(new_isrc); + intr_isrc_free(new_isrc); return (IRQ_INVALID); /* no space left */ } new_isrc->isrc_dev = dev; new_isrc->isrc_nspc_type = type; new_isrc->isrc_nspc_num = num; mtx_unlock(&isrc_table_lock); return (new_isrc->isrc_irq); } #ifdef FDT /* * Lookup interrupt source by FDT description. 
*/ static struct intr_irqsrc * isrc_fdt_lookup(intptr_t xref, pcell_t *cells, u_int ncells) { u_int irq, cellsize; struct intr_irqsrc *isrc; mtx_assert(&isrc_table_lock, MA_OWNED); cellsize = ncells * sizeof(*cells); for (irq = 0; irq < nitems(irq_sources); irq++) { isrc = irq_sources[irq]; if (isrc != NULL && isrc->isrc_type == INTR_ISRCT_FDT && isrc->isrc_xref == xref && isrc->isrc_ncells == ncells && memcmp(isrc->isrc_cells, cells, cellsize) == 0) return (isrc); } return (NULL); } /* * Map an interrupt source according to its FDT data into the framework. If * such a mapping does not exist, create it. Return the unique interrupt * number (resource handle) associated with the mapped interrupt source. */ u_int intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells) { struct intr_irqsrc *isrc, *new_isrc; u_int cellsize; intptr_t xref; int error; xref = (intptr_t)node; /* It's so simple for now. */ cellsize = ncells * sizeof(*cells); - new_isrc = isrc_alloc(INTR_ISRCT_FDT, cellsize); + new_isrc = intr_isrc_alloc(INTR_ISRCT_FDT, cellsize); mtx_lock(&isrc_table_lock); isrc = isrc_fdt_lookup(xref, cells, ncells); if (isrc != NULL) { mtx_unlock(&isrc_table_lock); - isrc_free(new_isrc); + intr_isrc_free(new_isrc); return (isrc->isrc_irq); /* already mapped */ } error = isrc_alloc_irq_locked(new_isrc); if (error != 0) { mtx_unlock(&isrc_table_lock); - isrc_free(new_isrc); + intr_isrc_free(new_isrc); return (IRQ_INVALID); /* no space left */ } new_isrc->isrc_xref = xref; new_isrc->isrc_ncells = ncells; memcpy(new_isrc->isrc_cells, cells, cellsize); mtx_unlock(&isrc_table_lock); return (new_isrc->isrc_irq); } #endif /* * Register an interrupt source with its interrupt controller. */ static int isrc_register(struct intr_irqsrc *isrc) { struct intr_pic *pic; boolean_t is_percpu; int error; if (isrc->isrc_flags & INTR_ISRCF_REGISTERED) return (0); if (isrc->isrc_dev == NULL) { pic = pic_lookup(NULL, isrc->isrc_xref); if (pic == NULL || pic->pic_dev == NULL) return (ESRCH); isrc->isrc_dev = pic->pic_dev; } error = PIC_REGISTER(isrc->isrc_dev, isrc, &is_percpu); if (error != 0) return (error); mtx_lock(&isrc_table_lock); isrc->isrc_flags |= INTR_ISRCF_REGISTERED; if (is_percpu) isrc->isrc_flags |= INTR_ISRCF_PERCPU; isrc_update_name(isrc, NULL); mtx_unlock(&isrc_table_lock); return (0); } #ifdef INTR_SOLO /* * Set up a filter for an interrupt source. */ static int iscr_setup_filter(struct intr_irqsrc *isrc, const char *name, intr_irq_filter_t *filter, void *arg, void **cookiep) { if (filter == NULL) return (EINVAL); mtx_lock(&isrc_table_lock); /* * Make sure that we do not mix the two ways * in which we handle interrupt sources. */ if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) { mtx_unlock(&isrc_table_lock); return (EBUSY); } isrc->isrc_filter = filter; isrc->isrc_arg = arg; isrc_update_name(isrc, name); mtx_unlock(&isrc_table_lock); *cookiep = isrc; return (0); } #endif /* * Interrupt source pre_ithread method for MI interrupt framework. */ static void intr_isrc_pre_ithread(void *arg) { struct intr_irqsrc *isrc = arg; PIC_PRE_ITHREAD(isrc->isrc_dev, isrc); } /* * Interrupt source post_ithread method for MI interrupt framework. */ static void intr_isrc_post_ithread(void *arg) { struct intr_irqsrc *isrc = arg; PIC_POST_ITHREAD(isrc->isrc_dev, isrc); } /* * Interrupt source post_filter method for MI interrupt framework. */ static void intr_isrc_post_filter(void *arg) { struct intr_irqsrc *isrc = arg; PIC_POST_FILTER(isrc->isrc_dev, isrc); } /* * Interrupt source assign_cpu method for MI interrupt framework.
*/ static int intr_isrc_assign_cpu(void *arg, int cpu) { #ifdef SMP struct intr_irqsrc *isrc = arg; int error; if (isrc->isrc_dev != intr_irq_root_dev) return (EINVAL); mtx_lock(&isrc_table_lock); if (cpu == NOCPU) { CPU_ZERO(&isrc->isrc_cpu); isrc->isrc_flags &= ~INTR_ISRCF_BOUND; } else { CPU_SETOF(cpu, &isrc->isrc_cpu); isrc->isrc_flags |= INTR_ISRCF_BOUND; } /* * In the NOCPU case, it's up to the PIC to either leave the ISRC on the * same CPU, re-balance it to another CPU, or enable it on more CPUs. * However, the PIC is expected to change isrc_cpu appropriately to keep * us well informed if the call is successful. */ if (irq_assign_cpu) { error = PIC_BIND(isrc->isrc_dev, isrc); if (error) { CPU_ZERO(&isrc->isrc_cpu); mtx_unlock(&isrc_table_lock); return (error); } } mtx_unlock(&isrc_table_lock); return (0); #else return (EOPNOTSUPP); #endif } /* * Create interrupt event for interrupt source. */ static int isrc_event_create(struct intr_irqsrc *isrc) { struct intr_event *ie; int error; error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq, intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter, intr_isrc_assign_cpu, "%s:", isrc->isrc_name); if (error) return (error); mtx_lock(&isrc_table_lock); /* * Make sure that we do not mix the two ways * in which we handle interrupt sources. Let the contested event win. */ #ifdef INTR_SOLO if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) { #else if (isrc->isrc_event != NULL) { #endif mtx_unlock(&isrc_table_lock); intr_event_destroy(ie); return (isrc->isrc_event != NULL ? EBUSY : 0); } isrc->isrc_event = ie; mtx_unlock(&isrc_table_lock); return (0); } #ifdef notyet /* * Destroy interrupt event for interrupt source. */ static void isrc_event_destroy(struct intr_irqsrc *isrc) { struct intr_event *ie; mtx_lock(&isrc_table_lock); ie = isrc->isrc_event; isrc->isrc_event = NULL; mtx_unlock(&isrc_table_lock); if (ie != NULL) intr_event_destroy(ie); } #endif /* * Add handler to interrupt source. */ static int isrc_add_handler(struct intr_irqsrc *isrc, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { int error; if (isrc->isrc_event == NULL) { error = isrc_event_create(isrc); if (error) return (error); } error = intr_event_add_handler(isrc->isrc_event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (error == 0) { mtx_lock(&isrc_table_lock); intrcnt_updatename(isrc); mtx_unlock(&isrc_table_lock); } return (error); } /* * Lookup interrupt controller locked. */ static struct intr_pic * pic_lookup_locked(device_t dev, intptr_t xref) { struct intr_pic *pic; mtx_assert(&pic_list_lock, MA_OWNED); SLIST_FOREACH(pic, &pic_list, pic_next) { if (pic->pic_xref != xref) continue; if (pic->pic_xref != 0 || pic->pic_dev == dev) return (pic); } return (NULL); } /* * Lookup interrupt controller. */ static struct intr_pic * pic_lookup(device_t dev, intptr_t xref) { struct intr_pic *pic; mtx_lock(&pic_list_lock); pic = pic_lookup_locked(dev, xref); mtx_unlock(&pic_list_lock); return (pic); } /* * Create interrupt controller. */ static struct intr_pic * pic_create(device_t dev, intptr_t xref) { struct intr_pic *pic; mtx_lock(&pic_list_lock); pic = pic_lookup_locked(dev, xref); if (pic != NULL) { mtx_unlock(&pic_list_lock); return (pic); } pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO); if (pic == NULL) { /* M_NOWAIT allocation may fail; intr_pic_register() turns NULL into ENOMEM. */ mtx_unlock(&pic_list_lock); return (NULL); } pic->pic_xref = xref; pic->pic_dev = dev; SLIST_INSERT_HEAD(&pic_list, pic, pic_next); mtx_unlock(&pic_list_lock); return (pic); } #ifdef notyet /* * Destroy interrupt controller.
*/ static void pic_destroy(device_t dev, intptr_t xref) { struct intr_pic *pic; mtx_lock(&pic_list_lock); pic = pic_lookup_locked(dev, xref); if (pic == NULL) { mtx_unlock(&pic_list_lock); return; } SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next); mtx_unlock(&pic_list_lock); free(pic, M_INTRNG); } #endif /* * Register interrupt controller. */ int intr_pic_register(device_t dev, intptr_t xref) { struct intr_pic *pic; pic = pic_create(dev, xref); if (pic == NULL) return (ENOMEM); if (pic->pic_dev != dev) return (EINVAL); /* XXX it could be many things. */ debugf("PIC %p registered for %s \n", pic, device_get_nameunit(dev), xref); return (0); } /* * Unregister interrupt controller. */ int intr_pic_unregister(device_t dev, intptr_t xref) { panic("%s: not implemented", __func__); } /* * Mark interrupt controller (itself) as a root one. * * Note that only an interrupt controller can really know its position * in the interrupt controller tree. So the root PIC must claim itself as * the root. * * In the FDT case, according to ePAPR approved version 1.1 from 08 April * 2011, page 30: * "The root of the interrupt tree is determined when traversal * of the interrupt tree reaches an interrupt controller node without * an interrupts property and thus no explicit interrupt parent." */ int intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter, void *arg, u_int ipicount) { if (pic_lookup(dev, xref) == NULL) { device_printf(dev, "not registered\n"); return (EINVAL); } if (filter == NULL) { device_printf(dev, "filter missing\n"); return (EINVAL); } /* * Only one interrupt controller can be the root for now. * Note that we further suppose that there is no threaded interrupt * routine (handler) on the root. See intr_irq_handler(). */ if (intr_irq_root_dev != NULL) { device_printf(dev, "another root already set\n"); return (EBUSY); } intr_irq_root_dev = dev; irq_root_filter = filter; irq_root_arg = arg; irq_root_ipicount = ipicount; debugf("irq root set to %s\n", device_get_nameunit(dev)); return (0); } int intr_irq_add_handler(device_t dev, driver_filter_t filt, driver_intr_t hand, void *arg, u_int irq, int flags, void **cookiep) { const char *name; struct intr_irqsrc *isrc; int error; name = device_get_nameunit(dev); #ifdef INTR_SOLO /* * Standard handling is done through the MI interrupt framework. However, * some interrupts could request solely their own special handling. This * non-standard handling can be used for interrupt controllers without a * handler (filter only), so in case interrupt controllers are chained, * the MI interrupt framework is called only in the leaf controller. * * Note that the root interrupt controller routine is served as well, * however in intr_irq_handler(), i.e. the main system dispatch routine.
*/ if (flags & INTR_SOLO && hand != NULL) { debugf("irq %u cannot solo on %s\n", irq, name); return (EINVAL); } #endif isrc = isrc_lookup(irq); if (isrc == NULL) { debugf("irq %u without source on %s\n", irq, name); return (EINVAL); } error = isrc_register(isrc); if (error != 0) { debugf("irq %u map error %d on %s\n", irq, error, name); return (error); } #ifdef INTR_SOLO if (flags & INTR_SOLO) { error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt, arg, cookiep); debugf("irq %u setup filter error %d on %s\n", irq, error, name); } else #endif { error = isrc_add_handler(isrc, name, filt, hand, arg, flags, cookiep); debugf("irq %u add handler error %d on %s\n", irq, error, name); } if (error != 0) return (error); mtx_lock(&isrc_table_lock); isrc->isrc_handlers++; if (isrc->isrc_handlers == 1) { PIC_ENABLE_INTR(isrc->isrc_dev, isrc); PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc); } mtx_unlock(&isrc_table_lock); return (0); } int intr_irq_remove_handler(device_t dev, u_int irq, void *cookie) { struct intr_irqsrc *isrc; int error; isrc = isrc_lookup(irq); if (isrc == NULL || isrc->isrc_handlers == 0) return (EINVAL); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) { if (isrc != cookie) return (EINVAL); mtx_lock(&isrc_table_lock); isrc->isrc_filter = NULL; isrc->isrc_arg = NULL; isrc->isrc_handlers = 0; PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc); PIC_DISABLE_INTR(isrc->isrc_dev, isrc); isrc_update_name(isrc, NULL); mtx_unlock(&isrc_table_lock); return (0); } #endif if (isrc != intr_handler_source(cookie)) return (EINVAL); error = intr_event_remove_handler(cookie); if (error == 0) { mtx_lock(&isrc_table_lock); isrc->isrc_handlers--; if (isrc->isrc_handlers == 0) { PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc); PIC_DISABLE_INTR(isrc->isrc_dev, isrc); } intrcnt_updatename(isrc); mtx_unlock(&isrc_table_lock); } return (error); } int intr_irq_config(u_int irq, enum intr_trigger trig, enum intr_polarity pol) { struct intr_irqsrc *isrc; isrc = isrc_lookup(irq); if (isrc == NULL) return (EINVAL); if (isrc->isrc_handlers != 0) return (EBUSY); /* interrupt is enabled (active) */ /* * Once an interrupt is enabled, we do not change its configuration. * A controller PIC_ENABLE_INTR() method is called when an interrupt * is going to be enabled. In this method, a controller should set up * the interrupt according to saved configuration parameters. */ isrc->isrc_trig = trig; isrc->isrc_pol = pol; return (0); } int intr_irq_describe(u_int irq, void *cookie, const char *descr) { struct intr_irqsrc *isrc; int error; isrc = isrc_lookup(irq); if (isrc == NULL || isrc->isrc_handlers == 0) return (EINVAL); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) { if (isrc != cookie) return (EINVAL); mtx_lock(&isrc_table_lock); isrc_update_name(isrc, descr); mtx_unlock(&isrc_table_lock); return (0); } #endif error = intr_event_describe_handler(isrc->isrc_event, cookie, descr); if (error == 0) { mtx_lock(&isrc_table_lock); intrcnt_updatename(isrc); mtx_unlock(&isrc_table_lock); } return (error); } #ifdef SMP int intr_irq_bind(u_int irq, int cpu) { struct intr_irqsrc *isrc; isrc = isrc_lookup(irq); if (isrc == NULL || isrc->isrc_handlers == 0) return (EINVAL); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) return (intr_isrc_assign_cpu(isrc, cpu)); #endif return (intr_event_bind(isrc->isrc_event, cpu)); }
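[Editor's note: the intr_irq_config() comment above describes a deferred-configuration contract: the framework only records the requested trigger and polarity in the interrupt source, and the controller applies them from its PIC_ENABLE_INTR method just before the first handler is enabled. The following is a minimal hypothetical sketch of the controller side, not code from this commit; all mypic_* names and MYPIC_* register bits are invented for illustration.]

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/intr.h>

#define	MYPIC_CFG_EDGE		0x01	/* invented register bits */
#define	MYPIC_CFG_POL_HIGH	0x02

static void mypic_write_cfg(device_t, u_int, uint32_t);	/* invented helper */

static void
mypic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	uint32_t cfg = 0;

	/*
	 * intr_irq_config() only saved the configuration into the isrc;
	 * it is applied here, right before the first handler is enabled
	 * (see intr_irq_add_handler() above).
	 */
	if (isrc->isrc_trig == INTR_TRIGGER_EDGE)
		cfg |= MYPIC_CFG_EDGE;
	if (isrc->isrc_pol == INTR_POLARITY_HIGH)
		cfg |= MYPIC_CFG_POL_HIGH;

	/*
	 * A real driver would translate the isrc to its own hardware
	 * line first; isrc_irq is used here only to keep the sketch short.
	 */
	mypic_write_cfg(dev, isrc->isrc_irq, cfg);
}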
/* * Return the CPU that the next interrupt source should use. * For now just returns the next CPU according to round-robin. */ u_int intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask) { if (!irq_assign_cpu || mp_ncpus == 1) return (PCPU_GET(cpuid)); do { last_cpu++; if (last_cpu > mp_maxid) last_cpu = 0; } while (!CPU_ISSET(last_cpu, cpumask)); return (last_cpu); } /* * Distribute all the interrupt sources among the available * CPUs once the APs have been launched. */ static void intr_irq_shuffle(void *arg __unused) { struct intr_irqsrc *isrc; u_int i; if (mp_ncpus == 1) return; mtx_lock(&isrc_table_lock); irq_assign_cpu = TRUE; for (i = 0; i < NIRQ; i++) { isrc = irq_sources[i]; if (isrc == NULL || isrc->isrc_handlers == 0 || isrc->isrc_flags & INTR_ISRCF_PERCPU) continue; if (isrc->isrc_event != NULL && isrc->isrc_flags & INTR_ISRCF_BOUND && isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1) panic("%s: CPU inconsistency", __func__); if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0) CPU_ZERO(&isrc->isrc_cpu); /* start again */ /* * We are in a wicked position here if the following call fails * for a bound ISRC. The best thing we can do is to clear * isrc_cpu so the inconsistency with ie_cpu will be detectable. */ if (PIC_BIND(isrc->isrc_dev, isrc) != 0) CPU_ZERO(&isrc->isrc_cpu); } mtx_unlock(&isrc_table_lock); } SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL); #else u_int intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask) { return (PCPU_GET(cpuid)); } #endif void dosoftints(void); void dosoftints(void) { } #ifdef SMP /* * Init interrupt controller on another CPU. */ void intr_pic_init_secondary(void) { /* * QQQ: Only root PIC is aware of other CPUs ??? */ KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); //mtx_lock(&isrc_table_lock); PIC_INIT_SECONDARY(intr_irq_root_dev); //mtx_unlock(&isrc_table_lock); } #endif #ifdef DDB DB_SHOW_COMMAND(irqs, db_show_irqs) { u_int i, irqsum; struct intr_irqsrc *isrc; for (irqsum = 0, i = 0; i < NIRQ; i++) { isrc = irq_sources[i]; if (isrc == NULL) continue; db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i, isrc->isrc_name, isrc->isrc_cpu.__bits[0], isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", isrc->isrc_count[0]); irqsum += isrc->isrc_count[0]; } db_printf("irq total %u\n", irqsum); } #endif Index: head/sys/sys/intr.h =================================================================== --- head/sys/sys/intr.h (revision 297229) +++ head/sys/sys/intr.h (revision 297230) @@ -1,141 +1,141 @@ /* $NetBSD: intr.h,v 1.7 2003/06/16 20:01:00 thorpej Exp $ */ /*- * Copyright (c) 1997 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _SYS_INTR_H_ #define _SYS_INTR_H_ #include #ifdef notyet #define INTR_SOLO INTR_MD1 typedef int intr_irq_filter_t(void *arg, struct trapframe *tf); #else typedef int intr_irq_filter_t(void *arg); #endif #define INTR_ISRC_NAMELEN (MAXCOMLEN + 1) -typedef void intr_ipi_filter_t(void *arg); - enum intr_isrc_type { INTR_ISRCT_NAMESPACE, INTR_ISRCT_FDT }; #define INTR_ISRCF_REGISTERED 0x01 /* registered in a controller */ #define INTR_ISRCF_PERCPU 0x02 /* per CPU interrupt */ #define INTR_ISRCF_BOUND 0x04 /* bound to a CPU */ /* Interrupt source definition. */ struct intr_irqsrc { device_t isrc_dev; /* where isrc is mapped */ intptr_t isrc_xref; /* device reference key */ uintptr_t isrc_data; /* device data for isrc */ u_int isrc_irq; /* unique identifier */ enum intr_isrc_type isrc_type; /* how isrc is described */ u_int isrc_flags; char isrc_name[INTR_ISRC_NAMELEN]; uint16_t isrc_nspc_type; uint16_t isrc_nspc_num; enum intr_trigger isrc_trig; enum intr_polarity isrc_pol; cpuset_t isrc_cpu; /* on which CPUs it is enabled */ u_int isrc_index; u_long * isrc_count; u_int isrc_handlers; struct intr_event * isrc_event; #ifdef INTR_SOLO intr_irq_filter_t * isrc_filter; -#endif - intr_ipi_filter_t * isrc_ipifilter; void * isrc_arg; +#endif #ifdef FDT u_int isrc_ncells; pcell_t isrc_cells[]; /* leave it last */ #endif }; + +struct intr_irqsrc *intr_isrc_alloc(u_int type, u_int extsize); +void intr_isrc_free(struct intr_irqsrc *isrc); void intr_irq_set_name(struct intr_irqsrc *isrc, const char *fmt, ...) __printflike(2, 3); void intr_irq_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf); #define INTR_IRQ_NSPC_NONE 0 #define INTR_IRQ_NSPC_PLAIN 1 #define INTR_IRQ_NSPC_IRQ 2 #define INTR_IRQ_NSPC_IPI 3 u_int intr_namespace_map_irq(device_t dev, uint16_t type, uint16_t num); #ifdef FDT u_int intr_fdt_map_irq(phandle_t, pcell_t *, u_int); #endif extern device_t intr_irq_root_dev; int intr_pic_register(device_t dev, intptr_t xref); int intr_pic_unregister(device_t dev, intptr_t xref); int intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter, void *arg, u_int ipicount); int intr_irq_add_handler(device_t dev, driver_filter_t, driver_intr_t, void *, u_int, int, void **); int intr_irq_remove_handler(device_t dev, u_int, void *); int intr_irq_config(u_int, enum intr_trigger, enum intr_polarity); int intr_irq_describe(u_int, void *, const char *); u_int intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask); #ifdef SMP int intr_irq_bind(u_int, int); void intr_pic_init_secondary(void); /* Virtualization for interrupt source IPI counter increment.
*/ static inline void intr_ipi_increment_count(u_long *counter, u_int cpu) { KASSERT(cpu < MAXCPU, ("%s: too big cpu %u", __func__, cpu)); counter[cpu]++; } /* Virtualization for interrupt source IPI counters setup. */ u_long * intr_ipi_setup_counters(const char *name); #endif #endif /* _SYS_INTR_H_ */
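[Editor's note: for orientation, here is how the reworked IPI plumbing introduced by this revision fits together from a consumer's point of view. This is a hypothetical sketch, not code from the commit: all mypic_* names and MYPIC_FIRST_SGI are invented, and the exact MD wiring behind intr_pic_ipi_setup() and intr_ipi_send() lives in files not shown in this diff. The real driver side is sys/arm/arm/gic.c above, with its sgi_to_ipi[] table and ISRC_IPI macro.]

#include <sys/param.h>
#include <sys/systm.h>
#include <machine/intr.h>
#include <machine/smp.h>

#define	MYPIC_FIRST_SGI	0	/* invented; SGIs are IRQs 0-15 on a GIC */

/*
 * Backing method for the new PIC_IPI_SETUP interface: map the logical
 * IPI number to a hardware source, much as gic.c records its SGI <-> IPI
 * mapping.
 */
static int
mypic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc *isrc)
{

	if (ipi >= INTR_IPI_COUNT)
		return (EINVAL);
	isrc->isrc_data = MYPIC_FIRST_SGI + ipi;	/* backing hw line */
	return (0);
}

/* Handler of the new intr_ipi_handler_t type; runs on the receiving CPU. */
static void
mypic_hardclock_ipi(void *arg)
{
	/* IPI_HARDCLOCK work would go here. */
}

/*
 * MD setup: intr_pic_ipi_setup() is expected to resolve an interrupt
 * source via the root PIC's ipi_setup method and install the handler.
 */
static void
mypic_setup_ipis(void)
{

	if (intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock",
	    mypic_hardclock_ipi, NULL) != 0)
		panic("cannot set up IPI_HARDCLOCK");
}

/*
 * Receive path: the PIC decodes its hardware IPI (e.g. a GIC SGI) and
 * hands the logical IPI number to the framework, which bumps the per-CPU
 * counter and invokes the installed handler.
 */
static void
mypic_sgi_intr(u_int sgi, struct trapframe *tf)
{

	intr_ipi_dispatch(sgi - MYPIC_FIRST_SGI, tf);	/* simplified */
}

[The per-CPU statistics helpers round this out: intr_ipi_setup_counters("hardclock") reserves MAXCPU intrcnt slots for the IPI, and the dispatch path is expected to bump the receiving CPU's slot via intr_ipi_increment_count().]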