diff --git a/sys/arm/arm/genassym.c b/sys/arm/arm/genassym.c
index 2cf1861b1178..9d6232739022 100644
--- a/sys/arm/arm/genassym.c
+++ b/sys/arm/arm/genassym.c
@@ -1,143 +1,143 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 Olivier Houchard
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include
+#include
#include
#include
#include
+#include
#include
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
-#include
#include
#include	/* For KERNVIRTADDR */
#include
#include
#include
#include
#include

ASSYM(KERNBASE, KERNBASE);
ASSYM(KERNVIRTADDR, KERNVIRTADDR);
ASSYM(CPU_ASID_KERNEL,CPU_ASID_KERNEL);

ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
ASSYM(PCB_PAGEDIR, offsetof(struct pcb, pcb_pagedir));
ASSYM(PCB_R4, offsetof(struct pcb, pcb_regs.sf_r4));
ASSYM(PCB_R5, offsetof(struct pcb, pcb_regs.sf_r5));
ASSYM(PCB_R6, offsetof(struct pcb, pcb_regs.sf_r6));
ASSYM(PCB_R7, offsetof(struct pcb, pcb_regs.sf_r7));
ASSYM(PCB_R8, offsetof(struct pcb, pcb_regs.sf_r8));
ASSYM(PCB_R9, offsetof(struct pcb, pcb_regs.sf_r9));
ASSYM(PCB_R10, offsetof(struct pcb, pcb_regs.sf_r10));
ASSYM(PCB_R11, offsetof(struct pcb, pcb_regs.sf_r11));
ASSYM(PCB_R12, offsetof(struct pcb, pcb_regs.sf_r12));
ASSYM(PCB_SP, offsetof(struct pcb, pcb_regs.sf_sp));
ASSYM(PCB_LR, offsetof(struct pcb, pcb_regs.sf_lr));
ASSYM(PCB_PC, offsetof(struct pcb, pcb_regs.sf_pc));
ASSYM(PCB_TPIDRURW, offsetof(struct pcb, pcb_regs.sf_tpidrurw));

ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));

ASSYM(M_LEN, offsetof(struct mbuf, m_len));
ASSYM(M_DATA, offsetof(struct mbuf, m_data));
ASSYM(M_NEXT, offsetof(struct mbuf, m_next));
ASSYM(IP_SRC, offsetof(struct ip, ip_src));
ASSYM(IP_DST, offsetof(struct ip, ip_dst));

ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
ASSYM(TD_AST, offsetof(struct thread, td_ast));
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
ASSYM(TD_MD, offsetof(struct thread, td_md));
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));

ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr));
ASSYM(TF_R0, offsetof(struct trapframe, tf_r0));
ASSYM(TF_R1, offsetof(struct trapframe, tf_r1));
ASSYM(TF_PC, offsetof(struct trapframe, tf_pc));
ASSYM(P_PID, offsetof(struct proc, p_pid));
ASSYM(P_FLAG, offsetof(struct proc, p_flag));

ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc));

#ifdef VFP
ASSYM(PCB_VFPSTATE, offsetof(struct pcb, pcb_vfpstate));
#endif

ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));

ASSYM(PC_BP_HARDEN_KIND, offsetof(struct pcpu, pc_bp_harden_kind));
ASSYM(PCPU_BP_HARDEN_KIND_NONE, PCPU_BP_HARDEN_KIND_NONE);
ASSYM(PCPU_BP_HARDEN_KIND_BPIALL, PCPU_BP_HARDEN_KIND_BPIALL);
ASSYM(PCPU_BP_HARDEN_KIND_ICIALLU, PCPU_BP_HARDEN_KIND_ICIALLU);

ASSYM(PAGE_SIZE, PAGE_SIZE);
#ifdef PMAP_INCLUDE_PTE_SYNC
ASSYM(PMAP_INCLUDE_PTE_SYNC, 1);
#endif
ASSYM(MAXCOMLEN, MAXCOMLEN);
ASSYM(MAXCPU, MAXCPU);
ASSYM(_NCPUWORDS, _NCPUWORDS);
ASSYM(PCPU_SIZE, sizeof(struct pcpu));
ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);

ASSYM(DCACHE_LINE_SIZE, offsetof(struct cpuinfo, dcache_line_size));
ASSYM(DCACHE_LINE_MASK, offsetof(struct cpuinfo, dcache_line_mask));
ASSYM(ICACHE_LINE_SIZE, offsetof(struct cpuinfo, icache_line_size));
ASSYM(ICACHE_LINE_MASK, offsetof(struct cpuinfo, icache_line_mask));

ASSYM(INTR_ROOT_IRQ, INTR_ROOT_IRQ);

/*
 * Emit the LOCORE_MAP_MB option as a #define only if the option was set.
 */
#include "opt_locore.h"
#ifdef LOCORE_MAP_MB
ASSYM(LOCORE_MAP_MB, LOCORE_MAP_MB);
#endif
diff --git a/sys/arm/arm/gic.c b/sys/arm/arm/gic.c
index ffce86e62128..b1b7aacd63ab 100644
--- a/sys/arm/arm/gic.c
+++ b/sys/arm/arm/gic.c
@@ -1,1427 +1,1427 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2011 The FreeBSD Foundation
 * All rights reserved.
 *
 * Developed by Damjan Marion
 *
 * Based on OMAP4 GIC code by Ben Gray
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
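Editor's note on the genassym.c hunks above: a genassym file is never linked into the kernel; it exists only so that structure offsets the C compiler can compute (offsetof, sizeof) end up as named constants consumable by assembly sources. Below is a minimal user-space sketch of the idea — the ASSYM macro here is a printf stand-in, not FreeBSD's actual sys/assym.h mechanism, and struct pcb is a toy stand-in for the kernel's:

```c
/*
 * Why genassym.c files exist: .S sources cannot evaluate offsetof(),
 * so a C file is compiled solely to capture structure layout as named
 * constants that the build turns into an include file for assembly.
 */
#include <stddef.h>
#include <stdio.h>

struct pcb {			/* toy stand-in for the kernel's struct pcb */
	void		*pcb_onfault;
	unsigned int	pcb_regs[13];
};

#define	ASSYM(name, value) \
	printf("#define\t%s\t%zu\n", #name, (size_t)(value))

int
main(void)
{
	/* Emits e.g. "#define PCB_ONFAULT 0" for consumption by .S files. */
	ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
	ASSYM(PCB_SIZE, sizeof(struct pcb));
	return (0);
}
```

The real build compiles genassym.c to an object file and extracts the values with a script rather than running it; the printf form above only shows the shape of the technique.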
#include
#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_platform.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef FDT
#include
#include
#endif

#ifdef DEV_ACPI
#include
#include
#endif

#ifdef DDB
#include
#include
#endif

#include
#include

#include "gic_if.h"
#include "pic_if.h"
#include "msi_if.h"

/* We are using GICv2 register naming */

/* Distributor Registers */

/* CPU Registers */
#define	GICC_CTLR	0x0000			/* v1 ICCICR */
#define	GICC_PMR	0x0004			/* v1 ICCPMR */
#define	GICC_BPR	0x0008			/* v1 ICCBPR */
#define	GICC_IAR	0x000C			/* v1 ICCIAR */
#define	GICC_EOIR	0x0010			/* v1 ICCEOIR */
#define	GICC_RPR	0x0014			/* v1 ICCRPR */
#define	GICC_HPPIR	0x0018			/* v1 ICCHPIR */
#define	GICC_ABPR	0x001C			/* v1 ICCABPR */
#define	GICC_IIDR	0x00FC			/* v1 ICCIIDR*/

/* TYPER Registers */
#define	GICD_TYPER_SECURITYEXT	0x400
#define	GIC_SUPPORT_SECEXT(_sc)	\
    ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)

#ifndef	GIC_DEFAULT_ICFGR_INIT
#define	GIC_DEFAULT_ICFGR_INIT	0x00000000
#endif

struct gic_irqsrc {
	struct intr_irqsrc	gi_isrc;
	uint32_t		gi_irq;
	enum intr_polarity	gi_pol;
	enum intr_trigger	gi_trig;
#define	GI_FLAG_EARLY_EOI	(1 << 0)
#define	GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define	GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;
};

static u_int gic_irq_cpu;
static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);

#ifdef SMP
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif

#define	GIC_INTR_ISRC(sc, irq)	(&sc->gic_irqs[irq].gi_isrc)

static struct resource_spec arm_gic_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Distributor registers */
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* CPU Interrupt Intf. registers */
	{ SYS_RES_IRQ,	  0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
	{ -1, 0 }
};

#if defined(__arm__) && defined(INVARIANTS)
static int gic_debug_spurious = 1;
#else
static int gic_debug_spurious = 0;
#endif
TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);

static u_int arm_gic_map[GIC_MAXCPU];

static struct arm_gic_softc *gic_sc = NULL;

/* CPU Interface */
#define	gic_c_read_4(_sc, _reg)		\
    bus_read_4((_sc)->gic_res[GIC_RES_CPU], (_reg))
#define	gic_c_write_4(_sc, _reg, _val)		\
    bus_write_4((_sc)->gic_res[GIC_RES_CPU], (_reg), (_val))
/* Distributor Interface */
#define	gic_d_read_4(_sc, _reg)		\
    bus_read_4((_sc)->gic_res[GIC_RES_DIST], (_reg))
#define	gic_d_write_1(_sc, _reg, _val)		\
    bus_write_1((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
#define	gic_d_write_4(_sc, _reg, _val)		\
    bus_write_4((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))

static inline void
gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
{
	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
}

static inline void
gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
{
	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
}

static uint8_t
gic_cpu_mask(struct arm_gic_softc *sc)
{
	uint32_t mask;
	int i;

	/* Read the current cpuid mask by reading ITARGETSR{0..7} */
	for (i = 0; i < 8; i++) {
		mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
		if (mask != 0)
			break;
	}
	/* No mask found, assume we are on CPU interface 0 */
	if (mask == 0)
		return (1);

	/* Collect the mask in the lower byte */
	mask |= mask >> 16;
	mask |= mask >> 8;

	return (mask);
}

#ifdef SMP
static void
-arm_gic_init_secondary(device_t dev, enum root_type root_type)
+arm_gic_init_secondary(device_t dev, uint32_t rootnum)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	u_int irq, cpu;

	/* Set the mask so we can find this CPU to send it IPIs */
	cpu = PCPU_GET(cpuid);
	MPASS(cpu < GIC_MAXCPU);
	arm_gic_map[cpu] = gic_cpu_mask(sc);

	for (irq = 0; irq < sc->nirqs; irq += 4)
		gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);

	/* Set all the interrupts to be in Group 0 (secure) */
	for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);

	/* Unmask attached PPI interrupts.
*/ for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu)) gic_irq_unmask(sc, irq); } #endif /* SMP */ static int arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num) { int error; uint32_t irq; struct gic_irqsrc *irqs; struct intr_irqsrc *isrc; const char *name; irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF, M_WAITOK | M_ZERO); name = device_get_nameunit(sc->gic_dev); for (irq = 0; irq < num; irq++) { irqs[irq].gi_irq = irq; irqs[irq].gi_pol = INTR_POLARITY_CONFORM; irqs[irq].gi_trig = INTR_TRIGGER_CONFORM; isrc = &irqs[irq].gi_isrc; if (irq <= GIC_LAST_SGI) { error = intr_isrc_register(isrc, sc->gic_dev, INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI); } else if (irq <= GIC_LAST_PPI) { error = intr_isrc_register(isrc, sc->gic_dev, INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI); } else { error = intr_isrc_register(isrc, sc->gic_dev, 0, "%s,s%u", name, irq - GIC_FIRST_SPI); } if (error != 0) { /* XXX call intr_isrc_deregister() */ free(irqs, M_DEVBUF); return (error); } } sc->gic_irqs = irqs; sc->nirqs = num; return (0); } static void arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count) { struct arm_gic_softc *sc; int i; sc = device_get_softc(dev); KASSERT((start + count) <= sc->nirqs, ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__, start, count, sc->nirqs)); for (i = 0; i < count; i++) { KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0, ("%s: MSI interrupt %d already has a handler", __func__, count + i)); KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM, ("%s: MSI interrupt %d already has a polarity", __func__, count + i)); KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM, ("%s: MSI interrupt %d already has a trigger", __func__, count + i)); sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH; sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE; sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI; } } int arm_gic_attach(device_t dev) { struct arm_gic_softc *sc; int i; uint32_t icciidr, mask, nirqs; if (gic_sc) return (ENXIO); if (mp_ncpus > GIC_MAXCPU) { device_printf(dev, "Too many CPUs for IPIs to work (%d > %d)\n", mp_ncpus, GIC_MAXCPU); return (ENXIO); } sc = device_get_softc(dev); if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->gic_dev = dev; gic_sc = sc; /* Initialize mutex */ mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN); /* Disable interrupt forwarding to the CPU interface */ gic_d_write_4(sc, GICD_CTLR, 0x00); /* Get the number of interrupts */ sc->typer = gic_d_read_4(sc, GICD_TYPER); nirqs = GICD_TYPER_I_NUM(sc->typer); if (arm_gic_register_isrcs(sc, nirqs)) { device_printf(dev, "could not register irqs\n"); goto cleanup; } icciidr = gic_c_read_4(sc, GICC_IIDR); device_printf(dev, "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n", GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr), GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs); sc->gic_iidr = icciidr; /* Set all global interrupts to be level triggered, active low. */ for (i = 32; i < sc->nirqs; i += 16) { gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT); } /* Disable all interrupts. 
*/ for (i = 32; i < sc->nirqs; i += 32) { gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF); } /* Find the current cpu mask */ mask = gic_cpu_mask(sc); /* Set the mask so we can find this CPU to send it IPIs */ MPASS(PCPU_GET(cpuid) < GIC_MAXCPU); arm_gic_map[PCPU_GET(cpuid)] = mask; /* Set all four targets to this cpu */ mask |= mask << 8; mask |= mask << 16; for (i = 0; i < sc->nirqs; i += 4) { gic_d_write_4(sc, GICD_IPRIORITYR(i), 0); if (i > 32) { gic_d_write_4(sc, GICD_ITARGETSR(i), mask); } } /* Set all the interrupts to be in Group 0 (secure) */ for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) { gic_d_write_4(sc, GICD_IGROUPR(i), 0); } /* Enable CPU interface */ gic_c_write_4(sc, GICC_CTLR, 1); /* Set priority mask register. */ gic_c_write_4(sc, GICC_PMR, 0xff); /* Enable interrupt distribution */ gic_d_write_4(sc, GICD_CTLR, 0x01); return (0); cleanup: arm_gic_detach(dev); return(ENXIO); } int arm_gic_detach(device_t dev) { struct arm_gic_softc *sc; sc = device_get_softc(dev); if (sc->gic_irqs != NULL) free(sc->gic_irqs, M_DEVBUF); bus_release_resources(dev, arm_gic_spec, sc->gic_res); return (0); } static int arm_gic_print_child(device_t bus, device_t child) { struct resource_list *rl; int rv; rv = bus_print_child_header(bus, child); rl = BUS_GET_RESOURCE_LIST(bus, child); if (rl != NULL) { rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } rv += bus_print_child_footer(bus, child); return (rv); } static struct resource * arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct arm_gic_softc *sc; struct resource_list_entry *rle; struct resource_list *rl; int j; KASSERT(type == SYS_RES_MEMORY, ("Invalid resource type %x", type)); sc = device_get_softc(bus); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. 
*/ if (RMAN_IS_DEFAULT_RANGE(start, end)) { rl = BUS_GET_RESOURCE_LIST(bus, child); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; rle = resource_list_find(rl, type, *rid); if (rle == NULL) { if (bootverbose) device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; end = rle->end; count = rle->count; } /* Remap through ranges property */ for (j = 0; j < sc->nranges; j++) { if (start >= sc->ranges[j].bus && end < sc->ranges[j].bus + sc->ranges[j].size) { start -= sc->ranges[j].bus; start += sc->ranges[j].host; end -= sc->ranges[j].bus; end += sc->ranges[j].host; break; } } if (j == sc->nranges && sc->nranges != 0) { if (bootverbose) device_printf(bus, "Could not map resource " "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end); return (NULL); } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct arm_gic_softc *sc; sc = device_get_softc(dev); switch(which) { case GIC_IVAR_HW_REV: KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3, ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)", GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr)); *result = GICD_IIDR_VAR(sc->gic_iidr); return (0); case GIC_IVAR_BUS: KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN, ("arm_gic_read_ivar: Unknown bus type")); KASSERT(sc->gic_bus <= GIC_BUS_MAX, ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus)); *result = sc->gic_bus; return (0); } return (ENOENT); } static int arm_gic_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch(which) { case GIC_IVAR_HW_REV: case GIC_IVAR_BUS: return (EINVAL); } return (ENOENT); } int arm_gic_intr(void *arg) { struct arm_gic_softc *sc = arg; struct gic_irqsrc *gi; uint32_t irq_active_reg, irq; struct trapframe *tf; irq_active_reg = gic_c_read_4(sc, GICC_IAR); irq = irq_active_reg & 0x3FF; /* * 1. We do EOI here because recent read value from active interrupt * register must be used for it. Another approach is to save this * value into associated interrupt source. * 2. EOI must be done on same CPU where interrupt has fired. Thus * we must ensure that interrupted thread does not migrate to * another CPU. * 3. EOI cannot be delayed by any preemption which could happen on * critical_exit() used in MI intr code, when interrupt thread is * scheduled. See next point. * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during * an action and any use of critical_exit() could break this * assumption. See comments within smp_rendezvous_action(). * 5. We always return FILTER_HANDLED as this is an interrupt * controller dispatch function. Otherwise, in cascaded interrupt * case, the whole interrupt subtree would be masked. */ if (irq >= sc->nirqs) { if (gic_debug_spurious) device_printf(sc->gic_dev, "Spurious interrupt detected: last irq: %d on CPU%d\n", sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid)); return (FILTER_HANDLED); } tf = curthread->td_intr_frame; dispatch_irq: gi = sc->gic_irqs + irq; /* * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement * as compiler complains that comparing u_int >= 0 is always true. */ if (irq <= GIC_LAST_SGI) { #ifdef SMP /* Call EOI for all IPI before dispatch. 
*/ gic_c_write_4(sc, GICC_EOIR, irq_active_reg); intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq]); goto next_irq; #else device_printf(sc->gic_dev, "SGI %u on UP system detected\n", irq - GIC_FIRST_SGI); gic_c_write_4(sc, GICC_EOIR, irq_active_reg); goto next_irq; #endif } if (gic_debug_spurious) sc->last_irq[PCPU_GET(cpuid)] = irq; if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI) gic_c_write_4(sc, GICC_EOIR, irq_active_reg); if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) { gic_irq_mask(sc, irq); if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI) gic_c_write_4(sc, GICC_EOIR, irq_active_reg); device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq); } next_irq: arm_irq_memory_barrier(irq); irq_active_reg = gic_c_read_4(sc, GICC_IAR); irq = irq_active_reg & 0x3FF; if (irq < sc->nirqs) goto dispatch_irq; return (FILTER_HANDLED); } static void gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig, enum intr_polarity pol) { uint32_t reg; uint32_t mask; if (irq < GIC_FIRST_SPI) return; mtx_lock_spin(&sc->mutex); reg = gic_d_read_4(sc, GICD_ICFGR(irq)); mask = (reg >> 2*(irq % 16)) & 0x3; if (pol == INTR_POLARITY_LOW) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_LOW; } else if (pol == INTR_POLARITY_HIGH) { mask &= ~GICD_ICFGR_POL_MASK; mask |= GICD_ICFGR_POL_HIGH; } if (trig == INTR_TRIGGER_LEVEL) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_LVL; } else if (trig == INTR_TRIGGER_EDGE) { mask &= ~GICD_ICFGR_TRIG_MASK; mask |= GICD_ICFGR_TRIG_EDGE; } /* Set mask */ reg = reg & ~(0x3 << 2*(irq % 16)); reg = reg | (mask << 2*(irq % 16)); gic_d_write_4(sc, GICD_ICFGR(irq), reg); mtx_unlock_spin(&sc->mutex); } static int gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus) { uint32_t cpu, end, mask; end = min(mp_ncpus, GIC_MAXCPU); for (cpu = end; cpu < MAXCPU; cpu++) if (CPU_ISSET(cpu, cpus)) return (EINVAL); for (mask = 0, cpu = 0; cpu < end; cpu++) if (CPU_ISSET(cpu, cpus)) mask |= arm_gic_map[cpu]; gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask); return (0); } #ifdef FDT static int gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { if (ncells == 1) { *irqp = cells[0]; *polp = INTR_POLARITY_CONFORM; *trigp = INTR_TRIGGER_CONFORM; return (0); } if (ncells == 3) { u_int irq, tripol; /* * The 1st cell is the interrupt type: * 0 = SPI * 1 = PPI * The 2nd cell contains the interrupt number: * [0 - 987] for SPI * [0 - 15] for PPI * The 3rd cell is the flags, encoded as follows: * bits[3:0] trigger type and level flags * 1 = low-to-high edge triggered * 2 = high-to-low edge triggered * 4 = active high level-sensitive * 8 = active low level-sensitive * bits[15:8] PPI interrupt cpu mask * Each bit corresponds to each of the 8 possible cpus * attached to the GIC. A bit set to '1' indicated * the interrupt is wired to that CPU. */ switch (cells[0]) { case 0: irq = GIC_FIRST_SPI + cells[1]; /* SPI irq is checked later. */ break; case 1: irq = GIC_FIRST_PPI + cells[1]; if (irq > GIC_LAST_PPI) { device_printf(dev, "unsupported PPI interrupt " "number %u\n", cells[1]); return (EINVAL); } break; default: device_printf(dev, "unsupported interrupt type " "configuration %u\n", cells[0]); return (EINVAL); } tripol = cells[2] & 0xff; if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK && cells[0] == 0)) device_printf(dev, "unsupported trigger/polarity " "configuration 0x%02x\n", tripol); *irqp = irq; *polp = INTR_POLARITY_CONFORM; *trigp = tripol & FDT_INTR_EDGE_MASK ? 
INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL; return (0); } return (EINVAL); } #endif static int gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { struct gic_irqsrc *gi; /* Map a non-GICv2m MSI */ gi = (struct gic_irqsrc *)msi_data->isrc; if (gi == NULL) return (ENXIO); *irqp = gi->gi_irq; /* MSI/MSI-X interrupts are always edge triggered with high polarity */ *polp = INTR_POLARITY_HIGH; *trigp = INTR_TRIGGER_EDGE; return (0); } static int gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { u_int irq; enum intr_polarity pol; enum intr_trigger trig; struct arm_gic_softc *sc; struct intr_map_data_msi *dam; #ifdef FDT struct intr_map_data_fdt *daf; #endif #ifdef DEV_ACPI struct intr_map_data_acpi *daa; #endif sc = device_get_softc(dev); switch (data->type) { #ifdef FDT case INTR_MAP_DATA_FDT: daf = (struct intr_map_data_fdt *)data; if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol, &trig) != 0) return (EINVAL); KASSERT(irq >= sc->nirqs || (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0, ("%s: Attempting to map a MSI interrupt from FDT", __func__)); break; #endif #ifdef DEV_ACPI case INTR_MAP_DATA_ACPI: daa = (struct intr_map_data_acpi *)data; irq = daa->irq; pol = daa->pol; trig = daa->trig; break; #endif case INTR_MAP_DATA_MSI: /* Non-GICv2m MSI */ dam = (struct intr_map_data_msi *)data; if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0) return (EINVAL); break; default: return (ENOTSUP); } if (irq >= sc->nirqs) return (EINVAL); if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW && pol != INTR_POLARITY_HIGH) return (EINVAL); if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE && trig != INTR_TRIGGER_LEVEL) return (EINVAL); *irqp = irq; if (polp != NULL) *polp = pol; if (trigp != NULL) *trigp = trig; return (0); } static int arm_gic_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { int error; u_int irq; struct arm_gic_softc *sc; error = gic_map_intr(dev, data, &irq, NULL, NULL); if (error == 0) { sc = device_get_softc(dev); *isrcp = GIC_INTR_ISRC(sc, irq); } return (error); } static int arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; enum intr_trigger trig; enum intr_polarity pol; if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) { /* GICv2m MSI */ pol = gi->gi_pol; trig = gi->gi_trig; KASSERT(pol == INTR_POLARITY_HIGH, ("%s: MSI interrupts must be active-high", __func__)); KASSERT(trig == INTR_TRIGGER_EDGE, ("%s: MSI interrupts must be edge triggered", __func__)); } else if (data != NULL) { u_int irq; /* Get config for resource. */ if (gic_map_intr(dev, data, &irq, &pol, &trig) || gi->gi_irq != irq) return (EINVAL); } else { pol = INTR_POLARITY_CONFORM; trig = INTR_TRIGGER_CONFORM; } /* Compare config if this is not first setup. 
*/ if (isrc->isrc_handlers != 0) { if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) || (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig)) return (EINVAL); else return (0); } /* For MSI/MSI-X we should have already configured these */ if ((gi->gi_flags & GI_FLAG_MSI) == 0) { if (pol == INTR_POLARITY_CONFORM) pol = INTR_POLARITY_LOW; /* just pick some */ if (trig == INTR_TRIGGER_CONFORM) trig = INTR_TRIGGER_EDGE; /* just pick some */ gi->gi_pol = pol; gi->gi_trig = trig; /* Edge triggered interrupts need an early EOI sent */ if (gi->gi_trig == INTR_TRIGGER_EDGE) gi->gi_flags |= GI_FLAG_EARLY_EOI; } /* * XXX - In case that per CPU interrupt is going to be enabled in time * when SMP is already started, we need some IPI call which * enables it on others CPUs. Further, it's more complicated as * pic_enable_source() and pic_disable_source() should act on * per CPU basis only. Thus, it should be solved here somehow. */ if (isrc->isrc_flags & INTR_ISRCF_PPI) CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol); arm_gic_bind_intr(dev, isrc); return (0); } static int arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) { gi->gi_pol = INTR_POLARITY_CONFORM; gi->gi_trig = INTR_TRIGGER_CONFORM; } return (0); } static void arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; arm_irq_memory_barrier(gi->gi_irq); gic_irq_unmask(sc, gi->gi_irq); } static void arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; gic_irq_mask(sc, gi->gi_irq); } static void arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; arm_gic_disable_intr(dev, isrc); gic_c_write_4(sc, GICC_EOIR, gi->gi_irq); } static void arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc) { arm_irq_memory_barrier(0); arm_gic_enable_intr(dev, isrc); } static void arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; /* EOI for edge-triggered done earlier. 
*/ if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI) return; arm_irq_memory_barrier(0); gic_c_write_4(sc, GICC_EOIR, gi->gi_irq); } static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; if (gi->gi_irq < GIC_FIRST_SPI) return (EINVAL); if (CPU_EMPTY(&isrc->isrc_cpu)) { gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus); CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu); } return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu)); } #ifdef SMP static void arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus, u_int ipi) { struct arm_gic_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; uint32_t val = 0, i; for (i = 0; i < MAXCPU; i++) { if (CPU_ISSET(i, &cpus)) { MPASS(i < GIC_MAXCPU); val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT; } } gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq); } static int arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp) { struct intr_irqsrc *isrc; struct arm_gic_softc *sc = device_get_softc(dev); if (sgi_first_unused > GIC_LAST_SGI) return (ENOSPC); isrc = GIC_INTR_ISRC(sc, sgi_first_unused); sgi_to_ipi[sgi_first_unused++] = ipi; CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); *isrcp = isrc; return (0); } #endif static int arm_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count, int count, int maxcount, struct intr_irqsrc **isrc) { struct arm_gic_softc *sc; int i, irq, end_irq; bool found; KASSERT(powerof2(count), ("%s: bad count", __func__)); KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__)); sc = device_get_softc(dev); mtx_lock_spin(&sc->mutex); found = false; for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) { /* Start on an aligned interrupt */ if ((irq & (maxcount - 1)) != 0) continue; /* Assume we found a valid range until shown otherwise */ found = true; /* Check this range is valid */ for (end_irq = irq; end_irq != irq + count; end_irq++) { /* No free interrupts */ if (end_irq == mbi_start + mbi_count) { found = false; break; } KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0, ("%s: Non-MSI interrupt found", __func__)); /* This is already used */ if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED) { found = false; break; } } if (found) break; } /* Not enough interrupts were found */ if (!found || irq == mbi_start + mbi_count) { mtx_unlock_spin(&sc->mutex); return (ENXIO); } for (i = 0; i < count; i++) { /* Mark the interrupt as used */ sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED; } mtx_unlock_spin(&sc->mutex); for (i = 0; i < count; i++) isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i]; return (0); } static int arm_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc) { struct arm_gic_softc *sc; struct gic_irqsrc *gi; int i; sc = device_get_softc(dev); mtx_lock_spin(&sc->mutex); for (i = 0; i < count; i++) { gi = (struct gic_irqsrc *)isrc[i]; KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED, ("%s: Trying to release an unused MSI-X interrupt", __func__)); gi->gi_flags &= ~GI_FLAG_MSI_USED; } mtx_unlock_spin(&sc->mutex); return (0); } static int arm_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count, struct intr_irqsrc **isrc) { struct arm_gic_softc *sc; int irq; sc = device_get_softc(dev); mtx_lock_spin(&sc->mutex); /* Find an unused interrupt */ for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) { KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0, 
("%s: Non-MSI interrupt found", __func__)); if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0) break; } /* No free interrupt was found */ if (irq == mbi_start + mbi_count) { mtx_unlock_spin(&sc->mutex); return (ENXIO); } /* Mark the interrupt as used */ sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED; mtx_unlock_spin(&sc->mutex); *isrc = (struct intr_irqsrc *)&sc->gic_irqs[irq]; return (0); } static int arm_gic_release_msix(device_t dev, struct intr_irqsrc *isrc) { struct arm_gic_softc *sc; struct gic_irqsrc *gi; sc = device_get_softc(dev); gi = (struct gic_irqsrc *)isrc; KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED, ("%s: Trying to release an unused MSI-X interrupt", __func__)); mtx_lock_spin(&sc->mutex); gi->gi_flags &= ~GI_FLAG_MSI_USED; mtx_unlock_spin(&sc->mutex); return (0); } #ifdef DDB static void arm_gic_db_show(device_t dev) { struct arm_gic_softc *sc = device_get_softc(dev); uint32_t val; u_int i; db_printf("%s CPU registers:\n", device_get_nameunit(dev)); db_printf(" CTLR: %08x PMR: %08x BPR: %08x RPR: %08x\n", gic_c_read_4(sc, GICC_CTLR), gic_c_read_4(sc, GICC_PMR), gic_c_read_4(sc, GICC_BPR), gic_c_read_4(sc, GICC_RPR)); db_printf("HPPIR: %08x IIDR: %08x\n", gic_c_read_4(sc, GICC_HPPIR), gic_c_read_4(sc, GICC_IIDR)); db_printf("%s Distributor registers:\n", device_get_nameunit(dev)); db_printf(" CTLR: %08x TYPER: %08x IIDR: %08x\n", gic_d_read_4(sc, GICD_CTLR), gic_d_read_4(sc, GICD_TYPER), gic_d_read_4(sc, GICD_IIDR)); for (i = 0; i < sc->nirqs; i++) { if (i <= GIC_LAST_SGI) db_printf("SGI %2u ", i); else if (i <= GIC_LAST_PPI) db_printf("PPI %2u ", i - GIC_FIRST_PPI); else db_printf("SPI %2u ", i - GIC_FIRST_SPI); db_printf(" grp:%u", !!(gic_d_read_4(sc, GICD_IGROUPR(i)) & GICD_I_MASK(i))); db_printf(" enable:%u pend:%u active:%u", !!(gic_d_read_4(sc, GICD_ISENABLER(i)) & GICD_I_MASK(i)), !!(gic_d_read_4(sc, GICD_ISPENDR(i)) & GICD_I_MASK(i)), !!(gic_d_read_4(sc, GICD_ISACTIVER(i)) & GICD_I_MASK(i))); db_printf(" pri:%u", (gic_d_read_4(sc, GICD_IPRIORITYR(i)) >> 8 * (i & 0x3)) & 0xff); db_printf(" trg:%u", (gic_d_read_4(sc, GICD_ITARGETSR(i)) >> 8 * (i & 0x3)) & 0xff); val = gic_d_read_4(sc, GICD_ICFGR(i)) >> 2 * (i & 0xf); if ((val & GICD_ICFGR_POL_MASK) == GICD_ICFGR_POL_LOW) db_printf(" LO"); else db_printf(" HI"); if ((val & GICD_ICFGR_TRIG_MASK) == GICD_ICFGR_TRIG_LVL) db_printf(" LV"); else db_printf(" ED"); db_printf("\n"); } } #endif static device_method_t arm_gic_methods[] = { /* Bus interface */ DEVMETHOD(bus_print_child, arm_gic_print_child), DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_alloc_resource, arm_gic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource,bus_generic_activate_resource), DEVMETHOD(bus_read_ivar, arm_gic_read_ivar), DEVMETHOD(bus_write_ivar, arm_gic_write_ivar), /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, arm_gic_disable_intr), DEVMETHOD(pic_enable_intr, arm_gic_enable_intr), DEVMETHOD(pic_map_intr, arm_gic_map_intr), DEVMETHOD(pic_setup_intr, arm_gic_setup_intr), DEVMETHOD(pic_teardown_intr, arm_gic_teardown_intr), DEVMETHOD(pic_post_filter, arm_gic_post_filter), DEVMETHOD(pic_post_ithread, arm_gic_post_ithread), DEVMETHOD(pic_pre_ithread, arm_gic_pre_ithread), #ifdef SMP DEVMETHOD(pic_bind_intr, arm_gic_bind_intr), DEVMETHOD(pic_init_secondary, arm_gic_init_secondary), DEVMETHOD(pic_ipi_send, arm_gic_ipi_send), DEVMETHOD(pic_ipi_setup, arm_gic_ipi_setup), #endif /* GIC */ DEVMETHOD(gic_reserve_msi_range, 
arm_gic_reserve_msi_range), DEVMETHOD(gic_alloc_msi, arm_gic_alloc_msi), DEVMETHOD(gic_release_msi, arm_gic_release_msi), DEVMETHOD(gic_alloc_msix, arm_gic_alloc_msix), DEVMETHOD(gic_release_msix, arm_gic_release_msix), #ifdef DDB DEVMETHOD(gic_db_show, arm_gic_db_show), #endif { 0, 0 } }; DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods, sizeof(struct arm_gic_softc)); #ifdef DDB DB_SHOW_COMMAND_FLAGS(gic, db_show_gic, CS_OWN) { device_t dev; int t; bool valid; valid = false; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); valid = true; } db_skip_to_eol(); if (!valid) { db_printf("usage: show gic \n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } GIC_DB_SHOW(dev); } DB_SHOW_ALL_COMMAND(gics, db_show_all_gics) { devclass_t dc; device_t dev; int i; dc = devclass_find("gic"); if (dc == NULL) return; for (i = 0; i < devclass_get_maxunit(dc); i++) { dev = devclass_get_device(dc, i); if (dev != NULL) GIC_DB_SHOW(dev); if (db_pager_quit) break; } } #endif /* * GICv2m support -- the GICv2 MSI/MSI-X controller. */ #define GICV2M_MSI_TYPER 0x008 #define MSI_TYPER_SPI_BASE(x) (((x) >> 16) & 0x3ff) #define MSI_TYPER_SPI_COUNT(x) (((x) >> 0) & 0x3ff) #define GICv2M_MSI_SETSPI_NS 0x040 #define GICV2M_MSI_IIDR 0xFCC int arm_gicv2m_attach(device_t dev) { struct arm_gicv2m_softc *sc; uint32_t typer; int rid; sc = device_get_softc(dev); rid = 0; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { device_printf(dev, "Unable to allocate resources\n"); return (ENXIO); } typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER); sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer); sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer); /* Reserve these interrupts for MSI/MSI-X use */ GIC_RESERVE_MSI_RANGE(device_get_parent(dev), sc->sc_spi_start, sc->sc_spi_count); intr_msi_register(dev, sc->sc_xref); if (bootverbose) device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start, sc->sc_spi_start + sc->sc_spi_count - 1); return (0); } static int arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount, device_t *pic, struct intr_irqsrc **srcs) { struct arm_gicv2m_softc *sc; int error; sc = device_get_softc(dev); error = GIC_ALLOC_MSI(device_get_parent(dev), sc->sc_spi_start, sc->sc_spi_count, count, maxcount, srcs); if (error != 0) return (error); *pic = dev; return (0); } static int arm_gicv2m_release_msi(device_t dev, device_t child, int count, struct intr_irqsrc **isrc) { return (GIC_RELEASE_MSI(device_get_parent(dev), count, isrc)); } static int arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic, struct intr_irqsrc **isrcp) { struct arm_gicv2m_softc *sc; int error; sc = device_get_softc(dev); error = GIC_ALLOC_MSIX(device_get_parent(dev), sc->sc_spi_start, sc->sc_spi_count, isrcp); if (error != 0) return (error); *pic = dev; return (0); } static int arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc) { return (GIC_RELEASE_MSIX(device_get_parent(dev), isrc)); } static int arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc, uint64_t *addr, uint32_t *data) { struct arm_gicv2m_softc *sc = device_get_softc(dev); struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc; *addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS; *data = gi->gi_irq; return (0); } static device_method_t arm_gicv2m_methods[] = { /* Device interface */ DEVMETHOD(device_attach, arm_gicv2m_attach), /* MSI/MSI-X */ DEVMETHOD(msi_alloc_msi, arm_gicv2m_alloc_msi), 
DEVMETHOD(msi_release_msi, arm_gicv2m_release_msi), DEVMETHOD(msi_alloc_msix, arm_gicv2m_alloc_msix), DEVMETHOD(msi_release_msix, arm_gicv2m_release_msix), DEVMETHOD(msi_map_msi, arm_gicv2m_map_msi), /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods, sizeof(struct arm_gicv2m_softc)); diff --git a/sys/arm/broadcom/bcm2835/bcm2836.c b/sys/arm/broadcom/bcm2835/bcm2836.c index fd34ff8818ad..7ed9dedaa77e 100644 --- a/sys/arm/broadcom/bcm2835/bcm2836.c +++ b/sys/arm/broadcom/bcm2835/bcm2836.c @@ -1,752 +1,752 @@ /* * Copyright 2015 Andrew Turner. * Copyright 2016 Svatopluk Kraus * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #include #include #include #ifdef SMP #include #endif #include #include #include "pic_if.h" #define BCM_LINTC_CONTROL_REG 0x00 #define BCM_LINTC_PRESCALER_REG 0x08 #define BCM_LINTC_GPU_ROUTING_REG 0x0c #define BCM_LINTC_PMU_ROUTING_SET_REG 0x10 #define BCM_LINTC_PMU_ROUTING_CLR_REG 0x14 #define BCM_LINTC_TIMER_CFG_REG(n) (0x40 + (n) * 4) #define BCM_LINTC_MBOX_CFG_REG(n) (0x50 + (n) * 4) #define BCM_LINTC_PENDING_REG(n) (0x60 + (n) * 4) #define BCM_LINTC_MBOX0_SET_REG(n) (0x80 + (n) * 16) #define BCM_LINTC_MBOX1_SET_REG(n) (0x84 + (n) * 16) #define BCM_LINTC_MBOX2_SET_REG(n) (0x88 + (n) * 16) #define BCM_LINTC_MBOX3_SET_REG(n) (0x8C + (n) * 16) #define BCM_LINTC_MBOX0_CLR_REG(n) (0xC0 + (n) * 16) #define BCM_LINTC_MBOX1_CLR_REG(n) (0xC4 + (n) * 16) #define BCM_LINTC_MBOX2_CLR_REG(n) (0xC8 + (n) * 16) #define BCM_LINTC_MBOX3_CLR_REG(n) (0xCC + (n) * 16) /* Prescaler Register */ #define BCM_LINTC_PSR_19_2 0x80000000 /* 19.2 MHz */ /* GPU Interrupt Routing Register */ #define BCM_LINTC_GIRR_IRQ_CORE(n) (n) #define BCM_LINTC_GIRR_FIQ_CORE(n) ((n) << 2) /* PMU Interrupt Routing Register */ #define BCM_LINTC_PIRR_IRQ_EN_CORE(n) (1 << (n)) #define BCM_LINTC_PIRR_FIQ_EN_CORE(n) (1 << ((n) + 4)) /* Timer Config Register */ #define BCM_LINTC_TCR_IRQ_EN_TIMER(n) (1 << (n)) #define BCM_LINTC_TCR_FIQ_EN_TIMER(n) (1 << ((n) + 4)) /* MBOX Config Register */ #define BCM_LINTC_MCR_IRQ_EN_MBOX(n) (1 << (n)) #define BCM_LINTC_MCR_FIQ_EN_MBOX(n) (1 << ((n) + 4)) #define BCM_LINTC_CNTPSIRQ_IRQ 0 #define BCM_LINTC_CNTPNSIRQ_IRQ 1 #define BCM_LINTC_CNTHPIRQ_IRQ 2 #define BCM_LINTC_CNTVIRQ_IRQ 3 #define BCM_LINTC_MBOX0_IRQ 4 #define BCM_LINTC_MBOX1_IRQ 5 #define BCM_LINTC_MBOX2_IRQ 6 #define BCM_LINTC_MBOX3_IRQ 7 #define BCM_LINTC_GPU_IRQ 8 #define BCM_LINTC_PMU_IRQ 9 #define BCM_LINTC_AXI_IRQ 10 #define BCM_LINTC_LTIMER_IRQ 11 #define BCM_LINTC_NIRQS 12 #define BCM_LINTC_TIMER0_IRQ BCM_LINTC_CNTPSIRQ_IRQ #define BCM_LINTC_TIMER1_IRQ BCM_LINTC_CNTPNSIRQ_IRQ #define BCM_LINTC_TIMER2_IRQ BCM_LINTC_CNTHPIRQ_IRQ #define BCM_LINTC_TIMER3_IRQ BCM_LINTC_CNTVIRQ_IRQ #define BCM_LINTC_TIMER0_IRQ_MASK (1 << BCM_LINTC_TIMER0_IRQ) #define BCM_LINTC_TIMER1_IRQ_MASK (1 << BCM_LINTC_TIMER1_IRQ) #define BCM_LINTC_TIMER2_IRQ_MASK (1 << BCM_LINTC_TIMER2_IRQ) #define BCM_LINTC_TIMER3_IRQ_MASK (1 << BCM_LINTC_TIMER3_IRQ) #define BCM_LINTC_MBOX0_IRQ_MASK (1 << BCM_LINTC_MBOX0_IRQ) #define BCM_LINTC_GPU_IRQ_MASK (1 << BCM_LINTC_GPU_IRQ) #define BCM_LINTC_PMU_IRQ_MASK (1 << BCM_LINTC_PMU_IRQ) #define BCM_LINTC_UP_PENDING_MASK \ (BCM_LINTC_TIMER0_IRQ_MASK | \ BCM_LINTC_TIMER1_IRQ_MASK | \ BCM_LINTC_TIMER2_IRQ_MASK | \ BCM_LINTC_TIMER3_IRQ_MASK | \ BCM_LINTC_GPU_IRQ_MASK | \ BCM_LINTC_PMU_IRQ_MASK) #define BCM_LINTC_SMP_PENDING_MASK \ (BCM_LINTC_UP_PENDING_MASK | \ BCM_LINTC_MBOX0_IRQ_MASK) #ifdef SMP #define BCM_LINTC_PENDING_MASK BCM_LINTC_SMP_PENDING_MASK #else #define BCM_LINTC_PENDING_MASK BCM_LINTC_UP_PENDING_MASK #endif struct bcm_lintc_irqsrc { struct intr_irqsrc bli_isrc; u_int bli_irq; union { u_int bli_mask; /* for timers */ u_int bli_value; /* for GPU */ }; }; struct bcm_lintc_softc { device_t bls_dev; struct mtx bls_mtx; struct resource * bls_mem; bus_space_tag_t bls_bst; bus_space_handle_t bls_bsh; struct bcm_lintc_irqsrc bls_isrcs[BCM_LINTC_NIRQS]; }; static struct bcm_lintc_softc *bcm_lintc_sc; #ifdef SMP #define BCM_LINTC_NIPIS 32 /* only mailbox 0 is used for IPI */ 
CTASSERT(INTR_IPI_COUNT <= BCM_LINTC_NIPIS); #endif #define BCM_LINTC_LOCK(sc) mtx_lock_spin(&(sc)->bls_mtx) #define BCM_LINTC_UNLOCK(sc) mtx_unlock_spin(&(sc)->bls_mtx) #define BCM_LINTC_LOCK_INIT(sc) mtx_init(&(sc)->bls_mtx, \ device_get_nameunit((sc)->bls_dev), "bmc_local_intc", MTX_SPIN) #define BCM_LINTC_LOCK_DESTROY(sc) mtx_destroy(&(sc)->bls_mtx) #define bcm_lintc_read_4(sc, reg) \ bus_space_read_4((sc)->bls_bst, (sc)->bls_bsh, (reg)) #define bcm_lintc_write_4(sc, reg, val) \ bus_space_write_4((sc)->bls_bst, (sc)->bls_bsh, (reg), (val)) static inline void bcm_lintc_rwreg_clr(struct bcm_lintc_softc *sc, uint32_t reg, uint32_t mask) { bcm_lintc_write_4(sc, reg, bcm_lintc_read_4(sc, reg) & ~mask); } static inline void bcm_lintc_rwreg_set(struct bcm_lintc_softc *sc, uint32_t reg, uint32_t mask) { bcm_lintc_write_4(sc, reg, bcm_lintc_read_4(sc, reg) | mask); } static void bcm_lintc_timer_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli) { cpuset_t *cpus; uint32_t cpu; cpus = &bli->bli_isrc.isrc_cpu; BCM_LINTC_LOCK(sc); for (cpu = 0; cpu < 4; cpu++) if (CPU_ISSET(cpu, cpus)) bcm_lintc_rwreg_clr(sc, BCM_LINTC_TIMER_CFG_REG(cpu), bli->bli_mask); BCM_LINTC_UNLOCK(sc); } static void bcm_lintc_timer_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli) { cpuset_t *cpus; uint32_t cpu; cpus = &bli->bli_isrc.isrc_cpu; BCM_LINTC_LOCK(sc); for (cpu = 0; cpu < 4; cpu++) if (CPU_ISSET(cpu, cpus)) bcm_lintc_rwreg_set(sc, BCM_LINTC_TIMER_CFG_REG(cpu), bli->bli_mask); BCM_LINTC_UNLOCK(sc); } static inline void bcm_lintc_gpu_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli) { /* It's accessed just and only by one core. */ bcm_lintc_write_4(sc, BCM_LINTC_GPU_ROUTING_REG, 0); } static inline void bcm_lintc_gpu_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli) { /* It's accessed just and only by one core. */ bcm_lintc_write_4(sc, BCM_LINTC_GPU_ROUTING_REG, bli->bli_value); } static inline void bcm_lintc_pmu_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli) { cpuset_t *cpus; uint32_t cpu, mask; mask = 0; cpus = &bli->bli_isrc.isrc_cpu; BCM_LINTC_LOCK(sc); for (cpu = 0; cpu < 4; cpu++) if (CPU_ISSET(cpu, cpus)) mask |= BCM_LINTC_PIRR_IRQ_EN_CORE(cpu); /* Write-clear register. */ bcm_lintc_write_4(sc, BCM_LINTC_PMU_ROUTING_CLR_REG, mask); BCM_LINTC_UNLOCK(sc); } static inline void bcm_lintc_pmu_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli) { cpuset_t *cpus; uint32_t cpu, mask; mask = 0; cpus = &bli->bli_isrc.isrc_cpu; BCM_LINTC_LOCK(sc); for (cpu = 0; cpu < 4; cpu++) if (CPU_ISSET(cpu, cpus)) mask |= BCM_LINTC_PIRR_IRQ_EN_CORE(cpu); /* Write-set register. 
*/ bcm_lintc_write_4(sc, BCM_LINTC_PMU_ROUTING_SET_REG, mask); BCM_LINTC_UNLOCK(sc); } static void bcm_lintc_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli) { switch (bli->bli_irq) { case BCM_LINTC_TIMER0_IRQ: case BCM_LINTC_TIMER1_IRQ: case BCM_LINTC_TIMER2_IRQ: case BCM_LINTC_TIMER3_IRQ: bcm_lintc_timer_mask(sc, bli); return; case BCM_LINTC_MBOX0_IRQ: case BCM_LINTC_MBOX1_IRQ: case BCM_LINTC_MBOX2_IRQ: case BCM_LINTC_MBOX3_IRQ: return; case BCM_LINTC_GPU_IRQ: bcm_lintc_gpu_mask(sc, bli); return; case BCM_LINTC_PMU_IRQ: bcm_lintc_pmu_mask(sc, bli); return; default: panic("%s: not implemented for irq %u", __func__, bli->bli_irq); } } static void bcm_lintc_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli) { switch (bli->bli_irq) { case BCM_LINTC_TIMER0_IRQ: case BCM_LINTC_TIMER1_IRQ: case BCM_LINTC_TIMER2_IRQ: case BCM_LINTC_TIMER3_IRQ: bcm_lintc_timer_unmask(sc, bli); return; case BCM_LINTC_MBOX0_IRQ: case BCM_LINTC_MBOX1_IRQ: case BCM_LINTC_MBOX2_IRQ: case BCM_LINTC_MBOX3_IRQ: return; case BCM_LINTC_GPU_IRQ: bcm_lintc_gpu_unmask(sc, bli); return; case BCM_LINTC_PMU_IRQ: bcm_lintc_pmu_unmask(sc, bli); return; default: panic("%s: not implemented for irq %u", __func__, bli->bli_irq); } } #ifdef SMP static inline void bcm_lintc_ipi_write(struct bcm_lintc_softc *sc, cpuset_t cpus, u_int ipi) { u_int cpu; uint32_t mask; mask = 1 << ipi; for (cpu = 0; cpu < mp_ncpus; cpu++) if (CPU_ISSET(cpu, &cpus)) bcm_lintc_write_4(sc, BCM_LINTC_MBOX0_SET_REG(cpu), mask); } static inline void bcm_lintc_ipi_dispatch(struct bcm_lintc_softc *sc, u_int cpu, struct trapframe *tf) { u_int ipi; uint32_t mask; mask = bcm_lintc_read_4(sc, BCM_LINTC_MBOX0_CLR_REG(cpu)); if (mask == 0) { device_printf(sc->bls_dev, "Spurious ipi detected\n"); return; } for (ipi = 0; mask != 0; mask >>= 1, ipi++) { if ((mask & 0x01) == 0) continue; /* * Clear an IPI before dispatching to not miss anyone * and make sure that it's observed by everybody. 
*/ bcm_lintc_write_4(sc, BCM_LINTC_MBOX0_CLR_REG(cpu), 1 << ipi); #if defined(__aarch64__) dsb(sy); #else dsb(); #endif intr_ipi_dispatch(ipi); } } #endif static inline void bcm_lintc_irq_dispatch(struct bcm_lintc_softc *sc, u_int irq, struct trapframe *tf) { struct bcm_lintc_irqsrc *bli; bli = &sc->bls_isrcs[irq]; if (intr_isrc_dispatch(&bli->bli_isrc, tf) != 0) device_printf(sc->bls_dev, "Stray irq %u detected\n", irq); } static int bcm_lintc_intr(void *arg) { struct bcm_lintc_softc *sc; u_int cpu; uint32_t num, reg; struct trapframe *tf; sc = arg; cpu = PCPU_GET(cpuid); tf = curthread->td_intr_frame; for (num = 0; ; num++) { reg = bcm_lintc_read_4(sc, BCM_LINTC_PENDING_REG(cpu)); if ((reg & BCM_LINTC_PENDING_MASK) == 0) break; #ifdef SMP if (reg & BCM_LINTC_MBOX0_IRQ_MASK) bcm_lintc_ipi_dispatch(sc, cpu, tf); #endif if (reg & BCM_LINTC_TIMER0_IRQ_MASK) bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER0_IRQ, tf); if (reg & BCM_LINTC_TIMER1_IRQ_MASK) bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER1_IRQ, tf); if (reg & BCM_LINTC_TIMER2_IRQ_MASK) bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER2_IRQ, tf); if (reg & BCM_LINTC_TIMER3_IRQ_MASK) bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER3_IRQ, tf); if (reg & BCM_LINTC_GPU_IRQ_MASK) bcm_lintc_irq_dispatch(sc, BCM_LINTC_GPU_IRQ, tf); if (reg & BCM_LINTC_PMU_IRQ_MASK) bcm_lintc_irq_dispatch(sc, BCM_LINTC_PMU_IRQ, tf); arm_irq_memory_barrier(0); /* XXX */ } reg &= ~BCM_LINTC_PENDING_MASK; if (reg != 0) device_printf(sc->bls_dev, "Unknown interrupt(s) %x\n", reg); else if (num == 0 && bootverbose) device_printf(sc->bls_dev, "Spurious interrupt detected\n"); return (FILTER_HANDLED); } static void bcm_lintc_disable_intr(device_t dev, struct intr_irqsrc *isrc) { bcm_lintc_mask(device_get_softc(dev), (struct bcm_lintc_irqsrc *)isrc); } static void bcm_lintc_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct bcm_lintc_irqsrc *bli = (struct bcm_lintc_irqsrc *)isrc; arm_irq_memory_barrier(bli->bli_irq); bcm_lintc_unmask(device_get_softc(dev), bli); } static int bcm_lintc_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct intr_map_data_fdt *daf; struct bcm_lintc_softc *sc; if (data->type != INTR_MAP_DATA_FDT) return (ENOTSUP); daf = (struct intr_map_data_fdt *)data; if (daf->ncells > 2 || daf->cells[0] >= BCM_LINTC_NIRQS) return (EINVAL); /* TODO: handle IRQ type here */ sc = device_get_softc(dev); *isrcp = &sc->bls_isrcs[daf->cells[0]].bli_isrc; return (0); } static void bcm_lintc_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct bcm_lintc_irqsrc *bli = (struct bcm_lintc_irqsrc *)isrc; if (bli->bli_irq == BCM_LINTC_GPU_IRQ) bcm_lintc_gpu_mask(device_get_softc(dev), bli); else { /* * Handler for PPI interrupt does not make sense much unless * there is one bound ithread for each core for it. Thus the * interrupt can be masked on current core only while ithread * bounded to this core ensures unmasking on the same core. */ panic ("%s: handlers are not supported", __func__); } } static void bcm_lintc_post_ithread(device_t dev, struct intr_irqsrc *isrc) { struct bcm_lintc_irqsrc *bli = (struct bcm_lintc_irqsrc *)isrc; if (bli->bli_irq == BCM_LINTC_GPU_IRQ) bcm_lintc_gpu_unmask(device_get_softc(dev), bli); else { /* See comment in bcm_lintc_pre_ithread(). 
 */
		panic ("%s: handlers are not supported", __func__);
	}
}

static void
bcm_lintc_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
}

static int
bcm_lintc_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct bcm_lintc_softc *sc;

	if (isrc->isrc_handlers == 0 && isrc->isrc_flags & INTR_ISRCF_PPI) {
		sc = device_get_softc(dev);
		BCM_LINTC_LOCK(sc);
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
		BCM_LINTC_UNLOCK(sc);
	}
	return (0);
}

#ifdef SMP
static void
bcm_lintc_init_rwreg_on_ap(struct bcm_lintc_softc *sc, u_int cpu, u_int irq,
    uint32_t reg, uint32_t mask)
{
	if (intr_isrc_init_on_cpu(&sc->bls_isrcs[irq].bli_isrc, cpu))
		bcm_lintc_rwreg_set(sc, reg, mask);
}

static void
bcm_lintc_init_pmu_on_ap(struct bcm_lintc_softc *sc, u_int cpu)
{
	struct intr_irqsrc *isrc = &sc->bls_isrcs[BCM_LINTC_PMU_IRQ].bli_isrc;

	if (intr_isrc_init_on_cpu(isrc, cpu)) {
		/* Write-set register. */
		bcm_lintc_write_4(sc, BCM_LINTC_PMU_ROUTING_SET_REG,
		    BCM_LINTC_PIRR_IRQ_EN_CORE(cpu));
	}
}

static void
-bcm_lintc_init_secondary(device_t dev, enum root_type root_type)
+bcm_lintc_init_secondary(device_t dev, uint32_t rootnum)
{
	u_int cpu;
	struct bcm_lintc_softc *sc;

	cpu = PCPU_GET(cpuid);
	sc = device_get_softc(dev);

	BCM_LINTC_LOCK(sc);
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER0_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(0));
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER1_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(1));
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER2_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(2));
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER3_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(3));
	bcm_lintc_init_pmu_on_ap(sc, cpu);
	BCM_LINTC_UNLOCK(sc);
}

static void
bcm_lintc_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct bcm_lintc_softc *sc = device_get_softc(dev);

	KASSERT(isrc == &sc->bls_isrcs[BCM_LINTC_MBOX0_IRQ].bli_isrc,
	    ("%s: bad ISRC %p argument", __func__, isrc));
	bcm_lintc_ipi_write(sc, cpus, ipi);
}

static int
bcm_lintc_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct bcm_lintc_softc *sc = device_get_softc(dev);

	KASSERT(ipi < BCM_LINTC_NIPIS, ("%s: too high ipi %u", __func__, ipi));

	*isrcp = &sc->bls_isrcs[BCM_LINTC_MBOX0_IRQ].bli_isrc;
	return (0);
}
#endif

static int
bcm_lintc_pic_attach(struct bcm_lintc_softc *sc)
{
	struct bcm_lintc_irqsrc *bisrcs;
	struct intr_pic *pic;
	int error;
	u_int flags;
	uint32_t irq;
	const char *name;
	intptr_t xref;

	bisrcs = sc->bls_isrcs;
	name = device_get_nameunit(sc->bls_dev);
	for (irq = 0; irq < BCM_LINTC_NIRQS; irq++) {
		bisrcs[irq].bli_irq = irq;
		switch (irq) {
		case BCM_LINTC_TIMER0_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(0);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_TIMER1_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(1);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_TIMER2_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(2);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_TIMER3_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(3);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_MBOX0_IRQ:
		case BCM_LINTC_MBOX1_IRQ:
		case BCM_LINTC_MBOX2_IRQ:
		case BCM_LINTC_MBOX3_IRQ:
			bisrcs[irq].bli_value = 0;	/* not used */
			flags = INTR_ISRCF_IPI;
			break;
		case BCM_LINTC_GPU_IRQ:
			bisrcs[irq].bli_value = BCM_LINTC_GIRR_IRQ_CORE(0);
			flags = 0;
			break;
		case BCM_LINTC_PMU_IRQ:
			bisrcs[irq].bli_value = 0;	/* not used */
			flags = INTR_ISRCF_PPI;
			break;
		default:
			bisrcs[irq].bli_value = 0;	/* not used */
			flags = 0;
			break;
		}

		error = intr_isrc_register(&bisrcs[irq].bli_isrc, sc->bls_dev,
		    flags, "%s,%u", name, irq);
		if (error != 0)
			return (error);
	}

	xref = OF_xref_from_node(ofw_bus_get_node(sc->bls_dev));
	pic = intr_pic_register(sc->bls_dev, xref);
	if (pic == NULL)
		return (ENXIO);

	error = intr_pic_claim_root(sc->bls_dev, xref, bcm_lintc_intr, sc,
	    INTR_ROOT_IRQ);
	if (error != 0)
		return (error);

#ifdef SMP
	error = intr_ipi_pic_register(sc->bls_dev, 0);
	if (error != 0)
		return (error);
#endif

	return (0);
}
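Editor's note: the mailbox-0 IPI path in this driver (bcm_lintc_ipi_write() and bcm_lintc_ipi_dispatch() earlier in the file, with mailbox 0 enabled per core in bcm_lintc_attach()) depends on the BCM2836's paired SET/CLR mailbox registers: the sender sets a one-hot IPI bit, the receiver reads the pending bits through the CLR register and write-1-clears each bit before dispatching so a racing second IPI of the same kind is not lost. A rough user-space model of that handshake, with a plain uint32_t standing in for the device registers:

```c
/* User-space model of the BCM2836 mailbox-0 IPI handshake, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define	NCPU	4
static uint32_t mbox0[NCPU];	/* models BCM_LINTC_MBOX0_{SET,CLR}_REG(cpu) */

static void
ipi_send(unsigned int cpu, unsigned int ipi)
{
	mbox0[cpu] |= 1u << ipi;		/* write to the SET register */
}

static void
ipi_poll(unsigned int cpu)
{
	uint32_t mask = mbox0[cpu];		/* read the CLR register */
	unsigned int ipi;

	for (ipi = 0; mask != 0; mask >>= 1, ipi++) {
		if ((mask & 1u) == 0)
			continue;
		mbox0[cpu] &= ~(1u << ipi);	/* write-1-clear, then act */
		printf("cpu%u: dispatching IPI %u\n", cpu, ipi);
	}
}

int
main(void)
{
	ipi_send(1, 2);		/* two distinct IPI vectors queued for cpu1 */
	ipi_send(1, 0);
	ipi_poll(1);
	return (0);
}
```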
static int
bcm_lintc_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "brcm,bcm2836-l1-intc"))
		return (ENXIO);
	if (!ofw_bus_has_prop(dev, "interrupt-controller"))
		return (ENXIO);
	device_set_desc(dev, "BCM2836 Interrupt Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
bcm_lintc_attach(device_t dev)
{
	struct bcm_lintc_softc *sc;
	int cpu, rid;

	sc = device_get_softc(dev);

	sc->bls_dev = dev;
	if (bcm_lintc_sc != NULL)
		return (ENXIO);

	rid = 0;
	sc->bls_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bls_mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	sc->bls_bst = rman_get_bustag(sc->bls_mem);
	sc->bls_bsh = rman_get_bushandle(sc->bls_mem);

	bcm_lintc_write_4(sc, BCM_LINTC_CONTROL_REG, 0);
	bcm_lintc_write_4(sc, BCM_LINTC_PRESCALER_REG, BCM_LINTC_PSR_19_2);

	/* Disable all timers on all cores. */
	for (cpu = 0; cpu < 4; cpu++)
		bcm_lintc_write_4(sc, BCM_LINTC_TIMER_CFG_REG(cpu), 0);

#ifdef SMP
	/* Enable mailbox 0 on all cores used for IPI. */
	for (cpu = 0; cpu < 4; cpu++)
		bcm_lintc_write_4(sc, BCM_LINTC_MBOX_CFG_REG(cpu),
		    BCM_LINTC_MCR_IRQ_EN_MBOX(0));
#endif

	if (bcm_lintc_pic_attach(sc) != 0) {
		device_printf(dev, "could not attach PIC\n");
		return (ENXIO);
	}

	BCM_LINTC_LOCK_INIT(sc);
	bcm_lintc_sc = sc;
	return (0);
}

static device_method_t bcm_lintc_methods[] = {
	DEVMETHOD(device_probe,		bcm_lintc_probe),
	DEVMETHOD(device_attach,	bcm_lintc_attach),

	DEVMETHOD(pic_disable_intr,	bcm_lintc_disable_intr),
	DEVMETHOD(pic_enable_intr,	bcm_lintc_enable_intr),
	DEVMETHOD(pic_map_intr,		bcm_lintc_map_intr),
	DEVMETHOD(pic_post_filter,	bcm_lintc_post_filter),
	DEVMETHOD(pic_post_ithread,	bcm_lintc_post_ithread),
	DEVMETHOD(pic_pre_ithread,	bcm_lintc_pre_ithread),
	DEVMETHOD(pic_setup_intr,	bcm_lintc_setup_intr),
#ifdef SMP
	DEVMETHOD(pic_init_secondary,	bcm_lintc_init_secondary),
	DEVMETHOD(pic_ipi_send,		bcm_lintc_ipi_send),
	DEVMETHOD(pic_ipi_setup,	bcm_lintc_ipi_setup),
#endif

	DEVMETHOD_END
};

static driver_t bcm_lintc_driver = {
	"lintc",
	bcm_lintc_methods,
	sizeof(struct bcm_lintc_softc),
};

EARLY_DRIVER_MODULE(lintc, simplebus, bcm_lintc_driver, 0, 0,
    BUS_PASS_INTERRUPT);
diff --git a/sys/arm/include/intr.h b/sys/arm/include/intr.h
index 32297f656392..e64edd47dad2 100644
--- a/sys/arm/include/intr.h
+++ b/sys/arm/include/intr.h
@@ -1,58 +1,52 @@
/* $NetBSD: intr.h,v 1.7 2003/06/16 20:01:00 thorpej Exp $ */

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Mark Brinicombe
 *    for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef	_MACHINE_INTR_H_
#define	_MACHINE_INTR_H_

#ifdef FDT
#include
#endif

-enum root_type {
-	INTR_ROOT_IRQ	= 0,
-
-	INTR_ROOT_COUNT	/* MUST BE LAST */
-};
-
#ifndef NIRQ
#define	NIRQ		1024	/* XXX - It should be an option. */
#endif

void	arm_irq_memory_barrier(uintptr_t);

#endif	/* _MACHINE_INTR_H */
diff --git a/sys/arm64/arm64/genassym.c b/sys/arm64/arm64/genassym.c
index 8612d2eda309..3ca712ca3de3 100644
--- a/sys/arm64/arm64/genassym.c
+++ b/sys/arm64/arm64/genassym.c
@@ -1,90 +1,90 @@
/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
* */ #include #include #include +#include #include #include #include #include -#include #include #include /* Sizeof arm64_bootparams, rounded to keep stack alignment */ ASSYM(BOOTPARAMS_SIZE, roundup2(sizeof(struct arm64_bootparams), STACKALIGNBYTES + 1)); ASSYM(BP_MODULEP, offsetof(struct arm64_bootparams, modulep)); ASSYM(BP_KERN_STACK, offsetof(struct arm64_bootparams, kern_stack)); ASSYM(BP_KERN_TTBR0, offsetof(struct arm64_bootparams, kern_ttbr0)); ASSYM(BP_BOOT_EL, offsetof(struct arm64_bootparams, boot_el)); ASSYM(EC_EFI_STATUS, offsetof(struct efirt_callinfo, ec_efi_status)); ASSYM(EC_FPTR, offsetof(struct efirt_callinfo, ec_fptr)); ASSYM(EC_ARG1, offsetof(struct efirt_callinfo, ec_arg1)); ASSYM(PCPU_SIZE, sizeof(struct pcpu)); ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(PC_SSBD, offsetof(struct pcpu, pc_ssbd)); /* Size of pcb, rounded to keep stack alignment */ ASSYM(PCB_SIZE, roundup2(sizeof(struct pcb), STACKALIGNBYTES + 1)); ASSYM(PCB_SINGLE_STEP_SHIFT, PCB_SINGLE_STEP_SHIFT); ASSYM(PCB_REGS, offsetof(struct pcb, pcb_x)); ASSYM(PCB_X19, PCB_X19); ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp)); ASSYM(PCB_TPIDRRO, offsetof(struct pcb, pcb_tpidrro_el0)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(P_PID, offsetof(struct proc, p_pid)); ASSYM(SF_UC, offsetof(struct sigframe, sf_uc)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_AST, offsetof(struct thread, td_ast)); ASSYM(TD_FRAME, offsetof(struct thread, td_frame)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); ASSYM(TD_MD_CANARY, offsetof(struct thread, td_md.md_canary)); ASSYM(TD_MD_EFIRT_TMP, offsetof(struct thread, td_md.md_efirt_tmp)); ASSYM(TF_SIZE, sizeof(struct trapframe)); ASSYM(TF_SP, offsetof(struct trapframe, tf_sp)); ASSYM(TF_LR, offsetof(struct trapframe, tf_lr)); ASSYM(TF_ELR, offsetof(struct trapframe, tf_elr)); ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr)); ASSYM(TF_ESR, offsetof(struct trapframe, tf_esr)); ASSYM(TF_X, offsetof(struct trapframe, tf_x)); ASSYM(INTR_ROOT_IRQ, INTR_ROOT_IRQ); ASSYM(INTR_ROOT_FIQ, INTR_ROOT_FIQ); diff --git a/sys/arm64/arm64/gic_v3.c b/sys/arm64/arm64/gic_v3.c index 750f734a7757..964a129111e2 100644 --- a/sys/arm64/arm64/gic_v3.c +++ b/sys/arm64/arm64/gic_v3.c @@ -1,1712 +1,1712 @@ /*- * Copyright (c) 2015-2016 The FreeBSD Foundation * * This software was developed by Andrew Turner under * the sponsorship of the FreeBSD Foundation. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #ifdef DEV_ACPI #include #include #endif #include "gic_if.h" #include "pic_if.h" #include "msi_if.h" #include #include "gic_v3_reg.h" #include "gic_v3_var.h" static bus_print_child_t gic_v3_print_child; static bus_get_domain_t gic_v3_get_domain; static bus_read_ivar_t gic_v3_read_ivar; static bus_write_ivar_t gic_v3_write_ivar; static bus_alloc_resource_t gic_v3_alloc_resource; static pic_disable_intr_t gic_v3_disable_intr; static pic_enable_intr_t gic_v3_enable_intr; static pic_map_intr_t gic_v3_map_intr; static pic_setup_intr_t gic_v3_setup_intr; static pic_teardown_intr_t gic_v3_teardown_intr; static pic_post_filter_t gic_v3_post_filter; static pic_post_ithread_t gic_v3_post_ithread; static pic_pre_ithread_t gic_v3_pre_ithread; static pic_bind_intr_t gic_v3_bind_intr; #ifdef SMP static pic_init_secondary_t gic_v3_init_secondary; static pic_ipi_send_t gic_v3_ipi_send; static pic_ipi_setup_t gic_v3_ipi_setup; #endif static gic_reserve_msi_range_t gic_v3_reserve_msi_range; static gic_alloc_msi_t gic_v3_gic_alloc_msi; static gic_release_msi_t gic_v3_gic_release_msi; static gic_alloc_msix_t gic_v3_gic_alloc_msix; static gic_release_msix_t gic_v3_gic_release_msix; static msi_alloc_msi_t gic_v3_alloc_msi; static msi_release_msi_t gic_v3_release_msi; static msi_alloc_msix_t gic_v3_alloc_msix; static msi_release_msix_t gic_v3_release_msix; static msi_map_msi_t gic_v3_map_msi; static u_int gic_irq_cpu; #ifdef SMP static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1]; static u_int sgi_first_unused = GIC_FIRST_SGI; #endif static device_method_t gic_v3_methods[] = { /* Device interface */ DEVMETHOD(device_detach, gic_v3_detach), /* Bus interface */ DEVMETHOD(bus_print_child, gic_v3_print_child), DEVMETHOD(bus_get_domain, gic_v3_get_domain), DEVMETHOD(bus_read_ivar, gic_v3_read_ivar), DEVMETHOD(bus_write_ivar, gic_v3_write_ivar), DEVMETHOD(bus_alloc_resource, gic_v3_alloc_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, gic_v3_disable_intr), DEVMETHOD(pic_enable_intr, gic_v3_enable_intr), DEVMETHOD(pic_map_intr, gic_v3_map_intr), DEVMETHOD(pic_setup_intr, gic_v3_setup_intr), DEVMETHOD(pic_teardown_intr, gic_v3_teardown_intr), DEVMETHOD(pic_post_filter, gic_v3_post_filter), DEVMETHOD(pic_post_ithread, gic_v3_post_ithread), DEVMETHOD(pic_pre_ithread, gic_v3_pre_ithread), #ifdef SMP DEVMETHOD(pic_bind_intr, gic_v3_bind_intr), DEVMETHOD(pic_init_secondary, gic_v3_init_secondary), DEVMETHOD(pic_ipi_send, gic_v3_ipi_send), DEVMETHOD(pic_ipi_setup, gic_v3_ipi_setup), #endif /* MSI/MSI-X */ DEVMETHOD(msi_alloc_msi, gic_v3_alloc_msi), DEVMETHOD(msi_release_msi, gic_v3_release_msi), DEVMETHOD(msi_alloc_msix, gic_v3_alloc_msix), 
DEVMETHOD(msi_release_msix, gic_v3_release_msix), DEVMETHOD(msi_map_msi, gic_v3_map_msi), /* GIC */ DEVMETHOD(gic_reserve_msi_range, gic_v3_reserve_msi_range), DEVMETHOD(gic_alloc_msi, gic_v3_gic_alloc_msi), DEVMETHOD(gic_release_msi, gic_v3_gic_release_msi), DEVMETHOD(gic_alloc_msix, gic_v3_gic_alloc_msix), DEVMETHOD(gic_release_msix, gic_v3_gic_release_msix), /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods, sizeof(struct gic_v3_softc)); /* * Driver-specific definitions. */ MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR); /* * Helper functions and definitions. */ /* Destination registers, either Distributor or Re-Distributor */ enum gic_v3_xdist { DIST = 0, REDIST, }; struct gic_v3_irqsrc { struct intr_irqsrc gi_isrc; uint32_t gi_irq; enum intr_polarity gi_pol; enum intr_trigger gi_trig; #define GI_FLAG_MSI (1 << 1) /* This interrupt source should only */ /* be used for MSI/MSI-X interrupts */ #define GI_FLAG_MSI_USED (1 << 2) /* This irq is already allocated */ /* for a MSI/MSI-X interrupt */ u_int gi_flags; }; /* Helper routines starting with gic_v3_ */ static int gic_v3_dist_init(struct gic_v3_softc *); static int gic_v3_redist_alloc(struct gic_v3_softc *); static int gic_v3_redist_find(struct gic_v3_softc *); static int gic_v3_redist_init(struct gic_v3_softc *); static int gic_v3_cpu_init(struct gic_v3_softc *); static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist); /* A sequence of init functions for primary (boot) CPU */ typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *); /* Primary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_primary_init[] = { gic_v3_dist_init, gic_v3_redist_alloc, gic_v3_redist_init, gic_v3_cpu_init, NULL }; #ifdef SMP /* Secondary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_secondary_init[] = { gic_v3_redist_init, gic_v3_cpu_init, NULL }; #endif uint32_t gic_r_read_4(device_t dev, bus_size_t offset) { struct gic_v3_softc *sc; struct resource *rdist; sc = device_get_softc(dev); rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res; offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset; return (bus_read_4(rdist, offset)); } uint64_t gic_r_read_8(device_t dev, bus_size_t offset) { struct gic_v3_softc *sc; struct resource *rdist; sc = device_get_softc(dev); rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res; offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset; return (bus_read_8(rdist, offset)); } void gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val) { struct gic_v3_softc *sc; struct resource *rdist; sc = device_get_softc(dev); rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res; offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset; bus_write_4(rdist, offset, val); } void gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val) { struct gic_v3_softc *sc; struct resource *rdist; sc = device_get_softc(dev); rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res; offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset; bus_write_8(rdist, offset, val); } static void gic_v3_reserve_msi_range(device_t dev, u_int start, u_int count) { struct gic_v3_softc *sc; int i; sc = device_get_softc(dev); KASSERT((start + count) < sc->gic_nirqs, ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__, start, count, sc->gic_nirqs)); for (i = 0; i < count; i++) { KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0, ("%s: MSI interrupt %d already has a handler", __func__, count + i)); KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM, ("%s: MSI interrupt %d 
already has a polarity", __func__, count + i)); KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM, ("%s: MSI interrupt %d already has a trigger", __func__, count + i)); sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH; sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE; sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI; } } /* * Device interface. */ int gic_v3_attach(device_t dev) { struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; uint32_t typer; int rid; int err; size_t i; u_int irq; const char *name; sc = device_get_softc(dev); sc->gic_registered = FALSE; sc->dev = dev; err = 0; /* Initialize mutex */ mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN); /* * Allocate array of struct resource. * One entry for Distributor and all remaining for Re-Distributor. */ sc->gic_res = malloc( sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1), M_GIC_V3, M_WAITOK); /* Now allocate corresponding resources */ for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) { sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->gic_res[rid] == NULL) return (ENXIO); } /* * Distributor interface */ sc->gic_dist = sc->gic_res[0]; /* * Re-Dristributor interface */ /* Allocate space under region descriptions */ sc->gic_redists.regions = malloc( sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions, M_GIC_V3, M_WAITOK); /* Fill-up bus_space information for each region. */ for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++) sc->gic_redists.regions[i] = sc->gic_res[rid]; /* Get the number of supported SPI interrupts */ typer = gic_d_read(sc, 4, GICD_TYPER); sc->gic_nirqs = GICD_TYPER_I_NUM(typer); if (sc->gic_nirqs > GIC_I_NUM_MAX) sc->gic_nirqs = GIC_I_NUM_MAX; sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs, M_GIC_V3, M_WAITOK | M_ZERO); name = device_get_nameunit(dev); for (irq = 0; irq < sc->gic_nirqs; irq++) { struct intr_irqsrc *isrc; sc->gic_irqs[irq].gi_irq = irq; sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM; sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM; isrc = &sc->gic_irqs[irq].gi_isrc; if (irq <= GIC_LAST_SGI) { err = intr_isrc_register(isrc, sc->dev, INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI); } else if (irq <= GIC_LAST_PPI) { err = intr_isrc_register(isrc, sc->dev, INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI); } else { err = intr_isrc_register(isrc, sc->dev, 0, "%s,s%u", name, irq - GIC_FIRST_SPI); } if (err != 0) { /* XXX call intr_isrc_deregister() */ free(sc->gic_irqs, M_DEVBUF); return (err); } } mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF); if (sc->gic_mbi_start > 0) { if (!sc->gic_mbi_end) { /* * This is to address SPI based msi ranges, where * SPI range is not specified in ACPI */ sc->gic_mbi_end = sc->gic_nirqs - 1; } gic_v3_reserve_msi_range(dev, sc->gic_mbi_start, sc->gic_mbi_end - sc->gic_mbi_start); if (bootverbose) { device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start, sc->gic_mbi_end); } } /* * Read the Peripheral ID2 register. This is an implementation * defined register, but seems to be implemented in all GICv3 * parts and Linux expects it to be there. 
*/ sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2); /* Get the number of supported interrupt identifier bits */ sc->gic_idbits = GICD_TYPER_IDBITS(typer); if (bootverbose) { device_printf(dev, "SPIs: %u, IDs: %u\n", sc->gic_nirqs, (1 << sc->gic_idbits) - 1); } /* Train init sequence for boot CPU */ for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) return (err); } return (0); } int gic_v3_detach(device_t dev) { struct gic_v3_softc *sc; int rid; sc = device_get_softc(dev); if (device_is_attached(dev)) { /* * XXX: We should probably deregister PIC */ if (sc->gic_registered) panic("Trying to detach registered PIC"); } for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]); free(sc->gic_redists.pcpu, M_GIC_V3); free(sc->ranges, M_GIC_V3); free(sc->gic_res, M_GIC_V3); free(sc->gic_redists.regions, M_GIC_V3); return (0); } static int gic_v3_print_child(device_t bus, device_t child) { struct resource_list *rl; int retval = 0; rl = BUS_GET_RESOURCE_LIST(bus, child); KASSERT(rl != NULL, ("%s: No resource list", __func__)); retval += bus_print_child_header(bus, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += bus_print_child_footer(bus, child); return (retval); } static int gic_v3_get_domain(device_t dev, device_t child, int *domain) { struct gic_v3_devinfo *di; di = device_get_ivars(child); if (di->gic_domain < 0) return (ENOENT); *domain = di->gic_domain; return (0); } static int gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct gic_v3_softc *sc; struct gic_v3_devinfo *di; sc = device_get_softc(dev); switch (which) { case GICV3_IVAR_NIRQS: *result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren; return (0); case GICV3_IVAR_REDIST: *result = (uintptr_t)&sc->gic_redists.pcpu[PCPU_GET(cpuid)]; return (0); case GICV3_IVAR_SUPPORT_LPIS: *result = (gic_d_read(sc, 4, GICD_TYPER) & GICD_TYPER_LPIS) != 0; return (0); case GIC_IVAR_HW_REV: KASSERT( GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 || GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4, ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)", GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2)); *result = GICR_PIDR2_ARCH(sc->gic_pidr2); return (0); case GIC_IVAR_BUS: KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN, ("gic_v3_read_ivar: Unknown bus type")); KASSERT(sc->gic_bus <= GIC_BUS_MAX, ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus)); *result = sc->gic_bus; return (0); case GIC_IVAR_VGIC: di = device_get_ivars(child); if (di == NULL) return (EINVAL); *result = di->is_vgic; return (0); } return (ENOENT); } static int gic_v3_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch(which) { case GICV3_IVAR_NIRQS: case GICV3_IVAR_REDIST: case GIC_IVAR_HW_REV: case GIC_IVAR_BUS: return (EINVAL); } return (ENOENT); } static struct resource * gic_v3_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct gic_v3_softc *sc; struct resource_list_entry *rle; struct resource_list *rl; int j; /* We only allocate memory */ if (type != SYS_RES_MEMORY) return (NULL); sc = device_get_softc(bus); if (RMAN_IS_DEFAULT_RANGE(start, end)) { rl = BUS_GET_RESOURCE_LIST(bus, child); if (rl == NULL) return (NULL); /* Find defaults for this rid */ rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; 
count = rle->count; } /* Remap through ranges property */ for (j = 0; j < sc->nranges; j++) { if (start >= sc->ranges[j].bus && end < sc->ranges[j].bus + sc->ranges[j].size) { start -= sc->ranges[j].bus; start += sc->ranges[j].host; end -= sc->ranges[j].bus; end += sc->ranges[j].host; break; } } if (j == sc->nranges && sc->nranges != 0) { if (bootverbose) device_printf(bus, "Could not map resource " "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end); return (NULL); } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } int arm_gic_v3_intr(void *arg) { struct gic_v3_softc *sc = arg; struct gic_v3_irqsrc *gi; struct intr_pic *pic; uint64_t active_irq; struct trapframe *tf; pic = sc->gic_pic; while (1) { if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) { /* * Hardware: Cavium ThunderX * Chip revision: Pass 1.0 (early version) * Pass 1.1 (production) * ERRATUM: 22978, 23154 */ __asm __volatile( "nop;nop;nop;nop;nop;nop;nop;nop; \n" "mrs %0, ICC_IAR1_EL1 \n" "nop;nop;nop;nop; \n" "dsb sy \n" : "=&r" (active_irq)); } else { active_irq = gic_icc_read(IAR1); } if (active_irq >= GIC_FIRST_LPI) { intr_child_irq_handler(pic, active_irq); continue; } if (__predict_false(active_irq >= sc->gic_nirqs)) return (FILTER_HANDLED); tf = curthread->td_intr_frame; gi = &sc->gic_irqs[active_irq]; if (active_irq <= GIC_LAST_SGI) { /* Call EOI for all IPI before dispatch. */ gic_icc_write(EOIR1, (uint64_t)active_irq); #ifdef SMP intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq]); #else device_printf(sc->dev, "SGI %ju on UP system detected\n", (uintmax_t)(active_irq - GIC_FIRST_SGI)); #endif } else if (active_irq >= GIC_FIRST_PPI && active_irq <= GIC_LAST_SPI) { if (gi->gi_trig == INTR_TRIGGER_EDGE) gic_icc_write(EOIR1, gi->gi_irq); if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) { if (gi->gi_trig != INTR_TRIGGER_EDGE) gic_icc_write(EOIR1, gi->gi_irq); gic_v3_disable_intr(sc->dev, &gi->gi_isrc); device_printf(sc->dev, "Stray irq %lu disabled\n", active_irq); } } } } #ifdef FDT static int gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { u_int irq; if (ncells < 3) return (EINVAL); /* * The 1st cell is the interrupt type: * 0 = SPI * 1 = PPI * The 2nd cell contains the interrupt number: * [0 - 987] for SPI * [0 - 15] for PPI * The 3rd cell is the flags, encoded as follows: * bits[3:0] trigger type and level flags * 1 = edge triggered * 2 = edge triggered (PPI only) * 4 = level-sensitive * 8 = level-sensitive (PPI only) */ switch (cells[0]) { case 0: irq = GIC_FIRST_SPI + cells[1]; /* SPI irq is checked later. 
*/ break; case 1: irq = GIC_FIRST_PPI + cells[1]; if (irq > GIC_LAST_PPI) { device_printf(dev, "unsupported PPI interrupt " "number %u\n", cells[1]); return (EINVAL); } break; default: device_printf(dev, "unsupported interrupt type " "configuration %u\n", cells[0]); return (EINVAL); } switch (cells[2] & FDT_INTR_MASK) { case FDT_INTR_EDGE_RISING: *trigp = INTR_TRIGGER_EDGE; *polp = INTR_POLARITY_HIGH; break; case FDT_INTR_EDGE_FALLING: *trigp = INTR_TRIGGER_EDGE; *polp = INTR_POLARITY_LOW; break; case FDT_INTR_LEVEL_HIGH: *trigp = INTR_TRIGGER_LEVEL; *polp = INTR_POLARITY_HIGH; break; case FDT_INTR_LEVEL_LOW: *trigp = INTR_TRIGGER_LEVEL; *polp = INTR_POLARITY_LOW; break; default: device_printf(dev, "unsupported trigger/polarity " "configuration 0x%02x\n", cells[2]); return (EINVAL); } /* Check the interrupt is valid */ if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH) return (EINVAL); *irqp = irq; return (0); } #endif static int gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { struct gic_v3_irqsrc *gi; /* SPI-mapped MSI */ gi = (struct gic_v3_irqsrc *)msi_data->isrc; if (gi == NULL) return (ENXIO); *irqp = gi->gi_irq; /* MSI/MSI-X interrupts are always edge triggered with high polarity */ *polp = INTR_POLARITY_HIGH; *trigp = INTR_TRIGGER_EDGE; return (0); } static int do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { struct gic_v3_softc *sc; enum intr_polarity pol; enum intr_trigger trig; struct intr_map_data_msi *dam; #ifdef FDT struct intr_map_data_fdt *daf; #endif #ifdef DEV_ACPI struct intr_map_data_acpi *daa; #endif u_int irq; sc = device_get_softc(dev); switch (data->type) { #ifdef FDT case INTR_MAP_DATA_FDT: daf = (struct intr_map_data_fdt *)data; if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol, &trig) != 0) return (EINVAL); break; #endif #ifdef DEV_ACPI case INTR_MAP_DATA_ACPI: daa = (struct intr_map_data_acpi *)data; irq = daa->irq; pol = daa->pol; trig = daa->trig; break; #endif case INTR_MAP_DATA_MSI: /* SPI-mapped MSI */ dam = (struct intr_map_data_msi *)data; if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0) return (EINVAL); break; default: return (EINVAL); } if (irq >= sc->gic_nirqs) return (EINVAL); switch (pol) { case INTR_POLARITY_CONFORM: case INTR_POLARITY_LOW: case INTR_POLARITY_HIGH: break; default: return (EINVAL); } switch (trig) { case INTR_TRIGGER_CONFORM: case INTR_TRIGGER_EDGE: case INTR_TRIGGER_LEVEL: break; default: return (EINVAL); } *irqp = irq; if (polp != NULL) *polp = pol; if (trigp != NULL) *trigp = trig; return (0); } static int gic_v3_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct gic_v3_softc *sc; int error; u_int irq; error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL); if (error == 0) { sc = device_get_softc(dev); *isrcp = GIC_INTR_ISRC(sc, irq); } return (error); } struct gic_v3_setup_periph_args { device_t dev; struct intr_irqsrc *isrc; }; static void gic_v3_setup_intr_periph(void *argp) { struct gic_v3_setup_periph_args *args = argp; struct intr_irqsrc *isrc = args->isrc; struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; device_t dev = args->dev; u_int irq = gi->gi_irq; struct gic_v3_softc *sc = device_get_softc(dev); uint32_t reg; MPASS(irq <= GIC_LAST_SPI); /* * We need the lock for both SGIs and PPIs for an atomic CPU_SET() at a * minimum, but we also need it below for SPIs. 
*/ mtx_lock_spin(&sc->gic_mtx); if (isrc->isrc_flags & INTR_ISRCF_PPI) CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) { /* Set the trigger and polarity */ if (irq <= GIC_LAST_PPI) reg = gic_r_read(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICFGR(irq)); else reg = gic_d_read(sc, 4, GICD_ICFGR(irq)); if (gi->gi_trig == INTR_TRIGGER_LEVEL) reg &= ~(2 << ((irq % 16) * 2)); else reg |= 2 << ((irq % 16) * 2); if (irq <= GIC_LAST_PPI) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg); gic_v3_wait_for_rwp(sc, REDIST); } else { gic_d_write(sc, 4, GICD_ICFGR(irq), reg); gic_v3_wait_for_rwp(sc, DIST); } } mtx_unlock_spin(&sc->gic_mtx); } static int gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; struct gic_v3_setup_periph_args pargs; enum intr_trigger trig; enum intr_polarity pol; u_int irq; int error; if (data == NULL) return (ENOTSUP); error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig); if (error != 0) return (error); if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM || trig == INTR_TRIGGER_CONFORM) return (EINVAL); /* Compare config if this is not first setup. */ if (isrc->isrc_handlers != 0) { if (pol != gi->gi_pol || trig != gi->gi_trig) return (EINVAL); else return (0); } /* For MSI/MSI-X we should have already configured these */ if ((gi->gi_flags & GI_FLAG_MSI) == 0) { gi->gi_pol = pol; gi->gi_trig = trig; } pargs.dev = dev; pargs.isrc = isrc; if (isrc->isrc_flags & INTR_ISRCF_PPI) { /* * If APs haven't been fired up yet, smp_rendezvous() will just * execute it on the single CPU and gic_v3_init_secondary() will * clean up afterwards. */ smp_rendezvous(NULL, gic_v3_setup_intr_periph, NULL, &pargs); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { gic_v3_setup_intr_periph(&pargs); gic_v3_bind_intr(dev, isrc); } return (0); } static int gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) { gi->gi_pol = INTR_POLARITY_CONFORM; gi->gi_trig = INTR_TRIGGER_CONFORM; } return (0); } static void gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; u_int irq; sc = device_get_softc(dev); gi = (struct gic_v3_irqsrc *)isrc; irq = gi->gi_irq; if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else panic("%s: Unsupported IRQ %u", __func__, irq); } static void gic_v3_enable_intr_periph(void *argp) { struct gic_v3_setup_periph_args *args = argp; struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)args->isrc; device_t dev = args->dev; struct gic_v3_softc *sc = device_get_softc(dev); u_int irq = gi->gi_irq; /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } static void gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_setup_periph_args pargs; struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; u_int irq; gi = (struct gic_v3_irqsrc *)isrc; irq 
= gi->gi_irq; pargs.isrc = isrc; pargs.dev = dev; if (irq <= GIC_LAST_PPI) { /* * SGIs only need to be configured on the current AP. We'll setup and * enable IPIs as APs come online. */ if (irq <= GIC_LAST_SGI) gic_v3_enable_intr_periph(&pargs); else smp_rendezvous(NULL, gic_v3_enable_intr_periph, NULL, &pargs); return; } sc = device_get_softc(dev); if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else panic("%s: Unsupported IRQ %u", __func__, irq); } static void gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; gic_v3_disable_intr(dev, isrc); gic_icc_write(EOIR1, gi->gi_irq); } static void gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc) { gic_v3_enable_intr(dev, isrc); } static void gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; if (gi->gi_trig == INTR_TRIGGER_EDGE) return; gic_icc_write(EOIR1, gi->gi_irq); } static int gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; int cpu; gi = (struct gic_v3_irqsrc *)isrc; KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI, ("%s: Attempting to bind an invalid IRQ", __func__)); sc = device_get_softc(dev); if (CPU_EMPTY(&isrc->isrc_cpu)) { gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus); CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu); gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(gic_irq_cpu)); } else { /* * We can only bind to a single CPU so select * the first CPU found. */ cpu = CPU_FFS(&isrc->isrc_cpu) - 1; gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu)); } return (0); } #ifdef SMP static void -gic_v3_init_secondary(device_t dev, enum root_type root_type) +gic_v3_init_secondary(device_t dev, uint32_t rootnum) { struct gic_v3_setup_periph_args pargs; device_t child; struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; struct intr_irqsrc *isrc; u_int cpu, irq; int err, i; sc = device_get_softc(dev); cpu = PCPU_GET(cpuid); /* Train init sequence for this secondary CPU */ for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) { device_printf(dev, "Could not initialize GIC for CPU%u\n", cpu); return; } } pargs.dev = dev; /* Unmask attached SGI interrupts. */ for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) { isrc = GIC_INTR_ISRC(sc, irq); if (intr_isrc_init_on_cpu(isrc, cpu)) { pargs.isrc = isrc; gic_v3_enable_intr_periph(&pargs); } } /* Unmask attached PPI interrupts. */ for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) { isrc = GIC_INTR_ISRC(sc, irq); if (intr_isrc_init_on_cpu(isrc, cpu)) { pargs.isrc = isrc; gic_v3_setup_intr_periph(&pargs); gic_v3_enable_intr_periph(&pargs); } } for (i = 0; i < sc->gic_nchildren; i++) { child = sc->gic_children[i]; - PIC_INIT_SECONDARY(child, root_type); + PIC_INIT_SECONDARY(child, rootnum); } } static void gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus, u_int ipi) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; uint64_t aff, val, irq; int i; #define GIC_AFF_MASK (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK) #define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK) aff = GIC_AFFINITY(0); irq = gi->gi_irq; val = 0; /* Iterate through all CPUs in set */ for (i = 0; i <= mp_maxid; i++) { /* Move to the next affinity group */ if (aff != GIC_AFFINITY(i)) { /* Send the IPI */ if (val != 0) { gic_icc_write(SGI1R, val); val = 0; } aff = GIC_AFFINITY(i); } /* Send the IPI to this CPU */ if (CPU_ISSET(i, &cpus)) { #define ICC_SGI1R_AFFINITY(aff) \ (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) | \ ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) | \ ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT)) /* Set the affinity when this is the first CPU at this level */ if (val == 0) val = ICC_SGI1R_AFFINITY(aff) | irq << ICC_SGI1R_EL1_SGIID_SHIFT; /* Set the bit to send the IPI to the CPU */ val |= 1 << CPU_AFF0(CPU_AFFINITY(i)); } } /* Send the IPI to the last CPU affinity group */ if (val != 0) gic_icc_write(SGI1R, val); #undef GIC_AFF_MASK #undef GIC_AFFINITY } static int gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp) { struct intr_irqsrc *isrc; struct gic_v3_softc *sc = device_get_softc(dev); if (sgi_first_unused > GIC_LAST_SGI) return (ENOSPC); isrc = GIC_INTR_ISRC(sc, sgi_first_unused); sgi_to_ipi[sgi_first_unused++] = ipi; CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); *isrcp = isrc; return (0); } #endif /* SMP */ /* * Helper routines */ static void gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist) { struct resource *res; bus_size_t offset; u_int cpuid; size_t us_left = 1000000; cpuid = PCPU_GET(cpuid); switch (xdist) { case DIST: res = sc->gic_dist; offset = 0; break; case REDIST: res = sc->gic_redists.pcpu[cpuid].res; offset = sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset; break; default: KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__)); return; } while ((bus_read_4(res, offset + GICD_CTLR) & GICD_CTLR_RWP) != 0) { DELAY(1); if (us_left-- == 0) panic("GICD Register write pending for too long"); } } /* CPU interface. */ static __inline void gic_v3_cpu_priority(uint64_t mask) { /* Set priority mask */ gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK); } static int gic_v3_cpu_enable_sre(struct gic_v3_softc *sc) { uint64_t sre; u_int cpuid; cpuid = PCPU_GET(cpuid); /* * Set the SRE bit to enable access to GIC CPU interface * via system registers. */ sre = READ_SPECIALREG(icc_sre_el1); sre |= ICC_SRE_EL1_SRE; WRITE_SPECIALREG(icc_sre_el1, sre); isb(); /* * Now ensure that the bit is set. */ sre = READ_SPECIALREG(icc_sre_el1); if ((sre & ICC_SRE_EL1_SRE) == 0) { /* We are done. This was disabled in EL2 */ device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface " "via system registers\n", cpuid); return (ENXIO); } else if (bootverbose) { device_printf(sc->dev, "CPU%u enabled CPU interface via system registers\n", cpuid); } return (0); } static int gic_v3_cpu_init(struct gic_v3_softc *sc) { int err; /* Enable access to CPU interface via system registers */ err = gic_v3_cpu_enable_sre(sc); if (err != 0) return (err); /* Priority mask to minimum - accept all interrupts */ gic_v3_cpu_priority(GIC_PRIORITY_MIN); /* Disable EOI mode */ gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE); /* Enable group 1 (non-secure) interrupts */ gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN); return (0); } /* Distributor */ static int gic_v3_dist_init(struct gic_v3_softc *sc) { uint64_t aff; u_int i; /* * 1. Disable the Distributor */ gic_d_write(sc, 4, GICD_CTLR, 0); gic_v3_wait_for_rwp(sc, DIST); /* * 2. Configure the Distributor */ /* Set all SPIs to be Group 1 Non-secure */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn) gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF); /* Set all global interrupts to be level triggered, active low. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn) gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000); /* Set priority for all shared interrupts */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) { /* Set highest priority */ gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } /* * Disable all interrupts. Leave PPIs and SGIs as they are enabled in * Re-Distributor registers. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn) gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF); gic_v3_wait_for_rwp(sc, DIST); /* * 3. Enable Distributor */ /* Enable Distributor with ARE, Group 1 */ gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A | GICD_CTLR_G1); /* * 4. Route all interrupts to boot CPU. */
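/*
 * Both GICD_IROUTER (programmed just below) and ICC_SGI1R_EL1 (composed in
 * gic_v3_ipi_send() above) are built from a CPU's MPIDR affinity fields.
 * A minimal sketch of that packing, assuming the field layout from the
 * GICv3 architecture spec; the helper names are illustrative.
 */
#include <stdint.h>

/* GICD_IROUTER: Aff3 [39:32], Aff2 [23:16], Aff1 [15:8], Aff0 [7:0]. */
static inline uint64_t
irouter_from_mpidr(uint64_t mpidr)
{
	uint64_t aff0 = mpidr & 0xff;
	uint64_t aff1 = (mpidr >> 8) & 0xff;
	uint64_t aff2 = (mpidr >> 16) & 0xff;
	uint64_t aff3 = (mpidr >> 32) & 0xff;

	return ((aff3 << 32) | (aff2 << 16) | (aff1 << 8) | aff0);
}

/*
 * ICC_SGI1R_EL1: Aff3 [55:48], Aff2 [39:32], INTID [27:24], Aff1 [23:16],
 * TargetList [15:0] with one bit per Aff0 value inside the cluster. This
 * is why gic_v3_ipi_send() batches CPUs sharing Aff3..Aff1 into a single
 * register write.
 */
static inline uint64_t
sgi1r_for_cluster(uint64_t mpidr, unsigned int sgi, uint16_t target_list)
{
	uint64_t aff1 = (mpidr >> 8) & 0xff;
	uint64_t aff2 = (mpidr >> 16) & 0xff;
	uint64_t aff3 = (mpidr >> 32) & 0xff;

	return ((aff3 << 48) | (aff2 << 32) | ((uint64_t)sgi << 24) |
	    (aff1 << 16) | target_list);
}
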
aff = CPU_AFFINITY(0); for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++) gic_d_write(sc, 8, GICD_IROUTER(i), aff); return (0); } /* Re-Distributor */ static int gic_v3_redist_alloc(struct gic_v3_softc *sc) { sc->gic_redists.pcpu = mallocarray(mp_maxid + 1, sizeof(sc->gic_redists.pcpu[0]), M_GIC_V3, M_WAITOK); return (0); } static int gic_v3_redist_find(struct gic_v3_softc *sc) { struct resource *r_res; bus_size_t offset; uint64_t aff; uint64_t typer; uint32_t pidr2; u_int cpuid; size_t i; cpuid = PCPU_GET(cpuid); aff = CPU_AFFINITY(cpuid); /* Affinity in format for comparison with typer */ aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) | (CPU_AFF1(aff) << 8) | CPU_AFF0(aff); if (bootverbose) { device_printf(sc->dev, "Start searching for Re-Distributor\n"); } /* Iterate through Re-Distributor regions */ for (i = 0; i < sc->gic_redists.nregions; i++) { /* Take a copy of the region's resource */ r_res = sc->gic_redists.regions[i]; pidr2 = bus_read_4(r_res, GICR_PIDR2); switch (GICR_PIDR2_ARCH(pidr2)) { case GICR_PIDR2_ARCH_GICv3: /* fall through */ case GICR_PIDR2_ARCH_GICv4: break; default: device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENODEV); } offset = 0; do { typer = bus_read_8(r_res, offset + GICR_TYPER); if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) { KASSERT(cpuid <= mp_maxid, ("Invalid pointer to per-CPU redistributor")); /* Copy res contents to its final destination */ sc->gic_redists.pcpu[cpuid].res = r_res; sc->gic_redists.pcpu[cpuid].offset = offset; sc->gic_redists.pcpu[cpuid].lpi_enabled = false; if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor has been found\n", cpuid); } return (0); } offset += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE); if ((typer & GICR_TYPER_VLPIS) != 0) { offset += (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE); } } while (offset < rman_get_size(r_res) && (typer & GICR_TYPER_LAST) == 0); } device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENXIO); } static int gic_v3_redist_wake(struct gic_v3_softc *sc) { uint32_t waker; size_t us_left = 1000000; waker = gic_r_read(sc, 4, GICR_WAKER); /* Wake up Re-Distributor for this CPU */ waker &= ~GICR_WAKER_PS; gic_r_write(sc, 4, GICR_WAKER, waker); /* * When clearing ProcessorSleep bit it is required to wait for * ChildrenAsleep to become zero following the processor power-on. */ while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) { DELAY(1); if (us_left-- == 0) { panic("Could not wake Re-Distributor for CPU%u", PCPU_GET(cpuid)); } } if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor woke up\n", PCPU_GET(cpuid)); } return (0); } static int gic_v3_redist_init(struct gic_v3_softc *sc) { int err; size_t i; err = gic_v3_redist_find(sc); if (err != 0) return (err); err = gic_v3_redist_wake(sc); if (err != 0) return (err); /* Configure SGIs and PPIs to be Group1 Non-secure */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0, 0xFFFFFFFF); /* Disable PPIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0, GICR_I_ENABLER_PPI_MASK); /* Enable SGIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0, GICR_I_ENABLER_SGI_MASK); /* Set priority for SGIs and PPIs */ for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } gic_v3_wait_for_rwp(sc, REDIST); return (0); } /* * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller. */
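/*
 * The allocator below scans for `count` consecutive unused SPI slots
 * starting on a `maxcount`-aligned boundary, since the MSI data value
 * encodes the vector in its low bits and the block must therefore be
 * naturally aligned. A self-contained sketch of that scan (illustrative
 * only; the driver tracks state in gi_flags, not a bool array):
 */
#include <stdbool.h>

/* Both count and maxcount are powers of two; returns start slot or -1. */
static int
find_aligned_free_range(const bool *used, int nslots, int count, int maxcount)
{
	for (int irq = 0; irq + count <= nslots; irq++) {
		if ((irq & (maxcount - 1)) != 0)
			continue;	/* not aligned to the block size */
		bool ok = true;
		for (int i = 0; i < count; i++) {
			if (used[irq + i]) {
				ok = false;
				break;
			}
		}
		if (ok)
			return (irq);
	}
	return (-1);
}
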
static int gic_v3_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count, int count, int maxcount, struct intr_irqsrc **isrc) { struct gic_v3_softc *sc; int i, irq, end_irq; bool found; KASSERT(powerof2(count), ("%s: bad count", __func__)); KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__)); sc = device_get_softc(dev); mtx_lock(&sc->gic_mbi_mtx); found = false; for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) { /* Start on an aligned interrupt */ if ((irq & (maxcount - 1)) != 0) continue; /* Assume we found a valid range until shown otherwise */ found = true; /* Check this range is valid */ for (end_irq = irq; end_irq != irq + count; end_irq++) { /* No free interrupts */ if (end_irq == mbi_start + mbi_count) { found = false; break; } KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI) != 0, ("%s: Non-MSI interrupt found", __func__)); /* This is already used */ if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED) { found = false; break; } } if (found) break; } /* Not enough interrupts were found */ if (!found || irq == mbi_start + mbi_count) { mtx_unlock(&sc->gic_mbi_mtx); return (ENXIO); } for (i = 0; i < count; i++) { /* Mark the interrupt as used */ sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED; } mtx_unlock(&sc->gic_mbi_mtx); for (i = 0; i < count; i++) isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i]; return (0); } static int gic_v3_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; int i; sc = device_get_softc(dev); mtx_lock(&sc->gic_mbi_mtx); for (i = 0; i < count; i++) { gi = (struct gic_v3_irqsrc *)isrc[i]; KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED, ("%s: Trying to release an unused MSI interrupt", __func__)); gi->gi_flags &= ~GI_FLAG_MSI_USED; } mtx_unlock(&sc->gic_mbi_mtx); return (0); } static int gic_v3_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count, struct intr_irqsrc **isrcp) { struct gic_v3_softc *sc; int irq; sc = device_get_softc(dev); mtx_lock(&sc->gic_mbi_mtx); /* Find an unused interrupt */ for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) { KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0, ("%s: Non-MSI interrupt found", __func__)); if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0) break; } /* No free interrupt was found */ if (irq == mbi_start + mbi_count) { mtx_unlock(&sc->gic_mbi_mtx); return (ENXIO); } /* Mark the interrupt as used */ sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED; mtx_unlock(&sc->gic_mbi_mtx); *isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq]; return (0); } static int gic_v3_gic_release_msix(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; sc = device_get_softc(dev); gi = (struct gic_v3_irqsrc *)isrc; KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED, ("%s: Trying to release an unused MSI-X interrupt", __func__)); mtx_lock(&sc->gic_mbi_mtx); gi->gi_flags &= ~GI_FLAG_MSI_USED; mtx_unlock(&sc->gic_mbi_mtx); return (0); } static int gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount, device_t *pic, struct intr_irqsrc **isrc) { struct gic_v3_softc *sc; int error; sc = device_get_softc(dev); error = gic_v3_gic_alloc_msi(dev, sc->gic_mbi_start, sc->gic_mbi_end - sc->gic_mbi_start, count, maxcount, isrc); if (error != 0) return (error); *pic = dev; return (0); } static int gic_v3_release_msi(device_t dev, device_t child, int count, struct intr_irqsrc **isrc) { return
(gic_v3_gic_release_msi(dev, count, isrc)); } static int gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic, struct intr_irqsrc **isrc) { struct gic_v3_softc *sc; int error; sc = device_get_softc(dev); error = gic_v3_gic_alloc_msix(dev, sc->gic_mbi_start, sc->gic_mbi_end - sc->gic_mbi_start, isrc); if (error != 0) return (error); *pic = dev; return (0); } static int gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc) { return (gic_v3_gic_release_msix(dev, isrc)); } static int gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc, uint64_t *addr, uint32_t *data) { struct gic_v3_softc *sc = device_get_softc(dev); struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; *addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR; *data = gi->gi_irq; return (0); } diff --git a/sys/arm64/arm64/gicv3_its.c b/sys/arm64/arm64/gicv3_its.c index 31e23fc01224..5ecd9b8c0e94 100644 --- a/sys/arm64/arm64/gicv3_its.c +++ b/sys/arm64/arm64/gicv3_its.c @@ -1,2329 +1,2329 @@ /*- * Copyright (c) 2015-2016 The FreeBSD Foundation * Copyright (c) 2023 Arm Ltd * * This software was developed by Andrew Turner under * the sponsorship of the FreeBSD Foundation. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_acpi.h" #include "opt_platform.h" #include "opt_iommu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif #include #include #ifdef IOMMU #include #include #endif #include "pcib_if.h" #include "pic_if.h" #include "msi_if.h" MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS", "ARM GICv3 Interrupt Translation Service"); #define LPI_NIRQS (64 * 1024) /* The size and alignment of the command circular buffer */ #define ITS_CMDQ_SIZE (64 * 1024) /* Must be a multiple of 4K */ #define ITS_CMDQ_ALIGN (64 * 1024) #define LPI_CONFTAB_SIZE LPI_NIRQS #define LPI_CONFTAB_ALIGN (64 * 1024) #define LPI_CONFTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */ #define LPI_PENDTAB_SIZE ((LPI_NIRQS + GIC_FIRST_LPI) / 8) #define LPI_PENDTAB_ALIGN (64 * 1024) #define LPI_PENDTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ #define LPI_INT_TRANS_TAB_ALIGN 256 #define LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1) /* ITS commands encoding */ #define ITS_CMD_MOVI (0x01) #define ITS_CMD_SYNC (0x05) #define ITS_CMD_MAPD (0x08) #define ITS_CMD_MAPC (0x09) #define ITS_CMD_MAPTI (0x0a) #define ITS_CMD_MAPI (0x0b) #define ITS_CMD_INV (0x0c) #define ITS_CMD_INVALL (0x0d) /* Command */ #define CMD_COMMAND_MASK (0xFFUL) /* PCI device ID */ #define CMD_DEVID_SHIFT (32) #define CMD_DEVID_MASK (0xFFFFFFFFUL << CMD_DEVID_SHIFT) /* Size of IRQ ID bitfield */ #define CMD_SIZE_MASK (0xFFUL) /* Virtual LPI ID */ #define CMD_ID_MASK (0xFFFFFFFFUL) /* Physical LPI ID */ #define CMD_PID_SHIFT (32) #define CMD_PID_MASK (0xFFFFFFFFUL << CMD_PID_SHIFT) /* Collection */ #define CMD_COL_MASK (0xFFFFUL) /* Target (CPU or Re-Distributor) */ #define CMD_TARGET_SHIFT (16) #define CMD_TARGET_MASK (0xFFFFFFFFUL << CMD_TARGET_SHIFT) /* Interrupt Translation Table address */ #define CMD_ITT_MASK (0xFFFFFFFFFF00UL) /* Valid command bit */ #define CMD_VALID_SHIFT (63) #define CMD_VALID_MASK (1UL << CMD_VALID_SHIFT) #define ITS_TARGET_NONE 0xFBADBEEF /* LPI chunk owned by ITS device */ struct lpi_chunk { u_int lpi_base; u_int lpi_free; /* First free LPI in set */ u_int lpi_num; /* Total number of LPIs in chunk */ u_int lpi_busy; /* Number of busy LPIs in chink */ }; /* ITS device */ struct its_dev { TAILQ_ENTRY(its_dev) entry; /* PCI device */ device_t pci_dev; /* Device ID (i.e. PCI device ID) */ uint32_t devid; /* List of assigned LPIs */ struct lpi_chunk lpis; /* Virtual address of ITT */ void *itt; }; /* * ITS command descriptor. * Idea for command description passing taken from Linux. */ struct its_cmd_desc { uint8_t cmd_type; union { struct { struct its_dev *its_dev; struct its_col *col; uint32_t id; } cmd_desc_movi; struct { struct its_col *col; } cmd_desc_sync; struct { struct its_col *col; uint8_t valid; } cmd_desc_mapc; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; uint32_t id; } cmd_desc_mapvi; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; } cmd_desc_mapi; struct { struct its_dev *its_dev; uint8_t valid; } cmd_desc_mapd; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; } cmd_desc_inv; struct { struct its_col *col; } cmd_desc_invall; }; }; /* ITS command. 
Each command is 32 bytes long */ struct its_cmd { uint64_t cmd_dword[4]; /* ITS command double word */ }; /* An ITS private table */ struct its_ptable { void *ptab_vaddr; /* Size of the L1 and L2 tables */ size_t ptab_l1_size; size_t ptab_l2_size; /* Number of L1 and L2 entries */ int ptab_l1_nidents; int ptab_l2_nidents; int ptab_page_size; int ptab_share; bool ptab_indirect; }; /* ITS collection description. */ struct its_col { uint64_t col_target; /* Target Re-Distributor */ uint64_t col_id; /* Collection ID */ }; struct gicv3_its_irqsrc { struct intr_irqsrc gi_isrc; u_int gi_id; u_int gi_lpi; struct its_dev *gi_its_dev; TAILQ_ENTRY(gicv3_its_irqsrc) gi_link; }; struct gicv3_its_softc { device_t dev; struct intr_pic *sc_pic; struct resource *sc_its_res; cpuset_t sc_cpus; struct domainset *sc_ds; u_int gic_irq_cpu; int sc_devbits; int sc_dev_table_idx; struct its_ptable sc_its_ptab[GITS_BASER_NUM]; struct its_col *sc_its_cols[MAXCPU]; /* Per-CPU collections */ /* * TODO: We should get these from the parent as we only want a * single copy of each across the interrupt controller. */ uint8_t *sc_conf_base; void *sc_pend_base[MAXCPU]; /* Command handling */ struct mtx sc_its_cmd_lock; struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */ size_t sc_its_cmd_next_idx; vmem_t *sc_irq_alloc; struct gicv3_its_irqsrc **sc_irqs; u_int sc_irq_base; u_int sc_irq_length; u_int sc_irq_count; struct mtx sc_its_dev_lock; TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list; TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs; #define ITS_FLAGS_CMDQ_FLUSH 0x00000001 #define ITS_FLAGS_LPI_CONF_FLUSH 0x00000002 #define ITS_FLAGS_ERRATA_CAVIUM_22375 0x00000004 #define ITS_FLAGS_LPI_PREALLOC 0x00000008 u_int sc_its_flags; bool trace_enable; vm_page_t ma; /* fake msi page */ }; typedef void (its_quirk_func_t)(device_t); static its_quirk_func_t its_quirk_cavium_22375; static const struct { const char *desc; uint32_t iidr; uint32_t iidr_mask; its_quirk_func_t *func; } its_quirks[] = { { /* Cavium ThunderX Pass 1.x */ .desc = "Cavium ThunderX errata: 22375, 24313", .iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM, GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0), .iidr_mask = ~GITS_IIDR_REVISION_MASK, .func = its_quirk_cavium_22375, }, }; #define gic_its_read_4(sc, reg) \ bus_read_4((sc)->sc_its_res, (reg)) #define gic_its_read_8(sc, reg) \ bus_read_8((sc)->sc_its_res, (reg)) #define gic_its_write_4(sc, reg, val) \ bus_write_4((sc)->sc_its_res, (reg), (val)) #define gic_its_write_8(sc, reg, val) \ bus_write_8((sc)->sc_its_res, (reg), (val)) static device_attach_t gicv3_its_attach; static device_detach_t gicv3_its_detach; static pic_disable_intr_t gicv3_its_disable_intr; static pic_enable_intr_t gicv3_its_enable_intr; static pic_map_intr_t gicv3_its_map_intr; static pic_setup_intr_t gicv3_its_setup_intr; static pic_post_filter_t gicv3_its_post_filter; static pic_post_ithread_t gicv3_its_post_ithread; static pic_pre_ithread_t gicv3_its_pre_ithread; static pic_bind_intr_t gicv3_its_bind_intr; #ifdef SMP static pic_init_secondary_t gicv3_its_init_secondary; #endif static msi_alloc_msi_t gicv3_its_alloc_msi; static msi_release_msi_t gicv3_its_release_msi; static msi_alloc_msix_t gicv3_its_alloc_msix; static msi_release_msix_t gicv3_its_release_msix; static msi_map_msi_t gicv3_its_map_msi; #ifdef IOMMU static msi_iommu_init_t gicv3_iommu_init; static msi_iommu_deinit_t gicv3_iommu_deinit; #endif static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *); static void its_cmd_mapc(device_t, struct its_col 
*, uint8_t); static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *); static void its_cmd_mapd(device_t, struct its_dev *, uint8_t); static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *); static void its_cmd_invall(device_t, struct its_col *); static device_method_t gicv3_its_methods[] = { /* Device interface */ DEVMETHOD(device_detach, gicv3_its_detach), /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, gicv3_its_disable_intr), DEVMETHOD(pic_enable_intr, gicv3_its_enable_intr), DEVMETHOD(pic_map_intr, gicv3_its_map_intr), DEVMETHOD(pic_setup_intr, gicv3_its_setup_intr), DEVMETHOD(pic_post_filter, gicv3_its_post_filter), DEVMETHOD(pic_post_ithread, gicv3_its_post_ithread), DEVMETHOD(pic_pre_ithread, gicv3_its_pre_ithread), #ifdef SMP DEVMETHOD(pic_bind_intr, gicv3_its_bind_intr), DEVMETHOD(pic_init_secondary, gicv3_its_init_secondary), #endif /* MSI/MSI-X */ DEVMETHOD(msi_alloc_msi, gicv3_its_alloc_msi), DEVMETHOD(msi_release_msi, gicv3_its_release_msi), DEVMETHOD(msi_alloc_msix, gicv3_its_alloc_msix), DEVMETHOD(msi_release_msix, gicv3_its_release_msix), DEVMETHOD(msi_map_msi, gicv3_its_map_msi), #ifdef IOMMU DEVMETHOD(msi_iommu_init, gicv3_iommu_init), DEVMETHOD(msi_iommu_deinit, gicv3_iommu_deinit), #endif /* End */ DEVMETHOD_END }; static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods, sizeof(struct gicv3_its_softc)); static void gicv3_its_cmdq_init(struct gicv3_its_softc *sc) { vm_paddr_t cmd_paddr; uint64_t reg, tmp; /* Set up the command circular buffer */ sc->sc_its_cmd_base = contigmalloc_domainset(ITS_CMDQ_SIZE, M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0); sc->sc_its_cmd_next_idx = 0; cmd_paddr = vtophys(sc->sc_its_cmd_base); /* Set the base of the command buffer */ reg = GITS_CBASER_VALID | (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) | cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) | (ITS_CMDQ_SIZE / 4096 - 1); gic_its_write_8(sc, GITS_CBASER, reg); /* Read back to check for fixed value fields */ tmp = gic_its_read_8(sc, GITS_CBASER); if ((tmp & GITS_CBASER_SHARE_MASK) != (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) { /* Check if the hardware reported non-shareable */ if ((tmp & GITS_CBASER_SHARE_MASK) == (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) { /* If so remove the cache attribute */ reg &= ~GITS_CBASER_CACHE_MASK; reg &= ~GITS_CBASER_SHARE_MASK; /* Set to Non-cacheable, Non-shareable */ reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT; reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT; gic_its_write_8(sc, GITS_CBASER, reg); } /* The command queue has to be flushed after each command */ sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH; } /* Get the next command from the start of the buffer */ gic_its_write_8(sc, GITS_CWRITER, 0x0); } static int gicv3_its_table_page_size(struct gicv3_its_softc *sc, int table) { uint64_t reg, tmp; int page_size; page_size = PAGE_SIZE_64K; reg = gic_its_read_8(sc, GITS_BASER(table)); while (1) { reg &= GITS_BASER_PSZ_MASK; switch (page_size) { case PAGE_SIZE_4K: /* 4KB */ reg |= GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT; break; case PAGE_SIZE_16K: /* 16KB */ reg |= GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT; break; case PAGE_SIZE_64K: /* 64KB */ reg |= GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT; break; } /* Write the new page size */ gic_its_write_8(sc, GITS_BASER(table), reg); /* Read back to check */ tmp = gic_its_read_8(sc, GITS_BASER(table)); /* The page size is correct */ if ((tmp & GITS_BASER_PSZ_MASK) == 
(reg & GITS_BASER_PSZ_MASK)) return (page_size); switch (page_size) { default: return (-1); case PAGE_SIZE_16K: page_size = PAGE_SIZE_4K; break; case PAGE_SIZE_64K: page_size = PAGE_SIZE_16K; break; } } } static bool gicv3_its_table_supports_indirect(struct gicv3_its_softc *sc, int table) { uint64_t reg; reg = gic_its_read_8(sc, GITS_BASER(table)); /* Try setting the indirect flag */ reg |= GITS_BASER_INDIRECT; gic_its_write_8(sc, GITS_BASER(table), reg); /* Read back to check */ reg = gic_its_read_8(sc, GITS_BASER(table)); return ((reg & GITS_BASER_INDIRECT) != 0); } static int gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc) { void *table; vm_paddr_t paddr; uint64_t cache, reg, share, tmp, type; size_t its_tbl_size, nitspages, npages; size_t l1_esize, l2_esize, l1_nidents, l2_nidents; int i, page_size; int devbits; bool indirect; if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) { /* * GITS_TYPER[17:13] of ThunderX reports that device IDs * are to be 21 bits in length. The entry size of the ITS * table can be read from GITS_BASERn[52:48] and on ThunderX * is supposed to be 8 bytes in length (for device table). * Finally the page size that is to be used by ITS to access * this table will be set to 64KB. * * This gives 0x200000 entries of size 0x8 bytes covered by * 256 pages, each of which is 64KB in size. The number of pages * (minus 1) should then be written to GITS_BASERn[7:0]. In * that case this value would be 0xFF but on ThunderX the * maximum value that HW accepts is 0xFD. * * Arbitrarily set the number of device ID bits to 20 in order * to limit the number of entries in the ITS device table to * 0x100000 and the table size to 8MB. */ devbits = 20; cache = 0; } else { devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER)); cache = GITS_BASER_CACHE_WAWB; } sc->sc_devbits = devbits; share = GITS_BASER_SHARE_IS; for (i = 0; i < GITS_BASER_NUM; i++) { reg = gic_its_read_8(sc, GITS_BASER(i)); /* The type of table */ type = GITS_BASER_TYPE(reg); if (type == GITS_BASER_TYPE_UNIMPL) continue; /* The table entry size */ l1_esize = GITS_BASER_ESIZE(reg); /* Find the table's page size */ page_size = gicv3_its_table_page_size(sc, i); if (page_size == -1) { device_printf(dev, "No valid page size for table %d\n", i); return (EINVAL); } indirect = false; l2_nidents = 0; l2_esize = 0; switch (type) { case GITS_BASER_TYPE_DEV: if (sc->sc_dev_table_idx != -1) device_printf(dev, "Warning: Multiple device tables found\n"); sc->sc_dev_table_idx = i; l1_nidents = (1 << devbits); if ((l1_esize * l1_nidents) > (page_size * 2)) { indirect = gicv3_its_table_supports_indirect(sc, i); if (indirect) { /* * Each l1 entry is 8 bytes and points * to an l2 table of size page_size. * Calculate how many entries this is * and use this to find how many * 8 byte l1 idents we need. */ l2_esize = l1_esize; l2_nidents = page_size / l2_esize; l1_nidents = l1_nidents / l2_nidents; l1_esize = GITS_INDIRECT_L1_ESIZE; } } its_tbl_size = l1_esize * l1_nidents; its_tbl_size = roundup2(its_tbl_size, page_size); break; case GITS_BASER_TYPE_PP: /* Undocumented? */ case GITS_BASER_TYPE_IC: its_tbl_size = page_size; break; case GITS_BASER_TYPE_VP: /* * If GITS_TYPER.SVPET != 0, the pending table is * shared amongst the redistributors and the other * ITSes. Requiring sharing across the ITSes when none * of the redistributors have GICR_VPROPBASER.Valid==1 * isn't specified in the architecture, but that's how * the GIC-700 behaves. We don't handle vPE tables at * all yet, so just skip this base register.
*/ default: if (bootverbose) device_printf(dev, "Unhandled table type %lx\n", type); continue; } npages = howmany(its_tbl_size, PAGE_SIZE); /* Allocate the table */ table = contigmalloc_domainset(npages * PAGE_SIZE, M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, PAGE_SIZE_64K, 0); sc->sc_its_ptab[i].ptab_vaddr = table; sc->sc_its_ptab[i].ptab_l1_size = its_tbl_size; sc->sc_its_ptab[i].ptab_l1_nidents = l1_nidents; sc->sc_its_ptab[i].ptab_l2_size = page_size; sc->sc_its_ptab[i].ptab_l2_nidents = l2_nidents; sc->sc_its_ptab[i].ptab_indirect = indirect; sc->sc_its_ptab[i].ptab_page_size = page_size; paddr = vtophys(table); while (1) { nitspages = howmany(its_tbl_size, page_size); /* Clear the fields we will be setting */ reg &= ~(GITS_BASER_VALID | GITS_BASER_INDIRECT | GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK | GITS_BASER_PA_MASK | GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK | GITS_BASER_SIZE_MASK); /* Set the new values */ reg |= GITS_BASER_VALID | (indirect ? GITS_BASER_INDIRECT : 0) | (cache << GITS_BASER_CACHE_SHIFT) | (type << GITS_BASER_TYPE_SHIFT) | paddr | (share << GITS_BASER_SHARE_SHIFT) | (nitspages - 1); switch (page_size) { case PAGE_SIZE_4K: /* 4KB */ reg |= GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT; break; case PAGE_SIZE_16K: /* 16KB */ reg |= GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT; break; case PAGE_SIZE_64K: /* 64KB */ reg |= GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT; break; } gic_its_write_8(sc, GITS_BASER(i), reg); /* Read back to check */ tmp = gic_its_read_8(sc, GITS_BASER(i)); /* Do the shareability masks line up? */ if ((tmp & GITS_BASER_SHARE_MASK) != (reg & GITS_BASER_SHARE_MASK)) { share = (tmp & GITS_BASER_SHARE_MASK) >> GITS_BASER_SHARE_SHIFT; continue; } if (tmp != reg) { device_printf(dev, "GITS_BASER%d: " "unable to be updated: %lx != %lx\n", i, reg, tmp); return (ENXIO); } sc->sc_its_ptab[i].ptab_share = share; /* We should have made all needed changes */ break; } } return (0); } static void gicv3_its_conftable_init(struct gicv3_its_softc *sc) { /* note: we assume the ITS children are serialized by the parent */ static void *conf_table; int extra_flags = 0; device_t gicv3; uint32_t ctlr; vm_paddr_t conf_pa; vm_offset_t conf_va; /* * The PROPBASER is a singleton in our parent. We only set it up the * first time through. conf_table is effectively global to all the units * and we rely on subr_bus to serialize probe/attach. */ if (conf_table != NULL) { sc->sc_conf_base = conf_table; return; } gicv3 = device_get_parent(sc->dev); ctlr = gic_r_read_4(gicv3, GICR_CTLR); if ((ctlr & GICR_CTLR_LPI_ENABLE) != 0) { conf_pa = gic_r_read_8(gicv3, GICR_PROPBASER); conf_pa &= GICR_PROPBASER_PA_MASK; /* * If there was a pre-existing PROPBASER, then we need to honor * it because implementation-defined behavior in gicv3 makes it * impossible to quiesce to change it out. We will only see a * pre-existing one when we've been kexec'd from a Linux kernel, * or from a LinuxBoot environment. * * Linux provides us with a MEMRESERVE table that we put into * the excluded physmem area. If PROPBASER isn't in this table, * the system cannot run due to random memory corruption, * so we panic for this case.
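* (That reservation is what the physmem_excluded() check just below * verifies: the pre-existing table may only be reused if its physical * range was kept out of the memory the VM system manages.)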
*/ if (!physmem_excluded(conf_pa, LPI_CONFTAB_SIZE)) panic("gicv3 PROPBASER needs to reuse %#lx, but not reserved", conf_pa); conf_va = PHYS_TO_DMAP(conf_pa); if (!pmap_klookup(conf_va, NULL)) panic("Cannot map prior LPI mapping into KVA"); conf_table = (void *)conf_va; extra_flags = ITS_FLAGS_LPI_PREALLOC | ITS_FLAGS_LPI_CONF_FLUSH; if (bootverbose) device_printf(sc->dev, "LPI enabled, conf table using pa %#lx va %lx\n", conf_pa, conf_va); } else { /* * Otherwise just allocate contiguous pages. We'll configure the * PROPBASER register later in its_init_cpu_lpi(). */ conf_table = contigmalloc(LPI_CONFTAB_SIZE, M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR, LPI_CONFTAB_ALIGN, 0); } sc->sc_conf_base = conf_table; sc->sc_its_flags |= extra_flags; /* Set the default configuration */ memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1, LPI_CONFTAB_SIZE); /* Flush the table to memory */ cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE); } static void gicv3_its_pendtables_init(struct gicv3_its_softc *sc) { if ((sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) == 0) { for (int i = 0; i <= mp_maxid; i++) { if (CPU_ISSET(i, &sc->sc_cpus) == 0) continue; sc->sc_pend_base[i] = contigmalloc( LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO, 0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0); /* Flush so the ITS can see the memory */ cpu_dcache_wb_range(sc->sc_pend_base[i], LPI_PENDTAB_SIZE); } } } static void its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc) { device_t gicv3; uint64_t xbaser, tmp, size; uint32_t ctlr; u_int cpuid; gicv3 = device_get_parent(dev); cpuid = PCPU_GET(cpuid); /* * Set the redistributor base. If we're reusing what we found on boot * since the gic was already running, then don't touch it here. We also * don't need to disable / enable LPI if we're not changing PROPBASER, * so only do that if we're not prealloced. 
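* The non-preallocated path below therefore disables LPIs, programs * GICR_PROPBASER and GICR_PENDBASER (downgrading to non-cacheable, * non-shareable attributes if the redistributor rejects shareable * ones), and then re-enables LPIs.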
*/ if ((sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) == 0) { /* Disable LPIs */ ctlr = gic_r_read_4(gicv3, GICR_CTLR); ctlr &= ~GICR_CTLR_LPI_ENABLE; gic_r_write_4(gicv3, GICR_CTLR, ctlr); /* Make sure changes are observable by the GIC */ dsb(sy); size = (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1); xbaser = vtophys(sc->sc_conf_base) | (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) | (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) | size; gic_r_write_8(gicv3, GICR_PROPBASER, xbaser); /* Check the cache attributes we set */ tmp = gic_r_read_8(gicv3, GICR_PROPBASER); if ((tmp & GICR_PROPBASER_SHARE_MASK) != (xbaser & GICR_PROPBASER_SHARE_MASK)) { if ((tmp & GICR_PROPBASER_SHARE_MASK) == (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) { /* We need to mark as non-cacheable */ xbaser &= ~(GICR_PROPBASER_SHARE_MASK | GICR_PROPBASER_CACHE_MASK); /* Non-cacheable */ xbaser |= GICR_PROPBASER_CACHE_NIN << GICR_PROPBASER_CACHE_SHIFT; /* Non-shareable */ xbaser |= GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT; gic_r_write_8(gicv3, GICR_PROPBASER, xbaser); } sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH; } /* * Set the LPI pending table base */ xbaser = vtophys(sc->sc_pend_base[cpuid]) | (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) | (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT); gic_r_write_8(gicv3, GICR_PENDBASER, xbaser); tmp = gic_r_read_8(gicv3, GICR_PENDBASER); if ((tmp & GICR_PENDBASER_SHARE_MASK) == (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) { /* Clear the cache and shareability bits */ xbaser &= ~(GICR_PENDBASER_CACHE_MASK | GICR_PENDBASER_SHARE_MASK); /* Mark as non-shareable */ xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT; /* And non-cacheable */ xbaser |= GICR_PENDBASER_CACHE_NIN << GICR_PENDBASER_CACHE_SHIFT; } /* Enable LPIs */ ctlr = gic_r_read_4(gicv3, GICR_CTLR); ctlr |= GICR_CTLR_LPI_ENABLE; gic_r_write_4(gicv3, GICR_CTLR, ctlr); /* Make sure the GIC has seen everything */ dsb(sy); } else { KASSERT(sc->sc_pend_base[cpuid] == NULL, ("PREALLOC too soon cpuid %d", cpuid)); tmp = gic_r_read_8(gicv3, GICR_PENDBASER); tmp &= GICR_PENDBASER_PA_MASK; if (!physmem_excluded(tmp, LPI_PENDTAB_SIZE)) panic("gicv3 PENDBASER on cpu %d needs to reuse %#lx, but not reserved", cpuid, tmp); sc->sc_pend_base[cpuid] = (void *)PHYS_TO_DMAP(tmp); } if (bootverbose) device_printf(gicv3, "using %sPENDBASE of %#lx on cpu %d\n", (sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) ?
"pre-existing " : "", vtophys(sc->sc_pend_base[cpuid]), cpuid); } static int its_init_cpu(device_t dev, struct gicv3_its_softc *sc) { device_t gicv3; vm_paddr_t target; u_int cpuid; struct redist_pcpu *rpcpu; gicv3 = device_get_parent(dev); cpuid = PCPU_GET(cpuid); if (!CPU_ISSET(cpuid, &sc->sc_cpus)) return (0); /* Check if the ITS is enabled on this CPU */ if ((gic_r_read_8(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0) return (ENXIO); rpcpu = gicv3_get_redist(dev); /* Do per-cpu LPI init once */ if (!rpcpu->lpi_enabled) { its_init_cpu_lpi(dev, sc); rpcpu->lpi_enabled = true; } if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) { /* This ITS wants the redistributor physical address */ target = vtophys((vm_offset_t)rman_get_virtual(rpcpu->res) + rpcpu->offset); } else { /* This ITS wants the unique processor number */ target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) << CMD_TARGET_SHIFT; } sc->sc_its_cols[cpuid]->col_target = target; sc->sc_its_cols[cpuid]->col_id = cpuid; its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1); its_cmd_invall(dev, sc->sc_its_cols[cpuid]); return (0); } static int gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS) { struct gicv3_its_softc *sc; int rv; sc = arg1; rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req); if (rv != 0 || req->newptr == NULL) return (rv); if (sc->trace_enable) gic_its_write_8(sc, GITS_TRKCTLR, 3); else gic_its_write_8(sc, GITS_TRKCTLR, 0); return (0); } static int gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS) { struct gicv3_its_softc *sc; struct sbuf *sb; int err; sc = arg1; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) { device_printf(sc->dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } sbuf_cat(sb, "\n"); sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKCTLR)); sbuf_printf(sb, "GITS_TRKR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKR)); sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKDIDR)); sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKPIDR)); sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKVIDR)); sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n", gic_its_read_4(sc, GITS_TRKTGTR)); err = sbuf_finish(sb); if (err) device_printf(sc->dev, "Error finishing sbuf: %d\n", err); sbuf_delete(sb); return(err); } static int gicv3_its_init_sysctl(struct gicv3_its_softc *sc) { struct sysctl_oid *oid, *child; struct sysctl_ctx_list *ctx_list; ctx_list = device_get_sysctl_ctx(sc->dev); child = device_get_sysctl_tree(sc->dev); oid = SYSCTL_ADD_NODE(ctx_list, SYSCTL_CHILDREN(child), OID_AUTO, "tracing", CTLFLAG_RD| CTLFLAG_MPSAFE, NULL, "Messages tracing"); if (oid == NULL) return (ENXIO); /* Add registers */ SYSCTL_ADD_PROC(ctx_list, SYSCTL_CHILDREN(oid), OID_AUTO, "enable", CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, gicv3_its_sysctl_trace_enable, "CU", "Enable tracing"); SYSCTL_ADD_PROC(ctx_list, SYSCTL_CHILDREN(oid), OID_AUTO, "capture", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, gicv3_its_sysctl_trace_regs, "", "Captured tracing registers."); return (0); } static int gicv3_its_attach(device_t dev) { struct gicv3_its_softc *sc; int domain, err, i, rid; uint64_t phys; uint32_t ctlr, iidr; sc = device_get_softc(dev); sc->sc_dev_table_idx = -1; sc->sc_irq_length = gicv3_get_nirqs(dev); sc->sc_irq_base = GIC_FIRST_LPI; sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length; rid = 0; sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_its_res == NULL) { device_printf(dev, 
"Could not allocate memory\n"); return (ENXIO); } phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER, PAGE_SIZE); sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO); vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT); CPU_COPY(&all_cpus, &sc->sc_cpus); iidr = gic_its_read_4(sc, GITS_IIDR); for (i = 0; i < nitems(its_quirks); i++) { if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) { if (bootverbose) { device_printf(dev, "Applying %s\n", its_quirks[i].desc); } its_quirks[i].func(dev); break; } } if (bus_get_domain(dev, &domain) == 0 && domain < MAXMEMDOM) { sc->sc_ds = DOMAINSET_PREF(domain); } else { sc->sc_ds = DOMAINSET_RR(); } /* * GIT_CTLR_EN is mandated to reset to 0 on a Warm reset, but we may be * coming in via, for instance, a kexec/kboot style setup where a * previous kernel has configured then relinquished control. Clear it * so that we can reconfigure GITS_BASER*. */ ctlr = gic_its_read_4(sc, GITS_CTLR); if ((ctlr & GITS_CTLR_EN) != 0) { ctlr &= ~GITS_CTLR_EN; gic_its_write_4(sc, GITS_CTLR, ctlr); } /* Allocate the private tables */ err = gicv3_its_table_init(dev, sc); if (err != 0) return (err); /* Protects access to the device list */ mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN); /* Protects access to the ITS command circular buffer. */ mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN); /* Allocate the command circular buffer */ gicv3_its_cmdq_init(sc); /* Allocate the per-CPU collections */ for (int cpu = 0; cpu <= mp_maxid; cpu++) if (CPU_ISSET(cpu, &sc->sc_cpus) != 0) sc->sc_its_cols[cpu] = malloc_domainset( sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS, DOMAINSET_PREF(pcpu_find(cpu)->pc_domain), M_WAITOK | M_ZERO); else sc->sc_its_cols[cpu] = NULL; /* Enable the ITS */ gic_its_write_4(sc, GITS_CTLR, ctlr | GITS_CTLR_EN); /* Create the LPI configuration table */ gicv3_its_conftable_init(sc); /* And the pending tebles */ gicv3_its_pendtables_init(sc); /* Enable LPIs on this CPU */ its_init_cpu(dev, sc); TAILQ_INIT(&sc->sc_its_dev_list); TAILQ_INIT(&sc->sc_free_irqs); /* * Create the vmem object to allocate INTRNG IRQs from. We try to * use all IRQs not already used by the GICv3. * XXX: This assumes there are no other interrupt controllers in the * system. */ sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0, gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK); sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length, M_GICV3_ITS, M_WAITOK | M_ZERO); /* For GIC-500 install tracking sysctls. */ if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) == GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0)) gicv3_its_init_sysctl(sc); return (0); } static int gicv3_its_detach(device_t dev) { return (ENXIO); } static void its_quirk_cavium_22375(device_t dev) { struct gicv3_its_softc *sc; int domain; sc = device_get_softc(dev); sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375; /* * We need to limit which CPUs we send these interrupts to on * the original dual socket ThunderX as it is unable to * forward them between the two sockets. 
*/ if (bus_get_domain(dev, &domain) == 0) { if (domain < MAXMEMDOM) { CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus); } else { CPU_ZERO(&sc->sc_cpus); } } } static void gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; uint8_t *conf; sc = device_get_softc(dev); girq = (struct gicv3_its_irqsrc *)isrc; conf = sc->sc_conf_base; conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE; if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { /* Clean D-cache under command. */ cpu_dcache_wb_range(&conf[girq->gi_lpi], 1); } else { /* DSB inner shareable, store */ dsb(ishst); } its_cmd_inv(dev, girq->gi_its_dev, girq); } static void gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; uint8_t *conf; sc = device_get_softc(dev); girq = (struct gicv3_its_irqsrc *)isrc; conf = sc->sc_conf_base; conf[girq->gi_lpi] |= LPI_CONF_ENABLE; if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { /* Clean D-cache under command. */ cpu_dcache_wb_range(&conf[girq->gi_lpi], 1); } else { /* DSB inner shareable, store */ dsb(ishst); } its_cmd_inv(dev, girq->gi_its_dev, girq); } static int gicv3_its_intr(void *arg, uintptr_t irq) { struct gicv3_its_softc *sc = arg; struct gicv3_its_irqsrc *girq; struct trapframe *tf; irq -= sc->sc_irq_base; girq = sc->sc_irqs[irq]; if (girq == NULL) panic("gicv3_its_intr: Invalid interrupt %ld", irq + sc->sc_irq_base); tf = curthread->td_intr_frame; intr_isrc_dispatch(&girq->gi_isrc, tf); return (FILTER_HANDLED); } static void gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_irqsrc *girq; girq = (struct gicv3_its_irqsrc *)isrc; gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI); } static void gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc) { } static void gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_irqsrc *girq; girq = (struct gicv3_its_irqsrc *)isrc; gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI); } static int gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_softc *sc; sc = device_get_softc(dev); if (CPU_EMPTY(&isrc->isrc_cpu)) { sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu, &sc->sc_cpus); CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu); } return (0); } static int gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc) { struct gicv3_its_irqsrc *girq; gicv3_its_select_cpu(dev, isrc); girq = (struct gicv3_its_irqsrc *)isrc; its_cmd_movi(dev, girq); return (0); } static int gicv3_its_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { /* * This should never happen, we only call this function to map * interrupts found before the controller driver is ready. */ panic("gicv3_its_map_intr: Unable to map a MSI interrupt"); } static int gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { /* Bind the interrupt to a CPU */ gicv3_its_bind_intr(dev, isrc); return (0); } #ifdef SMP static void -gicv3_its_init_secondary(device_t dev, enum root_type root_type) +gicv3_its_init_secondary(device_t dev, uint32_t rootnum) { struct gicv3_its_softc *sc; sc = device_get_softc(dev); /* * This is fatal as otherwise we may bind interrupts to this CPU. * We need a way to tell the interrupt framework to only bind to a * subset of given CPUs when it performs the shuffle. 
*/ if (its_init_cpu(dev, sc) != 0) panic("gicv3_its_init_secondary: No usable ITS on CPU%d", PCPU_GET(cpuid)); } #endif static uint32_t its_get_devid(device_t pci_dev) { uintptr_t id; if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0) panic("%s: %s: Unable to get the MSI DeviceID", __func__, device_get_nameunit(pci_dev)); return (id); } static struct its_dev * its_device_find(device_t dev, device_t child) { struct gicv3_its_softc *sc; struct its_dev *its_dev = NULL; sc = device_get_softc(dev); mtx_lock_spin(&sc->sc_its_dev_lock); TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) { if (its_dev->pci_dev == child) break; } mtx_unlock_spin(&sc->sc_its_dev_lock); return (its_dev); } static bool its_device_alloc(struct gicv3_its_softc *sc, int devid) { struct its_ptable *ptable; void *l2_table; uint64_t *table; uint32_t index; bool shareable; /* No device table */ if (sc->sc_dev_table_idx < 0) { if (devid >= (1 << sc->sc_devbits)) { if (bootverbose) { device_printf(sc->dev, "%s: Device out of range for hardware " "(%x >= %x)\n", __func__, devid, 1 << sc->sc_devbits); } return (false); } return (true); } ptable = &sc->sc_its_ptab[sc->sc_dev_table_idx]; /* Check the devid is within the table limit */ if (!ptable->ptab_indirect) { if (devid >= ptable->ptab_l1_nidents) { if (bootverbose) { device_printf(sc->dev, "%s: Device out of range for table " "(%x >= %x)\n", __func__, devid, ptable->ptab_l1_nidents); } return (false); } return (true); } /* Check the devid is within the allocated range */ index = devid / ptable->ptab_l2_nidents; if (index >= ptable->ptab_l1_nidents) { if (bootverbose) { device_printf(sc->dev, "%s: Index out of range for table (%x >= %x)\n", __func__, index, ptable->ptab_l1_nidents); } return (false); } table = (uint64_t *)ptable->ptab_vaddr; /* We already have a second-level table */ if ((table[index] & GITS_BASER_VALID) != 0) return (true); shareable = true; if ((ptable->ptab_share & GITS_BASER_SHARE_MASK) == GITS_BASER_SHARE_NS) shareable = false; l2_table = contigmalloc_domainset(ptable->ptab_l2_size, M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ptable->ptab_page_size, 0); if (!shareable) cpu_dcache_wb_range(l2_table, ptable->ptab_l2_size); table[index] = vtophys(l2_table) | GITS_BASER_VALID; if (!shareable) cpu_dcache_wb_range(&table[index], sizeof(table[index])); dsb(sy); return (true); } static struct its_dev * its_device_get(device_t dev, device_t child, u_int nvecs) { struct gicv3_its_softc *sc; struct its_dev *its_dev; vmem_addr_t irq_base; size_t esize, itt_size; sc = device_get_softc(dev); its_dev = its_device_find(dev, child); if (its_dev != NULL) return (its_dev); its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO); if (its_dev == NULL) return (NULL); its_dev->pci_dev = child; its_dev->devid = its_get_devid(child); its_dev->lpis.lpi_busy = 0; its_dev->lpis.lpi_num = nvecs; its_dev->lpis.lpi_free = nvecs; if (!its_device_alloc(sc, its_dev->devid)) { free(its_dev, M_GICV3_ITS); return (NULL); } if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT, &irq_base) != 0) { free(its_dev, M_GICV3_ITS); return (NULL); } its_dev->lpis.lpi_base = irq_base; /* Get ITT entry size */ esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER)); /* * Allocate ITT for this device. * PA has to be 256 B aligned. At least two entries for device.
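* For example, assuming the common 8 byte ITT entry size, a * single-vector device still gets MAX(1, 2) * 8 = 16 bytes rounded up * to a 256 byte ITT, while a 32-vector MSI-X device needs * roundup2(32 * 8, 256) = 256 bytes as well. */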
*/ itt_size = roundup2(MAX(nvecs, 2) * esize, 256); its_dev->itt = contigmalloc_domainset(itt_size, M_GICV3_ITS, sc->sc_ds, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR, LPI_INT_TRANS_TAB_ALIGN, 0); if (its_dev->itt == NULL) { vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs); free(its_dev, M_GICV3_ITS); return (NULL); } /* Make sure device sees zeroed ITT. */ if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) cpu_dcache_wb_range(its_dev->itt, itt_size); mtx_lock_spin(&sc->sc_its_dev_lock); TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry); mtx_unlock_spin(&sc->sc_its_dev_lock); /* Map device to its ITT */ its_cmd_mapd(dev, its_dev, 1); return (its_dev); } static void its_device_release(device_t dev, struct its_dev *its_dev) { struct gicv3_its_softc *sc; KASSERT(its_dev->lpis.lpi_busy == 0, ("its_device_release: Trying to release an inuse ITS device")); /* Unmap device in ITS */ its_cmd_mapd(dev, its_dev, 0); sc = device_get_softc(dev); /* Remove the device from the list of devices */ mtx_lock_spin(&sc->sc_its_dev_lock); TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry); mtx_unlock_spin(&sc->sc_its_dev_lock); /* Free ITT */ KASSERT(its_dev->itt != NULL, ("Invalid ITT in valid ITS device")); free(its_dev->itt, M_GICV3_ITS); /* Free the IRQ allocation */ vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, its_dev->lpis.lpi_num); free(its_dev, M_GICV3_ITS); } static struct gicv3_its_irqsrc * gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq) { struct gicv3_its_irqsrc *girq = NULL; KASSERT(sc->sc_irqs[irq] == NULL, ("%s: Interrupt %u already allocated", __func__, irq)); mtx_lock_spin(&sc->sc_its_dev_lock); if (!TAILQ_EMPTY(&sc->sc_free_irqs)) { girq = TAILQ_FIRST(&sc->sc_free_irqs); TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link); } mtx_unlock_spin(&sc->sc_its_dev_lock); if (girq == NULL) { girq = malloc(sizeof(*girq), M_GICV3_ITS, M_NOWAIT | M_ZERO); if (girq == NULL) return (NULL); girq->gi_id = -1; if (intr_isrc_register(&girq->gi_isrc, dev, 0, "%s,%u", device_get_nameunit(dev), irq) != 0) { free(girq, M_GICV3_ITS); return (NULL); } } girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI; sc->sc_irqs[irq] = girq; return (girq); } static void gicv3_its_release_irqsrc(struct gicv3_its_softc *sc, struct gicv3_its_irqsrc *girq) { u_int irq; mtx_assert(&sc->sc_its_dev_lock, MA_OWNED); irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base; sc->sc_irqs[irq] = NULL; girq->gi_id = -1; girq->gi_its_dev = NULL; TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link); } static int gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount, device_t *pic, struct intr_irqsrc **srcs) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; struct its_dev *its_dev; u_int irq; int i; its_dev = its_device_get(dev, child, count); if (its_dev == NULL) return (ENXIO); KASSERT(its_dev->lpis.lpi_free >= count, ("gicv3_its_alloc_msi: No free LPIs")); sc = device_get_softc(dev); irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num - its_dev->lpis.lpi_free; /* Allocate the irqsrc for each MSI */ for (i = 0; i < count; i++, irq++) { its_dev->lpis.lpi_free--; srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev, sc, irq); if (srcs[i] == NULL) break; } /* The allocation failed, release them */ if (i != count) { mtx_lock_spin(&sc->sc_its_dev_lock); for (i = 0; i < count; i++) { girq = (struct gicv3_its_irqsrc *)srcs[i]; if (girq == NULL) break; gicv3_its_release_irqsrc(sc, girq); srcs[i] = NULL; } mtx_unlock_spin(&sc->sc_its_dev_lock); return (ENXIO); } /* 
Finish the allocation now that we have all MSI irqsrcs */ for (i = 0; i < count; i++) { girq = (struct gicv3_its_irqsrc *)srcs[i]; girq->gi_id = i; girq->gi_its_dev = its_dev; /* Map the message to the given IRQ */ gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq); its_cmd_mapti(dev, girq); } its_dev->lpis.lpi_busy += count; *pic = dev; return (0); } static int gicv3_its_release_msi(device_t dev, device_t child, int count, struct intr_irqsrc **isrc) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; struct its_dev *its_dev; int i; its_dev = its_device_find(dev, child); KASSERT(its_dev != NULL, ("gicv3_its_release_msi: Releasing an MSI interrupt with " "no ITS device")); KASSERT(its_dev->lpis.lpi_busy >= count, ("gicv3_its_release_msi: Releasing more interrupts than " "were allocated: releasing %d, allocated %d", count, its_dev->lpis.lpi_busy)); sc = device_get_softc(dev); mtx_lock_spin(&sc->sc_its_dev_lock); for (i = 0; i < count; i++) { girq = (struct gicv3_its_irqsrc *)isrc[i]; gicv3_its_release_irqsrc(sc, girq); } mtx_unlock_spin(&sc->sc_its_dev_lock); its_dev->lpis.lpi_busy -= count; if (its_dev->lpis.lpi_busy == 0) its_device_release(dev, its_dev); return (0); } static int gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic, struct intr_irqsrc **isrcp) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; struct its_dev *its_dev; u_int nvecs, irq; nvecs = pci_msix_count(child); its_dev = its_device_get(dev, child, nvecs); if (its_dev == NULL) return (ENXIO); KASSERT(its_dev->lpis.lpi_free > 0, ("gicv3_its_alloc_msix: No free LPIs")); sc = device_get_softc(dev); irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num - its_dev->lpis.lpi_free; girq = gicv3_its_alloc_irqsrc(dev, sc, irq); if (girq == NULL) return (ENXIO); girq->gi_id = its_dev->lpis.lpi_busy; girq->gi_its_dev = its_dev; its_dev->lpis.lpi_free--; its_dev->lpis.lpi_busy++; /* Map the message to the given IRQ */ gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq); its_cmd_mapti(dev, girq); *pic = dev; *isrcp = (struct intr_irqsrc *)girq; return (0); } static int gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; struct its_dev *its_dev; its_dev = its_device_find(dev, child); KASSERT(its_dev != NULL, ("gicv3_its_release_msix: Releasing an MSI-X interrupt with " "no ITS device")); KASSERT(its_dev->lpis.lpi_busy > 0, ("gicv3_its_release_msix: Releasing more interrupts than " "were allocated: allocated %d", its_dev->lpis.lpi_busy)); sc = device_get_softc(dev); girq = (struct gicv3_its_irqsrc *)isrc; mtx_lock_spin(&sc->sc_its_dev_lock); gicv3_its_release_irqsrc(sc, girq); mtx_unlock_spin(&sc->sc_its_dev_lock); its_dev->lpis.lpi_busy--; if (its_dev->lpis.lpi_busy == 0) its_device_release(dev, its_dev); return (0); } static int gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc, uint64_t *addr, uint32_t *data) { struct gicv3_its_softc *sc; struct gicv3_its_irqsrc *girq; sc = device_get_softc(dev); girq = (struct gicv3_its_irqsrc *)isrc; *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER; *data = girq->gi_id; return (0); } #ifdef IOMMU static int gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain) { struct gicv3_its_softc *sc; struct iommu_ctx *ctx; int error; sc = device_get_softc(dev); ctx = iommu_get_dev_ctx(child); if (ctx == NULL) return (ENXIO); /* Map the page containing the GITS_TRANSLATER register.
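* sc->ma is the fake page set up in gicv3_its_attach() at the physical * page containing GITS_TRANSLATER; mapping it into the child's IOMMU * context is what lets the device's MSI writes reach the ITS.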
*/ error = iommu_map_msi(ctx, PAGE_SIZE, 0, IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma); *domain = iommu_get_ctx_domain(ctx); return (error); } static void gicv3_iommu_deinit(device_t dev, device_t child) { struct iommu_ctx *ctx; ctx = iommu_get_dev_ctx(child); if (ctx == NULL) return; iommu_unmap_msi(ctx); } #endif /* * Commands handling. */ static __inline void cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type) { /* Command field: DW0 [7:0] */ cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK); cmd->cmd_dword[0] |= htole64(cmd_type); } static __inline void cmd_format_devid(struct its_cmd *cmd, uint32_t devid) { /* Device ID field: DW0 [63:32] */ cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK); cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT); } static __inline void cmd_format_size(struct its_cmd *cmd, uint16_t size) { /* Size field: DW1 [4:0] */ cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK); cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK)); } static __inline void cmd_format_id(struct its_cmd *cmd, uint32_t id) { /* ID field: DW1 [31:0] */ cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK); cmd->cmd_dword[1] |= htole64(id); } static __inline void cmd_format_pid(struct its_cmd *cmd, uint32_t pid) { /* Physical ID field: DW1 [63:32] */ cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK); cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT); } static __inline void cmd_format_col(struct its_cmd *cmd, uint16_t col_id) { /* Collection field: DW2 [16:0] */ cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK); cmd->cmd_dword[2] |= htole64(col_id); } static __inline void cmd_format_target(struct its_cmd *cmd, uint64_t target) { /* Target Address field: DW2 [47:16] */ cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK); cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK); } static __inline void cmd_format_itt(struct its_cmd *cmd, uint64_t itt) { /* ITT Address field: DW2 [47:8] */ cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK); cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK); } static __inline void cmd_format_valid(struct its_cmd *cmd, uint8_t valid) { /* Valid field: DW2 [63] */ cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK); cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT); } static inline bool its_cmd_queue_full(struct gicv3_its_softc *sc) { size_t read_idx, next_write_idx; /* Get the index of the next command */ next_write_idx = (sc->sc_its_cmd_next_idx + 1) % (ITS_CMDQ_SIZE / sizeof(struct its_cmd)); /* And the index of the current command being read */ read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd); /* * The queue is full when the write offset points * at the command before the current read offset. */ return (next_write_idx == read_idx); } static inline void its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd) { if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) { /* Clean D-cache under command. */ cpu_dcache_wb_range(cmd, sizeof(*cmd)); } else { /* DSB inner shareable, store */ dsb(ishst); } } static inline uint64_t its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd) { uint64_t off; off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd); return (off); } static void its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first, struct its_cmd *cmd_last) { struct gicv3_its_softc *sc; uint64_t first, last, read; size_t us_left; sc = device_get_softc(dev); /* * XXX ARM64TODO: This is obviously a significant delay. * The reason for that is that currently the time frames for * the command to complete are not known. 
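* Instead we poll GITS_CREADR below with a budget of 1000000 * iterations of DELAY(1), i.e. on the order of one second, before * giving up and reporting a timeout. */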
*/ us_left = 1000000; first = its_cmd_cwriter_offset(sc, cmd_first); last = its_cmd_cwriter_offset(sc, cmd_last); for (;;) { read = gic_its_read_8(sc, GITS_CREADR); if (first < last) { if (read < first || read >= last) break; } else if (read < first && read >= last) break; if (us_left-- == 0) { /* This means timeout */ device_printf(dev, "Timeout while waiting for CMD completion.\n"); return; } DELAY(1); } } static struct its_cmd * its_cmd_alloc_locked(device_t dev) { struct gicv3_its_softc *sc; struct its_cmd *cmd; size_t us_left; sc = device_get_softc(dev); /* * XXX ARM64TODO: This is obviously a significant delay. * The reason for that is that currently the time frames for * the command to complete (and therefore free the descriptor) * are not known. */ us_left = 1000000; mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED); while (its_cmd_queue_full(sc)) { if (us_left-- == 0) { /* Timeout while waiting for free command */ device_printf(dev, "Timeout while waiting for free command\n"); return (NULL); } DELAY(1); } cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx]; sc->sc_its_cmd_next_idx++; sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd); return (cmd); } static uint64_t its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc) { uint64_t target; uint8_t cmd_type; u_int size; cmd_type = desc->cmd_type; target = ITS_TARGET_NONE; switch (cmd_type) { case ITS_CMD_MOVI: /* Move interrupt ID to another collection */ target = desc->cmd_desc_movi.col->col_target; cmd_format_command(cmd, ITS_CMD_MOVI); cmd_format_id(cmd, desc->cmd_desc_movi.id); cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id); cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid); break; case ITS_CMD_SYNC: /* Wait for previous commands completion */ target = desc->cmd_desc_sync.col->col_target; cmd_format_command(cmd, ITS_CMD_SYNC); cmd_format_target(cmd, target); break; case ITS_CMD_MAPD: /* Assign ITT to device */ cmd_format_command(cmd, ITS_CMD_MAPD); cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt)); /* * Size describes number of bits to encode interrupt IDs * supported by the device minus one. * When V (valid) bit is zero, this field should be written * as zero. 
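* For example, lpi_num = 32 gives fls(32) = 6 and size = 5, i.e. the * device is mapped with 6 EventID bits (EventIDs 0-63). */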
*/ if (desc->cmd_desc_mapd.valid != 0) { size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num); size = MAX(1, size) - 1; } else size = 0; cmd_format_size(cmd, size); cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid); cmd_format_valid(cmd, desc->cmd_desc_mapd.valid); break; case ITS_CMD_MAPC: /* Map collection to Re-Distributor */ target = desc->cmd_desc_mapc.col->col_target; cmd_format_command(cmd, ITS_CMD_MAPC); cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id); cmd_format_valid(cmd, desc->cmd_desc_mapc.valid); cmd_format_target(cmd, target); break; case ITS_CMD_MAPTI: target = desc->cmd_desc_mapvi.col->col_target; cmd_format_command(cmd, ITS_CMD_MAPTI); cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid); cmd_format_id(cmd, desc->cmd_desc_mapvi.id); cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid); cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id); break; case ITS_CMD_MAPI: target = desc->cmd_desc_mapi.col->col_target; cmd_format_command(cmd, ITS_CMD_MAPI); cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid); cmd_format_id(cmd, desc->cmd_desc_mapi.pid); cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id); break; case ITS_CMD_INV: target = desc->cmd_desc_inv.col->col_target; cmd_format_command(cmd, ITS_CMD_INV); cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid); cmd_format_id(cmd, desc->cmd_desc_inv.pid); break; case ITS_CMD_INVALL: cmd_format_command(cmd, ITS_CMD_INVALL); cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id); break; default: panic("its_cmd_prepare: Invalid command: %x", cmd_type); } return (target); } static int its_cmd_send(device_t dev, struct its_cmd_desc *desc) { struct gicv3_its_softc *sc; struct its_cmd *cmd, *cmd_sync, *cmd_write; struct its_col col_sync; struct its_cmd_desc desc_sync; uint64_t target, cwriter; sc = device_get_softc(dev); mtx_lock_spin(&sc->sc_its_cmd_lock); cmd = its_cmd_alloc_locked(dev); if (cmd == NULL) { device_printf(dev, "could not allocate ITS command\n"); mtx_unlock_spin(&sc->sc_its_cmd_lock); return (EBUSY); } target = its_cmd_prepare(cmd, desc); its_cmd_sync(sc, cmd); if (target != ITS_TARGET_NONE) { cmd_sync = its_cmd_alloc_locked(dev); if (cmd_sync != NULL) { desc_sync.cmd_type = ITS_CMD_SYNC; col_sync.col_target = target; desc_sync.cmd_desc_sync.col = &col_sync; its_cmd_prepare(cmd_sync, &desc_sync); its_cmd_sync(sc, cmd_sync); } } /* Update GITS_CWRITER */ cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd); gic_its_write_8(sc, GITS_CWRITER, cwriter); cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx]; mtx_unlock_spin(&sc->sc_its_cmd_lock); its_cmd_wait_completion(dev, cmd, cmd_write); return (0); } /* Handlers to send commands */ static void its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq) { struct gicv3_its_softc *sc; struct its_cmd_desc desc; struct its_col *col; sc = device_get_softc(dev); col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1]; desc.cmd_type = ITS_CMD_MOVI; desc.cmd_desc_movi.its_dev = girq->gi_its_dev; desc.cmd_desc_movi.col = col; desc.cmd_desc_movi.id = girq->gi_id; its_cmd_send(dev, &desc); } static void its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid) { struct its_cmd_desc desc; desc.cmd_type = ITS_CMD_MAPC; desc.cmd_desc_mapc.col = col; /* * Valid bit set - map the collection. * Valid bit cleared - unmap the collection. 
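* (its_init_cpu() passes valid = 1 to map each CPU's collection to the * target it computed from GITS_TYPER.PTA.)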
*/ desc.cmd_desc_mapc.valid = valid; its_cmd_send(dev, &desc); } static void its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq) { struct gicv3_its_softc *sc; struct its_cmd_desc desc; struct its_col *col; u_int col_id; sc = device_get_softc(dev); col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1; col = sc->sc_its_cols[col_id]; desc.cmd_type = ITS_CMD_MAPTI; desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev; desc.cmd_desc_mapvi.col = col; /* The EventID sent to the device */ desc.cmd_desc_mapvi.id = girq->gi_id; /* The physical interrupt presented to software */ desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI; its_cmd_send(dev, &desc); } static void its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid) { struct its_cmd_desc desc; desc.cmd_type = ITS_CMD_MAPD; desc.cmd_desc_mapd.its_dev = its_dev; desc.cmd_desc_mapd.valid = valid; its_cmd_send(dev, &desc); } static void its_cmd_inv(device_t dev, struct its_dev *its_dev, struct gicv3_its_irqsrc *girq) { struct gicv3_its_softc *sc; struct its_cmd_desc desc; struct its_col *col; sc = device_get_softc(dev); col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1]; desc.cmd_type = ITS_CMD_INV; /* The EventID sent to the device */ desc.cmd_desc_inv.pid = girq->gi_id; desc.cmd_desc_inv.its_dev = its_dev; desc.cmd_desc_inv.col = col; its_cmd_send(dev, &desc); } static void its_cmd_invall(device_t dev, struct its_col *col) { struct its_cmd_desc desc; desc.cmd_type = ITS_CMD_INVALL; desc.cmd_desc_invall.col = col; its_cmd_send(dev, &desc); } #ifdef FDT static device_probe_t gicv3_its_fdt_probe; static device_attach_t gicv3_its_fdt_attach; static device_method_t gicv3_its_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gicv3_its_fdt_probe), DEVMETHOD(device_attach, gicv3_its_fdt_attach), /* End */ DEVMETHOD_END }; #define its_baseclasses its_fdt_baseclasses DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods, sizeof(struct gicv3_its_softc), gicv3_its_driver); #undef its_baseclasses EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); static int gicv3_its_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its")) return (ENXIO); if (!gicv3_get_support_lpis(dev)) return (ENXIO); device_set_desc(dev, "ARM GIC Interrupt Translation Service"); return (BUS_PROBE_DEFAULT); } static int gicv3_its_fdt_attach(device_t dev) { struct gicv3_its_softc *sc; phandle_t xref; int err; sc = device_get_softc(dev); sc->dev = dev; err = gicv3_its_attach(dev); if (err != 0) return (err); /* Register this device as an interrupt controller */ xref = OF_xref_from_node(ofw_bus_get_node(dev)); sc->sc_pic = intr_pic_register(dev, xref); err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic, gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length); if (err != 0) { device_printf(dev, "Failed to add PIC handler: %d\n", err); return (err); } /* Register this device to handle MSI interrupts */ err = intr_msi_register(dev, xref); if (err != 0) { device_printf(dev, "Failed to register for MSIs: %d\n", err); return (err); } return (0); } #endif #ifdef DEV_ACPI static device_probe_t gicv3_its_acpi_probe; static device_attach_t gicv3_its_acpi_attach; static device_method_t gicv3_its_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gicv3_its_acpi_probe), DEVMETHOD(device_attach, gicv3_its_acpi_attach), /* End */ DEVMETHOD_END }; #define its_baseclasses its_acpi_baseclasses DEFINE_CLASS_1(its,
gicv3_its_acpi_driver, gicv3_its_acpi_methods, sizeof(struct gicv3_its_softc), gicv3_its_driver); #undef its_baseclasses EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); static int gicv3_its_acpi_probe(device_t dev) { if (gic_get_bus(dev) != GIC_BUS_ACPI) return (EINVAL); if (gic_get_hw_rev(dev) < 3) return (EINVAL); if (!gicv3_get_support_lpis(dev)) return (ENXIO); device_set_desc(dev, "ARM GIC Interrupt Translation Service"); return (BUS_PROBE_DEFAULT); } static int gicv3_its_acpi_attach(device_t dev) { struct gicv3_its_softc *sc; struct gic_v3_devinfo *di; int err; sc = device_get_softc(dev); sc->dev = dev; err = gicv3_its_attach(dev); if (err != 0) return (err); di = device_get_ivars(dev); sc->sc_pic = intr_pic_register(dev, di->msi_xref); err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic, gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length); if (err != 0) { device_printf(dev, "Failed to add PIC handler: %d\n", err); return (err); } /* Register this device to handle MSI interrupts */ err = intr_msi_register(dev, di->msi_xref); if (err != 0) { device_printf(dev, "Failed to register for MSIs: %d\n", err); return (err); } return (0); } #endif diff --git a/sys/arm64/include/intr.h b/sys/arm64/include/intr.h index c3fe5edc8a6c..38cba6ae8b0d 100644 --- a/sys/arm64/include/intr.h +++ b/sys/arm64/include/intr.h @@ -1,56 +1,52 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _MACHINE_INTR_H_ #define _MACHINE_INTR_H_ #ifdef FDT #include #endif -enum root_type { - INTR_ROOT_IRQ = 0, - INTR_ROOT_FIQ = 1, - - INTR_ROOT_COUNT /* MUST BE LAST */ -}; - #ifndef NIRQ #define NIRQ 16384 /* XXX - It should be an option. */ #endif static inline void arm_irq_memory_barrier(uintptr_t irq) { } #ifdef DEV_ACPI #define ACPI_INTR_XREF 1 #define ACPI_MSI_XREF 2 #define ACPI_GPIO_XREF 3 #endif +#define INTR_ROOT_FIQ 1 +#define INTR_ROOT_NUM 2 + #endif /* _MACHINE_INTR_H */ diff --git a/sys/kern/pic_if.m b/sys/kern/pic_if.m index 2d938520b106..f78e787594c5 100644 --- a/sys/kern/pic_if.m +++ b/sys/kern/pic_if.m @@ -1,174 +1,174 @@ #- # Copyright (c) 2015-2016 Svatopluk Kraus # Copyright (c) 2015-2016 Michal Meloun # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # #include #include #include #include INTERFACE pic; CODE { static int dflt_pic_bind_intr(device_t dev, struct intr_irqsrc *isrc) { return (EOPNOTSUPP); } static int null_pic_activate_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { return (0); } static int null_pic_deactivate_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { return (0); } static int null_pic_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { return (0); } static int null_pic_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { return (0); } static void - null_pic_init_secondary(device_t dev, enum root_type root_type) + null_pic_init_secondary(device_t dev, uint32_t rootnum) { } static void null_pic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi) { } static int dflt_pic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc *isrc) { return (EOPNOTSUPP); } }; METHOD int activate_intr { device_t dev; struct intr_irqsrc *isrc; struct resource *res; struct intr_map_data *data; } DEFAULT null_pic_activate_intr; METHOD int bind_intr { device_t dev; struct intr_irqsrc *isrc; } DEFAULT dflt_pic_bind_intr; METHOD void disable_intr { device_t dev; struct intr_irqsrc *isrc; }; METHOD void enable_intr { device_t dev; struct intr_irqsrc *isrc; }; METHOD int map_intr { device_t dev; struct intr_map_data *data; struct intr_irqsrc **isrcp; }; METHOD int deactivate_intr { device_t dev; struct intr_irqsrc *isrc; struct resource *res; struct intr_map_data *data; } DEFAULT null_pic_deactivate_intr; METHOD int setup_intr { device_t dev; struct intr_irqsrc *isrc; struct resource *res; struct intr_map_data *data; } DEFAULT null_pic_setup_intr; METHOD int teardown_intr { device_t dev; struct intr_irqsrc *isrc; struct resource *res; struct intr_map_data *data; } DEFAULT null_pic_teardown_intr; METHOD void post_filter { device_t dev; struct intr_irqsrc *isrc; }; METHOD void post_ithread { device_t dev; struct intr_irqsrc *isrc; }; METHOD void pre_ithread { device_t dev; struct intr_irqsrc *isrc; }; METHOD void init_secondary { device_t dev; - enum root_type root_type; + uint32_t rootnum; } DEFAULT 
null_pic_init_secondary; METHOD void ipi_send { device_t dev; struct intr_irqsrc *isrc; cpuset_t cpus; u_int ipi; } DEFAULT null_pic_ipi_send; METHOD int ipi_setup { device_t dev; u_int ipi; struct intr_irqsrc **isrcp; } DEFAULT dflt_pic_ipi_setup; diff --git a/sys/kern/subr_intr.c b/sys/kern/subr_intr.c index e88018e58729..b8d85bf20f28 100644 --- a/sys/kern/subr_intr.c +++ b/sys/kern/subr_intr.c @@ -1,1953 +1,1962 @@ /*- * Copyright (c) 2015-2016 Svatopluk Kraus * Copyright (c) 2015-2016 Michal Meloun * All rights reserved. * Copyright (c) 2015-2016 The FreeBSD Foundation * Copyright (c) 2021 Jessica Clarke * * Portions of this software were developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * New-style Interrupt Framework * * TODO: - add support for disconnected PICs. * - to support IPI (PPI) enabling on other CPUs if already started. * - to complete things for removable PICs. */ #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_iommu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include #include #include #include #ifdef DDB #include #endif #ifdef IOMMU #include #endif #include "pic_if.h" #include "msi_if.h" #define INTRNAME_LEN (2*MAXCOMLEN + 1) +/* + * Archs may define multiple roots with INTR_ROOT_NUM to support different kinds + * of interrupts (e.g. arm64 FIQs which use a different exception vector than + * IRQs). + */ +#if !defined(INTR_ROOT_NUM) +#define INTR_ROOT_NUM 1 +#endif + #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif MALLOC_DECLARE(M_INTRNG); MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling"); /* Root interrupt controller stuff. 
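* One entry per interrupt root (INTR_ROOT_NUM of them). Each records * the root device plus the filter and argument that intr_irq_handler() * runs when that root's exception is taken.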
*/ struct intr_irq_root { device_t dev; intr_irq_filter_t *filter; void *arg; }; -static struct intr_irq_root intr_irq_roots[INTR_ROOT_COUNT]; +static struct intr_irq_root intr_irq_roots[INTR_ROOT_NUM]; struct intr_pic_child { SLIST_ENTRY(intr_pic_child) pc_next; struct intr_pic *pc_pic; intr_child_irq_filter_t *pc_filter; void *pc_filter_arg; uintptr_t pc_start; uintptr_t pc_length; }; /* Interrupt controller definition. */ struct intr_pic { SLIST_ENTRY(intr_pic) pic_next; intptr_t pic_xref; /* hardware identification */ device_t pic_dev; /* Only one of FLAG_PIC or FLAG_MSI may be set */ #define FLAG_PIC (1 << 0) #define FLAG_MSI (1 << 1) #define FLAG_TYPE_MASK (FLAG_PIC | FLAG_MSI) u_int pic_flags; struct mtx pic_child_lock; SLIST_HEAD(, intr_pic_child) pic_children; }; #ifdef SMP #define INTR_IPI_NAMELEN (MAXCOMLEN + 1) struct intr_ipi { intr_ipi_handler_t *ii_handler; void *ii_handler_arg; struct intr_irqsrc *ii_isrc; char ii_name[INTR_IPI_NAMELEN]; u_long *ii_count; }; static device_t intr_ipi_dev; static u_int intr_ipi_dev_priority; static bool intr_ipi_dev_frozen; #endif static struct mtx pic_list_lock; static SLIST_HEAD(, intr_pic) pic_list; static struct intr_pic *pic_lookup(device_t dev, intptr_t xref, u_int flags); /* Interrupt source definition. */ static struct mtx isrc_table_lock; static struct intr_irqsrc **irq_sources; static u_int irq_next_free; #ifdef SMP #ifdef EARLY_AP_STARTUP static bool irq_assign_cpu = true; #else static bool irq_assign_cpu = false; #endif static struct intr_ipi ipi_sources[INTR_IPI_COUNT]; #endif u_int intr_nirq = NIRQ; SYSCTL_UINT(_machdep, OID_AUTO, nirq, CTLFLAG_RDTUN, &intr_nirq, 0, "Number of IRQs"); /* Data for MI statistics reporting. */ u_long *intrcnt; char *intrnames; size_t sintrcnt; size_t sintrnames; int nintrcnt; static bitstr_t *intrcnt_bitmap; static struct intr_irqsrc *intr_map_get_isrc(u_int res_id); static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc); static struct intr_map_data * intr_map_get_map_data(u_int res_id); static void intr_map_copy_map_data(u_int res_id, device_t *dev, intptr_t *xref, struct intr_map_data **data); /* * Interrupt framework initialization routine. */ static void intr_irq_init(void *dummy __unused) { SLIST_INIT(&pic_list); mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF); mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF); /* * - 2 counters for each I/O interrupt. * - mp_maxid + 1 counters for each IPI, for SMP. */ nintrcnt = intr_nirq * 2; #ifdef SMP nintrcnt += INTR_IPI_COUNT * (mp_maxid + 1); #endif intrcnt = mallocarray(nintrcnt, sizeof(u_long), M_INTRNG, M_WAITOK | M_ZERO); intrnames = mallocarray(nintrcnt, INTRNAME_LEN, M_INTRNG, M_WAITOK | M_ZERO); sintrcnt = nintrcnt * sizeof(u_long); sintrnames = nintrcnt * INTRNAME_LEN; /* Allocate the bitmap tracking counter allocations. */ intrcnt_bitmap = bit_alloc(nintrcnt, M_INTRNG, M_WAITOK | M_ZERO); irq_sources = mallocarray(intr_nirq, sizeof(struct intr_irqsrc*), M_INTRNG, M_WAITOK | M_ZERO); } SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL); static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s", INTRNAME_LEN - 1, name); } /* * Update name for interrupt source with interrupt event. */ static void intrcnt_updatename(struct intr_irqsrc *isrc) { /* QQQ: What about stray counter name?
*/ mtx_assert(&isrc_table_lock, MA_OWNED); intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index); } /* * Virtualization for interrupt source interrupt counter increment. */ static inline void isrc_increment_count(struct intr_irqsrc *isrc) { if (isrc->isrc_flags & INTR_ISRCF_PPI) atomic_add_long(&isrc->isrc_count[0], 1); else isrc->isrc_count[0]++; } /* * Virtualization for interrupt source interrupt stray counter increment. */ static inline void isrc_increment_straycount(struct intr_irqsrc *isrc) { isrc->isrc_count[1]++; } /* * Virtualization for interrupt source interrupt name update. */ static void isrc_update_name(struct intr_irqsrc *isrc, const char *name) { char str[INTRNAME_LEN]; mtx_assert(&isrc_table_lock, MA_OWNED); if (name != NULL) { snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name); intrcnt_setname(str, isrc->isrc_index); snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name, name); intrcnt_setname(str, isrc->isrc_index + 1); } else { snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name); intrcnt_setname(str, isrc->isrc_index); snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name); intrcnt_setname(str, isrc->isrc_index + 1); } } /* * Virtualization for interrupt source interrupt counters setup. */ static void isrc_setup_counters(struct intr_irqsrc *isrc) { int index; mtx_assert(&isrc_table_lock, MA_OWNED); /* * Allocate two counter values, the second tracking "stray" interrupts. */ bit_ffc_area(intrcnt_bitmap, nintrcnt, 2, &index); if (index == -1) panic("Failed to allocate 2 counters. Array exhausted?"); bit_nset(intrcnt_bitmap, index, index + 1); isrc->isrc_index = index; isrc->isrc_count = &intrcnt[index]; isrc_update_name(isrc, NULL); } /* * Virtualization for interrupt source interrupt counters release. */ static void isrc_release_counters(struct intr_irqsrc *isrc) { int idx = isrc->isrc_index; mtx_assert(&isrc_table_lock, MA_OWNED); bit_nclear(intrcnt_bitmap, idx, idx + 1); } /* * Main interrupt dispatch handler. It's called straight * from the assembler, where CPU interrupt is served. 
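The two-counter allocation in isrc_setup_counters() above is built on the bitstring(9) area primitives; a self-contained sketch of the same pattern (toy helper, not from the patch):

#include <sys/bitstring.h>

/* Find and claim a run of two free counter slots; -1 if exhausted. */
static int
alloc_counter_pair(bitstr_t *map, int nbits)
{
	int index;

	bit_ffc_area(map, nbits, 2, &index);	/* first run of 2 clear bits */
	if (index == -1)
		return (-1);
	bit_nset(map, index, index + 1);	/* mark both slots in use */
	return (index);
}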
*/ void -intr_irq_handler(struct trapframe *tf, u_register_t root_type) +intr_irq_handler(struct trapframe *tf, uint32_t rootnum) { struct trapframe * oldframe; struct thread * td; struct intr_irq_root *root; - KASSERT((uintmax_t)root_type < INTR_ROOT_COUNT, - ("%s: invalid interrupt root %ju", __func__, (uintmax_t)root_type)); + KASSERT(rootnum < INTR_ROOT_NUM, + ("%s: invalid interrupt root %d", __func__, rootnum)); - root = &intr_irq_roots[root_type]; + root = &intr_irq_roots[rootnum]; KASSERT(root->filter != NULL, ("%s: no filter", __func__)); kasan_mark(tf, sizeof(*tf), sizeof(*tf), 0); kmsan_mark(tf, sizeof(*tf), KMSAN_STATE_INITED); VM_CNT_INC(v_intr); critical_enter(); td = curthread; oldframe = td->td_intr_frame; td->td_intr_frame = tf; (root->filter)(root->arg); td->td_intr_frame = oldframe; critical_exit(); #ifdef HWPMC_HOOKS if (pmc_hook && TRAPF_USERMODE(tf) && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN)) pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf); #endif } int intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq) { struct intr_pic_child *child; bool found; found = false; mtx_lock_spin(&parent->pic_child_lock); SLIST_FOREACH(child, &parent->pic_children, pc_next) { if (child->pc_start <= irq && irq < (child->pc_start + child->pc_length)) { found = true; break; } } mtx_unlock_spin(&parent->pic_child_lock); if (found) return (child->pc_filter(child->pc_filter_arg, irq)); return (FILTER_STRAY); } /* * interrupt controller dispatch function for interrupts. It should * be called straight from the interrupt controller, when associated interrupt * source is learned. */ int intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf) { KASSERT(isrc != NULL, ("%s: no source", __func__)); if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) isrc_increment_count(isrc); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) { int error; error = isrc->isrc_filter(isrc->isrc_arg, tf); PIC_POST_FILTER(isrc->isrc_dev, isrc); if (error == FILTER_HANDLED) return (0); } else #endif if (isrc->isrc_event != NULL) { if (intr_event_handle(isrc->isrc_event, tf) == 0) return (0); } if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) isrc_increment_straycount(isrc); return (EINVAL); } /* * Alloc unique interrupt number (resource handle) for interrupt source. * * There could be various strategies how to allocate free interrupt number * (resource handle) for new interrupt source. * * 1. Handles are always allocated forward, so handles are not recycled * immediately. However, if only one free handle left which is reused * constantly... */ static inline int isrc_alloc_irq(struct intr_irqsrc *isrc) { u_int irq; mtx_assert(&isrc_table_lock, MA_OWNED); if (irq_next_free >= intr_nirq) return (ENOSPC); for (irq = irq_next_free; irq < intr_nirq; irq++) { if (irq_sources[irq] == NULL) goto found; } for (irq = 0; irq < irq_next_free; irq++) { if (irq_sources[irq] == NULL) goto found; } irq_next_free = intr_nirq; return (ENOSPC); found: isrc->isrc_irq = irq; irq_sources[irq] = isrc; irq_next_free = irq + 1; if (irq_next_free >= intr_nirq) irq_next_free = 0; return (0); } /* * Free unique interrupt number (resource handle) from interrupt source. 
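The rootnum argument of intr_irq_handler() above is supplied by the MD exception paths; a sketch of what one caller looks like (the function name is illustrative, assuming a single-root architecture):

/* Called from the IRQ exception vector with interrupts disabled. */
void
md_irq_exception(struct trapframe *tf)
{
	/* Root 0; a second vector (e.g. FIQ) would pass its own root number. */
	intr_irq_handler(tf, INTR_ROOT_IRQ);
}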
*/ static inline int isrc_free_irq(struct intr_irqsrc *isrc) { mtx_assert(&isrc_table_lock, MA_OWNED); if (isrc->isrc_irq >= intr_nirq) return (EINVAL); if (irq_sources[isrc->isrc_irq] != isrc) return (EINVAL); irq_sources[isrc->isrc_irq] = NULL; isrc->isrc_irq = INTR_IRQ_INVALID; /* just to be safe */ /* * If we are recovering from the state irq_sources table is full, * then the following allocation should check the entire table. This * will ensure maximum separation of allocation order from release * order. */ if (irq_next_free >= intr_nirq) irq_next_free = 0; return (0); } device_t -intr_irq_root_device(enum root_type root_type) +intr_irq_root_device(uint32_t rootnum) { - KASSERT((uintmax_t)root_type < INTR_ROOT_COUNT, - ("%s: invalid interrupt root %ju", __func__, (uintmax_t)root_type)); - return (intr_irq_roots[root_type].dev); + KASSERT(rootnum < INTR_ROOT_NUM, + ("%s: invalid interrupt root %d", __func__, rootnum)); + return (intr_irq_roots[rootnum].dev); } /* * Initialize interrupt source and register it into global interrupt table. */ int intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags, const char *fmt, ...) { int error; va_list ap; bzero(isrc, sizeof(struct intr_irqsrc)); isrc->isrc_dev = dev; isrc->isrc_irq = INTR_IRQ_INVALID; /* just to be safe */ isrc->isrc_flags = flags; va_start(ap, fmt); vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap); va_end(ap); mtx_lock(&isrc_table_lock); error = isrc_alloc_irq(isrc); if (error != 0) { mtx_unlock(&isrc_table_lock); return (error); } /* * Setup interrupt counters, but not for IPI sources. Those are setup * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust * our counter pool. */ if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) isrc_setup_counters(isrc); mtx_unlock(&isrc_table_lock); return (0); } /* * Deregister interrupt source from global interrupt table. */ int intr_isrc_deregister(struct intr_irqsrc *isrc) { int error; mtx_lock(&isrc_table_lock); if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) isrc_release_counters(isrc); error = isrc_free_irq(isrc); mtx_unlock(&isrc_table_lock); return (error); } #ifdef SMP /* * A support function for a PIC to decide if provided ISRC should be inited * on given cpu. The logic of INTR_ISRCF_BOUND flag and isrc_cpu member of * struct intr_irqsrc is the following: * * If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus * set in isrc_cpu. If not, the ISRC should be inited on every cpu and * isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct. */ bool intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu) { if (isrc->isrc_handlers == 0) return (false); if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0) return (false); if (isrc->isrc_flags & INTR_ISRCF_BOUND) return (CPU_ISSET(cpu, &isrc->isrc_cpu)); CPU_SET(cpu, &isrc->isrc_cpu); return (true); } #endif #ifdef INTR_SOLO /* * Setup filter into interrupt source. */ static int iscr_setup_filter(struct intr_irqsrc *isrc, const char *name, intr_irq_filter_t *filter, void *arg, void **cookiep) { if (filter == NULL) return (EINVAL); mtx_lock(&isrc_table_lock); /* * Make sure that we do not mix the two ways * how we handle interrupt sources. */ if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) { mtx_unlock(&isrc_table_lock); return (EBUSY); } isrc->isrc_filter = filter; isrc->isrc_arg = arg; isrc_update_name(isrc, name); mtx_unlock(&isrc_table_lock); *cookiep = isrc; return (0); } #endif /* * Interrupt source pre_ithread method for MI interrupt framework. 
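A PIC driver calls intr_isrc_register() once per interrupt pin during attach; a sketch modeled on the riscv intc driver later in this patch, simplified to PPI-only sources:

for (i = 0; i < INTC_NIRQS; i++) {
	sc->isrcs[i].irq = i;
	error = intr_isrc_register(&sc->isrcs[i].isrc, sc->dev,
	    INTR_ISRCF_PPI, "%s,%u", device_get_nameunit(sc->dev), i);
	if (error != 0)
		return (error);	/* no free slot left in irq_sources[] */
}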
*/ static void intr_isrc_pre_ithread(void *arg) { struct intr_irqsrc *isrc = arg; PIC_PRE_ITHREAD(isrc->isrc_dev, isrc); } /* * Interrupt source post_ithread method for MI interrupt framework. */ static void intr_isrc_post_ithread(void *arg) { struct intr_irqsrc *isrc = arg; PIC_POST_ITHREAD(isrc->isrc_dev, isrc); } /* * Interrupt source post_filter method for MI interrupt framework. */ static void intr_isrc_post_filter(void *arg) { struct intr_irqsrc *isrc = arg; PIC_POST_FILTER(isrc->isrc_dev, isrc); } /* * Interrupt source assign_cpu method for MI interrupt framework. */ static int intr_isrc_assign_cpu(void *arg, int cpu) { #ifdef SMP struct intr_irqsrc *isrc = arg; int error; mtx_lock(&isrc_table_lock); if (cpu == NOCPU) { CPU_ZERO(&isrc->isrc_cpu); isrc->isrc_flags &= ~INTR_ISRCF_BOUND; } else { CPU_SETOF(cpu, &isrc->isrc_cpu); isrc->isrc_flags |= INTR_ISRCF_BOUND; } /* * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or * re-balance it to another CPU or enable it on more CPUs. However, * PIC is expected to change isrc_cpu appropriately to keep us well * informed if the call is successful. */ if (irq_assign_cpu) { error = PIC_BIND_INTR(isrc->isrc_dev, isrc); if (error) { CPU_ZERO(&isrc->isrc_cpu); mtx_unlock(&isrc_table_lock); return (error); } } mtx_unlock(&isrc_table_lock); return (0); #else return (EOPNOTSUPP); #endif } /* * Create interrupt event for interrupt source. */ static int isrc_event_create(struct intr_irqsrc *isrc) { struct intr_event *ie; int error; error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq, intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter, intr_isrc_assign_cpu, "%s:", isrc->isrc_name); if (error) return (error); mtx_lock(&isrc_table_lock); /* * Make sure that we do not mix the two ways * how we handle interrupt sources. Let contested event wins. */ #ifdef INTR_SOLO if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) { #else if (isrc->isrc_event != NULL) { #endif mtx_unlock(&isrc_table_lock); intr_event_destroy(ie); return (isrc->isrc_event != NULL ? EBUSY : 0); } isrc->isrc_event = ie; mtx_unlock(&isrc_table_lock); return (0); } #ifdef notyet /* * Destroy interrupt event for interrupt source. */ static void isrc_event_destroy(struct intr_irqsrc *isrc) { struct intr_event *ie; mtx_lock(&isrc_table_lock); ie = isrc->isrc_event; isrc->isrc_event = NULL; mtx_unlock(&isrc_table_lock); if (ie != NULL) intr_event_destroy(ie); } #endif /* * Add handler to interrupt source. */ static int isrc_add_handler(struct intr_irqsrc *isrc, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { int error; if (isrc->isrc_event == NULL) { error = isrc_event_create(isrc); if (error) return (error); } error = intr_event_add_handler(isrc->isrc_event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (error == 0) { mtx_lock(&isrc_table_lock); intrcnt_updatename(isrc); mtx_unlock(&isrc_table_lock); } return (error); } /* * Lookup interrupt controller locked. */ static inline struct intr_pic * pic_lookup_locked(device_t dev, intptr_t xref, u_int flags) { struct intr_pic *pic; mtx_assert(&pic_list_lock, MA_OWNED); if (dev == NULL && xref == 0) return (NULL); /* Note that pic->pic_dev is never NULL on registered PIC. 
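The three trampolines above are how the MI intr_event machinery drives the PIC methods; for a typical level-triggered source the sequence is roughly as follows (a sketch of the usual contract, the exact mask/EOI split is PIC-specific):

/*
 * ithread case:  filter (if any) -> intr_isrc_pre_ithread()  PIC masks + EOIs
 *                ithread runs    -> intr_isrc_post_ithread() PIC unmasks
 * filter-only:   filter handles  -> intr_isrc_post_filter()  PIC EOIs
 */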
SLIST_FOREACH(pic, &pic_list, pic_next) { if ((pic->pic_flags & FLAG_TYPE_MASK) != (flags & FLAG_TYPE_MASK)) continue; if (dev == NULL) { if (xref == pic->pic_xref) return (pic); } else if (xref == 0 || pic->pic_xref == 0) { if (dev == pic->pic_dev) return (pic); } else if (xref == pic->pic_xref && dev == pic->pic_dev) return (pic); } return (NULL); } /* * Lookup interrupt controller. */ static struct intr_pic * pic_lookup(device_t dev, intptr_t xref, u_int flags) { struct intr_pic *pic; mtx_lock(&pic_list_lock); pic = pic_lookup_locked(dev, xref, flags); mtx_unlock(&pic_list_lock); return (pic); } /* * Create interrupt controller. */ static struct intr_pic * pic_create(device_t dev, intptr_t xref, u_int flags) { struct intr_pic *pic; mtx_lock(&pic_list_lock); pic = pic_lookup_locked(dev, xref, flags); if (pic != NULL) { mtx_unlock(&pic_list_lock); return (pic); } pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO); if (pic == NULL) { mtx_unlock(&pic_list_lock); return (NULL); } pic->pic_xref = xref; pic->pic_dev = dev; pic->pic_flags = flags; mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN); SLIST_INSERT_HEAD(&pic_list, pic, pic_next); mtx_unlock(&pic_list_lock); return (pic); } #ifdef notyet /* * Destroy interrupt controller. */ static void pic_destroy(device_t dev, intptr_t xref, u_int flags) { struct intr_pic *pic; mtx_lock(&pic_list_lock); pic = pic_lookup_locked(dev, xref, flags); if (pic == NULL) { mtx_unlock(&pic_list_lock); return; } SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next); mtx_unlock(&pic_list_lock); free(pic, M_INTRNG); } #endif /* * Register interrupt controller. */ struct intr_pic * intr_pic_register(device_t dev, intptr_t xref) { struct intr_pic *pic; if (dev == NULL) return (NULL); pic = pic_create(dev, xref, FLAG_PIC); if (pic == NULL) return (NULL); debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic, device_get_nameunit(dev), dev, (uintmax_t)xref); return (pic); } /* * Unregister interrupt controller. */ int intr_pic_deregister(device_t dev, intptr_t xref) { panic("%s: not implemented", __func__); } /* * Mark interrupt controller (itself) as a root one. * * Note that only an interrupt controller can really know its position * in interrupt controller's tree. So root PIC must claim itself as a root. * * In FDT case, according to ePAPR approved version 1.1 from 08 April 2011, * page 30: * "The root of the interrupt tree is determined when traversal * of the interrupt tree reaches an interrupt controller node without * an interrupts property and thus no explicit interrupt parent." */ int intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter, - void *arg, enum root_type root_type) + void *arg, uint32_t rootnum) { struct intr_pic *pic; struct intr_irq_root *root; pic = pic_lookup(dev, xref, FLAG_PIC); if (pic == NULL) { device_printf(dev, "not registered\n"); return (EINVAL); } KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC, ("%s: Found a non-PIC controller: %s", __func__, device_get_name(pic->pic_dev))); if (filter == NULL) { device_printf(dev, "filter missing\n"); return (EINVAL); } /* * Only one interrupt controllers could be on the root for now. * Note that we further suppose that there is not threaded interrupt * routine (handler) on the root. See intr_irq_handler().
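Claiming the root is the last step of a root PIC's attach routine; compare intc_attach() in sys/riscv/riscv/intc.c later in this patch:

pic = intr_pic_register(sc->dev, xref);
if (pic == NULL)
	return (ENXIO);
return (intr_pic_claim_root(sc->dev, xref, intc_intr, sc, INTR_ROOT_IRQ));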
*/ - KASSERT((uintmax_t)root_type < INTR_ROOT_COUNT, - ("%s: invalid interrupt root %ju", __func__, (uintmax_t)root_type)); - root = &intr_irq_roots[root_type]; + KASSERT(rootnum < INTR_ROOT_NUM, + ("%s: invalid interrupt root %d", __func__, rootnum)); + root = &intr_irq_roots[rootnum]; if (root->dev != NULL) { device_printf(dev, "another root already set\n"); return (EBUSY); } root->dev = dev; root->filter = filter; root->arg = arg; debugf("irq root set to %s\n", device_get_nameunit(dev)); return (0); } /* * Add a handler to manage a sub range of a parents interrupts. */ int intr_pic_add_handler(device_t parent, struct intr_pic *pic, intr_child_irq_filter_t *filter, void *arg, uintptr_t start, uintptr_t length) { struct intr_pic *parent_pic; struct intr_pic_child *newchild; #ifdef INVARIANTS struct intr_pic_child *child; #endif /* Find the parent PIC */ parent_pic = pic_lookup(parent, 0, FLAG_PIC); if (parent_pic == NULL) return (ENXIO); newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO); newchild->pc_pic = pic; newchild->pc_filter = filter; newchild->pc_filter_arg = arg; newchild->pc_start = start; newchild->pc_length = length; mtx_lock_spin(&parent_pic->pic_child_lock); #ifdef INVARIANTS SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) { KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice", __func__)); } #endif SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next); mtx_unlock_spin(&parent_pic->pic_child_lock); return (0); } static int intr_resolve_irq(device_t dev, intptr_t xref, struct intr_map_data *data, struct intr_irqsrc **isrc) { struct intr_pic *pic; struct intr_map_data_msi *msi; if (data == NULL) return (EINVAL); pic = pic_lookup(dev, xref, (data->type == INTR_MAP_DATA_MSI) ? FLAG_MSI : FLAG_PIC); if (pic == NULL) return (ESRCH); switch (data->type) { case INTR_MAP_DATA_MSI: KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); msi = (struct intr_map_data_msi *)data; *isrc = msi->isrc; return (0); default: KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC, ("%s: Found a non-PIC controller: %s", __func__, device_get_name(pic->pic_dev))); return (PIC_MAP_INTR(pic->pic_dev, data, isrc)); } } bool intr_is_per_cpu(struct resource *res) { u_int res_id; struct intr_irqsrc *isrc; res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL) panic("Attempt to get isrc for non-active resource id: %u\n", res_id); return ((isrc->isrc_flags & INTR_ISRCF_PPI) != 0); } int intr_activate_irq(device_t dev, struct resource *res) { device_t map_dev; intptr_t map_xref; struct intr_map_data *data; struct intr_irqsrc *isrc; u_int res_id; int error; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); if (intr_map_get_isrc(res_id) != NULL) panic("Attempt to double activation of resource id: %u\n", res_id); intr_map_copy_map_data(res_id, &map_dev, &map_xref, &data); error = intr_resolve_irq(map_dev, map_xref, data, &isrc); if (error != 0) { free(data, M_INTRNG); /* XXX TODO DISCONECTED PICs */ /* if (error == EINVAL) return(0); */ return (error); } intr_map_set_isrc(res_id, isrc); rman_set_virtual(res, data); return (PIC_ACTIVATE_INTR(isrc->isrc_dev, isrc, res, data)); } int intr_deactivate_irq(device_t dev, struct resource *res) { struct intr_map_data *data; struct intr_irqsrc *isrc; u_int res_id; int error; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: 
more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL) panic("Attempt to deactivate non-active resource id: %u\n", res_id); data = rman_get_virtual(res); error = PIC_DEACTIVATE_INTR(isrc->isrc_dev, isrc, res, data); intr_map_set_isrc(res_id, NULL); rman_set_virtual(res, NULL); free(data, M_INTRNG); return (error); } int intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt, driver_intr_t hand, void *arg, int flags, void **cookiep) { int error; struct intr_map_data *data; struct intr_irqsrc *isrc; const char *name; u_int res_id; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL) { /* XXX TODO DISCONECTED PICs */ return (EINVAL); } data = rman_get_virtual(res); name = device_get_nameunit(dev); #ifdef INTR_SOLO /* * Standard handling is done through MI interrupt framework. However, * some interrupts could request solely own special handling. This * non standard handling can be used for interrupt controllers without * handler (filter only), so in case that interrupt controllers are * chained, MI interrupt framework is called only in leaf controller. * * Note that root interrupt controller routine is served as well, * however in intr_irq_handler(), i.e. main system dispatch routine. */ if (flags & INTR_SOLO && hand != NULL) { debugf("irq %u cannot solo on %s\n", irq, name); return (EINVAL); } if (flags & INTR_SOLO) { error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt, arg, cookiep); debugf("irq %u setup filter error %d on %s\n", isrc->isrc_irq, error, name); } else #endif { error = isrc_add_handler(isrc, name, filt, hand, arg, flags, cookiep); debugf("irq %u add handler error %d on %s\n", isrc->isrc_irq, error, name); } if (error != 0) return (error); mtx_lock(&isrc_table_lock); error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data); if (error == 0) { isrc->isrc_handlers++; if (isrc->isrc_handlers == 1) PIC_ENABLE_INTR(isrc->isrc_dev, isrc); } mtx_unlock(&isrc_table_lock); if (error != 0) intr_event_remove_handler(*cookiep); return (error); } int intr_teardown_irq(device_t dev, struct resource *res, void *cookie) { int error; struct intr_map_data *data; struct intr_irqsrc *isrc; u_int res_id; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL || isrc->isrc_handlers == 0) return (EINVAL); data = rman_get_virtual(res); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) { if (isrc != cookie) return (EINVAL); mtx_lock(&isrc_table_lock); isrc->isrc_filter = NULL; isrc->isrc_arg = NULL; isrc->isrc_handlers = 0; PIC_DISABLE_INTR(isrc->isrc_dev, isrc); PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data); isrc_update_name(isrc, NULL); mtx_unlock(&isrc_table_lock); return (0); } #endif if (isrc != intr_handler_source(cookie)) return (EINVAL); error = intr_event_remove_handler(cookie); if (error == 0) { mtx_lock(&isrc_table_lock); isrc->isrc_handlers--; if (isrc->isrc_handlers == 0) PIC_DISABLE_INTR(isrc->isrc_dev, isrc); PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data); intrcnt_updatename(isrc); mtx_unlock(&isrc_table_lock); } return (error); } int intr_describe_irq(device_t dev, struct resource *res, void *cookie, const char *descr) { int error; struct intr_irqsrc *isrc; u_int res_id; KASSERT(rman_get_start(res) == rman_get_end(res), 
("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL || isrc->isrc_handlers == 0) return (EINVAL); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) { if (isrc != cookie) return (EINVAL); mtx_lock(&isrc_table_lock); isrc_update_name(isrc, descr); mtx_unlock(&isrc_table_lock); return (0); } #endif error = intr_event_describe_handler(isrc->isrc_event, cookie, descr); if (error == 0) { mtx_lock(&isrc_table_lock); intrcnt_updatename(isrc); mtx_unlock(&isrc_table_lock); } return (error); } #ifdef SMP int intr_bind_irq(device_t dev, struct resource *res, int cpu) { struct intr_irqsrc *isrc; u_int res_id; KASSERT(rman_get_start(res) == rman_get_end(res), ("%s: more interrupts in resource", __func__)); res_id = (u_int)rman_get_start(res); isrc = intr_map_get_isrc(res_id); if (isrc == NULL || isrc->isrc_handlers == 0) return (EINVAL); #ifdef INTR_SOLO if (isrc->isrc_filter != NULL) return (intr_isrc_assign_cpu(isrc, cpu)); #endif return (intr_event_bind(isrc->isrc_event, cpu)); } /* * Return the CPU that the next interrupt source should use. * For now just returns the next CPU according to round-robin. */ u_int intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask) { u_int cpu; KASSERT(!CPU_EMPTY(cpumask), ("%s: Empty CPU mask", __func__)); if (!irq_assign_cpu || mp_ncpus == 1) { cpu = PCPU_GET(cpuid); if (CPU_ISSET(cpu, cpumask)) return (curcpu); return (CPU_FFS(cpumask) - 1); } do { last_cpu++; if (last_cpu > mp_maxid) last_cpu = 0; } while (!CPU_ISSET(last_cpu, cpumask)); return (last_cpu); } #ifndef EARLY_AP_STARTUP /* * Distribute all the interrupt sources among the available * CPUs once the AP's have been launched. */ static void intr_irq_shuffle(void *arg __unused) { struct intr_irqsrc *isrc; u_int i; if (mp_ncpus == 1) return; mtx_lock(&isrc_table_lock); irq_assign_cpu = true; for (i = 0; i < intr_nirq; i++) { isrc = irq_sources[i]; if (isrc == NULL || isrc->isrc_handlers == 0 || isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) continue; if (isrc->isrc_event != NULL && isrc->isrc_flags & INTR_ISRCF_BOUND && isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1) panic("%s: CPU inconsistency", __func__); if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0) CPU_ZERO(&isrc->isrc_cpu); /* start again */ /* * We are in wicked position here if the following call fails * for bound ISRC. The best thing we can do is to clear * isrc_cpu so inconsistency with ie_cpu will be detectable. */ if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0) CPU_ZERO(&isrc->isrc_cpu); } mtx_unlock(&isrc_table_lock); } SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL); #endif /* !EARLY_AP_STARTUP */ #else u_int intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask) { return (PCPU_GET(cpuid)); } #endif /* SMP */ /* * Allocate memory for new intr_map_data structure. * Initialize common fields. 
*/ struct intr_map_data * intr_alloc_map_data(enum intr_map_data_type type, size_t len, int flags) { struct intr_map_data *data; data = malloc(len, M_INTRNG, flags); data->type = type; data->len = len; return (data); } void intr_free_intr_map_data(struct intr_map_data *data) { free(data, M_INTRNG); } /* * Register a MSI/MSI-X interrupt controller */ int intr_msi_register(device_t dev, intptr_t xref) { struct intr_pic *pic; if (dev == NULL) return (EINVAL); pic = pic_create(dev, xref, FLAG_MSI); if (pic == NULL) return (ENOMEM); debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic, device_get_nameunit(dev), dev, (uintmax_t)xref); return (0); } int intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count, int maxcount, int *irqs) { struct iommu_domain *domain; struct intr_irqsrc **isrc; struct intr_pic *pic; device_t pdev; struct intr_map_data_msi *msi; int err, i; pic = pic_lookup(NULL, xref, FLAG_MSI); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); /* * If this is the first time we have used this context ask the * interrupt controller to map memory the msi source will need. */ err = MSI_IOMMU_INIT(pic->pic_dev, child, &domain); if (err != 0) return (err); isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK); err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc); if (err != 0) { free(isrc, M_INTRNG); return (err); } for (i = 0; i < count; i++) { isrc[i]->isrc_iommu = domain; msi = (struct intr_map_data_msi *)intr_alloc_map_data( INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO); msi->isrc = isrc[i]; irqs[i] = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi); } free(isrc, M_INTRNG); return (err); } int intr_release_msi(device_t pci, device_t child, intptr_t xref, int count, int *irqs) { struct intr_irqsrc **isrc; struct intr_pic *pic; struct intr_map_data_msi *msi; int i, err; pic = pic_lookup(NULL, xref, FLAG_MSI); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK); for (i = 0; i < count; i++) { msi = (struct intr_map_data_msi *) intr_map_get_map_data(irqs[i]); KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI, ("%s: irq %d map data is not MSI", __func__, irqs[i])); isrc[i] = msi->isrc; } MSI_IOMMU_DEINIT(pic->pic_dev, child); err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc); for (i = 0; i < count; i++) { if (isrc[i] != NULL) intr_unmap_irq(irqs[i]); } free(isrc, M_INTRNG); return (err); } int intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq) { struct iommu_domain *domain; struct intr_irqsrc *isrc; struct intr_pic *pic; device_t pdev; struct intr_map_data_msi *msi; int err; pic = pic_lookup(NULL, xref, FLAG_MSI); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); /* * If this is the first time we have used this context ask the * interrupt controller to map memory the msi source will need.
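intr_alloc_msi() above is likewise not called by drivers directly; it sits behind the PCI bus front end, roughly like this (count handling abbreviated; the rid convention follows pci(4)):

int count = 1;
if (pci_alloc_msi(dev, &count) == 0)	/* bus method chain ends in intr_alloc_msi() */
	sc->irq_rid = 1;		/* MSI vectors appear as SYS_RES_IRQ rids from 1 */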
*/ err = MSI_IOMMU_INIT(pic->pic_dev, child, &domain); if (err != 0) return (err); err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc); if (err != 0) return (err); isrc->isrc_iommu = domain; msi = (struct intr_map_data_msi *)intr_alloc_map_data( INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO); msi->isrc = isrc; *irq = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi); return (0); } int intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq) { struct intr_irqsrc *isrc; struct intr_pic *pic; struct intr_map_data_msi *msi; int err; pic = pic_lookup(NULL, xref, FLAG_MSI); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); msi = (struct intr_map_data_msi *) intr_map_get_map_data(irq); KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI, ("%s: irq %d map data is not MSI", __func__, irq)); isrc = msi->isrc; if (isrc == NULL) { intr_unmap_irq(irq); return (EINVAL); } MSI_IOMMU_DEINIT(pic->pic_dev, child); err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc); intr_unmap_irq(irq); return (err); } int intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq, uint64_t *addr, uint32_t *data) { struct intr_irqsrc *isrc; struct intr_pic *pic; int err; pic = pic_lookup(NULL, xref, FLAG_MSI); if (pic == NULL) return (ESRCH); KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI, ("%s: Found a non-MSI controller: %s", __func__, device_get_name(pic->pic_dev))); isrc = intr_map_get_isrc(irq); if (isrc == NULL) return (EINVAL); err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data); #ifdef IOMMU if (isrc->isrc_iommu != NULL) iommu_translate_msi(isrc->isrc_iommu, addr); #endif return (err); } void dosoftints(void); void dosoftints(void) { } #ifdef SMP /* * Init interrupt controller on another CPU. */ void intr_pic_init_secondary(void) { device_t dev; - enum root_type root_type; + uint32_t rootnum; /* * QQQ: Only root PICs are aware of other CPUs ??? */ //mtx_lock(&isrc_table_lock); - for (root_type = 0; root_type < INTR_ROOT_COUNT; root_type++) { - dev = intr_irq_roots[root_type].dev; + for (rootnum = 0; rootnum < INTR_ROOT_NUM; rootnum++) { + dev = intr_irq_roots[rootnum].dev; if (dev != NULL) { - PIC_INIT_SECONDARY(dev, root_type); + PIC_INIT_SECONDARY(dev, rootnum); } } //mtx_unlock(&isrc_table_lock); } #endif #ifdef DDB DB_SHOW_COMMAND_FLAGS(irqs, db_show_irqs, DB_CMD_MEMSAFE) { u_int i, irqsum; u_long num; struct intr_irqsrc *isrc; for (irqsum = 0, i = 0; i < intr_nirq; i++) { isrc = irq_sources[i]; if (isrc == NULL) continue; num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0; db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i, isrc->isrc_name, isrc->isrc_cpu.__bits[0], isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num); irqsum += num; } db_printf("irq total %u\n", irqsum); } #endif /* * Interrupt mapping table functions. * * Please, keep this part separately, it can be transformed to * extension of standard resources. */ struct intr_map_entry { device_t dev; intptr_t xref; struct intr_map_data *map_data; struct intr_irqsrc *isrc; /* XXX TODO DISCONECTED PICs */ /*int flags */ }; /* XXX Convert irq_map[] to dynamicaly expandable one. 
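The addr/data pair produced by intr_map_msi() above is what ultimately gets programmed into the device's MSI registers; a sketch of the consuming side (as in the PCI bus glue, with data truncated to the 16-bit message that pci_enable_msi() takes):

uint64_t addr;
uint32_t data;

if (intr_map_msi(pcib, child, xref, irq, &addr, &data) == 0)
	pci_enable_msi(child, addr, (uint16_t)data);	/* write the MSI capability */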
*/ static struct intr_map_entry **irq_map; static u_int irq_map_count; static u_int irq_map_first_free_idx; static struct mtx irq_map_lock; static struct intr_irqsrc * intr_map_get_isrc(u_int res_id) { struct intr_irqsrc *isrc; isrc = NULL; mtx_lock(&irq_map_lock); if (res_id < irq_map_count && irq_map[res_id] != NULL) isrc = irq_map[res_id]->isrc; mtx_unlock(&irq_map_lock); return (isrc); } static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc) { mtx_lock(&irq_map_lock); if (res_id < irq_map_count && irq_map[res_id] != NULL) irq_map[res_id]->isrc = isrc; mtx_unlock(&irq_map_lock); } /* * Get a copy of intr_map_entry data */ static struct intr_map_data * intr_map_get_map_data(u_int res_id) { struct intr_map_data *data; data = NULL; mtx_lock(&irq_map_lock); if (res_id >= irq_map_count || irq_map[res_id] == NULL) panic("Attempt to copy invalid resource id: %u\n", res_id); data = irq_map[res_id]->map_data; mtx_unlock(&irq_map_lock); return (data); } /* * Get a copy of intr_map_entry data */ static void intr_map_copy_map_data(u_int res_id, device_t *map_dev, intptr_t *map_xref, struct intr_map_data **data) { size_t len; len = 0; mtx_lock(&irq_map_lock); if (res_id >= irq_map_count || irq_map[res_id] == NULL) panic("Attempt to copy invalid resource id: %u\n", res_id); if (irq_map[res_id]->map_data != NULL) len = irq_map[res_id]->map_data->len; mtx_unlock(&irq_map_lock); if (len == 0) *data = NULL; else *data = malloc(len, M_INTRNG, M_WAITOK | M_ZERO); mtx_lock(&irq_map_lock); if (irq_map[res_id] == NULL) panic("Attempt to copy invalid resource id: %u\n", res_id); if (len != 0) { if (len != irq_map[res_id]->map_data->len) panic("Resource id: %u has changed.\n", res_id); memcpy(*data, irq_map[res_id]->map_data, len); } *map_dev = irq_map[res_id]->dev; *map_xref = irq_map[res_id]->xref; mtx_unlock(&irq_map_lock); } /* * Allocate and fill new entry in irq_map table. */ u_int intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data) { u_int i; struct intr_map_entry *entry; /* Prepare new entry first. */ entry = malloc(sizeof(*entry), M_INTRNG, M_WAITOK | M_ZERO); entry->dev = dev; entry->xref = xref; entry->map_data = data; entry->isrc = NULL; mtx_lock(&irq_map_lock); for (i = irq_map_first_free_idx; i < irq_map_count; i++) { if (irq_map[i] == NULL) { irq_map[i] = entry; irq_map_first_free_idx = i + 1; mtx_unlock(&irq_map_lock); return (i); } } for (i = 0; i < irq_map_first_free_idx; i++) { if (irq_map[i] == NULL) { irq_map[i] = entry; irq_map_first_free_idx = i + 1; mtx_unlock(&irq_map_lock); return (i); } } mtx_unlock(&irq_map_lock); /* XXX Expand irq_map table */ panic("IRQ mapping table is full."); } /* * Remove and free mapping entry. */ void intr_unmap_irq(u_int res_id) { struct intr_map_entry *entry; mtx_lock(&irq_map_lock); if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) panic("Attempt to unmap invalid resource id: %u\n", res_id); entry = irq_map[res_id]; irq_map[res_id] = NULL; irq_map_first_free_idx = res_id; mtx_unlock(&irq_map_lock); intr_free_intr_map_data(entry->map_data); free(entry, M_INTRNG); } /* * Clone mapping entry. 
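Putting the table above together, one resource ID lives through the following sequence (a sketch of the flow; the activate/deactivate steps are the functions earlier in this file):

res_id = intr_map_irq(dev, xref, data);	/* allocate slot, stash map data */
/* ... bus_alloc_resource() hands out SYS_RES_IRQ at res_id ... */
intr_activate_irq(dev, res);		/* resolve isrc, PIC_ACTIVATE_INTR() */
intr_deactivate_irq(dev, res);		/* PIC_DEACTIVATE_INTR(), drop isrc */
intr_unmap_irq(res_id);			/* free the slot and the map data copy */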
*/ u_int intr_map_clone_irq(u_int old_res_id) { device_t map_dev; intptr_t map_xref; struct intr_map_data *data; intr_map_copy_map_data(old_res_id, &map_dev, &map_xref, &data); return (intr_map_irq(map_dev, map_xref, data)); } static void intr_map_init(void *dummy __unused) { mtx_init(&irq_map_lock, "intr map table", NULL, MTX_DEF); irq_map_count = 2 * intr_nirq; irq_map = mallocarray(irq_map_count, sizeof(struct intr_map_entry*), M_INTRNG, M_WAITOK | M_ZERO); } SYSINIT(intr_map_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_map_init, NULL); #ifdef SMP /* Virtualization for interrupt source IPI counter increment. */ static inline void intr_ipi_increment_count(u_long *counter, u_int cpu) { KASSERT(cpu < mp_maxid + 1, ("%s: too big cpu %u", __func__, cpu)); counter[cpu]++; } /* * Virtualization for interrupt source IPI counters setup. */ static u_long * intr_ipi_setup_counters(const char *name) { u_int index, i; char str[INTRNAME_LEN]; mtx_lock(&isrc_table_lock); /* * We should never have a problem finding mp_maxid + 1 contiguous * counters, in practice. Interrupts will be allocated sequentially * during boot, so the array should fill from low to high index. Once * reserved, the IPI counters will never be released. Similarly, we * will not need to allocate more IPIs once the system is running. */ bit_ffc_area(intrcnt_bitmap, nintrcnt, mp_maxid + 1, &index); if (index == -1) panic("Failed to allocate %d counters. Array exhausted?", mp_maxid + 1); bit_nset(intrcnt_bitmap, index, index + mp_maxid); for (i = 0; i < mp_maxid + 1; i++) { snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name); intrcnt_setname(str, index + i); } mtx_unlock(&isrc_table_lock); return (&intrcnt[index]); } /* * Lookup IPI source. */ static struct intr_ipi * intr_ipi_lookup(u_int ipi) { if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); return (&ipi_sources[ipi]); } int intr_ipi_pic_register(device_t dev, u_int priority) { if (intr_ipi_dev_frozen) { device_printf(dev, "IPI device already frozen"); return (EBUSY); } if (intr_ipi_dev == NULL || priority > intr_ipi_dev_priority) { intr_ipi_dev_priority = priority; intr_ipi_dev = dev; } return (0); } /* * Setup IPI handler on interrupt controller. * * Not SMP coherent. */ void intr_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, void *arg) { struct intr_irqsrc *isrc; struct intr_ipi *ii; int error; if (!intr_ipi_dev_frozen) { if (intr_ipi_dev == NULL) panic("%s: no IPI PIC attached", __func__); intr_ipi_dev_frozen = true; device_printf(intr_ipi_dev, "using for IPIs\n"); } KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi)); error = PIC_IPI_SETUP(intr_ipi_dev, ipi, &isrc); if (error != 0) return; isrc->isrc_handlers++; ii = intr_ipi_lookup(ipi); KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi)); ii->ii_handler = hand; ii->ii_handler_arg = arg; ii->ii_isrc = isrc; strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN); ii->ii_count = intr_ipi_setup_counters(name); PIC_ENABLE_INTR(intr_ipi_dev, isrc); } void intr_ipi_send(cpuset_t cpus, u_int ipi) { struct intr_ipi *ii; KASSERT(intr_ipi_dev_frozen, ("%s: IPI device not yet frozen", __func__)); ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); /* * XXX: Surely needed on other architectures too? Either way should be * some kind of MI hook defined in an MD header, or the responsibility * of the MD caller if not widespread. 
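intr_ipi_setup() above is called once per IPI from MD SMP bringup; a sketch of the usual registration block (handler names are illustrative, compare the arm ports):

intr_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
intr_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
intr_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);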
*/ #ifdef __aarch64__ /* * Ensure that this CPU's stores will be visible to IPI * recipients before starting to send the interrupts. */ dsb(ishst); #endif PIC_IPI_SEND(intr_ipi_dev, ii->ii_isrc, cpus, ipi); } /* * interrupt controller dispatch function for IPIs. It should * be called straight from the interrupt controller, when associated * interrupt source is learned. Or from anybody who has an interrupt * source mapped. */ void intr_ipi_dispatch(u_int ipi) { struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid)); ii->ii_handler(ii->ii_handler_arg); } #endif diff --git a/sys/riscv/include/intr.h b/sys/riscv/include/intr.h index 100f1ba40ff3..657781efb620 100644 --- a/sys/riscv/include/intr.h +++ b/sys/riscv/include/intr.h @@ -1,63 +1,57 @@ /*- * Copyright (c) 2015-2016 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _MACHINE_INTR_MACHDEP_H_ #define _MACHINE_INTR_MACHDEP_H_ -enum root_type { - INTR_ROOT_IRQ = 0, - - INTR_ROOT_COUNT /* MUST BE LAST */ -}; - #ifndef NIRQ #define NIRQ 1024 #endif enum { IRQ_SOFTWARE_USER, IRQ_SOFTWARE_SUPERVISOR, IRQ_SOFTWARE_HYPERVISOR, IRQ_SOFTWARE_MACHINE, IRQ_TIMER_USER, IRQ_TIMER_SUPERVISOR, IRQ_TIMER_HYPERVISOR, IRQ_TIMER_MACHINE, IRQ_EXTERNAL_USER, IRQ_EXTERNAL_SUPERVISOR, IRQ_EXTERNAL_HYPERVISOR, IRQ_EXTERNAL_MACHINE, }; #endif /* !_MACHINE_INTR_MACHDEP_H_ */ diff --git a/sys/riscv/riscv/intc.c b/sys/riscv/riscv/intc.c index fcfc0c826fb9..248175e8bea3 100644 --- a/sys/riscv/riscv/intc.c +++ b/sys/riscv/riscv/intc.c @@ -1,311 +1,311 @@ /*- * Copyright (c) 2015-2017 Ruslan Bukin * All rights reserved. 
* Copyright (c) 2021 Jessica Clarke * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pic_if.h" #define INTC_NIRQS 16 struct intc_irqsrc { struct intr_irqsrc isrc; u_int irq; }; struct intc_softc { device_t dev; struct intc_irqsrc isrcs[INTC_NIRQS]; }; static int intc_intr(void *arg); static phandle_t intc_ofw_find(device_t dev, uint32_t hartid) { phandle_t node; pcell_t reg; node = OF_finddevice("/cpus"); if (node == -1) { device_printf(dev, "Can't find cpus node\n"); return ((phandle_t)-1); } for (node = OF_child(node); node != 0; node = OF_peer(node)) { if (!ofw_bus_node_status_okay(node)) continue; if (!ofw_bus_node_is_compatible(node, "riscv")) continue; if (OF_searchencprop(node, "reg", ®, sizeof(reg)) == -1) continue; if (reg == hartid) break; } if (node == 0) { device_printf(dev, "Can't find boot cpu node\n"); return ((phandle_t)-1); } for (node = OF_child(node); node != 0; node = OF_peer(node)) { if (!ofw_bus_node_status_okay(node)) continue; if (ofw_bus_node_is_compatible(node, "riscv,cpu-intc")) break; } if (node == 0) { device_printf(dev, "Can't find boot cpu local interrupt controller\n"); return ((phandle_t)-1); } return (node); } static void intc_identify(driver_t *driver, device_t parent) { device_t dev; phandle_t node; if (device_find_child(parent, "intc", -1) != NULL) return; node = intc_ofw_find(parent, PCPU_GET(hart)); if (node == -1) return; dev = simplebus_add_device(parent, node, 0, "intc", -1, NULL); if (dev == NULL) device_printf(parent, "Can't add intc child\n"); } static int intc_probe(device_t dev) { device_set_desc(dev, "RISC-V Local Interrupt Controller"); return (BUS_PROBE_NOWILDCARD); } static int intc_attach(device_t dev) { struct intc_irqsrc *isrcs; struct intc_softc *sc; struct 
intr_pic *pic; const char *name; phandle_t xref; u_int flags; int i, error; sc = device_get_softc(dev); sc->dev = dev; name = device_get_nameunit(dev); xref = OF_xref_from_node(ofw_bus_get_node(dev)); isrcs = sc->isrcs; for (i = 0; i < INTC_NIRQS; i++) { isrcs[i].irq = i; flags = i == IRQ_SOFTWARE_SUPERVISOR ? INTR_ISRCF_IPI : INTR_ISRCF_PPI; error = intr_isrc_register(&isrcs[i].isrc, sc->dev, flags, "%s,%u", name, i); if (error != 0) { device_printf(dev, "Can't register interrupt %d\n", i); return (error); } } pic = intr_pic_register(sc->dev, xref); if (pic == NULL) return (ENXIO); return (intr_pic_claim_root(sc->dev, xref, intc_intr, sc, INTR_ROOT_IRQ)); } static void intc_disable_intr(device_t dev, struct intr_irqsrc *isrc) { u_int irq; irq = ((struct intc_irqsrc *)isrc)->irq; if (irq >= INTC_NIRQS) panic("%s: Unsupported IRQ %u", __func__, irq); csr_clear(sie, 1ul << irq); } static void intc_enable_intr(device_t dev, struct intr_irqsrc *isrc) { u_int irq; irq = ((struct intc_irqsrc *)isrc)->irq; if (irq >= INTC_NIRQS) panic("%s: Unsupported IRQ %u", __func__, irq); csr_set(sie, 1ul << irq); } static int intc_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct intr_map_data_fdt *daf; struct intc_softc *sc; sc = device_get_softc(dev); if (data->type != INTR_MAP_DATA_FDT) return (ENOTSUP); daf = (struct intr_map_data_fdt *)data; if (daf->ncells != 1 || daf->cells[0] >= INTC_NIRQS) return (EINVAL); *isrcp = &sc->isrcs[daf->cells[0]].isrc; return (0); } static int intc_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { if (isrc->isrc_flags & INTR_ISRCF_PPI) CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); return (0); } #ifdef SMP static void -intc_init_secondary(device_t dev, enum root_type root_type) +intc_init_secondary(device_t dev, uint32_t rootnum) { struct intc_softc *sc; struct intr_irqsrc *isrc; u_int cpu, irq; sc = device_get_softc(dev); cpu = PCPU_GET(cpuid); /* Unmask attached interrupts */ for (irq = 0; irq < INTC_NIRQS; irq++) { isrc = &sc->isrcs[irq].isrc; if (intr_isrc_init_on_cpu(isrc, cpu)) intc_enable_intr(dev, isrc); } } #endif static int intc_intr(void *arg) { struct trapframe *frame; struct intc_softc *sc; uint64_t active_irq; struct intc_irqsrc *src; sc = arg; frame = curthread->td_intr_frame; KASSERT((frame->tf_scause & SCAUSE_INTR) != 0, ("%s: not an interrupt frame", __func__)); active_irq = frame->tf_scause & SCAUSE_CODE; if (active_irq >= INTC_NIRQS) return (FILTER_HANDLED); src = &sc->isrcs[active_irq]; if (intr_isrc_dispatch(&src->isrc, frame) != 0) { intc_disable_intr(sc->dev, &src->isrc); device_printf(sc->dev, "Stray irq %lu disabled\n", active_irq); } return (FILTER_HANDLED); } static device_method_t intc_methods[] = { /* Device interface */ DEVMETHOD(device_identify, intc_identify), DEVMETHOD(device_probe, intc_probe), DEVMETHOD(device_attach, intc_attach), /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, intc_disable_intr), DEVMETHOD(pic_enable_intr, intc_enable_intr), DEVMETHOD(pic_map_intr, intc_map_intr), DEVMETHOD(pic_setup_intr, intc_setup_intr), #ifdef SMP DEVMETHOD(pic_init_secondary, intc_init_secondary), #endif DEVMETHOD_END }; DEFINE_CLASS_0(intc, intc_driver, intc_methods, sizeof(struct intc_softc)); EARLY_DRIVER_MODULE(intc, ofwbus, intc_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_FIRST); diff --git a/sys/sys/intr.h b/sys/sys/intr.h index f612fc2744f1..f11e96777927 100644 --- a/sys/sys/intr.h +++ b/sys/sys/intr.h @@ -1,172 +1,174 @@ /*- * 
SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2016 Svatopluk Kraus * Copyright (c) 2015-2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_INTR_H_ #define _SYS_INTR_H_ #ifndef INTRNG #error Need INTRNG for this file #endif #include #include #define INTR_IRQ_INVALID 0xFFFFFFFF +#define INTR_ROOT_IRQ 0 + enum intr_map_data_type { INTR_MAP_DATA_ACPI = 0, INTR_MAP_DATA_FDT, INTR_MAP_DATA_GPIO, INTR_MAP_DATA_MSI, /* Placeholders for platform specific types */ INTR_MAP_DATA_PLAT_1 = 1000, INTR_MAP_DATA_PLAT_2, INTR_MAP_DATA_PLAT_3, INTR_MAP_DATA_PLAT_4, INTR_MAP_DATA_PLAT_5, }; struct intr_map_data { size_t len; enum intr_map_data_type type; }; struct intr_map_data_msi { struct intr_map_data hdr; struct intr_irqsrc *isrc; }; #ifdef notyet #define INTR_SOLO INTR_MD1 typedef int intr_irq_filter_t(void *arg, struct trapframe *tf); #else typedef int intr_irq_filter_t(void *arg); #endif typedef int intr_child_irq_filter_t(void *arg, uintptr_t irq); #define INTR_ISRC_NAMELEN (MAXCOMLEN + 1) #define INTR_ISRCF_IPI 0x01 /* IPI interrupt */ #define INTR_ISRCF_PPI 0x02 /* PPI interrupt */ #define INTR_ISRCF_BOUND 0x04 /* bound to a CPU */ struct intr_pic; /* Interrupt source definition. */ struct intr_irqsrc { device_t isrc_dev; /* where isrc is mapped */ u_int isrc_irq; /* unique identificator */ u_int isrc_flags; char isrc_name[INTR_ISRC_NAMELEN]; cpuset_t isrc_cpu; /* on which CPUs is enabled */ u_int isrc_index; u_long * isrc_count; u_int isrc_handlers; struct intr_event * isrc_event; #ifdef INTR_SOLO intr_irq_filter_t * isrc_filter; void * isrc_arg; #endif /* Used by MSI interrupts to store the iommu details */ void * isrc_iommu; }; /* Intr interface for PIC. */ int intr_isrc_deregister(struct intr_irqsrc *); int intr_isrc_register(struct intr_irqsrc *, device_t, u_int, const char *, ...) 
__printflike(4, 5); #ifdef SMP bool intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu); #endif int intr_isrc_dispatch(struct intr_irqsrc *, struct trapframe *); u_int intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask); struct intr_pic *intr_pic_register(device_t, intptr_t); int intr_pic_deregister(device_t, intptr_t); int intr_pic_claim_root(device_t, intptr_t, intr_irq_filter_t *, void *, - enum root_type); + uint32_t); int intr_pic_add_handler(device_t, struct intr_pic *, intr_child_irq_filter_t *, void *, uintptr_t, uintptr_t); bool intr_is_per_cpu(struct resource *); -device_t intr_irq_root_device(enum root_type); +device_t intr_irq_root_device(uint32_t); /* Intr interface for BUS. */ int intr_activate_irq(device_t, struct resource *); int intr_deactivate_irq(device_t, struct resource *); int intr_setup_irq(device_t, struct resource *, driver_filter_t, driver_intr_t, void *, int, void **); int intr_teardown_irq(device_t, struct resource *, void *); int intr_describe_irq(device_t, struct resource *, void *, const char *); int intr_child_irq_handler(struct intr_pic *, uintptr_t); /* Intr resources mapping. */ struct intr_map_data *intr_alloc_map_data(enum intr_map_data_type, size_t, int); void intr_free_intr_map_data(struct intr_map_data *); u_int intr_map_irq(device_t, intptr_t, struct intr_map_data *); void intr_unmap_irq(u_int ); u_int intr_map_clone_irq(u_int ); /* MSI/MSI-X handling */ int intr_msi_register(device_t, intptr_t); int intr_alloc_msi(device_t, device_t, intptr_t, int, int, int *); int intr_release_msi(device_t, device_t, intptr_t, int, int *); int intr_map_msi(device_t, device_t, intptr_t, int, uint64_t *, uint32_t *); int intr_alloc_msix(device_t, device_t, intptr_t, int *); int intr_release_msix(device_t, device_t, intptr_t, int); #ifdef SMP int intr_bind_irq(device_t, struct resource *, int); void intr_pic_init_secondary(void); #endif extern u_int intr_nirq; /* number of IRQs on intrng platforms */ /* Intr interface for IPIs. */ #ifdef SMP typedef void intr_ipi_handler_t(void *); int intr_ipi_pic_register(device_t dev, u_int priority); void intr_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, void *arg); void intr_ipi_send(cpuset_t cpus, u_int ipi); void intr_ipi_dispatch(u_int ipi); #endif -/* Main interrupt handler called from asm on many archs. */ -void intr_irq_handler(struct trapframe *tf, u_register_t root_type); +/* Main interrupt handler called from asm on most archs except riscv. */ +void intr_irq_handler(struct trapframe *tf, uint32_t rootnum); #endif /* _SYS_INTR_H */
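With INTR_ROOT_IRQ now defined once in <sys/intr.h>, the per-architecture enum root_type (removed from the riscv header above) is no longer needed, and existing single-root code keeps compiling unchanged, e.g. (a sketch):

#include <sys/intr.h>

device_t rootdev = intr_irq_root_device(INTR_ROOT_IRQ);	/* root 0 is still the plain IRQ root */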