Index: head/sys/arm64/arm64/gic_v3.c =================================================================== --- head/sys/arm64/arm64/gic_v3.c (revision 286918) +++ head/sys/arm64/arm64/gic_v3.c (revision 286919) @@ -1,599 +1,719 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include "pic_if.h" #include "gic_v3_reg.h" #include "gic_v3_var.h" /* Device and PIC methods */ static void gic_v3_dispatch(device_t, struct trapframe *); static void gic_v3_eoi(device_t, u_int); static void gic_v3_mask_irq(device_t, u_int); static void gic_v3_unmask_irq(device_t, u_int); +#ifdef SMP +static void gic_v3_init_secondary(device_t); +static void gic_v3_ipi_send(device_t, cpuset_t, u_int); +#endif static device_method_t gic_v3_methods[] = { /* Device interface */ DEVMETHOD(device_detach, gic_v3_detach), /* PIC interface */ DEVMETHOD(pic_dispatch, gic_v3_dispatch), DEVMETHOD(pic_eoi, gic_v3_eoi), DEVMETHOD(pic_mask, gic_v3_mask_irq), DEVMETHOD(pic_unmask, gic_v3_unmask_irq), - +#ifdef SMP + DEVMETHOD(pic_init_secondary, gic_v3_init_secondary), + DEVMETHOD(pic_ipi_send, gic_v3_ipi_send), +#endif /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(gic_v3, gic_v3_driver, gic_v3_methods, sizeof(struct gic_v3_softc)); /* * Driver-specific definitions. */ MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR); /* * Helper functions and definitions. 
*/ /* Destination registers, either Distributor or Re-Distributor */ enum gic_v3_xdist { DIST = 0, REDIST, }; /* Helper routines starting with gic_v3_ */ static int gic_v3_dist_init(struct gic_v3_softc *); +static int gic_v3_redist_alloc(struct gic_v3_softc *); static int gic_v3_redist_find(struct gic_v3_softc *); static int gic_v3_redist_init(struct gic_v3_softc *); static int gic_v3_cpu_init(struct gic_v3_softc *); static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist); /* A sequence of init functions for primary (boot) CPU */ typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *); /* Primary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_primary_init[] = { gic_v3_dist_init, + gic_v3_redist_alloc, gic_v3_redist_init, gic_v3_cpu_init, NULL }; +#ifdef SMP +/* Secondary CPU initialization sequence */ +static gic_v3_initseq_t gic_v3_secondary_init[] = { + gic_v3_redist_init, + gic_v3_cpu_init, + NULL +}; +#endif + /* * Device interface. */ int gic_v3_attach(device_t dev) { struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; uint32_t typer; int rid; int err; size_t i; sc = device_get_softc(dev); sc->gic_registered = FALSE; sc->dev = dev; err = 0; /* Initialize mutex */ mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN); /* * Allocate array of struct resource. * One entry for Distributor and all remaining for Re-Distributor. 
*/ sc->gic_res = malloc( sizeof(sc->gic_res) * (sc->gic_redists.nregions + 1), M_GIC_V3, M_WAITOK); /* Now allocate corresponding resources */ for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) { sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->gic_res[rid] == NULL) return (ENXIO); } /* * Distributor interface */ sc->gic_dist = sc->gic_res[0]; /* * Re-Dristributor interface */ /* Allocate space under region descriptions */ sc->gic_redists.regions = malloc( sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions, M_GIC_V3, M_WAITOK); /* Fill-up bus_space information for each region. */ for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++) sc->gic_redists.regions[i] = sc->gic_res[rid]; /* Get the number of supported SPI interrupts */ typer = gic_d_read(sc, 4, GICD_TYPER); sc->gic_nirqs = GICD_TYPER_I_NUM(typer); if (sc->gic_nirqs > GIC_I_NUM_MAX) sc->gic_nirqs = GIC_I_NUM_MAX; /* Get the number of supported interrupt identifier bits */ sc->gic_idbits = GICD_TYPER_IDBITS(typer); if (bootverbose) { device_printf(dev, "SPIs: %u, IDs: %u\n", sc->gic_nirqs, (1 << sc->gic_idbits) - 1); } /* Train init sequence for boot CPU */ for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) return (err); } /* * Full success. * Now register PIC to the interrupts handling layer. 
*/ arm_register_root_pic(dev, sc->gic_nirqs); sc->gic_registered = TRUE; return (0); } int gic_v3_detach(device_t dev) { struct gic_v3_softc *sc; size_t i; int rid; sc = device_get_softc(dev); if (device_is_attached(dev)) { /* * XXX: We should probably deregister PIC */ if (sc->gic_registered) panic("Trying to detach registered PIC"); } for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]); - for (i = 0; i < MAXCPU; i++) + for (i = 0; i < mp_ncpus; i++) free(sc->gic_redists.pcpu[i], M_GIC_V3); free(sc->gic_res, M_GIC_V3); free(sc->gic_redists.regions, M_GIC_V3); return (0); } /* * PIC interface. */ static void gic_v3_dispatch(device_t dev, struct trapframe *frame) { uint64_t active_irq; while (1) { if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) { /* * Hardware: Cavium ThunderX * Chip revision: Pass 1.0 (early version) * Pass 1.1 (production) * ERRATUM: 22978, 23154 */ __asm __volatile( "nop;nop;nop;nop;nop;nop;nop;nop; \n" "mrs %0, ICC_IAR1_EL1 \n" "nop;nop;nop;nop; \n" "dsb sy \n" : "=&r" (active_irq)); } else { active_irq = gic_icc_read(IAR1); } if (__predict_false(active_irq == ICC_IAR1_EL1_SPUR)) break; if (__predict_true((active_irq >= GIC_FIRST_PPI && active_irq <= GIC_LAST_SPI) || active_irq >= GIC_FIRST_LPI)) { arm_dispatch_intr(active_irq, frame); continue; } if (active_irq <= GIC_LAST_SGI) { - /* - * TODO: Implement proper SGI handling. - * Mask it if such is received for some reason. 
- */ - device_printf(dev, - "Received unsupported interrupt type: SGI\n"); - PIC_MASK(dev, active_irq); + gic_icc_write(EOIR1, (uint64_t)active_irq); + arm_dispatch_intr(active_irq, frame); + continue; } } } static void gic_v3_eoi(device_t dev, u_int irq) { gic_icc_write(EOIR1, (uint64_t)irq); } static void gic_v3_mask_irq(device_t dev, u_int irq) { struct gic_v3_softc *sc; sc = device_get_softc(dev); - if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_PPI) { /* PPIs in corresponding Re-Distributor */ + if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_r_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else if (irq >= GIC_FIRST_LPI) { /* LPIs */ lpi_mask_irq(dev, irq); } else panic("%s: Unsupported IRQ number %u", __func__, irq); } static void gic_v3_unmask_irq(device_t dev, u_int irq) { struct gic_v3_softc *sc; sc = device_get_softc(dev); - if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_PPI) { /* PPIs in corresponding Re-Distributor */ + if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else if (irq >= GIC_FIRST_LPI) { /* LPIs */ lpi_unmask_irq(dev, irq); } else panic("%s: Unsupported IRQ number %u", __func__, irq); } +#ifdef SMP +static void +gic_v3_init_secondary(device_t dev) +{ + struct gic_v3_softc *sc; + gic_v3_initseq_t *init_func; + int err; + + sc = device_get_softc(dev); + + /* Train init sequence for boot CPU */ + for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) { 
+ err = (*init_func)(sc); + if (err != 0) { + device_printf(dev, + "Could not initialize GIC for CPU%u\n", + PCPU_GET(cpuid)); + return; + } + } + + /* + * Try to initialize ITS. + * If there is no driver attached this routine will fail but that + * does not mean failure here as only LPIs will not be functional + * on the current CPU. + */ + if (its_init_cpu(NULL) != 0) { + device_printf(dev, + "Could not initialize ITS for CPU%u. " + "No LPIs will arrive on this CPU\n", + PCPU_GET(cpuid)); + } + + /* + * ARM64TODO: Unmask timer PPIs. To be removed when appropriate + * mechanism is implemented. + * Activate the timer interrupts: virtual (27), secure (29), + * and non-secure (30). Use hardcoded values here as there + * should be no defines for them. + */ + gic_v3_unmask_irq(dev, 27); + gic_v3_unmask_irq(dev, 29); + gic_v3_unmask_irq(dev, 30); +} + +static void +gic_v3_ipi_send(device_t dev, cpuset_t cpuset, u_int ipi) +{ + u_int cpu; + uint64_t aff, tlist; + uint64_t val; + uint64_t aff_mask; + + /* Set affinity mask to match level 3, 2 and 1 */ + aff_mask = CPU_AFF1_MASK | CPU_AFF2_MASK | CPU_AFF3_MASK; + + /* Iterate through all CPUs in set */ + while (!CPU_EMPTY(&cpuset)) { + aff = tlist = 0; + for (cpu = 0; cpu < mp_ncpus; cpu++) { + /* Compose target list for single AFF3:AFF2:AFF1 set */ + if (CPU_ISSET(cpu, &cpuset)) { + if (!tlist) { + /* + * Save affinity of the first CPU to + * send IPI to for later comparison. 
+ */ + aff = CPU_AFFINITY(cpu); + tlist |= (1UL << CPU_AFF0(aff)); + CPU_CLR(cpu, &cpuset); + } + /* Check for same Affinity level 3, 2 and 1 */ + if ((aff & aff_mask) == (CPU_AFFINITY(cpu) & aff_mask)) { + tlist |= (1UL << CPU_AFF0(CPU_AFFINITY(cpu))); + /* Clear CPU in cpuset from target list */ + CPU_CLR(cpu, &cpuset); + } + } + } + if (tlist) { + KASSERT((tlist & ~GICI_SGI_TLIST_MASK) == 0, + ("Target list too long for GICv3 IPI")); + /* Send SGI to CPUs in target list */ + val = tlist; + val |= (uint64_t)CPU_AFF3(aff) << GICI_SGI_AFF3_SHIFT; + val |= (uint64_t)CPU_AFF2(aff) << GICI_SGI_AFF2_SHIFT; + val |= (uint64_t)CPU_AFF1(aff) << GICI_SGI_AFF1_SHIFT; + val |= (uint64_t)(ipi & GICI_SGI_IPI_MASK) << GICI_SGI_IPI_SHIFT; + gic_icc_write(SGI1R, val); + } + } +} +#endif + /* * Helper routines */ static void gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist) { struct resource *res; u_int cpuid; size_t us_left = 1000000; cpuid = PCPU_GET(cpuid); switch (xdist) { case DIST: res = sc->gic_dist; break; case REDIST: res = sc->gic_redists.pcpu[cpuid]; break; default: KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__)); return; } while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) { DELAY(1); if (us_left-- == 0) panic("GICD Register write pending for too long"); } } /* CPU interface. */ static __inline void gic_v3_cpu_priority(uint64_t mask) { /* Set prority mask */ gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK); } static int gic_v3_cpu_enable_sre(struct gic_v3_softc *sc) { uint64_t sre; u_int cpuid; cpuid = PCPU_GET(cpuid); /* * Set the SRE bit to enable access to GIC CPU interface * via system registers. */ sre = READ_SPECIALREG(icc_sre_el1); sre |= ICC_SRE_EL1_SRE; WRITE_SPECIALREG(icc_sre_el1, sre); isb(); /* * Now ensure that the bit is set. */ sre = READ_SPECIALREG(icc_sre_el1); if ((sre & ICC_SRE_EL1_SRE) == 0) { /* We are done. 
This was disabled in EL2 */ device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface " "via system registers\n", cpuid); return (ENXIO); } else if (bootverbose) { device_printf(sc->dev, "CPU%u enabled CPU interface via system registers\n", cpuid); } return (0); } static int gic_v3_cpu_init(struct gic_v3_softc *sc) { int err; /* Enable access to CPU interface via system registers */ err = gic_v3_cpu_enable_sre(sc); if (err != 0) return (err); /* Priority mask to minimum - accept all interrupts */ gic_v3_cpu_priority(GIC_PRIORITY_MIN); /* Disable EOI mode */ gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE); /* Enable group 1 (insecure) interrups */ gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN); return (0); } /* Distributor */ static int gic_v3_dist_init(struct gic_v3_softc *sc) { uint64_t aff; u_int i; /* * 1. Disable the Distributor */ gic_d_write(sc, 4, GICD_CTLR, 0); gic_v3_wait_for_rwp(sc, DIST); /* * 2. Configure the Distributor */ /* Set all global interrupts to be level triggered, active low. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn) gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000); /* Set priority to all shared interrupts */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) { /* Set highest priority */ gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } /* * Disable all interrupts. Leave PPI and SGIs as they are enabled in * Re-Distributor registers. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn) gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF); gic_v3_wait_for_rwp(sc, DIST); /* * 3. Enable Distributor */ /* Enable Distributor with ARE, Group 1 */ gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A | GICD_CTLR_G1); /* * 4. Route all interrupts to boot CPU. 
*/ aff = CPU_AFFINITY(PCPU_GET(cpuid)); for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++) gic_d_write(sc, 4, GICD_IROUTER(i), aff); return (0); } /* Re-Distributor */ static int +gic_v3_redist_alloc(struct gic_v3_softc *sc) +{ + u_int cpuid; + + /* Allocate struct resource for all CPU's Re-Distributor registers */ + for (cpuid = 0; cpuid < mp_ncpus; cpuid++) + if (CPU_ISSET(cpuid, &all_cpus) != 0) + sc->gic_redists.pcpu[cpuid] = + malloc(sizeof(*sc->gic_redists.pcpu[0]), + M_GIC_V3, M_WAITOK); + else + sc->gic_redists.pcpu[cpuid] = NULL; + return (0); +} + +static int gic_v3_redist_find(struct gic_v3_softc *sc) { struct resource r_res; bus_space_handle_t r_bsh; uint64_t aff; uint64_t typer; uint32_t pidr2; u_int cpuid; size_t i; cpuid = PCPU_GET(cpuid); - /* Allocate struct resource for this CPU's Re-Distributor registers */ - sc->gic_redists.pcpu[cpuid] = - malloc(sizeof(*sc->gic_redists.pcpu[0]), M_GIC_V3, M_WAITOK); - aff = CPU_AFFINITY(cpuid); /* Affinity in format for comparison with typer */ aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) | (CPU_AFF1(aff) << 8) | CPU_AFF0(aff); if (bootverbose) { device_printf(sc->dev, "Start searching for Re-Distributor\n"); } /* Iterate through Re-Distributor regions */ for (i = 0; i < sc->gic_redists.nregions; i++) { /* Take a copy of the region's resource */ r_res = *sc->gic_redists.regions[i]; r_bsh = rman_get_bushandle(&r_res); pidr2 = bus_read_4(&r_res, GICR_PIDR2); switch (pidr2 & GICR_PIDR2_ARCH_MASK) { case GICR_PIDR2_ARCH_GICv3: /* fall through */ case GICR_PIDR2_ARCH_GICv4: break; default: device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); - free(sc->gic_redists.pcpu[cpuid], M_GIC_V3); return (ENODEV); } do { typer = bus_read_8(&r_res, GICR_TYPER); if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) { KASSERT(sc->gic_redists.pcpu[cpuid] != NULL, ("Invalid pointer to per-CPU redistributor")); /* Copy res contents to its final destination */ *sc->gic_redists.pcpu[cpuid] = r_res; if (bootverbose) { 
device_printf(sc->dev, "CPU%u Re-Distributor has been found\n", cpuid); } return (0); } r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE); if ((typer & GICR_TYPER_VLPIS) != 0) { r_bsh += (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE); } rman_set_bushandle(&r_res, r_bsh); } while ((typer & GICR_TYPER_LAST) == 0); } - free(sc->gic_redists.pcpu[cpuid], M_GIC_V3); device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENXIO); } static int gic_v3_redist_wake(struct gic_v3_softc *sc) { uint32_t waker; size_t us_left = 1000000; waker = gic_r_read(sc, 4, GICR_WAKER); /* Wake up Re-Distributor for this CPU */ waker &= ~GICR_WAKER_PS; gic_r_write(sc, 4, GICR_WAKER, waker); /* * When clearing ProcessorSleep bit it is required to wait for * ChildrenAsleep to become zero following the processor power-on. */ while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) { DELAY(1); if (us_left-- == 0) { panic("Could not wake Re-Distributor for CPU%u", PCPU_GET(cpuid)); } } if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor woke up\n", PCPU_GET(cpuid)); } return (0); } static int gic_v3_redist_init(struct gic_v3_softc *sc) { int err; size_t i; err = gic_v3_redist_find(sc); if (err != 0) return (err); err = gic_v3_redist_wake(sc); if (err != 0) return (err); /* Disable SPIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0, GICR_I_ENABLER_PPI_MASK); /* Enable SGIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0, GICR_I_ENABLER_SGI_MASK); /* Set priority for SGIs and PPIs */ for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } gic_v3_wait_for_rwp(sc, REDIST); return (0); } Index: head/sys/arm64/arm64/gic_v3_its.c =================================================================== --- head/sys/arm64/arm64/gic_v3_its.c (revision 286918) +++ head/sys/arm64/arm64/gic_v3_its.c (revision 286919) @@ -1,1632 +1,1671 @@ /*- * Copyright (c) 2015 The 
FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include "gic_v3_reg.h" #include "gic_v3_var.h" #include "pic_if.h" /* Device and PIC methods */ static int gic_v3_its_attach(device_t); static device_method_t gic_v3_its_methods[] = { /* Device interface */ DEVMETHOD(device_attach, gic_v3_its_attach), /* * PIC interface */ /* MSI-X */ DEVMETHOD(pic_alloc_msix, gic_v3_its_alloc_msix), DEVMETHOD(pic_map_msix, gic_v3_its_map_msix), /* MSI */ DEVMETHOD(pic_alloc_msi, gic_v3_its_alloc_msi), DEVMETHOD(pic_map_msi, gic_v3_its_map_msix), /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(gic_v3_its, gic_v3_its_driver, gic_v3_its_methods, sizeof(struct gic_v3_its_softc)); MALLOC_DEFINE(M_GIC_V3_ITS, "GICv3 ITS", GIC_V3_ITS_DEVSTR); static int its_alloc_tables(struct gic_v3_its_softc *); static void its_free_tables(struct gic_v3_its_softc *); static void its_init_commandq(struct gic_v3_its_softc *); -static int its_init_cpu(struct gic_v3_its_softc *); static void its_init_cpu_collection(struct gic_v3_its_softc *); static uint32_t its_get_devid(device_t); static int its_cmd_send(struct gic_v3_its_softc *, struct its_cmd_desc *); static void its_cmd_mapc(struct gic_v3_its_softc *, struct its_col *, uint8_t); static void its_cmd_mapvi(struct gic_v3_its_softc *, struct its_dev *, uint32_t, uint32_t); static void its_cmd_mapi(struct gic_v3_its_softc *, struct its_dev *, uint32_t); static void its_cmd_inv(struct gic_v3_its_softc *, struct its_dev *, uint32_t); static void its_cmd_invall(struct gic_v3_its_softc *, struct its_col *); static uint32_t its_get_devbits(device_t); static void lpi_init_conftable(struct gic_v3_its_softc *); static void lpi_bitmap_init(struct gic_v3_its_softc *); -static void lpi_init_cpu(struct gic_v3_its_softc *); static int lpi_config_cpu(struct gic_v3_its_softc *); +static void 
lpi_alloc_cpu_pendtables(struct gic_v3_its_softc *); const char *its_ptab_cache[] = { [GITS_BASER_CACHE_NCNB] = "(NC,NB)", [GITS_BASER_CACHE_NC] = "(NC)", [GITS_BASER_CACHE_RAWT] = "(RA,WT)", [GITS_BASER_CACHE_RAWB] = "(RA,WB)", [GITS_BASER_CACHE_WAWT] = "(WA,WT)", [GITS_BASER_CACHE_WAWB] = "(WA,WB)", [GITS_BASER_CACHE_RAWAWT] = "(RAWA,WT)", [GITS_BASER_CACHE_RAWAWB] = "(RAWA,WB)", }; const char *its_ptab_share[] = { [GITS_BASER_SHARE_NS] = "none", [GITS_BASER_SHARE_IS] = "inner", [GITS_BASER_SHARE_OS] = "outer", [GITS_BASER_SHARE_RES] = "none", }; const char *its_ptab_type[] = { [GITS_BASER_TYPE_UNIMPL] = "Unimplemented", [GITS_BASER_TYPE_DEV] = "Devices", [GITS_BASER_TYPE_VP] = "Virtual Processors", [GITS_BASER_TYPE_PP] = "Physical Processors", [GITS_BASER_TYPE_IC] = "Interrupt Collections", [GITS_BASER_TYPE_RES5] = "Reserved (5)", [GITS_BASER_TYPE_RES6] = "Reserved (6)", [GITS_BASER_TYPE_RES7] = "Reserved (7)", }; /* * Vendor specific quirks. * One needs to add appropriate entry to its_quirks[] * table if the imlementation varies from the generic ARM ITS. */ /* Cavium ThunderX PCI devid acquire function */ static uint32_t its_get_devbits_thunder(device_t); static uint32_t its_get_devid_thunder(device_t); static const struct its_quirks its_quirks[] = { { /* * Hardware: Cavium ThunderX * Chip revision: Pass 1.0, Pass 1.1 */ .cpuid = CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDER, 0, 0), .cpuid_mask = CPU_IMPL_MASK | CPU_PART_MASK, .devid_func = its_get_devid_thunder, .devbits_func = its_get_devbits_thunder, }, }; static struct gic_v3_its_softc *its_sc; #define gic_its_read(sc, len, reg) \ bus_read_##len(&sc->its_res[0], reg) #define gic_its_write(sc, len, reg, val) \ bus_write_##len(&sc->its_res[0], reg, val) static int gic_v3_its_attach(device_t dev) { struct gic_v3_its_softc *sc; uint64_t gits_tmp; uint32_t gits_pidr2; int rid; int ret; sc = device_get_softc(dev); /* * Initialize sleep & spin mutex for ITS */ /* Protects ITS device list and assigned LPIs bitmaps. 
*/ mtx_init(&sc->its_mtx, "ITS sleep lock", NULL, MTX_DEF); /* Protects access to ITS command circular buffer. */ mtx_init(&sc->its_spin_mtx, "ITS spin lock", NULL, MTX_SPIN); rid = 0; sc->its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->its_res == NULL) { device_printf(dev, "Could not allocate memory\n"); return (ENXIO); } sc->dev = dev; gits_pidr2 = gic_its_read(sc, 4, GITS_PIDR2); switch (gits_pidr2 & GITS_PIDR2_ARCH_MASK) { case GITS_PIDR2_ARCH_GICv3: /* fall through */ case GITS_PIDR2_ARCH_GICv4: if (bootverbose) { device_printf(dev, "ITS found. Architecture rev. %u\n", (u_int)(gits_pidr2 & GITS_PIDR2_ARCH_MASK) >> 4); } break; default: device_printf(dev, "No ITS found in the system\n"); gic_v3_its_detach(dev); return (ENODEV); } /* 1. Initialize commands queue */ its_init_commandq(sc); /* 2. Provide memory for any private ITS tables */ ret = its_alloc_tables(sc); if (ret != 0) { gic_v3_its_detach(dev); return (ret); } /* 3. Allocate collections. One per-CPU */ - sc->its_cols = malloc(sizeof(*sc->its_cols) * MAXCPU, - M_GIC_V3_ITS, (M_WAITOK | M_ZERO)); + for (int cpu = 0; cpu < mp_ncpus; cpu++) + if (CPU_ISSET(cpu, &all_cpus) != 0) + sc->its_cols[cpu] = malloc(sizeof(*sc->its_cols[0]), + M_GIC_V3_ITS, (M_WAITOK | M_ZERO)); + else + sc->its_cols[cpu] = NULL; /* 4. Enable ITS in GITS_CTLR */ gits_tmp = gic_its_read(sc, 4, GITS_CTLR); gic_its_write(sc, 4, GITS_CTLR, gits_tmp | GITS_CTLR_EN); /* 5. Initialize LPIs configuration table */ lpi_init_conftable(sc); /* 6. LPIs bitmap init */ lpi_bitmap_init(sc); - /* 7. CPU init */ + /* 7. Allocate pending tables for all CPUs */ + lpi_alloc_cpu_pendtables(sc); + + /* 8. CPU init */ (void)its_init_cpu(sc); - /* 8. Init ITS devices list */ + /* 9. Init ITS devices list */ TAILQ_INIT(&sc->its_dev_list); arm_register_msi_pic(dev); /* * XXX ARM64TODO: We need to have ITS software context * when being called by the interrupt code (mask/unmask). 
* This may be used only when one ITS is present in * the system and eventually should be removed. */ KASSERT(its_sc == NULL, ("Trying to assign its_sc that is already set")); its_sc = sc; return (0); } /* Will not detach but use it for convenience */ int gic_v3_its_detach(device_t dev) { device_t parent; struct gic_v3_softc *gic_sc; struct gic_v3_its_softc *sc; u_int cpuid; int rid = 0; sc = device_get_softc(dev); cpuid = PCPU_GET(cpuid); /* Release what's possible */ /* Command queue */ if ((void *)sc->its_cmdq_base != NULL) { contigfree((void *)sc->its_cmdq_base, ITS_CMDQ_SIZE, M_GIC_V3_ITS); } /* ITTs */ its_free_tables(sc); /* Collections */ - free(sc->its_cols, M_GIC_V3_ITS); + for (cpuid = 0; cpuid < mp_ncpus; cpuid++) + free(sc->its_cols[cpuid], M_GIC_V3_ITS); /* LPI config table */ parent = device_get_parent(sc->dev); gic_sc = device_get_softc(parent); if ((void *)gic_sc->gic_redists.lpis.conf_base != NULL) { contigfree((void *)gic_sc->gic_redists.lpis.conf_base, LPI_CONFTAB_SIZE, M_GIC_V3_ITS); } - if ((void *)gic_sc->gic_redists.lpis.pend_base[cpuid] != NULL) { - contigfree((void *)gic_sc->gic_redists.lpis.pend_base[cpuid], - roundup2(LPI_PENDTAB_SIZE, PAGE_SIZE_64K), M_GIC_V3_ITS); - } + for (cpuid = 0; cpuid < mp_ncpus; cpuid++) + if ((void *)gic_sc->gic_redists.lpis.pend_base[cpuid] != NULL) { + contigfree( + (void *)gic_sc->gic_redists.lpis.pend_base[cpuid], + roundup2(LPI_PENDTAB_SIZE, PAGE_SIZE_64K), + M_GIC_V3_ITS); + } /* Resource... 
*/ bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->its_res); /* XXX ARM64TODO: Reset global pointer to ITS software context */ its_sc = NULL; return (0); } static int its_alloc_tables(struct gic_v3_its_softc *sc) { uint64_t gits_baser, gits_tmp; uint64_t type, esize, cache, share, psz; size_t page_size, npages, nitspages, nidents, tn; size_t its_tbl_size; vm_offset_t ptab_vaddr; vm_paddr_t ptab_paddr; boolean_t first = TRUE; page_size = PAGE_SIZE_64K; for (tn = 0; tn < GITS_BASER_NUM; tn++) { gits_baser = gic_its_read(sc, 8, GITS_BASER(tn)); type = GITS_BASER_TYPE(gits_baser); /* Get the Table Entry size */ esize = GITS_BASER_ESIZE(gits_baser); switch (type) { case GITS_BASER_TYPE_UNIMPL: /* fall through */ case GITS_BASER_TYPE_RES5: case GITS_BASER_TYPE_RES6: case GITS_BASER_TYPE_RES7: continue; case GITS_BASER_TYPE_DEV: nidents = (1 << its_get_devbits(sc->dev)); its_tbl_size = esize * nidents; its_tbl_size = roundup2(its_tbl_size, page_size); npages = howmany(its_tbl_size, PAGE_SIZE); break; default: npages = howmany(page_size, PAGE_SIZE); break; } /* Allocate required space */ ptab_vaddr = (vm_offset_t)contigmalloc(npages * PAGE_SIZE, M_GIC_V3_ITS, (M_WAITOK | M_ZERO), 0, ~0UL, PAGE_SIZE, 0); sc->its_ptabs[tn].ptab_vaddr = ptab_vaddr; sc->its_ptabs[tn].ptab_pgsz = PAGE_SIZE; sc->its_ptabs[tn].ptab_npages = npages; ptab_paddr = vtophys(ptab_vaddr); KASSERT((ptab_paddr & GITS_BASER_PA_MASK) == ptab_paddr, ("%s: Unaligned PA for Interrupt Translation Table", device_get_name(sc->dev))); /* Set defaults: WAWB, IS */ cache = GITS_BASER_CACHE_WAWB; share = GITS_BASER_SHARE_IS; for (;;) { nitspages = howmany(its_tbl_size, page_size); switch (page_size) { case PAGE_SIZE: /* 4KB */ psz = GITS_BASER_PSZ_4K; break; case PAGE_SIZE_16K: /* 16KB */ psz = GITS_BASER_PSZ_4K; break; case PAGE_SIZE_64K: /* 64KB */ psz = GITS_BASER_PSZ_64K; break; default: device_printf(sc->dev, "Unsupported page size: %zuKB\n", (page_size / 1024)); its_free_tables(sc); return (ENXIO); } /* 
Clear fields under modification first */ gits_baser &= ~(GITS_BASER_VALID | GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK | GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK | GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK | GITS_BASER_SIZE_MASK); /* Construct register value */ gits_baser |= (type << GITS_BASER_TYPE_SHIFT) | ((esize - 1) << GITS_BASER_ESIZE_SHIFT) | (cache << GITS_BASER_CACHE_SHIFT) | (share << GITS_BASER_SHARE_SHIFT) | (psz << GITS_BASER_PSZ_SHIFT) | ptab_paddr | (nitspages - 1) | GITS_BASER_VALID; gic_its_write(sc, 8, GITS_BASER(tn), gits_baser); /* * Verify. * Depending on implementation we may encounter * shareability and page size mismatch. */ gits_tmp = gic_its_read(sc, 8, GITS_BASER(tn)); if (((gits_tmp ^ gits_baser) & GITS_BASER_SHARE_MASK) != 0) { share = gits_tmp & GITS_BASER_SHARE_MASK; share >>= GITS_BASER_SHARE_SHIFT; continue; } if (((gits_tmp ^ gits_baser) & GITS_BASER_PSZ_MASK) != 0) { switch (page_size) { case PAGE_SIZE_16K: /* Drop to 4KB page */ page_size = PAGE_SIZE; continue; case PAGE_SIZE_64K: /* Drop to 16KB page */ page_size = PAGE_SIZE_16K; continue; } } /* * All possible adjustments should * be applied by now so just break the loop. */ break; } /* * Do not compare Cacheability field since * it is implementation defined. 
*/ gits_tmp &= ~GITS_BASER_CACHE_MASK; gits_baser &= ~GITS_BASER_CACHE_MASK; if (gits_tmp != gits_baser) { device_printf(sc->dev, "Could not allocate ITS tables\n"); its_free_tables(sc); return (ENXIO); } if (bootverbose) { if (first) { device_printf(sc->dev, "Allocated ITS private tables:\n"); first = FALSE; } device_printf(sc->dev, "\tPTAB%zu for %s: PA 0x%lx," " %lu entries," " cache policy %s, %s shareable," " page size %zuKB\n", tn, its_ptab_type[type], ptab_paddr, (page_size * nitspages) / esize, its_ptab_cache[cache], its_ptab_share[share], page_size / 1024); } } return (0); } static void its_free_tables(struct gic_v3_its_softc *sc) { vm_offset_t ptab_vaddr; size_t size; size_t tn; for (tn = 0; tn < GITS_BASER_NUM; tn++) { ptab_vaddr = sc->its_ptabs[tn].ptab_vaddr; if (ptab_vaddr == 0) continue; size = sc->its_ptabs[tn].ptab_pgsz; size *= sc->its_ptabs[tn].ptab_npages; if ((void *)ptab_vaddr != NULL) contigfree((void *)ptab_vaddr, size, M_GIC_V3_ITS); /* Clear the table description */ memset(&sc->its_ptabs[tn], 0, sizeof(sc->its_ptabs[tn])); } } static void its_init_commandq(struct gic_v3_its_softc *sc) { uint64_t gits_cbaser, gits_tmp; uint64_t cache, share; vm_paddr_t cmdq_paddr; device_t dev; dev = sc->dev; /* Allocate memory for command queue */ sc->its_cmdq_base = contigmalloc(ITS_CMDQ_SIZE, M_GIC_V3_ITS, (M_WAITOK | M_ZERO), 0, ~0UL, ITS_CMDQ_SIZE, 0); /* Set command queue write pointer (command queue empty) */ sc->its_cmdq_write = sc->its_cmdq_base; /* Save command queue pointer and attributes */ cmdq_paddr = vtophys(sc->its_cmdq_base); /* Set defaults: Normal Inner WAWB, IS */ cache = GITS_CBASER_CACHE_NIWAWB; share = GITS_CBASER_SHARE_IS; gits_cbaser = (cmdq_paddr | (cache << GITS_CBASER_CACHE_SHIFT) | (share << GITS_CBASER_SHARE_SHIFT) | /* Number of 4KB pages - 1 */ ((ITS_CMDQ_SIZE / PAGE_SIZE) - 1) | /* Valid bit */ GITS_CBASER_VALID); gic_its_write(sc, 8, GITS_CBASER, gits_cbaser); gits_tmp = gic_its_read(sc, 8, GITS_CBASER); if (((gits_tmp ^ 
gits_cbaser) & GITS_CBASER_SHARE_MASK) != 0) { if (bootverbose) { device_printf(dev, "Will use cache flushing for commands queue\n"); } /* Command queue needs cache flushing */ sc->its_flags |= ITS_FLAGS_CMDQ_FLUSH; } gic_its_write(sc, 8, GITS_CWRITER, 0x0); } -static int +int its_init_cpu(struct gic_v3_its_softc *sc) { device_t parent; struct gic_v3_softc *gic_sc; /* + * NULL in place of the softc pointer means that + * this function was called during GICv3 secondary initialization. + */ + if (sc == NULL) { + if (device_is_attached(its_sc->dev)) { + /* + * XXX ARM64TODO: This is part of the workaround that + * saves ITS software context for further use in + * mask/unmask and here. This should be removed as soon + * as the upper layer is capable of passing the ITS + * context to this function. + */ + sc = its_sc; + } else + return (ENXIO); + } + + /* * Check for LPIs support on this Re-Distributor. */ parent = device_get_parent(sc->dev); gic_sc = device_get_softc(parent); if ((gic_r_read(gic_sc, 4, GICR_TYPER) & GICR_TYPER_PLPIS) == 0) { if (bootverbose) { device_printf(sc->dev, "LPIs not supported on CPU%u\n", PCPU_GET(cpuid)); } return (ENXIO); } - /* Initialize LPIs for this CPU */ - lpi_init_cpu(sc); + /* Configure LPIs for this CPU */ + lpi_config_cpu(sc); /* Initialize collections */ its_init_cpu_collection(sc); return (0); } static void its_init_cpu_collection(struct gic_v3_its_softc *sc) { device_t parent; struct gic_v3_softc *gic_sc; uint64_t typer; uint64_t target; vm_offset_t redist_base; u_int cpuid; cpuid = PCPU_GET(cpuid); parent = device_get_parent(sc->dev); gic_sc = device_get_softc(parent); typer = gic_its_read(sc, 8, GITS_TYPER); if ((typer & GITS_TYPER_PTA) != 0) { redist_base = rman_get_bushandle(gic_sc->gic_redists.pcpu[cpuid]); /* * Target Address correspond to the base physical * address of Re-Distributors. 
*/ target = vtophys(redist_base); } else { /* Target Address correspond to unique processor numbers */ typer = gic_r_read(gic_sc, 8, GICR_TYPER); target = GICR_TYPER_CPUNUM(typer); } - sc->its_cols[cpuid].col_target = target; - sc->its_cols[cpuid].col_id = cpuid; + sc->its_cols[cpuid]->col_target = target; + sc->its_cols[cpuid]->col_id = cpuid; - its_cmd_mapc(sc, &sc->its_cols[cpuid], 1); - its_cmd_invall(sc, &sc->its_cols[cpuid]); + its_cmd_mapc(sc, sc->its_cols[cpuid], 1); + its_cmd_invall(sc, sc->its_cols[cpuid]); + } static void lpi_init_conftable(struct gic_v3_its_softc *sc) { device_t parent; struct gic_v3_softc *gic_sc; vm_offset_t conf_base; uint8_t prio_default; parent = device_get_parent(sc->dev); gic_sc = device_get_softc(parent); /* * LPI Configuration Table settings. * Notice that Configuration Table is shared among all * Re-Distributors, so this is going to be created just once. */ conf_base = (vm_offset_t)contigmalloc(LPI_CONFTAB_SIZE, M_GIC_V3_ITS, (M_WAITOK | M_ZERO), 0, ~0UL, PAGE_SIZE_64K, 0); if (bootverbose) { device_printf(sc->dev, "LPI Configuration Table at PA: 0x%lx\n", vtophys(conf_base)); } /* * Let the default priority be aligned with all other * interrupts assuming that each interrupt is assigned * MAX priority at startup. MAX priority on the other * hand cannot be higher than 0xFC for LPIs. */ prio_default = GIC_PRIORITY_MAX; /* Write each settings byte to LPI configuration table */ memset((void *)conf_base, (prio_default & LPI_CONF_PRIO_MASK) | LPI_CONF_GROUP1, LPI_CONFTAB_SIZE); cpu_dcache_wb_range((vm_offset_t)conf_base, roundup2(LPI_CONFTAB_SIZE, PAGE_SIZE_64K)); gic_sc->gic_redists.lpis.conf_base = conf_base; } static void -lpi_init_cpu(struct gic_v3_its_softc *sc) +lpi_alloc_cpu_pendtables(struct gic_v3_its_softc *sc) { device_t parent; struct gic_v3_softc *gic_sc; vm_offset_t pend_base; u_int cpuid; parent = device_get_parent(sc->dev); gic_sc = device_get_softc(parent); /* * LPI Pending Table settings. 
* This has to be done for each Re-Distributor, hence for each CPU. */ - cpuid = PCPU_GET(cpuid); + for (cpuid = 0; cpuid < mp_ncpus; cpuid++) { - pend_base = (vm_offset_t)contigmalloc( - roundup2(LPI_PENDTAB_SIZE, PAGE_SIZE_64K), M_GIC_V3_ITS, - (M_WAITOK | M_ZERO), 0, ~0UL, PAGE_SIZE_64K, 0); + /* Limit allocation to active CPUs only */ + if (CPU_ISSET(cpuid, &all_cpus) == 0) + continue; - /* Clean D-cache so that ITS can see zeroed pages */ - cpu_dcache_wb_range((vm_offset_t)pend_base, - roundup2(LPI_PENDTAB_SIZE, PAGE_SIZE_64K)); + pend_base = (vm_offset_t)contigmalloc( + roundup2(LPI_PENDTAB_SIZE, PAGE_SIZE_64K), M_GIC_V3_ITS, + (M_WAITOK | M_ZERO), 0, ~0UL, PAGE_SIZE_64K, 0); - if (bootverbose) { - device_printf(sc->dev, - "LPI Pending Table for CPU%u at PA: 0x%lx\n", - cpuid, vtophys(pend_base)); - } + /* Clean D-cache so that ITS can see zeroed pages */ + cpu_dcache_wb_range((vm_offset_t)pend_base, + roundup2(LPI_PENDTAB_SIZE, PAGE_SIZE_64K)); - gic_sc->gic_redists.lpis.pend_base[cpuid] = pend_base; + if (bootverbose) { + device_printf(sc->dev, + "LPI Pending Table for CPU%u at PA: 0x%lx\n", + cpuid, vtophys(pend_base)); + } - lpi_config_cpu(sc); + gic_sc->gic_redists.lpis.pend_base[cpuid] = pend_base; + } + + /* Ensure visibility of pend_base addresses on other CPUs */ + wmb(); } static int lpi_config_cpu(struct gic_v3_its_softc *sc) { device_t parent; struct gic_v3_softc *gic_sc; vm_offset_t conf_base, pend_base; uint64_t gicr_xbaser, gicr_temp; uint64_t cache, share, idbits; uint32_t gicr_ctlr; u_int cpuid; parent = device_get_parent(sc->dev); gic_sc = device_get_softc(parent); cpuid = PCPU_GET(cpuid); + /* Ensure data observability on a current CPU */ + rmb(); + conf_base = gic_sc->gic_redists.lpis.conf_base; pend_base = gic_sc->gic_redists.lpis.pend_base[cpuid]; /* Disable LPIs */ gicr_ctlr = gic_r_read(gic_sc, 4, GICR_CTLR); gicr_ctlr &= ~GICR_CTLR_LPI_ENABLE; gic_r_write(gic_sc, 4, GICR_CTLR, gicr_ctlr); /* Perform full system barrier */ dsb(sy); /* * 
Set GICR_PROPBASER */ /* * Find out how many bits do we need for LPI identifiers. * Remark 1.: Even though we have (LPI_CONFTAB_SIZE / 8) LPIs * the notified LPI ID still starts from 8192 * (GIC_FIRST_LPI). * Remark 2.: This could be done on compilation time but there * seems to be no sufficient macro. */ idbits = flsl(LPI_CONFTAB_SIZE + GIC_FIRST_LPI) - 1; /* Set defaults: Normal Inner WAWB, IS */ cache = GICR_PROPBASER_CACHE_NIWAWB; share = GICR_PROPBASER_SHARE_IS; gicr_xbaser = vtophys(conf_base) | ((idbits - 1) & GICR_PROPBASER_IDBITS_MASK) | (cache << GICR_PROPBASER_CACHE_SHIFT) | (share << GICR_PROPBASER_SHARE_SHIFT); gic_r_write(gic_sc, 8, GICR_PROPBASER, gicr_xbaser); gicr_temp = gic_r_read(gic_sc, 8, GICR_PROPBASER); if (((gicr_xbaser ^ gicr_temp) & GICR_PROPBASER_SHARE_MASK) != 0) { if (bootverbose) { device_printf(sc->dev, "Will use cache flushing for LPI " "Configuration Table\n"); } gic_sc->gic_redists.lpis.flags |= LPI_FLAGS_CONF_FLUSH; } /* * Set GICR_PENDBASER */ /* Set defaults: Normal Inner WAWB, IS */ cache = GICR_PENDBASER_CACHE_NIWAWB; share = GICR_PENDBASER_SHARE_IS; gicr_xbaser = vtophys(pend_base) | (cache << GICR_PENDBASER_CACHE_SHIFT) | (share << GICR_PENDBASER_SHARE_SHIFT); gic_r_write(gic_sc, 8, GICR_PENDBASER, gicr_xbaser); /* Enable LPIs */ gicr_ctlr = gic_r_read(gic_sc, 4, GICR_CTLR); gicr_ctlr |= GICR_CTLR_LPI_ENABLE; gic_r_write(gic_sc, 4, GICR_CTLR, gicr_ctlr); dsb(sy); return (0); } static void lpi_bitmap_init(struct gic_v3_its_softc *sc) { device_t parent; struct gic_v3_softc *gic_sc; uint32_t lpi_id_num; size_t lpi_chunks_num; size_t bits_in_chunk; parent = device_get_parent(sc->dev); gic_sc = device_get_softc(parent); lpi_id_num = (1 << gic_sc->gic_idbits) - 1; /* Substract IDs dedicated for SGIs, PPIs and SPIs */ lpi_id_num -= GIC_FIRST_LPI; sc->its_lpi_maxid = lpi_id_num; bits_in_chunk = sizeof(*sc->its_lpi_bitmap) * NBBY; /* * Round up to the number of bits in chunk. 
* We will need to take care to avoid using invalid LPI IDs later. */ lpi_id_num = roundup2(lpi_id_num, bits_in_chunk); lpi_chunks_num = lpi_id_num / bits_in_chunk; sc->its_lpi_bitmap = contigmalloc((lpi_chunks_num * sizeof(*sc->its_lpi_bitmap)), M_GIC_V3_ITS, (M_WAITOK | M_ZERO), 0, ~0UL, sizeof(*sc->its_lpi_bitmap), 0); } static int lpi_alloc_chunk(struct gic_v3_its_softc *sc, struct lpi_chunk *lpic, u_int nvecs) { int fclr; /* First cleared bit */ uint8_t *bitmap; size_t nb, i; bitmap = (uint8_t *)sc->its_lpi_bitmap; fclr = 0; retry: /* Check other bits - sloooow */ for (i = 0, nb = fclr; i < nvecs; i++, nb++) { if (nb > sc->its_lpi_maxid) return (EINVAL); if (isset(bitmap, nb)) { /* To little free bits in this area. Move on. */ fclr = nb + 1; goto retry; } } /* This area is free. Take it. */ bit_nset(bitmap, fclr, fclr + nvecs - 1); lpic->lpi_base = fclr + GIC_FIRST_LPI; lpic->lpi_num = nvecs; lpic->lpi_free = lpic->lpi_num; return (0); } static void lpi_free_chunk(struct gic_v3_its_softc *sc, struct lpi_chunk *lpic) { int start, end; uint8_t *bitmap; bitmap = (uint8_t *)sc->its_lpi_bitmap; KASSERT((lpic->lpi_free == lpic->lpi_num), ("Trying to free LPI chunk that is still in use.\n")); /* First bit of this chunk in a global bitmap */ start = lpic->lpi_base - GIC_FIRST_LPI; /* and last bit of this chunk... 
*/ end = start + lpic->lpi_num - 1; /* Finally free this chunk */ bit_nclear(bitmap, start, end); } static void lpi_configure(struct gic_v3_its_softc *sc, struct its_dev *its_dev, uint32_t lpinum, boolean_t unmask) { device_t parent; struct gic_v3_softc *gic_sc; uint8_t *conf_byte; parent = device_get_parent(sc->dev); gic_sc = device_get_softc(parent); conf_byte = (uint8_t *)gic_sc->gic_redists.lpis.conf_base; conf_byte += (lpinum - GIC_FIRST_LPI); if (unmask) *conf_byte |= LPI_CONF_ENABLE; else *conf_byte &= ~LPI_CONF_ENABLE; if ((gic_sc->gic_redists.lpis.flags & LPI_FLAGS_CONF_FLUSH) != 0) { /* Clean D-cache under configuration byte */ cpu_dcache_wb_range((vm_offset_t)conf_byte, sizeof(*conf_byte)); } else { /* DSB inner shareable, store */ dsb(ishst); } its_cmd_inv(sc, its_dev, lpinum); } static void lpi_map_to_device(struct gic_v3_its_softc *sc, struct its_dev *its_dev, uint32_t id, uint32_t pid) { if ((pid < its_dev->lpis.lpi_base) || (pid >= (its_dev->lpis.lpi_base + its_dev->lpis.lpi_num))) panic("Trying to map ivalid LPI %u for the device\n", pid); its_cmd_mapvi(sc, its_dev, id, pid); } static void lpi_xmask_irq(device_t parent, uint32_t irq, boolean_t unmask) { struct its_dev *its_dev; TAILQ_FOREACH(its_dev, &its_sc->its_dev_list, entry) { if (irq >= its_dev->lpis.lpi_base && irq < (its_dev->lpis.lpi_base + its_dev->lpis.lpi_num)) { lpi_configure(its_sc, its_dev, irq, unmask); return; } } panic("Trying to %s not existing LPI: %u\n", (unmask == TRUE) ? "unmask" : "mask", irq); } void lpi_unmask_irq(device_t parent, uint32_t irq) { lpi_xmask_irq(parent, irq, 1); } void lpi_mask_irq(device_t parent, uint32_t irq) { lpi_xmask_irq(parent, irq, 0); } /* * Commands handling. 
*/ static __inline void cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type) { /* Command field: DW0 [7:0] */ cmd->cmd_dword[0] &= ~CMD_COMMAND_MASK; cmd->cmd_dword[0] |= cmd_type; } static __inline void cmd_format_devid(struct its_cmd *cmd, uint32_t devid) { /* Device ID field: DW0 [63:32] */ cmd->cmd_dword[0] &= ~CMD_DEVID_MASK; cmd->cmd_dword[0] |= ((uint64_t)devid << CMD_DEVID_SHIFT); } static __inline void cmd_format_size(struct its_cmd *cmd, uint16_t size) { /* Size field: DW1 [4:0] */ cmd->cmd_dword[1] &= ~CMD_SIZE_MASK; cmd->cmd_dword[1] |= (size & CMD_SIZE_MASK); } static __inline void cmd_format_id(struct its_cmd *cmd, uint32_t id) { /* ID field: DW1 [31:0] */ cmd->cmd_dword[1] &= ~CMD_ID_MASK; cmd->cmd_dword[1] |= id; } static __inline void cmd_format_pid(struct its_cmd *cmd, uint32_t pid) { /* Physical ID field: DW1 [63:32] */ cmd->cmd_dword[1] &= ~CMD_PID_MASK; cmd->cmd_dword[1] |= ((uint64_t)pid << CMD_PID_SHIFT); } static __inline void cmd_format_col(struct its_cmd *cmd, uint16_t col_id) { /* Collection field: DW2 [16:0] */ cmd->cmd_dword[2] &= ~CMD_COL_MASK; cmd->cmd_dword[2] |= col_id; } static __inline void cmd_format_target(struct its_cmd *cmd, uint64_t target) { /* Target Address field: DW2 [47:16] */ cmd->cmd_dword[2] &= ~CMD_TARGET_MASK; cmd->cmd_dword[2] |= (target & CMD_TARGET_MASK); } static __inline void cmd_format_itt(struct its_cmd *cmd, uint64_t itt) { /* ITT Address field: DW2 [47:8] */ cmd->cmd_dword[2] &= ~CMD_ITT_MASK; cmd->cmd_dword[2] |= (itt & CMD_ITT_MASK); } static __inline void cmd_format_valid(struct its_cmd *cmd, uint8_t valid) { /* Valid field: DW2 [63] */ cmd->cmd_dword[2] &= ~CMD_VALID_MASK; cmd->cmd_dword[2] |= ((uint64_t)valid << CMD_VALID_SHIFT); } static __inline void cmd_fix_endian(struct its_cmd *cmd) { size_t i; for (i = 0; i < nitems(cmd->cmd_dword); i++) cmd->cmd_dword[i] = htole64(cmd->cmd_dword[i]); } static void its_cmd_mapc(struct gic_v3_its_softc *sc, struct its_col *col, uint8_t valid) { struct 
its_cmd_desc desc; desc.cmd_type = ITS_CMD_MAPC; desc.cmd_desc_mapc.col = col; /* * Valid bit set - map the collection. * Valid bit cleared - unmap the collection. */ desc.cmd_desc_mapc.valid = valid; its_cmd_send(sc, &desc); } static void its_cmd_mapvi(struct gic_v3_its_softc *sc, struct its_dev *its_dev, uint32_t id, uint32_t pid) { struct its_cmd_desc desc; desc.cmd_type = ITS_CMD_MAPVI; desc.cmd_desc_mapvi.its_dev = its_dev; desc.cmd_desc_mapvi.id = id; desc.cmd_desc_mapvi.pid = pid; its_cmd_send(sc, &desc); } static void __unused its_cmd_mapi(struct gic_v3_its_softc *sc, struct its_dev *its_dev, uint32_t lpinum) { struct its_cmd_desc desc; desc.cmd_type = ITS_CMD_MAPI; desc.cmd_desc_mapi.its_dev = its_dev; desc.cmd_desc_mapi.lpinum = lpinum; its_cmd_send(sc, &desc); } static void its_cmd_mapd(struct gic_v3_its_softc *sc, struct its_dev *its_dev, uint8_t valid) { struct its_cmd_desc desc; desc.cmd_type = ITS_CMD_MAPD; desc.cmd_desc_mapd.its_dev = its_dev; desc.cmd_desc_mapd.valid = valid; its_cmd_send(sc, &desc); } static void its_cmd_inv(struct gic_v3_its_softc *sc, struct its_dev *its_dev, uint32_t lpinum) { struct its_cmd_desc desc; desc.cmd_type = ITS_CMD_INV; desc.cmd_desc_inv.lpinum = lpinum - its_dev->lpis.lpi_base; desc.cmd_desc_inv.its_dev = its_dev; its_cmd_send(sc, &desc); } static void its_cmd_invall(struct gic_v3_its_softc *sc, struct its_col *col) { struct its_cmd_desc desc; desc.cmd_type = ITS_CMD_INVALL; desc.cmd_desc_invall.col = col; its_cmd_send(sc, &desc); } /* * Helper routines for commands processing. */ static __inline boolean_t its_cmd_queue_full(struct gic_v3_its_softc *sc) { size_t read_idx, write_idx; write_idx = (size_t)(sc->its_cmdq_write - sc->its_cmdq_base); read_idx = gic_its_read(sc, 4, GITS_CREADR) / sizeof(struct its_cmd); /* * The queue is full when the write offset points * at the command before the current read offset. 
*/ if (((write_idx + 1) % ITS_CMDQ_NENTRIES) == read_idx) return (TRUE); return (FALSE); } static __inline void its_cmd_sync(struct gic_v3_its_softc *sc, struct its_cmd *cmd) { if ((sc->its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) { /* Clean D-cache under command. */ cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd)); } else { /* DSB inner shareable, store */ dsb(ishst); } } static struct its_cmd * its_cmd_alloc_locked(struct gic_v3_its_softc *sc) { struct its_cmd *cmd; size_t us_left; /* * XXX ARM64TODO: This is obviously a significant delay. * The reason for that is that currently the time frames for * the command to complete (and therefore free the descriptor) * are not known. */ us_left = 1000000; mtx_assert(&sc->its_spin_mtx, MA_OWNED); while (its_cmd_queue_full(sc)) { if (us_left-- == 0) { /* Timeout while waiting for free command */ device_printf(sc->dev, "Timeout while waiting for free command\n"); return (NULL); } DELAY(1); } cmd = sc->its_cmdq_write; sc->its_cmdq_write++; if (sc->its_cmdq_write == (sc->its_cmdq_base + ITS_CMDQ_NENTRIES)) { /* Wrap the queue */ sc->its_cmdq_write = sc->its_cmdq_base; } return (cmd); } static uint64_t its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc) { uint64_t target; uint8_t cmd_type; u_int size; boolean_t error; error = FALSE; cmd_type = desc->cmd_type; target = ITS_TARGET_NONE; switch (cmd_type) { case ITS_CMD_SYNC: /* Wait for previous commands completion */ target = desc->cmd_desc_sync.col->col_target; cmd_format_command(cmd, ITS_CMD_SYNC); cmd_format_target(cmd, target); break; case ITS_CMD_MAPD: /* Assign ITT to device */ target = desc->cmd_desc_mapd.its_dev->col->col_target; cmd_format_command(cmd, ITS_CMD_MAPD); cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt)); /* * Size describes number of bits to encode interrupt IDs * supported by the device minus one. * When V (valid) bit is zero, this field should be written * as zero. 
*/ if (desc->cmd_desc_mapd.valid != 0) { size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num); size = MAX(1, size) - 1; } else size = 0; cmd_format_size(cmd, size); cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid); cmd_format_valid(cmd, desc->cmd_desc_mapd.valid); break; case ITS_CMD_MAPC: /* Map collection to Re-Distributor */ target = desc->cmd_desc_mapc.col->col_target; cmd_format_command(cmd, ITS_CMD_MAPC); cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id); cmd_format_valid(cmd, desc->cmd_desc_mapc.valid); cmd_format_target(cmd, target); break; case ITS_CMD_MAPVI: target = desc->cmd_desc_mapvi.its_dev->col->col_target; cmd_format_command(cmd, ITS_CMD_MAPVI); cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid); cmd_format_id(cmd, desc->cmd_desc_mapvi.id); cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid); cmd_format_col(cmd, desc->cmd_desc_mapvi.its_dev->col->col_id); break; case ITS_CMD_MAPI: target = desc->cmd_desc_mapi.its_dev->col->col_target; cmd_format_command(cmd, ITS_CMD_MAPI); cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid); cmd_format_id(cmd, desc->cmd_desc_mapi.lpinum); cmd_format_col(cmd, desc->cmd_desc_mapi.its_dev->col->col_id); break; case ITS_CMD_INV: target = desc->cmd_desc_inv.its_dev->col->col_target; cmd_format_command(cmd, ITS_CMD_INV); cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid); cmd_format_id(cmd, desc->cmd_desc_inv.lpinum); break; case ITS_CMD_INVALL: cmd_format_command(cmd, ITS_CMD_INVALL); cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id); break; default: error = TRUE; break; } if (!error) cmd_fix_endian(cmd); return (target); } static __inline uint64_t its_cmd_cwriter_offset(struct gic_v3_its_softc *sc, struct its_cmd *cmd) { uint64_t off; off = (cmd - sc->its_cmdq_base) * sizeof(*cmd); return (off); } static void its_cmd_wait_completion(struct gic_v3_its_softc *sc, struct its_cmd *cmd_first, struct its_cmd *cmd_last) { uint64_t first, last, read; size_t us_left; /* * XXX ARM64TODO: This is 
obviously a significant delay. * The reason for that is that currently the time frames for * the command to complete are not known. */ us_left = 1000000; first = its_cmd_cwriter_offset(sc, cmd_first); last = its_cmd_cwriter_offset(sc, cmd_last); for (;;) { read = gic_its_read(sc, 8, GITS_CREADR); if (read < first || read >= last) break; if (us_left-- == 0) { /* This means timeout */ device_printf(sc->dev, "Timeout while waiting for CMD completion.\n"); return; } DELAY(1); } } static int its_cmd_send(struct gic_v3_its_softc *sc, struct its_cmd_desc *desc) { struct its_cmd *cmd, *cmd_sync; struct its_col col_sync; struct its_cmd_desc desc_sync; uint64_t target, cwriter; mtx_lock_spin(&sc->its_spin_mtx); cmd = its_cmd_alloc_locked(sc); mtx_unlock_spin(&sc->its_spin_mtx); if (cmd == NULL) { device_printf(sc->dev, "could not allocate ITS command\n"); return (EBUSY); } target = its_cmd_prepare(cmd, desc); its_cmd_sync(sc, cmd); if (target != ITS_TARGET_NONE) { mtx_lock_spin(&sc->its_spin_mtx); cmd_sync = its_cmd_alloc_locked(sc); mtx_unlock_spin(&sc->its_spin_mtx); if (cmd_sync == NULL) goto end; desc_sync.cmd_type = ITS_CMD_SYNC; col_sync.col_target = target; desc_sync.cmd_desc_sync.col = &col_sync; its_cmd_prepare(cmd_sync, &desc_sync); its_cmd_sync(sc, cmd_sync); } end: /* Update GITS_CWRITER */ mtx_lock_spin(&sc->its_spin_mtx); cwriter = its_cmd_cwriter_offset(sc, sc->its_cmdq_write); gic_its_write(sc, 8, GITS_CWRITER, cwriter); mtx_unlock_spin(&sc->its_spin_mtx); its_cmd_wait_completion(sc, cmd, sc->its_cmdq_write); return (0); } static struct its_dev * its_device_find_locked(struct gic_v3_its_softc *sc, device_t pci_dev) { struct its_dev *its_dev; mtx_assert(&sc->its_mtx, MA_OWNED); /* Find existing device if any */ TAILQ_FOREACH(its_dev, &sc->its_dev_list, entry) { if (its_dev->pci_dev == pci_dev) return (its_dev); } return (NULL); } static struct its_dev * its_device_alloc_locked(struct gic_v3_its_softc *sc, device_t pci_dev, u_int nvecs) { struct its_dev 
*newdev; uint64_t typer; uint32_t devid; u_int cpuid; size_t esize; mtx_assert(&sc->its_mtx, MA_OWNED); /* Find existing device if any */ newdev = its_device_find_locked(sc, pci_dev); if (newdev != NULL) return (newdev); devid = its_get_devid(pci_dev); /* There was no previously created device. Create one now */ newdev = malloc(sizeof(*newdev), M_GIC_V3_ITS, (M_NOWAIT | M_ZERO)); if (newdev == NULL) return (NULL); newdev->pci_dev = pci_dev; newdev->devid = devid; if (lpi_alloc_chunk(sc, &newdev->lpis, nvecs) != 0) { free(newdev, M_GIC_V3_ITS); return (NULL); } /* Get ITT entry size */ typer = gic_its_read(sc, 8, GITS_TYPER); esize = GITS_TYPER_ITTES(typer); /* * Allocate ITT for this device. * PA has to be 256 B aligned. At least two entries for device. */ newdev->itt = (vm_offset_t)contigmalloc( roundup2(roundup2(nvecs, 2) * esize, 0x100), M_GIC_V3_ITS, (M_NOWAIT | M_ZERO), 0, ~0UL, 0x100, 0); if (newdev->itt == 0) { lpi_free_chunk(sc, &newdev->lpis); free(newdev, M_GIC_V3_ITS); return (NULL); } /* * XXX ARM64TODO: Currently all interrupts are going * to be bound to the CPU that performs the configuration. */ cpuid = PCPU_GET(cpuid); - newdev->col = &sc->its_cols[cpuid]; + newdev->col = sc->its_cols[cpuid]; TAILQ_INSERT_TAIL(&sc->its_dev_list, newdev, entry); /* Map device to its ITT */ its_cmd_mapd(sc, newdev, 1); return (newdev); } static __inline void its_device_asign_lpi_locked(struct gic_v3_its_softc *sc, struct its_dev *its_dev, u_int *irq) { mtx_assert(&sc->its_mtx, MA_OWNED); if (its_dev->lpis.lpi_free == 0) { panic("Requesting more LPIs than allocated for this device. " "LPI num: %u, free %u", its_dev->lpis.lpi_num, its_dev->lpis.lpi_free); } *irq = its_dev->lpis.lpi_base + (its_dev->lpis.lpi_num - its_dev->lpis.lpi_free); its_dev->lpis.lpi_free--; } /* * ITS quirks. * Add vendor specific PCI devid function here. 
*/ static uint32_t its_get_devid_thunder(device_t pci_dev) { int bsf; int pem; uint32_t bus; bus = pci_get_bus(pci_dev); bsf = PCI_RID(pci_get_bus(pci_dev), pci_get_slot(pci_dev), pci_get_function(pci_dev)); /* ECAM is on bus=0 */ if (bus == 0) { return ((pci_get_domain(pci_dev) << PCI_RID_DOMAIN_SHIFT) | bsf); /* PEM otherwise */ } else { /* PEM (PCIe MAC/root complex) number is equal to domain */ pem = pci_get_domain(pci_dev); /* * Set appropriate device ID (passed by the HW along with * the transaction to memory) for different root complex * numbers using hard-coded domain portion for each group. */ if (pem < 3) return ((0x1 << PCI_RID_DOMAIN_SHIFT) | bsf); if (pem < 6) return ((0x3 << PCI_RID_DOMAIN_SHIFT) | bsf); if (pem < 9) return ((0x9 << PCI_RID_DOMAIN_SHIFT) | bsf); if (pem < 12) return ((0xB << PCI_RID_DOMAIN_SHIFT) | bsf); } return (0); } static uint32_t its_get_devbits_thunder(device_t dev) { uint32_t devid_bits; /* * GITS_TYPER[17:13] of ThunderX reports that device IDs * are to be 21 bits in length. * The entry size of the ITS table can be read from GITS_BASERn[52:48] * and on ThunderX is supposed to be 8 bytes in length (for device * table). Finally the page size that is to be used by ITS to access * this table will be set to 64KB. * * This gives 0x200000 entries of size 0x8 bytes covered by 256 pages * each of which 64KB in size. The number of pages (minus 1) should * then be written to GITS_BASERn[7:0]. In that case this value would * be 0xFF but on ThunderX the maximum value that HW accepts is 0xFD. * * Set arbitrary number of device ID bits to 20 in order to limit * the number of entries in ITS device table to 0x100000 and hence * the table size to 8MB. 
*/ devid_bits = 20; if (bootverbose) { device_printf(dev, "Limiting number of Device ID bits implemented to %d\n", devid_bits); } return (devid_bits); } static __inline uint32_t its_get_devbits_default(device_t dev) { uint64_t gits_typer; struct gic_v3_its_softc *sc; sc = device_get_softc(dev); gits_typer = gic_its_read(sc, 8, GITS_TYPER); return (GITS_TYPER_DEVB(gits_typer)); } static uint32_t its_get_devbits(device_t dev) { const struct its_quirks *quirk; size_t i; for (i = 0; i < nitems(its_quirks); i++) { quirk = &its_quirks[i]; if (CPU_MATCH_RAW(quirk->cpuid_mask, quirk->cpuid)) { if (quirk->devbits_func != NULL) return ((*quirk->devbits_func)(dev)); } } return (its_get_devbits_default(dev)); } static __inline uint32_t its_get_devid_default(device_t pci_dev) { return (PCI_DEVID_GENERIC(pci_dev)); } static uint32_t its_get_devid(device_t pci_dev) { const struct its_quirks *quirk; size_t i; for (i = 0; i < nitems(its_quirks); i++) { quirk = &its_quirks[i]; if (CPU_MATCH_RAW(quirk->cpuid_mask, quirk->cpuid)) { if (quirk->devid_func != NULL) return ((*quirk->devid_func)(pci_dev)); } } return (its_get_devid_default(pci_dev)); } /* * Message signalled interrupts handling. */ /* * XXX ARM64TODO: Watch out for "irq" type. * * In theory GIC can handle up to (2^32 - 1) interrupt IDs whereas * we pass "irq" pointer of type integer. This is obviously wrong but * is determined by the way as PCI layer wants it to be done. */ int gic_v3_its_alloc_msix(device_t dev, device_t pci_dev, int *irq) { struct gic_v3_its_softc *sc; struct its_dev *its_dev; u_int nvecs; sc = device_get_softc(dev); mtx_lock(&sc->its_mtx); nvecs = PCI_MSIX_NUM(pci_dev); /* * Allocate device as seen by ITS if not already available. * Notice that MSI-X interrupts are allocated on one-by-one basis. 
*/ its_dev = its_device_alloc_locked(sc, pci_dev, nvecs); if (its_dev == NULL) { mtx_unlock(&sc->its_mtx); return (ENOMEM); } its_device_asign_lpi_locked(sc, its_dev, irq); mtx_unlock(&sc->its_mtx); return (0); } int gic_v3_its_alloc_msi(device_t dev, device_t pci_dev, int count, int *irqs) { struct gic_v3_its_softc *sc; struct its_dev *its_dev; sc = device_get_softc(dev); /* Allocate device as seen by ITS if not already available. */ mtx_lock(&sc->its_mtx); its_dev = its_device_alloc_locked(sc, pci_dev, count); if (its_dev == NULL) { mtx_unlock(&sc->its_mtx); return (ENOMEM); } for (; count > 0; count--) { its_device_asign_lpi_locked(sc, its_dev, irqs); irqs++; } mtx_unlock(&sc->its_mtx); return (0); } int gic_v3_its_map_msix(device_t dev, device_t pci_dev, int irq, uint64_t *addr, uint32_t *data) { struct gic_v3_its_softc *sc; bus_space_handle_t its_bsh; struct its_dev *its_dev; uint64_t its_pa; uint32_t id; sc = device_get_softc(dev); /* Verify that this device is allocated and owns this LPI */ mtx_lock(&sc->its_mtx); its_dev = its_device_find_locked(sc, pci_dev); mtx_unlock(&sc->its_mtx); if (its_dev == NULL) return (EINVAL); id = irq - its_dev->lpis.lpi_base; lpi_map_to_device(sc, its_dev, id, irq); its_bsh = rman_get_bushandle(&sc->its_res[0]); its_pa = vtophys(its_bsh); *addr = (its_pa + GITS_TRANSLATER); *data = id; return (0); } Index: head/sys/arm64/arm64/gic_v3_reg.h =================================================================== --- head/sys/arm64/arm64/gic_v3_reg.h (revision 286918) +++ head/sys/arm64/arm64/gic_v3_reg.h (revision 286919) @@ -1,413 +1,419 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _GIC_V3_REG_H_ #define _GIC_V3_REG_H_ /* * Maximum number of interrupts * supported by GIC (including SGIs, PPIs and SPIs) */ #define GIC_I_NUM_MAX (1020) /* * Priority MAX/MIN values */ #define GIC_PRIORITY_MAX (0x00UL) /* Upper value is determined by LPI max priority */ #define GIC_PRIORITY_MIN (0xFCUL) /* Numbers for software generated interrupts */ #define GIC_FIRST_SGI (0) #define GIC_LAST_SGI (15) /* Numbers for private peripheral interrupts */ #define GIC_FIRST_PPI (16) #define GIC_LAST_PPI (31) /* Numbers for spared peripheral interrupts */ #define GIC_FIRST_SPI (32) #define GIC_LAST_SPI (1019) /* Numbers for local peripheral interrupts */ #define GIC_FIRST_LPI (8192) /* * Registers (v2/v3) */ #define GICD_CTLR (0x0000) #define GICD_CTLR_G1 (1 << 0) #define GICD_CTLR_G1A (1 << 1) #define GICD_CTLR_ARE_NS (1 << 4) #define GICD_CTLR_RWP (1 << 31) #define GICD_TYPER (0x0004) #define GICD_TYPER_IDBITS(n) ((((n) >> 19) & 0x1F) + 1) #define GICD_TYPER_I_NUM(n) ((((n) & 0xF1) + 1) * 32) #define GICD_ISENABLER(n) (0x0100 + (((n) >> 5) * 4)) #define GICD_I_PER_ISENABLERn (32) #define GICD_ICENABLER(n) (0x0180 + (((n) >> 5) * 4)) #define GICD_IPRIORITYR(n) (0x0400 + (((n) >> 2) * 4)) #define GICD_I_PER_IPRIORITYn (4) #define GICD_I_MASK(n) (1 << ((n) % 32)) #define GICD_ICFGR(n) (0x0C00 + (((n) >> 4) * 4)) /* First bit is a polarity bit (0 - low, 1 - high) */ #define GICD_ICFGR_POL_LOW (0 << 0) #define GICD_ICFGR_POL_HIGH (1 << 0) #define GICD_ICFGR_POL_MASK (0x1) /* Second bit is a trigger bit (0 - level, 1 - edge) */ #define GICD_ICFGR_TRIG_LVL (0 << 1) #define GICD_ICFGR_TRIG_EDGE (1 << 1) #define GICD_ICFGR_TRIG_MASK (0x2) #define GICD_I_PER_ICFGRn (16) /* * Registers (v3) */ #define GICD_IROUTER(n) (0x6000 + ((n) * 8)) #define GICD_PIDR2 (0xFFE8) #define GICR_PIDR2_ARCH_MASK (0xF0) #define GICR_PIDR2_ARCH_GICv3 (0x30) #define GICR_PIDR2_ARCH_GICv4 (0x40) /* Redistributor registers */ #define GICR_CTLR GICD_CTLR #define 
GICR_CTLR_LPI_ENABLE (1 << 0) #define GICR_PIDR2 GICD_PIDR2 #define GICR_TYPER (0x0008) #define GICR_TYPER_PLPIS (1 << 0) #define GICR_TYPER_VLPIS (1 << 1) #define GICR_TYPER_LAST (1 << 4) #define GICR_TYPER_CPUNUM_SHIFT (8) #define GICR_TYPER_CPUNUM_MASK (0xFFFUL << GICR_TYPER_CPUNUM_SHIFT) #define GICR_TYPER_CPUNUM(x) \ (((x) & GICR_TYPER_CPUNUM_MASK) >> GICR_TYPER_CPUNUM_SHIFT) #define GICR_TYPER_AFF_SHIFT (32) #define GICR_WAKER (0x0014) #define GICR_WAKER_PS (1 << 1) /* Processor sleep */ #define GICR_WAKER_CA (1 << 2) /* Children asleep */ #define GICR_PROPBASER (0x0070) #define GICR_PROPBASER_IDBITS_MASK 0x1FUL /* * Cacheability * 0x0 - Device-nGnRnE * 0x1 - Normal Inner Non-cacheable * 0x2 - Normal Inner Read-allocate, Write-through * 0x3 - Normal Inner Read-allocate, Write-back * 0x4 - Normal Inner Write-allocate, Write-through * 0x5 - Normal Inner Write-allocate, Write-back * 0x6 - Normal Inner Read-allocate, Write-allocate, Write-through * 0x7 - Normal Inner Read-allocate, Write-allocate, Write-back */ #define GICR_PROPBASER_CACHE_SHIFT 7 #define GICR_PROPBASER_CACHE_DnGnRnE 0x0UL #define GICR_PROPBASER_CACHE_NIN 0x1UL #define GICR_PROPBASER_CACHE_NIRAWT 0x2UL #define GICR_PROPBASER_CACHE_NIRAWB 0x3UL #define GICR_PROPBASER_CACHE_NIWAWT 0x4UL #define GICR_PROPBASER_CACHE_NIWAWB 0x5UL #define GICR_PROPBASER_CACHE_NIRAWAWT 0x6UL #define GICR_PROPBASER_CACHE_NIRAWAWB 0x7UL /* * Shareability * 0x0 - Non-shareable * 0x1 - Inner-shareable * 0x2 - Outer-shareable * 0x3 - Reserved. 
Treated as 0x0 */
Threated as 0x0 */ #define GICR_PENDBASER_SHARE_SHIFT 10 #define GICR_PENDBASER_SHARE_NS 0x0UL #define GICR_PENDBASER_SHARE_IS 0x1UL #define GICR_PENDBASER_SHARE_OS 0x2UL #define GICR_PENDBASER_SHARE_RES 0x3UL #define GICR_PENDBASER_SHARE_MASK \ (0x3UL << GICR_PENDBASER_SHARE_SHIFT) /* Re-distributor registers for SGIs and PPIs */ #define GICR_RD_BASE_SIZE PAGE_SIZE_64K #define GICR_SGI_BASE_SIZE PAGE_SIZE_64K #define GICR_VLPI_BASE_SIZE PAGE_SIZE_64K #define GICR_RESERVED_SIZE PAGE_SIZE_64K #define GICR_ISENABLER0 (0x0100) #define GICR_ICENABLER0 (0x0180) #define GICR_I_ENABLER_SGI_MASK (0x0000FFFF) #define GICR_I_ENABLER_PPI_MASK (0xFFFF0000) #define GICR_I_PER_IPRIORITYn (GICD_I_PER_IPRIORITYn) /* ITS registers */ #define GITS_PIDR2 GICR_PIDR2 #define GITS_PIDR2_ARCH_MASK GICR_PIDR2_ARCH_MASK #define GITS_PIDR2_ARCH_GICv3 GICR_PIDR2_ARCH_GICv3 #define GITS_PIDR2_ARCH_GICv4 GICR_PIDR2_ARCH_GICv4 #define GITS_CTLR (0x0000) #define GITS_CTLR_EN (1 << 0) #define GITS_CBASER (0x0080) #define GITS_CBASER_VALID (1UL << 63) /* * Cacheability * 0x0 - Device-nGnRnE * 0x1 - Normal Inner Non-cacheable * 0x2 - Normal Inner Read-allocate, Write-through * 0x3 - Normal Inner Read-allocate, Write-back * 0x4 - Normal Inner Write-allocate, Write-through * 0x5 - Normal Inner Write-allocate, Write-back * 0x6 - Normal Inner Read-allocate, Write-allocate, Write-through * 0x7 - Normal Inner Read-allocate, Write-allocate, Write-back */ #define GITS_CBASER_CACHE_SHIFT 59 #define GITS_CBASER_CACHE_DnGnRnE 0x0UL #define GITS_CBASER_CACHE_NIN 0x1UL #define GITS_CBASER_CACHE_NIRAWT 0x2UL #define GITS_CBASER_CACHE_NIRAWB 0x3UL #define GITS_CBASER_CACHE_NIWAWT 0x4UL #define GITS_CBASER_CACHE_NIWAWB 0x5UL #define GITS_CBASER_CACHE_NIRAWAWT 0x6UL #define GITS_CBASER_CACHE_NIRAWAWB 0x7UL #define GITS_CBASER_CACHE_MASK (0x7UL << GITS_CBASER_TYPE_SHIFT) /* * Shareability * 0x0 - Non-shareable * 0x1 - Inner-shareable * 0x2 - Outer-shareable * 0x3 - Reserved. 
Treated as 0x0 */
((((x) & GITS_BASER_ESIZE_MASK) >> GITS_BASER_ESIZE_SHIFT) + 1) #define GITS_BASER_PA_SHIFT 12 #define GITS_BASER_PA_MASK (0xFFFFFFFFFUL << GITS_BASER_PA_SHIFT) /* * Shareability * 0x0 - Non-shareable * 0x1 - Inner-shareable * 0x2 - Outer-shareable * 0x3 - Reserved. Threated as 0x0 */ #define GITS_BASER_SHARE_SHIFT 10 #define GITS_BASER_SHARE_NS 0x0UL #define GITS_BASER_SHARE_IS 0x1UL #define GITS_BASER_SHARE_OS 0x2UL #define GITS_BASER_SHARE_RES 0x3UL #define GITS_BASER_SHARE_MASK (0x3UL << GITS_BASER_SHARE_SHIFT) #define GITS_BASER_PSZ_SHIFT 8 #define GITS_BASER_PSZ_4K 0x0UL #define GITS_BASER_PSZ_16K 0x1UL #define GITS_BASER_PSZ_64K 0x2UL #define GITS_BASER_PSZ_MASK (0x3UL << GITS_BASER_PSZ_SHIFT) #define GITS_BASER_SIZE_MASK 0xFFUL #define GITS_BASER_NUM 8 #define GITS_TYPER (0x0008) #define GITS_TYPER_PTA (1UL << 19) #define GITS_TYPER_DEVB_SHIFT 13 #define GITS_TYPER_DEVB_MASK (0x1FUL << GITS_TYPER_DEVB_SHIFT) /* Number of device identifiers implemented */ #define GITS_TYPER_DEVB(x) \ ((((x) & GITS_TYPER_DEVB_MASK) >> GITS_TYPER_DEVB_SHIFT) + 1) #define GITS_TYPER_ITTES_SHIFT 4 #define GITS_TYPER_ITTES_MASK (0xFUL << GITS_TYPER_ITTES_SHIFT) /* Number of bytes per ITT Entry */ #define GITS_TYPER_ITTES(x) \ ((((x) & GITS_TYPER_ITTES_MASK) >> GITS_TYPER_ITTES_SHIFT) + 1) #define GITS_TRANSLATER (0x10040) /* * LPI related */ #define LPI_CONF_PRIO_MASK (0xFC) #define LPI_CONF_GROUP1 (1 << 1) #define LPI_CONF_ENABLE (1 << 0) /* * CPU interface */ +#define GICI_SGI_TLIST_MASK (0xffffUL) +#define GICI_SGI_AFF1_SHIFT (16UL) +#define GICI_SGI_AFF2_SHIFT (32UL) +#define GICI_SGI_AFF3_SHIFT (48UL) +#define GICI_SGI_IPI_MASK (0xfUL) +#define GICI_SGI_IPI_SHIFT (24UL) /* * Registers list (ICC_xyz_EL1): * * PMR - Priority Mask Register * * interrupts of priority higher than specified * in this mask will be signalled to the CPU. * (0xff - lowest possible prio., 0x00 - highest prio.) 
* * CTLR - Control Register * * controls behavior of the CPU interface and displays * implemented features. * * IGRPEN1 - Interrupt Group 1 Enable Register * * IAR1 - Interrupt Acknowledge Register Group 1 * * contains number of the highest priority pending * interrupt from the Group 1. * * EOIR1 - End of Interrupt Register Group 1 * * Writes inform CPU interface about completed Group 1 * interrupts processing. */ #define gic_icc_write(reg, val) \ do { \ WRITE_SPECIALREG(ICC_ ##reg ##_EL1, val); \ isb(); \ } while (0) #define gic_icc_read(reg) \ ({ \ uint64_t val; \ \ val = READ_SPECIALREG(ICC_ ##reg ##_EL1); \ (val); \ }) #define gic_icc_set(reg, mask) \ do { \ uint64_t val; \ val = gic_icc_read(reg); \ val |= (mask); \ gic_icc_write(reg, val); \ } while (0) #define gic_icc_clear(reg, mask) \ do { \ uint64_t val; \ val = gic_icc_read(reg); \ val &= ~(mask); \ gic_icc_write(reg, val); \ } while (0) #endif /* _GIC_V3_REG_H_ */ Index: head/sys/arm64/arm64/gic_v3_var.h =================================================================== --- head/sys/arm64/arm64/gic_v3_var.h (revision 286918) +++ head/sys/arm64/arm64/gic_v3_var.h (revision 286919) @@ -1,314 +1,316 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _GIC_V3_VAR_H_ #define _GIC_V3_VAR_H_ #define GIC_V3_DEVSTR "ARM Generic Interrupt Controller v3.0" DECLARE_CLASS(gic_v3_driver); #define LPI_FLAGS_CONF_FLUSH (1UL << 0) #define LPI_CONFTAB_SIZE PAGE_SIZE_64K /* 1 bit per LPI + 1 KB more for the obligatory PPI, SGI, SPI stuff */ #define LPI_PENDTAB_SIZE ((LPI_CONFTAB_SIZE / 8) + 0x400) struct redist_lpis { vm_offset_t conf_base; vm_offset_t pend_base[MAXCPU]; uint64_t flags; }; struct gic_redists { /* * Re-Distributor region description. * We will have few of those depending * on the #redistributor-regions property in FDT. 
*/ struct resource ** regions; /* Number of Re-Distributor regions */ u_int nregions; /* Per-CPU Re-Distributor handler */ struct resource * pcpu[MAXCPU]; /* LPIs data */ struct redist_lpis lpis; }; struct gic_v3_softc { device_t dev; struct resource ** gic_res; struct mtx gic_mtx; /* Distributor */ struct resource * gic_dist; /* Re-Distributors */ struct gic_redists gic_redists; u_int gic_nirqs; u_int gic_idbits; boolean_t gic_registered; }; MALLOC_DECLARE(M_GIC_V3); /* Device methods */ int gic_v3_attach(device_t dev); int gic_v3_detach(device_t dev); /* * ITS */ #define GIC_V3_ITS_DEVSTR "ARM GIC Interrupt Translation Service" #define GIC_V3_ITS_COMPSTR "arm,gic-v3-its" DECLARE_CLASS(gic_v3_its_driver); /* LPI chunk owned by ITS device */ struct lpi_chunk { u_int lpi_base; u_int lpi_num; u_int lpi_free; /* First free LPI in set */ }; /* ITS device */ struct its_dev { TAILQ_ENTRY(its_dev) entry; /* PCI device */ device_t pci_dev; /* Device ID (i.e. PCI device ID) */ uint32_t devid; /* List of assigned LPIs */ struct lpi_chunk lpis; /* Virtual address of ITT */ vm_offset_t itt; /* Interrupt collection */ struct its_col * col; }; TAILQ_HEAD(its_dev_list, its_dev); /* ITS private table description */ struct its_ptab { vm_offset_t ptab_vaddr; /* Virtual Address of table */ size_t ptab_pgsz; /* Page size */ size_t ptab_npages; /* Number of pages */ }; /* ITS collection description. */ struct its_col { uint64_t col_target; /* Target Re-Distributor */ uint64_t col_id; /* Collection ID */ }; /* ITS command. 
Each command is 32 bytes long */ struct its_cmd { uint64_t cmd_dword[4]; /* ITS command double word */ }; /* ITS commands encoding */ #define ITS_CMD_SYNC (0x05) #define ITS_CMD_MAPD (0x08) #define ITS_CMD_MAPC (0x09) #define ITS_CMD_MAPVI (0x0a) #define ITS_CMD_MAPI (0x0b) #define ITS_CMD_INV (0x0c) #define ITS_CMD_INVALL (0x0d) /* Command */ #define CMD_COMMAND_MASK (0xFFUL) /* PCI device ID */ #define CMD_DEVID_SHIFT (32) #define CMD_DEVID_MASK (0xFFFFFFFFUL << CMD_DEVID_SHIFT) /* Size of IRQ ID bitfield */ #define CMD_SIZE_MASK (0xFFUL) /* Virtual LPI ID */ #define CMD_ID_MASK (0xFFFFFFFFUL) /* Physical LPI ID */ #define CMD_PID_SHIFT (32) #define CMD_PID_MASK (0xFFFFFFFFUL << CMD_PID_SHIFT) /* Collection */ #define CMD_COL_MASK (0xFFFFUL) /* Target (CPU or Re-Distributor) */ #define CMD_TARGET_SHIFT (16) #define CMD_TARGET_MASK (0xFFFFFFFFUL << CMD_TARGET_SHIFT) /* Interrupt Translation Table address */ #define CMD_ITT_MASK (0xFFFFFFFFFF00UL) /* Valid command bit */ #define CMD_VALID_SHIFT (63) #define CMD_VALID_MASK (1UL << CMD_VALID_SHIFT) /* * ITS command descriptor. * Idea for command description passing taken from Linux. 
*/ struct its_cmd_desc { uint8_t cmd_type; union { struct { struct its_col *col; } cmd_desc_sync; struct { struct its_col *col; uint8_t valid; } cmd_desc_mapc; struct { struct its_dev *its_dev; uint32_t pid; uint32_t id; } cmd_desc_mapvi; struct { struct its_dev *its_dev; uint32_t lpinum; } cmd_desc_mapi; struct { struct its_dev *its_dev; uint8_t valid; } cmd_desc_mapd; struct { struct its_dev *its_dev; uint32_t lpinum; } cmd_desc_inv; struct { struct its_col *col; } cmd_desc_invall; }; }; #define ITS_CMDQ_SIZE PAGE_SIZE_64K #define ITS_CMDQ_NENTRIES (ITS_CMDQ_SIZE / sizeof(struct its_cmd)) #define ITS_FLAGS_CMDQ_FLUSH (1UL << 0) #define ITS_TARGET_NONE 0xFBADBEEF struct gic_v3_its_softc { device_t dev; struct resource * its_res; struct its_cmd * its_cmdq_base; /* ITS command queue base */ struct its_cmd * its_cmdq_write; /* ITS command queue write ptr */ struct its_ptab its_ptabs[GITS_BASER_NUM];/* ITS private tables */ - struct its_col * its_cols; /* Per-CPU collections */ + struct its_col * its_cols[MAXCPU];/* Per-CPU collections */ uint64_t its_flags; struct its_dev_list its_dev_list; unsigned long * its_lpi_bitmap; uint32_t its_lpi_maxid; struct mtx its_mtx; struct mtx its_spin_mtx; }; /* Stuff that is specific to the vendor's implementation */ typedef uint32_t (*its_devbits_func_t)(device_t); typedef uint32_t (*its_devid_func_t)(device_t); struct its_quirks { uint64_t cpuid; uint64_t cpuid_mask; its_devid_func_t devid_func; its_devbits_func_t devbits_func; }; extern devclass_t gic_v3_its_devclass; int gic_v3_its_detach(device_t); int gic_v3_its_alloc_msix(device_t, device_t, int *); int gic_v3_its_alloc_msi(device_t, device_t, int, int *); int gic_v3_its_map_msix(device_t, device_t, int, uint64_t *, uint32_t *); + +int its_init_cpu(struct gic_v3_its_softc *); void lpi_unmask_irq(device_t, uint32_t); void lpi_mask_irq(device_t, uint32_t); /* * GIC Distributor accessors. * Notice that only GIC sofc can be passed. 
*/ #define gic_d_read(sc, len, reg) \ ({ \ bus_read_##len(sc->gic_dist, reg); \ }) #define gic_d_write(sc, len, reg, val) \ ({ \ bus_write_##len(sc->gic_dist, reg, val);\ }) /* GIC Re-Distributor accessors (per-CPU) */ #define gic_r_read(sc, len, reg) \ ({ \ u_int cpu = PCPU_GET(cpuid); \ \ bus_read_##len( \ sc->gic_redists.pcpu[cpu], \ reg); \ }) #define gic_r_write(sc, len, reg, val) \ ({ \ u_int cpu = PCPU_GET(cpuid); \ \ bus_write_##len( \ sc->gic_redists.pcpu[cpu], \ reg, val); \ }) #define PCI_DEVID_GENERIC(pci_dev) \ ({ \ ((pci_get_domain(pci_dev) << PCI_RID_DOMAIN_SHIFT) | \ (pci_get_bus(pci_dev) << PCI_RID_BUS_SHIFT) | \ (pci_get_slot(pci_dev) << PCI_RID_SLOT_SHIFT) | \ (pci_get_function(pci_dev) << PCI_RID_FUNC_SHIFT)); \ }) /* * Request number of maximum MSI-X vectors for this device. * Device can ask for less vectors than maximum supported but not more. */ #define PCI_MSIX_NUM(pci_dev) \ ({ \ struct pci_devinfo *dinfo; \ pcicfgregs *cfg; \ \ dinfo = device_get_ivars(pci_dev); \ cfg = &dinfo->cfg; \ \ cfg->msix.msix_msgnum; \ }) #endif /* _GIC_V3_VAR_H_ */ Index: head/sys/arm64/include/cpu.h =================================================================== --- head/sys/arm64/include/cpu.h (revision 286918) +++ head/sys/arm64/include/cpu.h (revision 286919) @@ -1,177 +1,182 @@ /*- * Copyright (c) 1990 The Regents of the University of California. * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)cpu.h 5.4 (Berkeley) 5/9/91 * from: FreeBSD: src/sys/i386/include/cpu.h,v 1.62 2001/06/29 * $FreeBSD$ */ #ifndef _MACHINE_CPU_H_ #define _MACHINE_CPU_H_ #include #include #include #define TRAPF_PC(tfp) ((tfp)->tf_lr) #define TRAPF_USERMODE(tfp) (((tfp)->tf_elr & (1ul << 63)) == 0) #define cpu_getstack(td) ((td)->td_frame->tf_sp) #define cpu_setstack(td, sp) ((td)->td_frame->tf_sp = (sp)) #define cpu_spinwait() /* nothing */ /* Extract CPU affinity levels 0-3 */ #define CPU_AFF0(mpidr) (u_int)(((mpidr) >> 0) & 0xff) #define CPU_AFF1(mpidr) (u_int)(((mpidr) >> 8) & 0xff) #define CPU_AFF2(mpidr) (u_int)(((mpidr) >> 16) & 0xff) #define CPU_AFF3(mpidr) (u_int)(((mpidr) >> 32) & 0xff) -#define CPU_AFF_MASK 0xff00ffffffUL /* Mask affinity fields in MPIDR_EL1 */ +#define CPU_AFF0_MASK 0xffUL +#define CPU_AFF1_MASK 0xff00UL +#define CPU_AFF2_MASK 0xff0000UL +#define CPU_AFF3_MASK 0xff00000000UL +#define CPU_AFF_MASK (CPU_AFF0_MASK | CPU_AFF1_MASK | \ + CPU_AFF2_MASK| CPU_AFF3_MASK) /* Mask affinity fields in MPIDR_EL1 */ #ifdef _KERNEL #define CPU_IMPL_ARM 0x41 #define CPU_IMPL_BROADCOM 0x42 #define CPU_IMPL_CAVIUM 0x43 #define CPU_IMPL_DEC 0x44 #define CPU_IMPL_INFINEON 0x49 #define CPU_IMPL_FREESCALE 0x4D #define CPU_IMPL_NVIDIA 0x4E #define CPU_IMPL_APM 0x50 #define CPU_IMPL_QUALCOMM 0x51 #define CPU_IMPL_MARVELL 0x56 #define CPU_IMPL_INTEL 0x69 #define CPU_PART_THUNDER 0x0A1 #define CPU_PART_FOUNDATION 0xD00 #define CPU_PART_CORTEX_A53 0xD03 #define CPU_PART_CORTEX_A57 0xD07 #define CPU_REV_THUNDER_1_0 0x00 #define CPU_REV_THUNDER_1_1 0x01 #define CPU_IMPL(midr) (((midr) >> 24) & 0xff) #define CPU_PART(midr) (((midr) >> 4) & 0xfff) #define CPU_VAR(midr) (((midr) >> 20) & 0xf) #define CPU_REV(midr) (((midr) >> 0) & 0xf) #define CPU_IMPL_TO_MIDR(val) (((val) & 0xff) << 24) #define CPU_PART_TO_MIDR(val) (((val) & 0xfff) << 4) #define CPU_VAR_TO_MIDR(val) (((val) & 0xf) << 20) #define CPU_REV_TO_MIDR(val) (((val) & 0xf) << 0) #define CPU_IMPL_MASK (0xff << 24) 
#define CPU_PART_MASK (0xfff << 4) #define CPU_VAR_MASK (0xf << 20) #define CPU_REV_MASK (0xf << 0) #define CPU_ID_RAW(impl, part, var, rev) \ (CPU_IMPL_TO_MIDR((impl)) | \ CPU_PART_TO_MIDR((part)) | CPU_VAR_TO_MIDR((var)) | \ CPU_REV_TO_MIDR((rev))) #define CPU_MATCH(mask, impl, part, var, rev) \ (((mask) & PCPU_GET(midr)) == \ ((mask) & CPU_ID_RAW((impl), (part), (var), (rev)))) #define CPU_MATCH_RAW(mask, devid) \ (((mask) & PCPU_GET(midr)) == ((mask) & (devid))) /* * Chip-specific errata. This defines are intended to be * booleans used within if statements. When an appropriate * kernel option is disabled, these defines must be defined * as 0 to allow the compiler to remove a dead code thus * produce better optimized kernel image. */ /* * Vendor: Cavium * Chip: ThunderX * Revision(s): Pass 1.0, Pass 1.1 */ #ifdef THUNDERX_PASS_1_1_ERRATA #define CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 \ (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \ CPU_IMPL_CAVIUM, CPU_PART_THUNDER, 0, CPU_REV_THUNDER_1_0) || \ CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \ CPU_IMPL_CAVIUM, CPU_PART_THUNDER, 0, CPU_REV_THUNDER_1_1)) #else #define CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 0 #endif extern char btext[]; extern char etext[]; extern uint64_t __cpu_affinity[]; void cpu_halt(void) __dead2; void cpu_reset(void) __dead2; void fork_trampoline(void); void identify_cpu(void); void swi_vm(void *v); #define CPU_AFFINITY(cpu) __cpu_affinity[(cpu)] static __inline uint64_t get_cyclecount(void) { uint64_t ret; ret = READ_SPECIALREG(cntvct_el0); return (ret); } #define ADDRESS_TRANSLATE_FUNC(stage) \ static inline uint64_t \ arm64_address_translate_ ##stage (uint64_t addr) \ { \ uint64_t ret; \ \ __asm __volatile( \ "at " __STRING(stage) ", %1 \n" \ "mrs %0, par_el1" : "=r"(ret) : "r"(addr)); \ \ return (ret); \ } ADDRESS_TRANSLATE_FUNC(s1e0r) ADDRESS_TRANSLATE_FUNC(s1e0w) ADDRESS_TRANSLATE_FUNC(s1e1r) ADDRESS_TRANSLATE_FUNC(s1e1w) #endif #endif /* !_MACHINE_CPU_H_ */