Index: head/sys/arm64/arm64/gic_v3.c =================================================================== --- head/sys/arm64/arm64/gic_v3.c (revision 301264) +++ head/sys/arm64/arm64/gic_v3.c (revision 301265) @@ -1,1337 +1,1413 @@ /*- * Copyright (c) 2015-2016 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * the sponsorship of the FreeBSD Foundation. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pic_if.h" #include "gic_v3_reg.h" #include "gic_v3_var.h" +static bus_read_ivar_t gic_v3_read_ivar; + #ifdef INTRNG static pic_disable_intr_t gic_v3_disable_intr; static pic_enable_intr_t gic_v3_enable_intr; static pic_map_intr_t gic_v3_map_intr; static pic_setup_intr_t gic_v3_setup_intr; static pic_teardown_intr_t gic_v3_teardown_intr; static pic_post_filter_t gic_v3_post_filter; static pic_post_ithread_t gic_v3_post_ithread; static pic_pre_ithread_t gic_v3_pre_ithread; static pic_bind_intr_t gic_v3_bind_intr; #ifdef SMP static pic_init_secondary_t gic_v3_init_secondary; static pic_ipi_send_t gic_v3_ipi_send; static pic_ipi_setup_t gic_v3_ipi_setup; #endif static u_int gic_irq_cpu; #ifdef SMP static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1]; static u_int sgi_first_unused = GIC_FIRST_SGI; #endif #else /* Device and PIC methods */ static int gic_v3_bind(device_t, u_int, u_int); static void gic_v3_dispatch(device_t, struct trapframe *); static void gic_v3_eoi(device_t, u_int); static void gic_v3_mask_irq(device_t, u_int); static void gic_v3_unmask_irq(device_t, u_int); #ifdef SMP static void gic_v3_init_secondary(device_t); static void gic_v3_ipi_send(device_t, cpuset_t, u_int); #endif #endif static device_method_t gic_v3_methods[] = { /* Device interface */ DEVMETHOD(device_detach, gic_v3_detach), + /* Bus interface */ + DEVMETHOD(bus_read_ivar, gic_v3_read_ivar), + #ifdef INTRNG /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, gic_v3_disable_intr), DEVMETHOD(pic_enable_intr, gic_v3_enable_intr), DEVMETHOD(pic_map_intr, gic_v3_map_intr), DEVMETHOD(pic_setup_intr, gic_v3_setup_intr), DEVMETHOD(pic_teardown_intr, gic_v3_teardown_intr), DEVMETHOD(pic_post_filter, gic_v3_post_filter), 
DEVMETHOD(pic_post_ithread, gic_v3_post_ithread), DEVMETHOD(pic_pre_ithread, gic_v3_pre_ithread), #ifdef SMP DEVMETHOD(pic_bind_intr, gic_v3_bind_intr), DEVMETHOD(pic_init_secondary, gic_v3_init_secondary), DEVMETHOD(pic_ipi_send, gic_v3_ipi_send), DEVMETHOD(pic_ipi_setup, gic_v3_ipi_setup), #endif #else /* PIC interface */ DEVMETHOD(pic_bind, gic_v3_bind), DEVMETHOD(pic_dispatch, gic_v3_dispatch), DEVMETHOD(pic_eoi, gic_v3_eoi), DEVMETHOD(pic_mask, gic_v3_mask_irq), DEVMETHOD(pic_unmask, gic_v3_unmask_irq), #ifdef SMP DEVMETHOD(pic_init_secondary, gic_v3_init_secondary), DEVMETHOD(pic_ipi_send, gic_v3_ipi_send), #endif #endif /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods, sizeof(struct gic_v3_softc)); /* * Driver-specific definitions. */ MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR); /* * Helper functions and definitions. */ /* Destination registers, either Distributor or Re-Distributor */ enum gic_v3_xdist { DIST = 0, REDIST, }; /* Helper routines starting with gic_v3_ */ static int gic_v3_dist_init(struct gic_v3_softc *); static int gic_v3_redist_alloc(struct gic_v3_softc *); static int gic_v3_redist_find(struct gic_v3_softc *); static int gic_v3_redist_init(struct gic_v3_softc *); static int gic_v3_cpu_init(struct gic_v3_softc *); static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist); /* A sequence of init functions for primary (boot) CPU */ typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *); /* Primary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_primary_init[] = { gic_v3_dist_init, gic_v3_redist_alloc, gic_v3_redist_init, gic_v3_cpu_init, NULL }; #ifdef SMP /* Secondary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_secondary_init[] = { gic_v3_redist_init, gic_v3_cpu_init, NULL }; #endif +#ifdef INTRNG +uint32_t +gic_r_read_4(device_t dev, bus_size_t offset) +{ + struct gic_v3_softc *sc; + + sc = device_get_softc(dev); + return 
(bus_read_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset)); +} + +uint64_t +gic_r_read_8(device_t dev, bus_size_t offset) +{ + struct gic_v3_softc *sc; + + sc = device_get_softc(dev); + return (bus_read_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset)); +} + +void +gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val) +{ + struct gic_v3_softc *sc; + + sc = device_get_softc(dev); + bus_write_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val); +} + +void +gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val) +{ + struct gic_v3_softc *sc; + + sc = device_get_softc(dev); + bus_write_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val); +} +#endif + /* * Device interface. */ int gic_v3_attach(device_t dev) { struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; uint32_t typer; int rid; int err; size_t i; #ifdef INTRNG u_int irq; const char *name; #endif sc = device_get_softc(dev); sc->gic_registered = FALSE; sc->dev = dev; err = 0; /* Initialize mutex */ mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN); /* * Allocate array of struct resource. * One entry for Distributor and all remaining for Re-Distributor. */ sc->gic_res = malloc( sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1), M_GIC_V3, M_WAITOK); /* Now allocate corresponding resources */ for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) { sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->gic_res[rid] == NULL) return (ENXIO); } /* * Distributor interface */ sc->gic_dist = sc->gic_res[0]; /* * Re-Dristributor interface */ /* Allocate space under region descriptions */ sc->gic_redists.regions = malloc( sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions, M_GIC_V3, M_WAITOK); /* Fill-up bus_space information for each region. 
*/ for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++) sc->gic_redists.regions[i] = sc->gic_res[rid]; /* Get the number of supported SPI interrupts */ typer = gic_d_read(sc, 4, GICD_TYPER); sc->gic_nirqs = GICD_TYPER_I_NUM(typer); if (sc->gic_nirqs > GIC_I_NUM_MAX) sc->gic_nirqs = GIC_I_NUM_MAX; #ifdef INTRNG sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs, M_GIC_V3, M_WAITOK | M_ZERO); name = device_get_nameunit(dev); for (irq = 0; irq < sc->gic_nirqs; irq++) { struct intr_irqsrc *isrc; sc->gic_irqs[irq].gi_irq = irq; sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM; sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM; isrc = &sc->gic_irqs[irq].gi_isrc; if (irq <= GIC_LAST_SGI) { err = intr_isrc_register(isrc, sc->dev, INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI); } else if (irq <= GIC_LAST_PPI) { err = intr_isrc_register(isrc, sc->dev, INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI); } else { err = intr_isrc_register(isrc, sc->dev, 0, "%s,s%u", name, irq - GIC_FIRST_SPI); } if (err != 0) { /* XXX call intr_isrc_deregister() */ free(sc->gic_irqs, M_DEVBUF); return (err); } } #endif /* Get the number of supported interrupt identifier bits */ sc->gic_idbits = GICD_TYPER_IDBITS(typer); if (bootverbose) { device_printf(dev, "SPIs: %u, IDs: %u\n", sc->gic_nirqs, (1 << sc->gic_idbits) - 1); } /* Train init sequence for boot CPU */ for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) return (err); } /* * Full success. * Now register PIC to the interrupts handling layer. 
*/ #ifndef INTRNG arm_register_root_pic(dev, sc->gic_nirqs); sc->gic_registered = TRUE; #endif return (0); } int gic_v3_detach(device_t dev) { struct gic_v3_softc *sc; size_t i; int rid; sc = device_get_softc(dev); if (device_is_attached(dev)) { /* * XXX: We should probably deregister PIC */ if (sc->gic_registered) panic("Trying to detach registered PIC"); } for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]); for (i = 0; i < mp_ncpus; i++) free(sc->gic_redists.pcpu[i], M_GIC_V3); free(sc->gic_res, M_GIC_V3); free(sc->gic_redists.regions, M_GIC_V3); return (0); } +static int +gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) +{ + struct gic_v3_softc *sc; + + sc = device_get_softc(dev); + + switch (which) { + case GICV3_IVAR_NIRQS: + *result = sc->gic_nirqs; + return (0); + case GICV3_IVAR_REDIST_VADDR: + *result = (uintptr_t)rman_get_virtual( + sc->gic_redists.pcpu[PCPU_GET(cpuid)]); + return (0); + } + + return (ENOENT); +} + #ifdef INTRNG int arm_gic_v3_intr(void *arg) { struct gic_v3_softc *sc = arg; struct gic_v3_irqsrc *gi; + struct intr_pic *pic; uint64_t active_irq; struct trapframe *tf; bool first; first = true; + pic = sc->gic_pic; while (1) { if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) { /* * Hardware: Cavium ThunderX * Chip revision: Pass 1.0 (early version) * Pass 1.1 (production) * ERRATUM: 22978, 23154 */ __asm __volatile( "nop;nop;nop;nop;nop;nop;nop;nop; \n" "mrs %0, ICC_IAR1_EL1 \n" "nop;nop;nop;nop; \n" "dsb sy \n" : "=&r" (active_irq)); } else { active_irq = gic_icc_read(IAR1); } + if (active_irq >= GIC_FIRST_LPI) { + intr_child_irq_handler(pic, active_irq); + continue; + } + if (__predict_false(active_irq >= sc->gic_nirqs)) return (FILTER_HANDLED); tf = curthread->td_intr_frame; gi = &sc->gic_irqs[active_irq]; if (active_irq <= GIC_LAST_SGI) { /* Call EOI for all IPI before dispatch. 
*/ gic_icc_write(EOIR1, (uint64_t)active_irq); #ifdef SMP intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf); #else device_printf(sc->dev, "SGI %u on UP system detected\n", active_irq - GIC_FIRST_SGI); #endif } else if (active_irq >= GIC_FIRST_PPI && active_irq <= GIC_LAST_SPI) { if (gi->gi_pol == INTR_TRIGGER_EDGE) gic_icc_write(EOIR1, gi->gi_irq); if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) { if (gi->gi_pol != INTR_TRIGGER_EDGE) gic_icc_write(EOIR1, gi->gi_irq); gic_v3_disable_intr(sc->dev, &gi->gi_isrc); device_printf(sc->dev, "Stray irq %lu disabled\n", active_irq); } } } } #ifdef FDT static int gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { u_int irq; if (ncells < 3) return (EINVAL); /* * The 1st cell is the interrupt type: * 0 = SPI * 1 = PPI * The 2nd cell contains the interrupt number: * [0 - 987] for SPI * [0 - 15] for PPI * The 3rd cell is the flags, encoded as follows: * bits[3:0] trigger type and level flags * 1 = edge triggered * 2 = edge triggered (PPI only) * 4 = level-sensitive * 8 = level-sensitive (PPI only) */ switch (cells[0]) { case 0: irq = GIC_FIRST_SPI + cells[1]; /* SPI irq is checked later. 
*/ break; case 1: irq = GIC_FIRST_PPI + cells[1]; if (irq > GIC_LAST_PPI) { device_printf(dev, "unsupported PPI interrupt " "number %u\n", cells[1]); return (EINVAL); } break; default: device_printf(dev, "unsupported interrupt type " "configuration %u\n", cells[0]); return (EINVAL); } switch (cells[2] & 0xf) { case 1: *trigp = INTR_TRIGGER_EDGE; *polp = INTR_POLARITY_HIGH; break; case 2: *trigp = INTR_TRIGGER_EDGE; *polp = INTR_POLARITY_LOW; break; case 4: *trigp = INTR_TRIGGER_LEVEL; *polp = INTR_POLARITY_HIGH; break; case 8: *trigp = INTR_TRIGGER_LEVEL; *polp = INTR_POLARITY_LOW; break; default: device_printf(dev, "unsupported trigger/polarity " "configuration 0x%02x\n", cells[2]); return (EINVAL); } /* Check the interrupt is valid */ if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH) return (EINVAL); *irqp = irq; return (0); } #endif static int do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { struct gic_v3_softc *sc; enum intr_polarity pol; enum intr_trigger trig; #ifdef FDT struct intr_map_data_fdt *daf; #endif u_int irq; sc = device_get_softc(dev); switch (data->type) { #ifdef FDT case INTR_MAP_DATA_FDT: daf = (struct intr_map_data_fdt *)data; if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol, &trig) != 0) return (EINVAL); break; #endif default: return (EINVAL); } if (irq >= sc->gic_nirqs) return (EINVAL); switch (pol) { case INTR_POLARITY_CONFORM: case INTR_POLARITY_LOW: case INTR_POLARITY_HIGH: break; default: return (EINVAL); } switch (trig) { case INTR_TRIGGER_CONFORM: case INTR_TRIGGER_EDGE: case INTR_TRIGGER_LEVEL: break; default: return (EINVAL); } *irqp = irq; if (polp != NULL) *polp = pol; if (trigp != NULL) *trigp = trig; return (0); } static int gic_v3_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct gic_v3_softc *sc; int error; u_int irq; error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL); if (error == 0) { sc = 
device_get_softc(dev); *isrcp = GIC_INTR_ISRC(sc, irq); } return (error); } static int gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_v3_softc *sc = device_get_softc(dev); struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; enum intr_trigger trig; enum intr_polarity pol; uint32_t reg; u_int irq; int error; if (data == NULL) return (ENOTSUP); error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig); if (error != 0) return (error); if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM || trig == INTR_TRIGGER_CONFORM) return (EINVAL); /* Compare config if this is not first setup. */ if (isrc->isrc_handlers != 0) { if (pol != gi->gi_pol || trig != gi->gi_trig) return (EINVAL); else return (0); } gi->gi_pol = pol; gi->gi_trig = trig; /* * XXX - In case that per CPU interrupt is going to be enabled in time * when SMP is already started, we need some IPI call which * enables it on others CPUs. Further, it's more complicated as * pic_enable_source() and pic_disable_source() should act on * per CPU basis only. Thus, it should be solved here somehow. 
*/ if (isrc->isrc_flags & INTR_ISRCF_PPI) CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) { mtx_lock_spin(&sc->gic_mtx); /* Set the trigger and polarity */ if (irq <= GIC_LAST_PPI) reg = gic_r_read(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICFGR(irq)); else reg = gic_d_read(sc, 4, GICD_ICFGR(irq)); if (trig == INTR_TRIGGER_LEVEL) reg &= ~(2 << ((irq % 16) * 2)); else reg |= 2 << ((irq % 16) * 2); if (irq <= GIC_LAST_PPI) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg); gic_v3_wait_for_rwp(sc, REDIST); } else { gic_d_write(sc, 4, GICD_ICFGR(irq), reg); gic_v3_wait_for_rwp(sc, DIST); } mtx_unlock_spin(&sc->gic_mtx); gic_v3_bind_intr(dev, isrc); } return (0); } static int gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; if (isrc->isrc_handlers == 0) { gi->gi_pol = INTR_POLARITY_CONFORM; gi->gi_trig = INTR_TRIGGER_CONFORM; } return (0); } static void gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; u_int irq; sc = device_get_softc(dev); gi = (struct gic_v3_irqsrc *)isrc; irq = gi->gi_irq; if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else panic("%s: Unsupported IRQ %u", __func__, irq); } static void gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; u_int irq; sc = device_get_softc(dev); gi = (struct gic_v3_irqsrc *)isrc; irq = gi->gi_irq; if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + 
GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else panic("%s: Unsupported IRQ %u", __func__, irq); } static void gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; gic_v3_disable_intr(dev, isrc); gic_icc_write(EOIR1, gi->gi_irq); } static void gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc) { gic_v3_enable_intr(dev, isrc); } static void gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; if (gi->gi_pol == INTR_TRIGGER_EDGE) return; gic_icc_write(EOIR1, gi->gi_irq); } static int gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; int cpu; gi = (struct gic_v3_irqsrc *)isrc; if (gi->gi_irq <= GIC_LAST_PPI) return (EINVAL); KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI, ("%s: Attempting to bind an invalid IRQ", __func__)); sc = device_get_softc(dev); if (CPU_EMPTY(&isrc->isrc_cpu)) { gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus); CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu); gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(gic_irq_cpu)); } else { /* * We can only bind to a single CPU so select * the first CPU found. 
*/ cpu = CPU_FFS(&isrc->isrc_cpu) - 1; gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu)); } return (0); } #ifdef SMP static void gic_v3_init_secondary(device_t dev) { + device_t child; struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; struct intr_irqsrc *isrc; u_int cpu, irq; - int err; + int err, i; sc = device_get_softc(dev); cpu = PCPU_GET(cpuid); /* Train init sequence for boot CPU */ for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) { device_printf(dev, "Could not initialize GIC for CPU%u\n", cpu); return; } } /* Unmask attached SGI interrupts. */ for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) { isrc = GIC_INTR_ISRC(sc, irq); if (intr_isrc_init_on_cpu(isrc, cpu)) gic_v3_enable_intr(dev, isrc); } /* Unmask attached PPI interrupts. */ for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) { isrc = GIC_INTR_ISRC(sc, irq); if (intr_isrc_init_on_cpu(isrc, cpu)) gic_v3_enable_intr(dev, isrc); + } + + for (i = 0; i < sc->gic_nchildren; i++) { + child = sc->gic_children[i]; + PIC_INIT_SECONDARY(child); } } static void gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus, u_int ipi) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; uint64_t aff, val, irq; int i; #define GIC_AFF_MASK (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK) #define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK) aff = GIC_AFFINITY(0); irq = gi->gi_irq; val = 0; /* Iterate through all CPUs in set */ for (i = 0; i < mp_ncpus; i++) { /* Move to the next affinity group */ if (aff != GIC_AFFINITY(i)) { /* Send the IPI */ if (val != 0) { gic_icc_write(SGI1R, val); val = 0; } aff = GIC_AFFINITY(i); } /* Send the IPI to this cpu */ if (CPU_ISSET(i, &cpus)) { #define ICC_SGI1R_AFFINITY(aff) \ (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) | \ ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) | \ ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT)) /* Set the affinity when the 
first at this level */ if (val == 0) val = ICC_SGI1R_AFFINITY(aff) | irq << ICC_SGI1R_EL1_SGIID_SHIFT; /* Set the bit to send the IPI to te CPU */ val |= 1 << CPU_AFF0(CPU_AFFINITY(i)); } } /* Send the IPI to the last cpu affinity group */ if (val != 0) gic_icc_write(SGI1R, val); #undef GIC_AFF_MASK #undef GIC_AFFINITY } static int gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp) { struct intr_irqsrc *isrc; struct gic_v3_softc *sc = device_get_softc(dev); if (sgi_first_unused > GIC_LAST_SGI) return (ENOSPC); isrc = GIC_INTR_ISRC(sc, sgi_first_unused); sgi_to_ipi[sgi_first_unused++] = ipi; CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); *isrcp = isrc; return (0); } #endif /* SMP */ #else /* INTRNG */ /* * PIC interface. */ static int gic_v3_bind(device_t dev, u_int irq, u_int cpuid) { uint64_t aff; struct gic_v3_softc *sc; sc = device_get_softc(dev); if (irq <= GIC_LAST_PPI) { /* Can't bind PPI to another CPU but it's not an error */ return (0); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { aff = CPU_AFFINITY(cpuid); gic_d_write(sc, 4, GICD_IROUTER(irq), aff); return (0); } else if (irq >= GIC_FIRST_LPI) return (lpi_migrate(dev, irq, cpuid)); return (EINVAL); } static void gic_v3_dispatch(device_t dev, struct trapframe *frame) { uint64_t active_irq; while (1) { if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) { /* * Hardware: Cavium ThunderX * Chip revision: Pass 1.0 (early version) * Pass 1.1 (production) * ERRATUM: 22978, 23154 */ __asm __volatile( "nop;nop;nop;nop;nop;nop;nop;nop; \n" "mrs %0, ICC_IAR1_EL1 \n" "nop;nop;nop;nop; \n" "dsb sy \n" : "=&r" (active_irq)); } else { active_irq = gic_icc_read(IAR1); } if (__predict_false(active_irq == ICC_IAR1_EL1_SPUR)) break; if (__predict_true((active_irq >= GIC_FIRST_PPI && active_irq <= GIC_LAST_SPI) || active_irq >= GIC_FIRST_LPI)) { arm_dispatch_intr(active_irq, frame); continue; } if (active_irq <= GIC_LAST_SGI) { gic_icc_write(EOIR1, (uint64_t)active_irq); arm_dispatch_intr(active_irq, frame); 
continue; } } } static void gic_v3_eoi(device_t dev, u_int irq) { gic_icc_write(EOIR1, (uint64_t)irq); } static void gic_v3_mask_irq(device_t dev, u_int irq) { struct gic_v3_softc *sc; sc = device_get_softc(dev); if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_r_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else if (irq >= GIC_FIRST_LPI) { /* LPIs */ lpi_mask_irq(dev, irq); } else panic("%s: Unsupported IRQ number %u", __func__, irq); } static void gic_v3_unmask_irq(device_t dev, u_int irq) { struct gic_v3_softc *sc; sc = device_get_softc(dev); if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else if (irq >= GIC_FIRST_LPI) { /* LPIs */ lpi_unmask_irq(dev, irq); } else panic("%s: Unsupported IRQ number %u", __func__, irq); } #ifdef SMP static void gic_v3_init_secondary(device_t dev) { struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; int err; sc = device_get_softc(dev); /* Train init sequence for boot CPU */ for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) { device_printf(dev, "Could not initialize GIC for CPU%u\n", PCPU_GET(cpuid)); return; } } /* * Try to initialize ITS. * If there is no driver attached this routine will fail but that * does not mean failure here as only LPIs will not be functional * on the current CPU. */ if (its_init_cpu(NULL) != 0) { device_printf(dev, "Could not initialize ITS for CPU%u. 
" "No LPIs will arrive on this CPU\n", PCPU_GET(cpuid)); } /* * ARM64TODO: Unmask timer PPIs. To be removed when appropriate * mechanism is implemented. * Activate the timer interrupts: virtual (27), secure (29), * and non-secure (30). Use hardcoded values here as there * should be no defines for them. */ gic_v3_unmask_irq(dev, 27); gic_v3_unmask_irq(dev, 29); gic_v3_unmask_irq(dev, 30); } static void gic_v3_ipi_send(device_t dev, cpuset_t cpuset, u_int ipi) { u_int cpu; uint64_t aff, tlist; uint64_t val; uint64_t aff_mask; /* Set affinity mask to match level 3, 2 and 1 */ aff_mask = CPU_AFF1_MASK | CPU_AFF2_MASK | CPU_AFF3_MASK; /* Iterate through all CPUs in set */ while (!CPU_EMPTY(&cpuset)) { aff = tlist = 0; for (cpu = 0; cpu < mp_ncpus; cpu++) { /* Compose target list for single AFF3:AFF2:AFF1 set */ if (CPU_ISSET(cpu, &cpuset)) { if (!tlist) { /* * Save affinity of the first CPU to * send IPI to for later comparison. */ aff = CPU_AFFINITY(cpu); tlist |= (1UL << CPU_AFF0(aff)); CPU_CLR(cpu, &cpuset); } /* Check for same Affinity level 3, 2 and 1 */ if ((aff & aff_mask) == (CPU_AFFINITY(cpu) & aff_mask)) { tlist |= (1UL << CPU_AFF0(CPU_AFFINITY(cpu))); /* Clear CPU in cpuset from target list */ CPU_CLR(cpu, &cpuset); } } } if (tlist) { KASSERT((tlist & ~ICC_SGI1R_EL1_TL_MASK) == 0, ("Target list too long for GICv3 IPI")); /* Send SGI to CPUs in target list */ val = tlist; val |= (uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT; val |= (uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT; val |= (uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT; val |= (uint64_t)(ipi & ICC_SGI1R_EL1_SGIID_MASK) << ICC_SGI1R_EL1_SGIID_SHIFT; gic_icc_write(SGI1R, val); } } } #endif #endif /* !INTRNG */ /* * Helper routines */ static void gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist) { struct resource *res; u_int cpuid; size_t us_left = 1000000; cpuid = PCPU_GET(cpuid); switch (xdist) { case DIST: res = sc->gic_dist; break; case REDIST: res = 
sc->gic_redists.pcpu[cpuid]; break; default: KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__)); return; } while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) { DELAY(1); if (us_left-- == 0) panic("GICD Register write pending for too long"); } } /* CPU interface. */ static __inline void gic_v3_cpu_priority(uint64_t mask) { /* Set prority mask */ gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK); } static int gic_v3_cpu_enable_sre(struct gic_v3_softc *sc) { uint64_t sre; u_int cpuid; cpuid = PCPU_GET(cpuid); /* * Set the SRE bit to enable access to GIC CPU interface * via system registers. */ sre = READ_SPECIALREG(icc_sre_el1); sre |= ICC_SRE_EL1_SRE; WRITE_SPECIALREG(icc_sre_el1, sre); isb(); /* * Now ensure that the bit is set. */ sre = READ_SPECIALREG(icc_sre_el1); if ((sre & ICC_SRE_EL1_SRE) == 0) { /* We are done. This was disabled in EL2 */ device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface " "via system registers\n", cpuid); return (ENXIO); } else if (bootverbose) { device_printf(sc->dev, "CPU%u enabled CPU interface via system registers\n", cpuid); } return (0); } static int gic_v3_cpu_init(struct gic_v3_softc *sc) { int err; /* Enable access to CPU interface via system registers */ err = gic_v3_cpu_enable_sre(sc); if (err != 0) return (err); /* Priority mask to minimum - accept all interrupts */ gic_v3_cpu_priority(GIC_PRIORITY_MIN); /* Disable EOI mode */ gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE); /* Enable group 1 (insecure) interrups */ gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN); return (0); } /* Distributor */ static int gic_v3_dist_init(struct gic_v3_softc *sc) { uint64_t aff; u_int i; /* * 1. Disable the Distributor */ gic_d_write(sc, 4, GICD_CTLR, 0); gic_v3_wait_for_rwp(sc, DIST); /* * 2. Configure the Distributor */ /* Set all global interrupts to be level triggered, active low. 
*/ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn) gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000); /* Set priority to all shared interrupts */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) { /* Set highest priority */ gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } /* * Disable all interrupts. Leave PPI and SGIs as they are enabled in * Re-Distributor registers. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn) gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF); gic_v3_wait_for_rwp(sc, DIST); /* * 3. Enable Distributor */ /* Enable Distributor with ARE, Group 1 */ gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A | GICD_CTLR_G1); /* * 4. Route all interrupts to boot CPU. */ aff = CPU_AFFINITY(0); for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++) gic_d_write(sc, 4, GICD_IROUTER(i), aff); return (0); } /* Re-Distributor */ static int gic_v3_redist_alloc(struct gic_v3_softc *sc) { u_int cpuid; /* Allocate struct resource for all CPU's Re-Distributor registers */ for (cpuid = 0; cpuid < mp_ncpus; cpuid++) if (CPU_ISSET(cpuid, &all_cpus) != 0) sc->gic_redists.pcpu[cpuid] = malloc(sizeof(*sc->gic_redists.pcpu[0]), M_GIC_V3, M_WAITOK); else sc->gic_redists.pcpu[cpuid] = NULL; return (0); } static int gic_v3_redist_find(struct gic_v3_softc *sc) { struct resource r_res; bus_space_handle_t r_bsh; uint64_t aff; uint64_t typer; uint32_t pidr2; u_int cpuid; size_t i; cpuid = PCPU_GET(cpuid); aff = CPU_AFFINITY(cpuid); /* Affinity in format for comparison with typer */ aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) | (CPU_AFF1(aff) << 8) | CPU_AFF0(aff); if (bootverbose) { device_printf(sc->dev, "Start searching for Re-Distributor\n"); } /* Iterate through Re-Distributor regions */ for (i = 0; i < sc->gic_redists.nregions; i++) { /* Take a copy of the region's resource */ r_res = *sc->gic_redists.regions[i]; r_bsh = rman_get_bushandle(&r_res); pidr2 = bus_read_4(&r_res, GICR_PIDR2); 
switch (pidr2 & GICR_PIDR2_ARCH_MASK) { case GICR_PIDR2_ARCH_GICv3: /* fall through */ case GICR_PIDR2_ARCH_GICv4: break; default: device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENODEV); } do { typer = bus_read_8(&r_res, GICR_TYPER); if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) { KASSERT(sc->gic_redists.pcpu[cpuid] != NULL, ("Invalid pointer to per-CPU redistributor")); /* Copy res contents to its final destination */ *sc->gic_redists.pcpu[cpuid] = r_res; if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor has been found\n", cpuid); } return (0); } r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE); if ((typer & GICR_TYPER_VLPIS) != 0) { r_bsh += (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE); } rman_set_bushandle(&r_res, r_bsh); } while ((typer & GICR_TYPER_LAST) == 0); } device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENXIO); } static int gic_v3_redist_wake(struct gic_v3_softc *sc) { uint32_t waker; size_t us_left = 1000000; waker = gic_r_read(sc, 4, GICR_WAKER); /* Wake up Re-Distributor for this CPU */ waker &= ~GICR_WAKER_PS; gic_r_write(sc, 4, GICR_WAKER, waker); /* * When clearing ProcessorSleep bit it is required to wait for * ChildrenAsleep to become zero following the processor power-on. 
*/ while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) { DELAY(1); if (us_left-- == 0) { panic("Could not wake Re-Distributor for CPU%u", PCPU_GET(cpuid)); } } if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor woke up\n", PCPU_GET(cpuid)); } return (0); } static int gic_v3_redist_init(struct gic_v3_softc *sc) { int err; size_t i; err = gic_v3_redist_find(sc); if (err != 0) return (err); err = gic_v3_redist_wake(sc); if (err != 0) return (err); /* Disable SPIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0, GICR_I_ENABLER_PPI_MASK); /* Enable SGIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0, GICR_I_ENABLER_SGI_MASK); /* Set priority for SGIs and PPIs */ for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } gic_v3_wait_for_rwp(sc, REDIST); return (0); } Index: head/sys/arm64/arm64/gic_v3_fdt.c =================================================================== --- head/sys/arm64/arm64/gic_v3_fdt.c (revision 301264) +++ head/sys/arm64/arm64/gic_v3_fdt.c (revision 301265) @@ -1,332 +1,338 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include "gic_v3_reg.h" #include "gic_v3_var.h" /* * FDT glue. */ static int gic_v3_fdt_probe(device_t); static int gic_v3_fdt_attach(device_t); static struct resource *gic_v3_ofw_bus_alloc_res(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static const struct ofw_bus_devinfo *gic_v3_ofw_get_devinfo(device_t, device_t); static device_method_t gic_v3_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gic_v3_fdt_probe), DEVMETHOD(device_attach, gic_v3_fdt_attach), /* Bus interface */ DEVMETHOD(bus_alloc_resource, gic_v3_ofw_bus_alloc_res), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, gic_v3_ofw_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), /* End */ DEVMETHOD_END }; DEFINE_CLASS_1(gic, gic_v3_fdt_driver, gic_v3_fdt_methods, sizeof(struct 
gic_v3_softc), gic_v3_driver); static devclass_t gic_v3_fdt_devclass; EARLY_DRIVER_MODULE(gic_v3, simplebus, gic_v3_fdt_driver, gic_v3_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(gic_v3, ofwbus, gic_v3_fdt_driver, gic_v3_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); /* * Helper functions declarations. */ static int gic_v3_ofw_bus_attach(device_t); /* * Device interface. */ static int gic_v3_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "arm,gic-v3")) return (ENXIO); device_set_desc(dev, GIC_V3_DEVSTR); return (BUS_PROBE_DEFAULT); } static int gic_v3_fdt_attach(device_t dev) { struct gic_v3_softc *sc; pcell_t redist_regions; #ifdef INTRNG intptr_t xref; #endif int err; sc = device_get_softc(dev); sc->dev = dev; /* * Recover number of the Re-Distributor regions. */ if (OF_getencprop(ofw_bus_get_node(dev), "#redistributor-regions", &redist_regions, sizeof(redist_regions)) <= 0) sc->gic_redists.nregions = 1; else sc->gic_redists.nregions = redist_regions; err = gic_v3_attach(dev); if (err != 0) goto error; #ifdef INTRNG xref = OF_xref_from_node(ofw_bus_get_node(dev)); - if (intr_pic_register(dev, xref) == NULL) { + sc->gic_pic = intr_pic_register(dev, xref); + if (sc->gic_pic == NULL) { device_printf(dev, "could not register PIC\n"); err = ENXIO; goto error; } if (intr_pic_claim_root(dev, xref, arm_gic_v3_intr, sc, GIC_LAST_SGI - GIC_FIRST_SGI + 1) != 0) { err = ENXIO; goto error; } #endif /* * Try to register ITS to this GIC. * GIC will act as a bus in that case. * Failure here will not affect main GIC functionality. */ if (gic_v3_ofw_bus_attach(dev) != 0) { if (bootverbose) { device_printf(dev, "Failed to attach ITS to this GIC\n"); } } + +#ifdef INTRNG + if (device_get_children(dev, &sc->gic_children, &sc->gic_nchildren) != 0) + sc->gic_nchildren = 0; +#endif return (err); error: if (bootverbose) { device_printf(dev, "Failed to attach. 
Error %d\n", err); } /* Failure so free resources */ gic_v3_detach(dev); return (err); } /* OFW bus interface */ struct gic_v3_ofw_devinfo { struct ofw_bus_devinfo di_dinfo; struct resource_list di_rl; }; static const struct ofw_bus_devinfo * gic_v3_ofw_get_devinfo(device_t bus __unused, device_t child) { struct gic_v3_ofw_devinfo *di; di = device_get_ivars(child); return (&di->di_dinfo); } static struct resource * gic_v3_ofw_bus_alloc_res(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct gic_v3_ofw_devinfo *di; struct resource_list_entry *rle; int ranges_len; if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type != SYS_RES_MEMORY) return (NULL); /* Find defaults for this rid */ rle = resource_list_find(&di->di_rl, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } /* * XXX: No ranges remap! * Absolute address is expected. */ if (ofw_bus_has_prop(bus, "ranges")) { ranges_len = OF_getproplen(ofw_bus_get_node(bus), "ranges"); if (ranges_len != 0) { if (bootverbose) { device_printf(child, "Ranges remap not supported\n"); } return (NULL); } } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } /* Helper functions */ /* * Bus capability support for GICv3. * Collects and configures device informations and finally * adds ITS device as a child of GICv3 in Newbus hierarchy. 
*/ static int gic_v3_ofw_bus_attach(device_t dev) { struct gic_v3_ofw_devinfo *di; device_t child; phandle_t parent, node; pcell_t addr_cells, size_cells; parent = ofw_bus_get_node(dev); if (parent > 0) { addr_cells = 2; OF_getencprop(parent, "#address-cells", &addr_cells, sizeof(addr_cells)); size_cells = 2; OF_getencprop(parent, "#size-cells", &size_cells, sizeof(size_cells)); /* Iterate through all GIC subordinates */ for (node = OF_child(parent); node > 0; node = OF_peer(node)) { /* Allocate and populate devinfo. */ di = malloc(sizeof(*di), M_GIC_V3, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node)) { if (bootverbose) { device_printf(dev, "Could not set up devinfo for ITS\n"); } free(di, M_GIC_V3); continue; } /* Initialize and populate resource list. */ resource_list_init(&di->di_rl); ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells, &di->di_rl); /* Should not have any interrupts, so don't add any */ /* Add newbus device for this FDT node */ child = device_add_child(dev, NULL, -1); if (!child) { if (bootverbose) { device_printf(dev, "Could not add child: %s\n", di->di_dinfo.obd_name); } resource_list_free(&di->di_rl); ofw_bus_gen_destroy_devinfo(&di->di_dinfo); free(di, M_GIC_V3); continue; } device_set_ivars(child, di); } } return (bus_generic_attach(dev)); } #ifndef INTRNG static int gic_v3_its_fdt_probe(device_t dev); static device_method_t gic_v3_its_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gic_v3_its_fdt_probe), /* End */ DEVMETHOD_END }; DEFINE_CLASS_1(its, gic_v3_its_fdt_driver, gic_v3_its_fdt_methods, sizeof(struct gic_v3_its_softc), gic_v3_its_driver); static devclass_t gic_v3_its_fdt_devclass; EARLY_DRIVER_MODULE(its, gic, gic_v3_its_fdt_driver, gic_v3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); static int gic_v3_its_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, GIC_V3_ITS_COMPSTR)) return (ENXIO); device_set_desc(dev, 
GIC_V3_ITS_DEVSTR); return (BUS_PROBE_DEFAULT); } #endif Index: head/sys/arm64/arm64/gic_v3_var.h =================================================================== --- head/sys/arm64/arm64/gic_v3_var.h (revision 301264) +++ head/sys/arm64/arm64/gic_v3_var.h (revision 301265) @@ -1,349 +1,378 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _GIC_V3_VAR_H_ #define _GIC_V3_VAR_H_ #define GIC_V3_DEVSTR "ARM Generic Interrupt Controller v3.0" DECLARE_CLASS(gic_v3_driver); +#ifndef INTRNG #define LPI_FLAGS_CONF_FLUSH (1UL << 0) #define LPI_CONFTAB_SIZE PAGE_SIZE_64K /* 1 bit per LPI + 1 KB more for the obligatory PPI, SGI, SPI stuff */ #define LPI_PENDTAB_SIZE ((LPI_CONFTAB_SIZE / 8) + 0x400) +#endif #ifdef INTRNG struct gic_v3_irqsrc { struct intr_irqsrc gi_isrc; uint32_t gi_irq; enum intr_polarity gi_pol; enum intr_trigger gi_trig; }; #endif struct redist_lpis { vm_offset_t conf_base; vm_offset_t pend_base[MAXCPU]; uint64_t flags; }; struct gic_redists { /* * Re-Distributor region description. * We will have few of those depending * on the #redistributor-regions property in FDT. */ struct resource ** regions; /* Number of Re-Distributor regions */ u_int nregions; /* Per-CPU Re-Distributor handler */ struct resource * pcpu[MAXCPU]; /* LPIs data */ struct redist_lpis lpis; }; struct gic_v3_softc { device_t dev; struct resource ** gic_res; struct mtx gic_mtx; /* Distributor */ struct resource * gic_dist; /* Re-Distributors */ struct gic_redists gic_redists; u_int gic_nirqs; u_int gic_idbits; boolean_t gic_registered; #ifdef INTRNG + int gic_nchildren; + device_t *gic_children; + struct intr_pic *gic_pic; struct gic_v3_irqsrc *gic_irqs; #endif }; #ifdef INTRNG #define GIC_INTR_ISRC(sc, irq) (&sc->gic_irqs[irq].gi_isrc) #endif MALLOC_DECLARE(M_GIC_V3); +/* ivars */ +enum { + GICV3_IVAR_NIRQS, + GICV3_IVAR_REDIST_VADDR, +}; + +__BUS_ACCESSOR(gicv3, nirqs, GICV3, NIRQS, u_int); +__BUS_ACCESSOR(gicv3, redist_vaddr, GICV3, REDIST_VADDR, void *); + /* Device methods */ int gic_v3_attach(device_t dev); int gic_v3_detach(device_t dev); int arm_gic_v3_intr(void *); +#ifdef INTRNG +uint32_t gic_r_read_4(device_t, bus_size_t); +uint64_t gic_r_read_8(device_t, bus_size_t); +void gic_r_write_4(device_t, bus_size_t, uint32_t var); +void gic_r_write_8(device_t, bus_size_t, uint64_t var); +#endif + 
/* * ITS */ -#define GIC_V3_ITS_DEVSTR "ARM GIC Interrupt Translation Service" -#define GIC_V3_ITS_COMPSTR "arm,gic-v3-its" -DECLARE_CLASS(gic_v3_its_driver); - /* LPI chunk owned by ITS device */ struct lpi_chunk { u_int lpi_base; u_int lpi_free; /* First free LPI in set */ +#ifndef INTRNG u_int *lpi_col_ids; - +#endif u_int lpi_num; /* Total number of LPIs in chunk */ u_int lpi_busy; /* Number of busy LPIs in chink */ }; /* ITS device */ struct its_dev { TAILQ_ENTRY(its_dev) entry; /* PCI device */ device_t pci_dev; /* Device ID (i.e. PCI device ID) */ uint32_t devid; /* List of assigned LPIs */ struct lpi_chunk lpis; /* Virtual address of ITT */ vm_offset_t itt; size_t itt_size; }; +#ifndef INTRNG TAILQ_HEAD(its_dev_list, its_dev); /* ITS private table description */ struct its_ptab { vm_offset_t ptab_vaddr; /* Virtual Address of table */ size_t ptab_pgsz; /* Page size */ size_t ptab_npages; /* Number of pages */ }; /* ITS collection description. */ struct its_col { uint64_t col_target; /* Target Re-Distributor */ uint64_t col_id; /* Collection ID */ }; /* ITS command. 
Each command is 32 bytes long */ struct its_cmd { uint64_t cmd_dword[4]; /* ITS command double word */ }; +#define GIC_V3_ITS_DEVSTR "ARM GIC Interrupt Translation Service" +#define GIC_V3_ITS_COMPSTR "arm,gic-v3-its" + +DECLARE_CLASS(gic_v3_its_driver); + /* ITS commands encoding */ #define ITS_CMD_MOVI (0x01) #define ITS_CMD_SYNC (0x05) #define ITS_CMD_MAPD (0x08) #define ITS_CMD_MAPC (0x09) #define ITS_CMD_MAPVI (0x0a) #define ITS_CMD_MAPI (0x0b) #define ITS_CMD_INV (0x0c) #define ITS_CMD_INVALL (0x0d) /* Command */ #define CMD_COMMAND_MASK (0xFFUL) /* PCI device ID */ #define CMD_DEVID_SHIFT (32) #define CMD_DEVID_MASK (0xFFFFFFFFUL << CMD_DEVID_SHIFT) /* Size of IRQ ID bitfield */ #define CMD_SIZE_MASK (0xFFUL) /* Virtual LPI ID */ #define CMD_ID_MASK (0xFFFFFFFFUL) /* Physical LPI ID */ #define CMD_PID_SHIFT (32) #define CMD_PID_MASK (0xFFFFFFFFUL << CMD_PID_SHIFT) /* Collection */ #define CMD_COL_MASK (0xFFFFUL) /* Target (CPU or Re-Distributor) */ #define CMD_TARGET_SHIFT (16) #define CMD_TARGET_MASK (0xFFFFFFFFUL << CMD_TARGET_SHIFT) /* Interrupt Translation Table address */ #define CMD_ITT_MASK (0xFFFFFFFFFF00UL) /* Valid command bit */ #define CMD_VALID_SHIFT (63) #define CMD_VALID_MASK (1UL << CMD_VALID_SHIFT) +#endif /* INTRNG */ /* * ITS command descriptor. * Idea for command description passing taken from Linux. 
*/ struct its_cmd_desc { uint8_t cmd_type; union { struct { struct its_dev *its_dev; struct its_col *col; uint32_t id; } cmd_desc_movi; struct { struct its_col *col; } cmd_desc_sync; struct { struct its_col *col; uint8_t valid; } cmd_desc_mapc; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; uint32_t id; } cmd_desc_mapvi; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; } cmd_desc_mapi; struct { struct its_dev *its_dev; uint8_t valid; } cmd_desc_mapd; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; } cmd_desc_inv; struct { struct its_col *col; } cmd_desc_invall; }; }; +#define ITS_TARGET_NONE 0xFBADBEEF + +#ifndef INTRNG #define ITS_CMDQ_SIZE PAGE_SIZE_64K #define ITS_CMDQ_NENTRIES (ITS_CMDQ_SIZE / sizeof(struct its_cmd)) #define ITS_FLAGS_CMDQ_FLUSH (1UL << 0) -#define ITS_TARGET_NONE 0xFBADBEEF - struct gic_v3_its_softc { device_t dev; struct resource * its_res; struct its_cmd * its_cmdq_base; /* ITS command queue base */ struct its_cmd * its_cmdq_write; /* ITS command queue write ptr */ struct its_ptab its_ptabs[GITS_BASER_NUM];/* ITS private tables */ struct its_col * its_cols[MAXCPU];/* Per-CPU collections */ uint64_t its_flags; +#ifndef INTRNG struct its_dev_list its_dev_list; +#endif bitstr_t * its_lpi_bitmap; uint32_t its_lpi_maxid; struct mtx its_dev_lock; struct mtx its_cmd_lock; uint32_t its_socket; /* Socket number ITS is attached to */ }; /* Stuff that is specific to the vendor's implementation */ typedef uint32_t (*its_devbits_func_t)(device_t); struct its_quirks { uint64_t cpuid; uint64_t cpuid_mask; its_devbits_func_t devbits_func; }; extern devclass_t gic_v3_its_devclass; int gic_v3_its_detach(device_t); int gic_v3_its_alloc_msix(device_t, device_t, int *); int gic_v3_its_release_msix(device_t, device_t, int); int gic_v3_its_alloc_msi(device_t, device_t, int, int *); int gic_v3_its_release_msi(device_t, device_t, int, int *); int gic_v3_its_map_msi(device_t, device_t, int, uint64_t *, 
uint32_t *); int its_init_cpu(struct gic_v3_its_softc *); int lpi_migrate(device_t, uint32_t, u_int); void lpi_unmask_irq(device_t, uint32_t); void lpi_mask_irq(device_t, uint32_t); +#endif /* * GIC Distributor accessors. * Notice that only GIC sofc can be passed. */ #define gic_d_read(sc, len, reg) \ ({ \ bus_read_##len(sc->gic_dist, reg); \ }) #define gic_d_write(sc, len, reg, val) \ ({ \ bus_write_##len(sc->gic_dist, reg, val);\ }) /* GIC Re-Distributor accessors (per-CPU) */ #define gic_r_read(sc, len, reg) \ ({ \ u_int cpu = PCPU_GET(cpuid); \ \ bus_read_##len( \ sc->gic_redists.pcpu[cpu], \ reg); \ }) #define gic_r_write(sc, len, reg, val) \ ({ \ u_int cpu = PCPU_GET(cpuid); \ \ bus_write_##len( \ sc->gic_redists.pcpu[cpu], \ reg, val); \ }) #define PCI_DEVID_GENERIC(pci_dev) \ ({ \ ((pci_get_domain(pci_dev) << PCI_RID_DOMAIN_SHIFT) | \ (pci_get_bus(pci_dev) << PCI_RID_BUS_SHIFT) | \ (pci_get_slot(pci_dev) << PCI_RID_SLOT_SHIFT) | \ (pci_get_function(pci_dev) << PCI_RID_FUNC_SHIFT)); \ }) /* * Request number of maximum MSI-X vectors for this device. * Device can ask for less vectors than maximum supported but not more. */ #define PCI_MSIX_NUM(pci_dev) \ ({ \ struct pci_devinfo *dinfo; \ pcicfgregs *cfg; \ \ dinfo = device_get_ivars(pci_dev); \ cfg = &dinfo->cfg; \ \ cfg->msix.msix_msgnum; \ }) #endif /* _GIC_V3_VAR_H_ */ Index: head/sys/arm64/arm64/gicv3_its.c =================================================================== --- head/sys/arm64/arm64/gicv3_its.c (nonexistent) +++ head/sys/arm64/arm64/gicv3_its.c (revision 301265) @@ -0,0 +1,1585 @@ +/*- + * Copyright (c) 2015-2016 The FreeBSD Foundation + * All rights reserved. + * + * This software was developed by Andrew Turner under + * the sponsorship of the FreeBSD Foundation. + * + * This software was developed by Semihalf under + * the sponsorship of the FreeBSD Foundation. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include "opt_platform.h" + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#ifdef FDT +#include +#include +#include +#endif +#include +#include + +#include "pcib_if.h" +#include "pic_if.h" +#include "msi_if.h" + +MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS", + "ARM GICv3 Interrupt Translation Service"); + +#define LPI_NIRQS (64 * 1024) + +/* The size and alignment of the command circular buffer */ +#define ITS_CMDQ_SIZE (64 * 1024) /* Must be a multiple of 4K */ +#define ITS_CMDQ_ALIGN (64 * 1024) + +#define LPI_CONFTAB_SIZE LPI_NIRQS +#define LPI_CONFTAB_ALIGN (64 * 1024) +#define LPI_CONFTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ + +/* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */ +#define LPI_PENDTAB_SIZE ((LPI_NIRQS + GIC_FIRST_LPI) / 8) +#define LPI_PENDTAB_ALIGN (64 * 1024) +#define LPI_PENDTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ + +#define LPI_INT_TRANS_TAB_ALIGN 256 +#define LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1) + +/* ITS commands encoding */ +#define ITS_CMD_MOVI (0x01) +#define ITS_CMD_SYNC (0x05) +#define ITS_CMD_MAPD (0x08) +#define ITS_CMD_MAPC (0x09) +#define ITS_CMD_MAPTI (0x0a) +#define ITS_CMD_MAPI (0x0b) +#define ITS_CMD_INV (0x0c) +#define ITS_CMD_INVALL (0x0d) +/* Command */ +#define CMD_COMMAND_MASK (0xFFUL) +/* PCI device ID */ +#define CMD_DEVID_SHIFT (32) +#define CMD_DEVID_MASK (0xFFFFFFFFUL << CMD_DEVID_SHIFT) +/* Size of IRQ ID bitfield */ +#define CMD_SIZE_MASK (0xFFUL) +/* Virtual LPI ID */ +#define CMD_ID_MASK (0xFFFFFFFFUL) +/* Physical LPI ID */ +#define CMD_PID_SHIFT (32) +#define CMD_PID_MASK (0xFFFFFFFFUL << CMD_PID_SHIFT) +/* Collection */ +#define CMD_COL_MASK (0xFFFFUL) +/* Target (CPU or Re-Distributor) */ +#define CMD_TARGET_SHIFT (16) +#define CMD_TARGET_MASK (0xFFFFFFFFUL 
<< CMD_TARGET_SHIFT) +/* Interrupt Translation Table address */ +#define CMD_ITT_MASK (0xFFFFFFFFFF00UL) +/* Valid command bit */ +#define CMD_VALID_SHIFT (63) +#define CMD_VALID_MASK (1UL << CMD_VALID_SHIFT) + +/* ITS command. Each command is 32 bytes long */ +struct its_cmd { + uint64_t cmd_dword[4]; /* ITS command double word */ +}; + +/* An ITS private table */ +struct its_ptable { + vm_offset_t ptab_vaddr; + unsigned long ptab_size; +}; + +/* ITS collection description. */ +struct its_col { + uint64_t col_target; /* Target Re-Distributor */ + uint64_t col_id; /* Collection ID */ +}; + +struct gicv3_its_irqsrc { + struct intr_irqsrc gi_isrc; + u_int gi_irq; + struct its_dev *gi_its_dev; +}; + +struct gicv3_its_softc { + struct intr_pic *sc_pic; + struct resource *sc_its_res; + + struct its_ptable sc_its_ptab[GITS_BASER_NUM]; + struct its_col *sc_its_cols[MAXCPU]; /* Per-CPU collections */ + + /* + * TODO: We should get these from the parent as we only want a + * single copy of each across the interrupt controller. 
+ */ + vm_offset_t sc_conf_base; + vm_offset_t sc_pend_base[MAXCPU]; + + /* Command handling */ + struct mtx sc_its_cmd_lock; + struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */ + size_t sc_its_cmd_next_idx; + + vmem_t *sc_irq_alloc; + struct gicv3_its_irqsrc *sc_irqs; + + struct mtx sc_its_dev_lock; + TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list; + +#define ITS_FLAGS_CMDQ_FLUSH 0x00000001 +#define ITS_FLAGS_LPI_CONF_FLUSH 0x00000002 +#define ITS_FLAGS_ERRATA_CAVIUM_22375 0x00000004 + u_int sc_its_flags; +}; + +typedef void (its_quirk_func_t)(device_t); +static its_quirk_func_t its_quirk_cavium_22375; + +static const struct { + const char *desc; + uint32_t iidr; + uint32_t iidr_mask; + its_quirk_func_t *func; +} its_quirks[] = { + { + /* Cavium ThunderX Pass 1.x */ + .desc = "Cavoum ThunderX errata: 22375, 24313", + .iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM, + GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0), + .iidr_mask = ~GITS_IIDR_REVISION_MASK, + .func = its_quirk_cavium_22375, + }, +}; + +static u_int gic_irq_cpu; + +#define gic_its_read_4(sc, reg) \ + bus_read_4((sc)->sc_its_res, (reg)) +#define gic_its_read_8(sc, reg) \ + bus_read_8((sc)->sc_its_res, (reg)) + +#define gic_its_write_4(sc, reg, val) \ + bus_write_4((sc)->sc_its_res, (reg), (val)) +#define gic_its_write_8(sc, reg, val) \ + bus_write_8((sc)->sc_its_res, (reg), (val)) + +static device_attach_t gicv3_its_attach; +static device_detach_t gicv3_its_detach; + +static pic_disable_intr_t gicv3_its_disable_intr; +static pic_enable_intr_t gicv3_its_enable_intr; +static pic_map_intr_t gicv3_its_map_intr; +static pic_setup_intr_t gicv3_its_setup_intr; +static pic_post_filter_t gicv3_its_post_filter; +static pic_post_ithread_t gicv3_its_post_ithread; +static pic_pre_ithread_t gicv3_its_pre_ithread; +static pic_bind_intr_t gicv3_its_bind_intr; +#ifdef SMP +static pic_init_secondary_t gicv3_its_init_secondary; +#endif +static msi_alloc_msi_t gicv3_its_alloc_msi; +static 
msi_release_msi_t gicv3_its_release_msi; +static msi_alloc_msix_t gicv3_its_alloc_msix; +static msi_release_msix_t gicv3_its_release_msix; +static msi_map_msi_t gicv3_its_map_msi; + +static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *); +static void its_cmd_mapc(device_t, struct its_col *, uint8_t); +static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *); +static void its_cmd_mapd(device_t, struct its_dev *, uint8_t); +static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *); +static void its_cmd_invall(device_t, struct its_col *); + +static device_method_t gicv3_its_methods[] = { + /* Device interface */ + DEVMETHOD(device_detach, gicv3_its_detach), + + /* Interrupt controller interface */ + DEVMETHOD(pic_disable_intr, gicv3_its_disable_intr), + DEVMETHOD(pic_enable_intr, gicv3_its_enable_intr), + DEVMETHOD(pic_map_intr, gicv3_its_map_intr), + DEVMETHOD(pic_setup_intr, gicv3_its_setup_intr), + DEVMETHOD(pic_post_filter, gicv3_its_post_filter), + DEVMETHOD(pic_post_ithread, gicv3_its_post_ithread), + DEVMETHOD(pic_pre_ithread, gicv3_its_pre_ithread), +#ifdef SMP + DEVMETHOD(pic_bind_intr, gicv3_its_bind_intr), + DEVMETHOD(pic_init_secondary, gicv3_its_init_secondary), +#endif + + /* MSI/MSI-X */ + DEVMETHOD(msi_alloc_msi, gicv3_its_alloc_msi), + DEVMETHOD(msi_release_msi, gicv3_its_release_msi), + DEVMETHOD(msi_alloc_msix, gicv3_its_alloc_msix), + DEVMETHOD(msi_release_msix, gicv3_its_release_msix), + DEVMETHOD(msi_map_msi, gicv3_its_map_msi), + + /* End */ + DEVMETHOD_END +}; + +static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods, + sizeof(struct gicv3_its_softc)); + +static void +gicv3_its_cmdq_init(struct gicv3_its_softc *sc) +{ + vm_paddr_t cmd_paddr; + uint64_t reg, tmp; + + /* Set up the command circular buffer */ + sc->sc_its_cmd_base = contigmalloc(ITS_CMDQ_SIZE, M_GICV3_ITS, + M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0); + sc->sc_its_cmd_next_idx = 0; + + cmd_paddr = 
vtophys(sc->sc_its_cmd_base); + + /* Set the base of the command buffer */ + reg = GITS_CBASER_VALID | + (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) | + cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) | + (ITS_CMDQ_SIZE / 4096 - 1); + gic_its_write_8(sc, GITS_CBASER, reg); + + /* Read back to check for fixed value fields */ + tmp = gic_its_read_8(sc, GITS_CBASER); + + if ((tmp & GITS_CBASER_SHARE_MASK) != + (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) { + /* Check if the hardware reported non-shareable */ + if ((tmp & GITS_CBASER_SHARE_MASK) == + (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) { + /* If so remove the cache attribute */ + reg &= ~GITS_CBASER_CACHE_MASK; + reg &= ~GITS_CBASER_SHARE_MASK; + /* Set to Non-cacheable, Non-shareable */ + reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT; + reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT; + + gic_its_write_8(sc, GITS_CBASER, reg); + } + + /* The command queue has to be flushed after each command */ + sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH; + } + + /* Get the next command from the start of the buffer */ + gic_its_write_8(sc, GITS_CWRITER, 0x0); +} + +static int +gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc) +{ + vm_offset_t table; + vm_paddr_t paddr; + uint64_t cache, reg, share, tmp, type; + size_t esize, its_tbl_size, nidents, nitspages, npages; + int i, page_size; + int devbits; + + if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) { + /* + * GITS_TYPER[17:13] of ThunderX reports that device IDs + * are to be 21 bits in length. The entry size of the ITS + * table can be read from GITS_BASERn[52:48] and on ThunderX + * is supposed to be 8 bytes in length (for device table). + * Finally the page size that is to be used by ITS to access + * this table will be set to 64KB. + * + * This gives 0x200000 entries of size 0x8 bytes covered by + * 256 pages each of which 64KB in size. 
The number of pages + * (minus 1) should then be written to GITS_BASERn[7:0]. In + * that case this value would be 0xFF but on ThunderX the + * maximum value that HW accepts is 0xFD. + * + * Set an arbitrary number of device ID bits to 20 in order + * to limit the number of entries in ITS device table to + * 0x100000 and the table size to 8MB. + */ + devbits = 20; + cache = 0; + } else { + devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER)); + cache = GITS_BASER_CACHE_WAWB; + } + share = GITS_BASER_SHARE_IS; + page_size = PAGE_SIZE_64K; + + for (i = 0; i < GITS_BASER_NUM; i++) { + reg = gic_its_read_8(sc, GITS_BASER(i)); + /* The type of table */ + type = GITS_BASER_TYPE(reg); + /* The table entry size */ + esize = GITS_BASER_ESIZE(reg); + + switch(type) { + case GITS_BASER_TYPE_DEV: + nidents = (1 << devbits); + its_tbl_size = esize * nidents; + its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K); + break; + case GITS_BASER_TYPE_VP: + case GITS_BASER_TYPE_PP: /* Undocumented? */ + case GITS_BASER_TYPE_IC: + its_tbl_size = page_size; + break; + default: + continue; + } + npages = howmany(its_tbl_size, PAGE_SIZE); + + /* Allocate the table */ + table = (vm_offset_t)contigmalloc(npages * PAGE_SIZE, + M_GICV3_ITS, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, + PAGE_SIZE, 0); + + sc->sc_its_ptab[i].ptab_vaddr = table; + sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE; + + paddr = vtophys(table); + + while (1) { + nitspages = howmany(its_tbl_size, page_size); + + /* Clear the fields we will be setting */ + reg &= ~(GITS_BASER_VALID | + GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK | + GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK | + GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK | + GITS_BASER_SIZE_MASK); + /* Set the new values */ + reg |= GITS_BASER_VALID | + (cache << GITS_BASER_CACHE_SHIFT) | + (type << GITS_BASER_TYPE_SHIFT) | + ((esize - 1) << GITS_BASER_ESIZE_SHIFT) | + paddr | (share << GITS_BASER_SHARE_SHIFT) | + (nitspages - 1); + + switch (page_size) { + case 
PAGE_SIZE: /* 4KB */ + reg |= + GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT; + break; + case PAGE_SIZE_16K: /* 16KB */ + reg |= + GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT; + break; + case PAGE_SIZE_64K: /* 64KB */ + reg |= + GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT; + break; + } + + gic_its_write_8(sc, GITS_BASER(i), reg); + + /* Read back to check */ + tmp = gic_its_read_8(sc, GITS_BASER(i)); + + /* Do the shareability masks line up? */ + if ((tmp & GITS_BASER_SHARE_MASK) != + (reg & GITS_BASER_SHARE_MASK)) { + share = (tmp & GITS_BASER_SHARE_MASK) >> + GITS_BASER_SHARE_SHIFT; + continue; + } + + if ((tmp & GITS_BASER_PSZ_MASK) != + (reg & GITS_BASER_PSZ_MASK)) { + switch (page_size) { + case PAGE_SIZE_16K: + page_size = PAGE_SIZE; + continue; + case PAGE_SIZE_64K: + page_size = PAGE_SIZE_16K; + continue; + } + } + + if (tmp != reg) { + device_printf(dev, "GITS_BASER%d: " + "unable to be updated: %lx != %lx\n", + i, reg, tmp); + return (ENXIO); + } + + /* We should have made all needed changes */ + break; + } + } + + return (0); +} + +static void +gicv3_its_conftable_init(struct gicv3_its_softc *sc) +{ + + sc->sc_conf_base = (vm_offset_t)contigmalloc(LPI_CONFTAB_SIZE, + M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR, LPI_CONFTAB_ALIGN, + 0); + + /* Set the default configuration */ + memset((void *)sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1, + LPI_CONFTAB_SIZE); + + /* Flush the table to memory */ + cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE); +} + +static void +gicv3_its_pendtables_init(struct gicv3_its_softc *sc) +{ + int i; + + for (i = 0; i < mp_ncpus; i++) { + if (CPU_ISSET(i, &all_cpus) == 0) + continue; + + sc->sc_pend_base[i] = (vm_offset_t)contigmalloc( + LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO, + 0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0); + + /* Flush so the ITS can see the memory */ + cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base, + LPI_PENDTAB_SIZE); + } +} + +static int +its_init_cpu(device_t dev, struct 
gicv3_its_softc *sc) +{ + device_t gicv3; + vm_paddr_t target; + uint64_t xbaser, tmp; + uint32_t ctlr; + u_int cpuid; + + gicv3 = device_get_parent(dev); + cpuid = PCPU_GET(cpuid); + + /* Check if the ITS is enabled on this CPU */ + if ((gic_r_read_4(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0) { + return (ENXIO); + } + + /* Disable LPIs */ + ctlr = gic_r_read_4(gicv3, GICR_CTLR); + ctlr &= ~GICR_CTLR_LPI_ENABLE; + gic_r_write_4(gicv3, GICR_CTLR, ctlr); + + /* Make sure changes are observable by the GIC */ + dsb(sy); + + /* + * Set the redistributor base + */ + xbaser = vtophys(sc->sc_conf_base) | + (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) | + (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) | + (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1); + gic_r_write_8(gicv3, GICR_PROPBASER, xbaser); + + /* Check the cache attributes we set */ + tmp = gic_r_read_8(gicv3, GICR_PROPBASER); + + if ((tmp & GICR_PROPBASER_SHARE_MASK) != + (xbaser & GICR_PROPBASER_SHARE_MASK)) { + if ((tmp & GICR_PROPBASER_SHARE_MASK) == + (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) { + /* We need to mark as non-cacheable */ + xbaser &= ~(GICR_PROPBASER_SHARE_MASK | + GICR_PROPBASER_CACHE_MASK); + /* Non-cacheable */ + xbaser |= GICR_PROPBASER_CACHE_NIN << + GICR_PROPBASER_CACHE_SHIFT; + /* Non-shareable */ + xbaser |= GICR_PROPBASER_SHARE_NS << + GICR_PROPBASER_SHARE_SHIFT; + gic_r_write_8(gicv3, GICR_PROPBASER, xbaser); + } + sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH; + } + + /* + * Set the LPI pending table base + */ + xbaser = vtophys(sc->sc_pend_base[cpuid]) | + (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) | + (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT); + + gic_r_write_8(gicv3, GICR_PENDBASER, xbaser); + + tmp = gic_r_read_8(gicv3, GICR_PENDBASER); + + if ((tmp & GICR_PENDBASER_SHARE_MASK) == + (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) { + /* Clear the cache and shareability bits */ + xbaser &= 
~(GICR_PENDBASER_CACHE_MASK | + GICR_PENDBASER_SHARE_MASK); + /* Mark as non-shareable */ + xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT; + /* And non-cacheable */ + xbaser |= GICR_PENDBASER_CACHE_NIN << + GICR_PENDBASER_CACHE_SHIFT; + } + + /* Enable LPIs */ + ctlr = gic_r_read_4(gicv3, GICR_CTLR); + ctlr |= GICR_CTLR_LPI_ENABLE; + gic_r_write_4(gicv3, GICR_CTLR, ctlr); + + /* Make sure the GIC has seen everything */ + dsb(sy); + + if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) { + /* This ITS wants the redistributor physical address */ + target = vtophys(gicv3_get_redist_vaddr(dev)); + } else { + /* This ITS wants the unique processor number */ + target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)); + } + + sc->sc_its_cols[cpuid]->col_target = target; + sc->sc_its_cols[cpuid]->col_id = cpuid; + + its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1); + its_cmd_invall(dev, sc->sc_its_cols[cpuid]); + + return (0); +} + +static int +gicv3_its_attach(device_t dev) +{ + struct gicv3_its_softc *sc; + const char *name; + uint32_t iidr; + int err, i, rid; + + sc = device_get_softc(dev); + + rid = 0; + sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, + RF_ACTIVE); + if (sc->sc_its_res == NULL) { + device_printf(dev, "Could not allocate memory\n"); + return (ENXIO); + } + + iidr = gic_its_read_4(sc, GITS_IIDR); + for (i = 0; i < nitems(its_quirks); i++) { + if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) { + if (bootverbose) { + device_printf(dev, "Applying %s\n", + its_quirks[i].desc); + } + its_quirks[i].func(dev); + break; + } + } + + /* Allocate the private tables */ + err = gicv3_its_table_init(dev, sc); + if (err != 0) + return (err); + + /* Protects access to the device list */ + mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN); + + /* Protects access to the ITS command circular buffer. 
*/ + mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN); + + /* Allocate the command circular buffer */ + gicv3_its_cmdq_init(sc); + + /* Allocate the per-CPU collections */ + for (int cpu = 0; cpu < mp_ncpus; cpu++) + if (CPU_ISSET(cpu, &all_cpus) != 0) + sc->sc_its_cols[cpu] = malloc( + sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS, + M_WAITOK | M_ZERO); + else + sc->sc_its_cols[cpu] = NULL; + + /* Enable the ITS */ + gic_its_write_4(sc, GITS_CTLR, + gic_its_read_4(sc, GITS_CTLR) | GITS_CTLR_EN); + + /* Create the LPI configuration table */ + gicv3_its_conftable_init(sc); + + /* And the pending tebles */ + gicv3_its_pendtables_init(sc); + + /* Enable LPIs on this CPU */ + its_init_cpu(dev, sc); + + TAILQ_INIT(&sc->sc_its_dev_list); + + /* + * Create the vmem object to allocate IRQs from. We try to use all + * IRQs not already used by the GICv3. + * XXX: This assumes there are no other interrupt controllers in the + * system. + */ + sc->sc_irq_alloc = vmem_create("GICv3 ITS IRQs", 0, + NIRQ - gicv3_get_nirqs(dev), 1, 1, M_FIRSTFIT | M_WAITOK); + + sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * LPI_NIRQS, M_GICV3_ITS, + M_WAITOK | M_ZERO); + name = device_get_nameunit(dev); + for (i = 0; i < LPI_NIRQS; i++) { + sc->sc_irqs[i].gi_irq = i; + err = intr_isrc_register(&sc->sc_irqs[i].gi_isrc, dev, 0, + "%s,%u", name, i); + } + + return (0); +} + +static int +gicv3_its_detach(device_t dev) +{ + + return (ENXIO); +} + +static void +its_quirk_cavium_22375(device_t dev) +{ + struct gicv3_its_softc *sc; + + sc = device_get_softc(dev); + sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375; +} + +static void +gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc) +{ + struct gicv3_its_softc *sc; + struct gicv3_its_irqsrc *girq; + uint8_t *conf; + + sc = device_get_softc(dev); + girq = (struct gicv3_its_irqsrc *)isrc; + conf = (uint8_t *)sc->sc_conf_base; + + conf[girq->gi_irq] &= ~LPI_CONF_ENABLE; + + if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { + /* 
Clean D-cache under command. */ + cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_irq], 1); + } else { + /* DSB inner shareable, store */ + dsb(ishst); + } + + its_cmd_inv(dev, girq->gi_its_dev, girq); +} + +static void +gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc) +{ + struct gicv3_its_softc *sc; + struct gicv3_its_irqsrc *girq; + uint8_t *conf; + + sc = device_get_softc(dev); + girq = (struct gicv3_its_irqsrc *)isrc; + conf = (uint8_t *)sc->sc_conf_base; + + conf[girq->gi_irq] |= LPI_CONF_ENABLE; + + if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { + /* Clean D-cache under command. */ + cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_irq], 1); + } else { + /* DSB inner shareable, store */ + dsb(ishst); + } + + its_cmd_inv(dev, girq->gi_its_dev, girq); +} + +static int +gicv3_its_intr(void *arg, uintptr_t irq) +{ + struct gicv3_its_softc *sc = arg; + struct gicv3_its_irqsrc *girq; + struct trapframe *tf; + + irq -= GIC_FIRST_LPI; + girq = &sc->sc_irqs[irq]; + if (girq == NULL) + panic("gicv3_its_intr: Invalid interrupt %ld", + irq + GIC_FIRST_LPI); + + tf = curthread->td_intr_frame; + intr_isrc_dispatch(&girq->gi_isrc, tf); + return (FILTER_HANDLED); +} + +static void +gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc) +{ + struct gicv3_its_irqsrc *girq; + + girq = (struct gicv3_its_irqsrc *)isrc; + gicv3_its_disable_intr(dev, isrc); + gic_icc_write(EOIR1, girq->gi_irq + GIC_FIRST_LPI); +} + +static void +gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc) +{ + + gicv3_its_enable_intr(dev, isrc); +} + +static void +gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc) +{ + struct gicv3_its_irqsrc *girq; + + girq = (struct gicv3_its_irqsrc *)isrc; + gic_icc_write(EOIR1, girq->gi_irq + GIC_FIRST_LPI); +} + +static int +gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc) +{ + struct gicv3_its_irqsrc *girq; + + girq = (struct gicv3_its_irqsrc *)isrc; + if (CPU_EMPTY(&isrc->isrc_cpu)) { + gic_irq_cpu = 
intr_irq_next_cpu(gic_irq_cpu, &all_cpus); + CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu); + } + + its_cmd_movi(dev, girq); + + return (0); +} + +static int +gicv3_its_map_intr(device_t dev, struct intr_map_data *data, + struct intr_irqsrc **isrcp) +{ + + /* + * This should never happen, we only call this function to map + * interrupts found before the controller driver is ready. + */ + panic("gicv3_its_map_intr: Unable to map a MSI interrupt"); +} + +static int +gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc, + struct resource *res, struct intr_map_data *data) +{ + + /* Bind the interrupt to a CPU */ + gicv3_its_bind_intr(dev, isrc); + + return (0); +} + +#ifdef SMP +static void +gicv3_its_init_secondary(device_t dev) +{ + struct gicv3_its_softc *sc; + + sc = device_get_softc(dev); + + /* + * This is fatal as otherwise we may bind interrupts to this CPU. + * We need a way to tell the interrupt framework to only bind to a + * subset of given CPUs when it performs the shuffle. + */ + if (its_init_cpu(dev, sc) != 0) + panic("gicv3_its_init_secondary: No usable ITS on CPU%d", + PCPU_GET(cpuid)); +} +#endif + +static uint32_t +its_get_devid(device_t pci_dev) +{ + uintptr_t id; + + if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0) + panic("its_get_devid: Unable to get the MSI DeviceID"); + + return (id); +} + +static struct its_dev * +its_device_find(device_t dev, device_t child) +{ + struct gicv3_its_softc *sc; + struct its_dev *its_dev = NULL; + + sc = device_get_softc(dev); + + mtx_lock_spin(&sc->sc_its_dev_lock); + TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) { + if (its_dev->pci_dev == child) + break; + } + mtx_unlock_spin(&sc->sc_its_dev_lock); + + return (its_dev); +} + +static struct its_dev * +its_device_get(device_t dev, device_t child, u_int nvecs) +{ + struct gicv3_its_softc *sc; + struct its_dev *its_dev; + vmem_addr_t irq_base; + size_t esize; + + sc = device_get_softc(dev); + + its_dev = its_device_find(dev, child); + if (its_dev != NULL) + 
return (its_dev); + + its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO); + if (its_dev == NULL) + return (NULL); + + its_dev->pci_dev = child; + its_dev->devid = its_get_devid(child); + + its_dev->lpis.lpi_busy = 0; + its_dev->lpis.lpi_num = nvecs; + its_dev->lpis.lpi_free = nvecs; + + if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT, + &irq_base) != 0) { + free(its_dev, M_GICV3_ITS); + return (NULL); + } + its_dev->lpis.lpi_base = irq_base; + + /* Get ITT entry size */ + esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER)); + + /* + * Allocate ITT for this device. + * PA has to be 256 B aligned. At least two entries for device. + */ + its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256); + its_dev->itt = (vm_offset_t)contigmalloc(its_dev->itt_size, + M_GICV3_ITS, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR, + LPI_INT_TRANS_TAB_ALIGN, 0); + if (its_dev->itt == 0) { + vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs); + free(its_dev, M_GICV3_ITS); + return (NULL); + } + + mtx_lock_spin(&sc->sc_its_dev_lock); + TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry); + mtx_unlock_spin(&sc->sc_its_dev_lock); + + /* Map device to its ITT */ + its_cmd_mapd(dev, its_dev, 1); + + return (its_dev); +} + +static void +its_device_release(device_t dev, struct its_dev *its_dev) +{ + struct gicv3_its_softc *sc; + + KASSERT(its_dev->lpis.lpi_busy == 0, + ("its_device_release: Trying to release an inuse ITS device")); + + /* Unmap device in ITS */ + its_cmd_mapd(dev, its_dev, 0); + + sc = device_get_softc(dev); + + /* Remove the device from the list of devices */ + mtx_lock_spin(&sc->sc_its_dev_lock); + TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry); + mtx_unlock_spin(&sc->sc_its_dev_lock); + + /* Free ITT */ + KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device")); + contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS); + + /* Free the IRQ allocation */ + vmem_free(sc->sc_irq_alloc, 
its_dev->lpis.lpi_base, + its_dev->lpis.lpi_num); + + free(its_dev, M_GICV3_ITS); +} + +static int +gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount, + device_t *pic, struct intr_irqsrc **srcs) +{ + struct gicv3_its_softc *sc; + struct gicv3_its_irqsrc *girq; + struct its_dev *its_dev; + u_int irq; + int i; + + its_dev = its_device_get(dev, child, count); + if (its_dev == NULL) + return (ENXIO); + + KASSERT(its_dev->lpis.lpi_free >= count, + ("gicv3_its_alloc_msi: No free LPIs")); + sc = device_get_softc(dev); + irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num - + its_dev->lpis.lpi_free; + for (i = 0; i < count; i++, irq++) { + its_dev->lpis.lpi_free--; + girq = &sc->sc_irqs[irq]; + girq->gi_its_dev = its_dev; + srcs[i] = (struct intr_irqsrc *)girq; + } + its_dev->lpis.lpi_busy += count; + *pic = dev; + + return (0); +} + +static int +gicv3_its_release_msi(device_t dev, device_t child, int count, + struct intr_irqsrc **isrc) +{ + struct gicv3_its_softc *sc; + struct gicv3_its_irqsrc *girq; + struct its_dev *its_dev; + int i; + + sc = device_get_softc(dev); + its_dev = its_device_find(dev, child); + + KASSERT(its_dev != NULL, + ("gicv3_its_release_msi: Releasing a MSI interrupt with " + "no ITS device")); + KASSERT(its_dev->lpis.lpi_busy >= count, + ("gicv3_its_release_msi: Releasing more interrupts than " + "were allocated: releasing %d, allocated %d", count, + its_dev->lpis.lpi_busy)); + for (i = 0; i < count; i++) { + girq = (struct gicv3_its_irqsrc *)isrc[i]; + girq->gi_its_dev = NULL; + } + its_dev->lpis.lpi_busy -= count; + + if (its_dev->lpis.lpi_busy == 0) + its_device_release(dev, its_dev); + + return (0); +} + +static int +gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic, + struct intr_irqsrc **isrcp) +{ + struct gicv3_its_softc *sc; + struct gicv3_its_irqsrc *girq; + struct its_dev *its_dev; + u_int nvecs, irq; + + nvecs = pci_msix_count(child); + its_dev = its_device_get(dev, child, nvecs); + if (its_dev == 
NULL) + return (ENXIO); + + KASSERT(its_dev->lpis.lpi_free > 0, + ("gicv3_its_alloc_msix: No free LPIs")); + sc = device_get_softc(dev); + irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num - + its_dev->lpis.lpi_free; + its_dev->lpis.lpi_free--; + its_dev->lpis.lpi_busy++; + girq = &sc->sc_irqs[irq]; + girq->gi_its_dev = its_dev; + + *pic = dev; + *isrcp = (struct intr_irqsrc *)girq; + + return (0); +} + +static int +gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc) +{ + struct gicv3_its_softc *sc; + struct gicv3_its_irqsrc *girq; + struct its_dev *its_dev; + + sc = device_get_softc(dev); + its_dev = its_device_find(dev, child); + + KASSERT(its_dev != NULL, + ("gicv3_its_release_msix: Releasing a MSI-X interrupt with " + "no ITS device")); + KASSERT(its_dev->lpis.lpi_busy > 0, + ("gicv3_its_release_msix: Releasing more interrupts than " + "were allocated: allocated %d", its_dev->lpis.lpi_busy)); + girq = (struct gicv3_its_irqsrc *)isrc; + girq->gi_its_dev = NULL; + its_dev->lpis.lpi_busy--; + + if (its_dev->lpis.lpi_busy == 0) + its_device_release(dev, its_dev); + + return (0); +} + +static int +gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc, + uint64_t *addr, uint32_t *data) +{ + struct gicv3_its_softc *sc; + struct gicv3_its_irqsrc *girq; + + sc = device_get_softc(dev); + girq = (struct gicv3_its_irqsrc *)isrc; + + /* Map the message to the given IRQ */ + its_cmd_mapti(dev, girq); + + *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER; + *data = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base; + + return (0); +} + +/* + * Commands handling. 
+ */ + +static __inline void +cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type) +{ + /* Command field: DW0 [7:0] */ + cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK); + cmd->cmd_dword[0] |= htole64(cmd_type); +} + +static __inline void +cmd_format_devid(struct its_cmd *cmd, uint32_t devid) +{ + /* Device ID field: DW0 [63:32] */ + cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK); + cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT); +} + +static __inline void +cmd_format_size(struct its_cmd *cmd, uint16_t size) +{ + /* Size field: DW1 [4:0] */ + cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK); + cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK)); +} + +static __inline void +cmd_format_id(struct its_cmd *cmd, uint32_t id) +{ + /* ID field: DW1 [31:0] */ + cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK); + cmd->cmd_dword[1] |= htole64(id); +} + +static __inline void +cmd_format_pid(struct its_cmd *cmd, uint32_t pid) +{ + /* Physical ID field: DW1 [63:32] */ + cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK); + cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT); +} + +static __inline void +cmd_format_col(struct its_cmd *cmd, uint16_t col_id) +{ + /* Collection field: DW2 [16:0] */ + cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK); + cmd->cmd_dword[2] |= htole64(col_id); +} + +static __inline void +cmd_format_target(struct its_cmd *cmd, uint64_t target) +{ + /* Target Address field: DW2 [47:16] */ + cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK); + cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK); +} + +static __inline void +cmd_format_itt(struct its_cmd *cmd, uint64_t itt) +{ + /* ITT Address field: DW2 [47:8] */ + cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK); + cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK); +} + +static __inline void +cmd_format_valid(struct its_cmd *cmd, uint8_t valid) +{ + /* Valid field: DW2 [63] */ + cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK); + cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT); +} + 
+static inline bool +its_cmd_queue_full(struct gicv3_its_softc *sc) +{ + size_t read_idx, next_write_idx; + + /* Get the index of the next command */ + next_write_idx = (sc->sc_its_cmd_next_idx + 1) % + (ITS_CMDQ_SIZE / sizeof(struct its_cmd)); + /* And the index of the current command being read */ + read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd); + + /* + * The queue is full when the write offset points + * at the command before the current read offset. + */ + return (next_write_idx == read_idx); +} + +static inline void +its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd) +{ + + if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) { + /* Clean D-cache under command. */ + cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd)); + } else { + /* DSB inner shareable, store */ + dsb(ishst); + } + +} + +static inline uint64_t +its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd) +{ + uint64_t off; + + off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd); + + return (off); +} + +static void +its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first, + struct its_cmd *cmd_last) +{ + struct gicv3_its_softc *sc; + uint64_t first, last, read; + size_t us_left; + + sc = device_get_softc(dev); + + /* + * XXX ARM64TODO: This is obviously a significant delay. + * The reason for that is that currently the time frames for + * the command to complete are not known. 
+ */ + us_left = 1000000; + + first = its_cmd_cwriter_offset(sc, cmd_first); + last = its_cmd_cwriter_offset(sc, cmd_last); + + for (;;) { + read = gic_its_read_8(sc, GITS_CREADR); + if (first < last) { + if (read < first || read >= last) + break; + } else if (read < first && read >= last) + break; + + if (us_left-- == 0) { + /* This means timeout */ + device_printf(dev, + "Timeout while waiting for CMD completion.\n"); + return; + } + DELAY(1); + } +} + + +static struct its_cmd * +its_cmd_alloc_locked(device_t dev) +{ + struct gicv3_its_softc *sc; + struct its_cmd *cmd; + size_t us_left; + + sc = device_get_softc(dev); + + /* + * XXX ARM64TODO: This is obviously a significant delay. + * The reason for that is that currently the time frames for + * the command to complete (and therefore free the descriptor) + * are not known. + */ + us_left = 1000000; + + mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED); + while (its_cmd_queue_full(sc)) { + if (us_left-- == 0) { + /* Timeout while waiting for free command */ + device_printf(dev, + "Timeout while waiting for free command\n"); + return (NULL); + } + DELAY(1); + } + + cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx]; + sc->sc_its_cmd_next_idx++; + sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd); + + return (cmd); +} + +static uint64_t +its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc) +{ + uint64_t target; + uint8_t cmd_type; + u_int size; + boolean_t error; + + error = FALSE; + cmd_type = desc->cmd_type; + target = ITS_TARGET_NONE; + + switch (cmd_type) { + case ITS_CMD_MOVI: /* Move interrupt ID to another collection */ + target = desc->cmd_desc_movi.col->col_target; + cmd_format_command(cmd, ITS_CMD_MOVI); + cmd_format_id(cmd, desc->cmd_desc_movi.id); + cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id); + cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid); + break; + case ITS_CMD_SYNC: /* Wait for previous commands completion */ + target = desc->cmd_desc_sync.col->col_target; 
+ cmd_format_command(cmd, ITS_CMD_SYNC); + cmd_format_target(cmd, target); + break; + case ITS_CMD_MAPD: /* Assign ITT to device */ + cmd_format_command(cmd, ITS_CMD_MAPD); + cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt)); + /* + * Size describes number of bits to encode interrupt IDs + * supported by the device minus one. + * When V (valid) bit is zero, this field should be written + * as zero. + */ + if (desc->cmd_desc_mapd.valid != 0) { + size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num); + size = MAX(1, size) - 1; + } else + size = 0; + + cmd_format_size(cmd, size); + cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid); + cmd_format_valid(cmd, desc->cmd_desc_mapd.valid); + break; + case ITS_CMD_MAPC: /* Map collection to Re-Distributor */ + target = desc->cmd_desc_mapc.col->col_target; + cmd_format_command(cmd, ITS_CMD_MAPC); + cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id); + cmd_format_valid(cmd, desc->cmd_desc_mapc.valid); + cmd_format_target(cmd, target); + break; + case ITS_CMD_MAPTI: + target = desc->cmd_desc_mapvi.col->col_target; + cmd_format_command(cmd, ITS_CMD_MAPTI); + cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid); + cmd_format_id(cmd, desc->cmd_desc_mapvi.id); + cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid); + cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id); + break; + case ITS_CMD_MAPI: + target = desc->cmd_desc_mapi.col->col_target; + cmd_format_command(cmd, ITS_CMD_MAPI); + cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid); + cmd_format_id(cmd, desc->cmd_desc_mapi.pid); + cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id); + break; + case ITS_CMD_INV: + target = desc->cmd_desc_inv.col->col_target; + cmd_format_command(cmd, ITS_CMD_INV); + cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid); + cmd_format_id(cmd, desc->cmd_desc_inv.pid); + break; + case ITS_CMD_INVALL: + cmd_format_command(cmd, ITS_CMD_INVALL); + cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id); + break; + 
default: + panic("its_cmd_prepare: Invalid command: %x", cmd_type); + } + + return (target); +} + +static int +its_cmd_send(device_t dev, struct its_cmd_desc *desc) +{ + struct gicv3_its_softc *sc; + struct its_cmd *cmd, *cmd_sync, *cmd_write; + struct its_col col_sync; + struct its_cmd_desc desc_sync; + uint64_t target, cwriter; + + sc = device_get_softc(dev); + mtx_lock_spin(&sc->sc_its_cmd_lock); + cmd = its_cmd_alloc_locked(dev); + if (cmd == NULL) { + device_printf(dev, "could not allocate ITS command\n"); + mtx_unlock_spin(&sc->sc_its_cmd_lock); + return (EBUSY); + } + + target = its_cmd_prepare(cmd, desc); + its_cmd_sync(sc, cmd); + + if (target != ITS_TARGET_NONE) { + cmd_sync = its_cmd_alloc_locked(dev); + if (cmd_sync != NULL) { + desc_sync.cmd_type = ITS_CMD_SYNC; + col_sync.col_target = target; + desc_sync.cmd_desc_sync.col = &col_sync; + its_cmd_prepare(cmd_sync, &desc_sync); + its_cmd_sync(sc, cmd_sync); + } + } + + /* Update GITS_CWRITER */ + cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd); + gic_its_write_8(sc, GITS_CWRITER, cwriter); + cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx]; + mtx_unlock_spin(&sc->sc_its_cmd_lock); + + its_cmd_wait_completion(dev, cmd, cmd_write); + + return (0); +} + +/* Handlers to send commands */ +static void +its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq) +{ + struct gicv3_its_softc *sc; + struct its_cmd_desc desc; + struct its_col *col; + + sc = device_get_softc(dev); + col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1]; + + desc.cmd_type = ITS_CMD_MOVI; + desc.cmd_desc_movi.its_dev = girq->gi_its_dev; + desc.cmd_desc_movi.col = col; + desc.cmd_desc_movi.id = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base; + + its_cmd_send(dev, &desc); +} + +static void +its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid) +{ + struct its_cmd_desc desc; + + desc.cmd_type = ITS_CMD_MAPC; + desc.cmd_desc_mapc.col = col; + /* + * Valid bit set - map the collection. 
+ * Valid bit cleared - unmap the collection. + */ + desc.cmd_desc_mapc.valid = valid; + + its_cmd_send(dev, &desc); +} + +static void +its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq) +{ + struct gicv3_its_softc *sc; + struct its_cmd_desc desc; + struct its_col *col; + u_int col_id; + + sc = device_get_softc(dev); + + col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1; + col = sc->sc_its_cols[col_id]; + + desc.cmd_type = ITS_CMD_MAPTI; + desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev; + desc.cmd_desc_mapvi.col = col; + /* The EventID sent to the device */ + desc.cmd_desc_mapvi.id = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base; + /* The physical interrupt presented to softeware */ + desc.cmd_desc_mapvi.pid = girq->gi_irq + GIC_FIRST_LPI; + + its_cmd_send(dev, &desc); +} + +static void +its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid) +{ + struct its_cmd_desc desc; + + desc.cmd_type = ITS_CMD_MAPD; + desc.cmd_desc_mapd.its_dev = its_dev; + desc.cmd_desc_mapd.valid = valid; + + its_cmd_send(dev, &desc); +} + +static void +its_cmd_inv(device_t dev, struct its_dev *its_dev, + struct gicv3_its_irqsrc *girq) +{ + struct gicv3_its_softc *sc; + struct its_cmd_desc desc; + struct its_col *col; + + sc = device_get_softc(dev); + col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1]; + + desc.cmd_type = ITS_CMD_INV; + /* The EventID sent to the device */ + desc.cmd_desc_inv.pid = girq->gi_irq - its_dev->lpis.lpi_base; + desc.cmd_desc_inv.its_dev = its_dev; + desc.cmd_desc_inv.col = col; + + its_cmd_send(dev, &desc); +} + +static void +its_cmd_invall(device_t dev, struct its_col *col) +{ + struct its_cmd_desc desc; + + desc.cmd_type = ITS_CMD_INVALL; + desc.cmd_desc_invall.col = col; + + its_cmd_send(dev, &desc); +} + +#ifdef FDT +static device_probe_t gicv3_its_fdt_probe; +static device_attach_t gicv3_its_fdt_attach; + +static device_method_t gicv3_its_fdt_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, gicv3_its_fdt_probe), + 
DEVMETHOD(device_attach, gicv3_its_fdt_attach), + + /* End */ + DEVMETHOD_END +}; + +#define its_baseclasses its_fdt_baseclasses +DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods, + sizeof(struct gicv3_its_softc), gicv3_its_driver); +#undef its_baseclasses +static devclass_t gicv3_its_fdt_devclass; + +EARLY_DRIVER_MODULE(its, gic, gicv3_its_fdt_driver, + gicv3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); + +static int +gicv3_its_fdt_probe(device_t dev) +{ + + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its")) + return (ENXIO); + + device_set_desc(dev, "ARM GIC Interrupt Translation Service"); + return (BUS_PROBE_DEFAULT); +} + +static int +gicv3_its_fdt_attach(device_t dev) +{ + struct gicv3_its_softc *sc; + phandle_t xref; + int err; + + err = gicv3_its_attach(dev); + if (err != 0) + return (err); + + sc = device_get_softc(dev); + + /* Register this device as a interrupt controller */ + xref = OF_xref_from_node(ofw_bus_get_node(dev)); + sc->sc_pic = intr_pic_register(dev, xref); + intr_pic_add_handler(device_get_parent(dev), sc->sc_pic, + gicv3_its_intr, sc, GIC_FIRST_LPI, LPI_NIRQS); + + /* Register this device to handle MSI interrupts */ + intr_msi_register(dev, xref); + + return (0); +} +#endif Property changes on: head/sys/arm64/arm64/gicv3_its.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/arm64/include/intr.h =================================================================== --- head/sys/arm64/include/intr.h (revision 301264) +++ head/sys/arm64/include/intr.h (revision 301265) @@ -1,82 +1,82 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_INTR_H_ #define _MACHINE_INTR_H_ #ifdef INTRNG #ifdef FDT #include #endif #include #ifndef NIRQ -#define NIRQ 1024 /* XXX - It should be an option. */ +#define NIRQ 2048 /* XXX - It should be an option. 
*/ #endif static inline void arm_irq_memory_barrier(uintptr_t irq) { } #ifdef SMP void intr_ipi_dispatch(u_int, struct trapframe *); #endif #else int intr_irq_config(u_int, enum intr_trigger, enum intr_polarity); void intr_irq_handler(struct trapframe *); int intr_irq_remove_handler(device_t, u_int, void *); void arm_dispatch_intr(u_int, struct trapframe *); int arm_enable_intr(void); void arm_mask_irq(u_int); void arm_register_root_pic(device_t, u_int); void arm_register_msi_pic(device_t); int arm_alloc_msi(device_t, device_t, int, int, int *); int arm_release_msi(device_t, device_t, int, int *); int arm_alloc_msix(device_t, device_t, int *); int arm_release_msix(device_t, device_t, int); int arm_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); int arm_map_msix(device_t, device_t, int, uint64_t *, uint32_t *); int arm_setup_intr(const char *, driver_filter_t *, driver_intr_t, void *, u_int, enum intr_type, void **); void arm_unmask_irq(u_int); #ifdef SMP int intr_irq_bind(u_int, int); void arm_init_secondary(void); void arm_setup_ipihandler(driver_filter_t *, u_int); void arm_unmask_ipi(u_int); #endif #endif #endif /* _MACHINE_INTR_H */ Index: head/sys/conf/files.arm64 =================================================================== --- head/sys/conf/files.arm64 (revision 301264) +++ head/sys/conf/files.arm64 (revision 301265) @@ -1,105 +1,106 @@ # $FreeBSD$ arm/arm/generic_timer.c standard arm/arm/gic.c optional intrng arm/arm/pmu.c standard arm64/acpica/acpi_machdep.c optional acpi arm64/acpica/OsdEnvironment.c optional acpi arm64/acpica/acpi_wakeup.c optional acpi arm64/acpica/pci_cfgreg.c optional acpi pci arm64/arm64/autoconf.c standard arm64/arm64/bcopy.c standard arm64/arm64/bus_machdep.c standard arm64/arm64/bus_space_asm.S standard arm64/arm64/busdma_bounce.c standard arm64/arm64/busdma_machdep.c standard arm64/arm64/bzero.S standard arm64/arm64/clock.c standard arm64/arm64/copyinout.S standard arm64/arm64/copystr.c standard 
arm64/arm64/cpufunc_asm.S standard arm64/arm64/db_disasm.c optional ddb arm64/arm64/db_interface.c optional ddb arm64/arm64/db_trace.c optional ddb arm64/arm64/debug_monitor.c optional kdb arm64/arm64/disassem.c optional ddb arm64/arm64/dump_machdep.c standard arm64/arm64/elf_machdep.c standard arm64/arm64/exception.S standard arm64/arm64/gic.c optional !intrng +arm64/arm64/gicv3_its.c optional intrng arm64/arm64/gic_acpi.c optional !intrng acpi arm64/arm64/gic_fdt.c optional !intrng fdt arm64/arm64/gic_v3.c standard arm64/arm64/gic_v3_fdt.c optional fdt arm64/arm64/gic_v3_its.c optional !intrng arm64/arm64/identcpu.c standard arm64/arm64/intr_machdep.c optional !intrng arm64/arm64/in_cksum.c optional inet | inet6 arm64/arm64/locore.S standard no-obj arm64/arm64/machdep.c standard arm64/arm64/mem.c standard arm64/arm64/minidump_machdep.c standard arm64/arm64/mp_machdep.c optional smp arm64/arm64/nexus.c standard arm64/arm64/ofw_machdep.c optional fdt arm64/arm64/pic_if.m optional !intrng arm64/arm64/pmap.c standard arm64/arm64/stack_machdep.c optional ddb | stack arm64/arm64/support.S standard arm64/arm64/swtch.S standard arm64/arm64/sys_machdep.c standard arm64/arm64/trap.c standard arm64/arm64/uio_machdep.c standard arm64/arm64/uma_machdep.c standard arm64/arm64/unwind.c optional ddb | kdtrace_hooks | stack arm64/arm64/vfp.c standard arm64/arm64/vm_machdep.c standard arm64/cavium/thunder_pcie_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_pem.c optional soc_cavm_thunderx pci arm64/cavium/thunder_pcie_pem_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_common.c optional soc_cavm_thunderx pci arm64/cloudabi64/cloudabi64_sysvec.c optional compat_cloudabi64 crypto/blowfish/bf_enc.c optional crypto | ipsec crypto/des/des_enc.c optional crypto | ipsec | netsmb dev/acpica/acpi_if.m optional acpi dev/ahci/ahci_generic.c optional ahci fdt dev/hwpmc/hwpmc_arm64.c optional hwpmc dev/hwpmc/hwpmc_arm64_md.c optional hwpmc 
dev/mmc/host/dwmmc.c optional dwmmc dev/mmc/host/dwmmc_hisi.c optional dwmmc soc_hisi_hi6220 dev/ofw/ofw_cpu.c optional fdt dev/ofw/ofwpci.c optional fdt pci dev/pci/pci_host_generic.c optional pci fdt dev/psci/psci.c optional psci dev/psci/psci_arm64.S optional psci dev/uart/uart_cpu_fdt.c optional uart fdt dev/uart/uart_dev_pl011.c optional uart pl011 dev/usb/controller/dwc_otg_hisi.c optional dwcotg soc_hisi_hi6220 dev/usb/controller/generic_ohci.c optional ohci fdt dev/usb/controller/generic_usb_if.m optional ohci fdt dev/vnic/mrml_bridge.c optional vnic fdt dev/vnic/nic_main.c optional vnic pci dev/vnic/nicvf_main.c optional vnic pci pci_iov dev/vnic/nicvf_queues.c optional vnic pci pci_iov dev/vnic/thunder_bgx_fdt.c optional vnic fdt dev/vnic/thunder_bgx.c optional vnic pci dev/vnic/thunder_mdio_fdt.c optional vnic fdt dev/vnic/thunder_mdio.c optional vnic dev/vnic/lmac_if.m optional inet | inet6 | vnic kern/kern_clocksource.c standard kern/msi_if.m optional intrng kern/pic_if.m optional intrng kern/subr_devmap.c standard kern/subr_intr.c optional intrng libkern/bcmp.c standard libkern/ffs.c standard libkern/ffsl.c standard libkern/ffsll.c standard libkern/fls.c standard libkern/flsl.c standard libkern/flsll.c standard libkern/memmove.c standard libkern/memset.c standard cddl/contrib/opensolaris/common/atomic/aarch64/opensolaris_atomic.S optional zfs | dtrace compile-with "${CDDL_C}" cddl/dev/dtrace/aarch64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/aarch64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/aarch64/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}"