Index: stable/11/sys/arm64/arm64/autoconf.c =================================================================== --- stable/11/sys/arm64/arm64/autoconf.c (revision 305135) +++ stable/11/sys/arm64/arm64/autoconf.c (revision 305136) @@ -1,96 +1,92 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Setup the system to run on the current machine. * * Configure() is called at boot time and initializes the vba * device tables and the memory controller monitoring. Available * devices are determined (from possibilities mentioned in ioconf.c), * and the drivers are initialized. */ #include #include #include #include #include #include static void configure_first(void *); static void configure(void *); static void configure_final(void *); SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL); /* SI_ORDER_SECOND is hookable */ SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL); /* SI_ORDER_MIDDLE is hookable */ SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL); /* * Determine i/o configuration for a machine. */ static void configure_first(void *dummy) { /* nexus0 is the top of the device tree */ device_add_child(root_bus, "nexus", 0); } static void configure(void *dummy) { /* initialize new bus architecture */ root_bus_configure(); } static void configure_final(void *dummy) { -#ifdef INTRNG /* Enable interrupt reception on this CPU */ intr_enable(); -#else - arm_enable_intr(); -#endif cninit_finish(); if (bootverbose) printf("Device configuration finished.\n"); cold = 0; } Index: stable/11/sys/arm64/arm64/gic_v3.c =================================================================== --- stable/11/sys/arm64/arm64/gic_v3.c (revision 305135) +++ stable/11/sys/arm64/arm64/gic_v3.c (revision 305136) @@ -1,1417 +1,1167 @@ /*- * Copyright (c) 2015-2016 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * the sponsorship of the FreeBSD Foundation. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #endif #include "pic_if.h" #include "gic_v3_reg.h" #include "gic_v3_var.h" static bus_read_ivar_t gic_v3_read_ivar; -#ifdef INTRNG static pic_disable_intr_t gic_v3_disable_intr; static pic_enable_intr_t gic_v3_enable_intr; static pic_map_intr_t gic_v3_map_intr; static pic_setup_intr_t gic_v3_setup_intr; static pic_teardown_intr_t gic_v3_teardown_intr; static pic_post_filter_t gic_v3_post_filter; static pic_post_ithread_t gic_v3_post_ithread; static pic_pre_ithread_t gic_v3_pre_ithread; static pic_bind_intr_t gic_v3_bind_intr; #ifdef SMP static pic_init_secondary_t gic_v3_init_secondary; static pic_ipi_send_t gic_v3_ipi_send; static pic_ipi_setup_t gic_v3_ipi_setup; #endif static u_int gic_irq_cpu; #ifdef SMP static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1]; static u_int sgi_first_unused = GIC_FIRST_SGI; #endif -#else -/* Device and PIC methods */ -static int gic_v3_bind(device_t, u_int, u_int); -static void gic_v3_dispatch(device_t, struct trapframe *); -static void gic_v3_eoi(device_t, u_int); -static void gic_v3_mask_irq(device_t, u_int); -static void gic_v3_unmask_irq(device_t, u_int); -#ifdef SMP -static void gic_v3_init_secondary(device_t); -static void gic_v3_ipi_send(device_t, cpuset_t, u_int); -#endif -#endif static device_method_t gic_v3_methods[] = { /* Device interface */ DEVMETHOD(device_detach, gic_v3_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, gic_v3_read_ivar), -#ifdef INTRNG /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, gic_v3_disable_intr), DEVMETHOD(pic_enable_intr, gic_v3_enable_intr), DEVMETHOD(pic_map_intr, gic_v3_map_intr), DEVMETHOD(pic_setup_intr, gic_v3_setup_intr), DEVMETHOD(pic_teardown_intr, gic_v3_teardown_intr), DEVMETHOD(pic_post_filter, gic_v3_post_filter), DEVMETHOD(pic_post_ithread, gic_v3_post_ithread), DEVMETHOD(pic_pre_ithread, gic_v3_pre_ithread), #ifdef SMP DEVMETHOD(pic_bind_intr, gic_v3_bind_intr), DEVMETHOD(pic_init_secondary, gic_v3_init_secondary), DEVMETHOD(pic_ipi_send, gic_v3_ipi_send), DEVMETHOD(pic_ipi_setup, 
gic_v3_ipi_setup), #endif -#else - /* PIC interface */ - DEVMETHOD(pic_bind, gic_v3_bind), - DEVMETHOD(pic_dispatch, gic_v3_dispatch), - DEVMETHOD(pic_eoi, gic_v3_eoi), - DEVMETHOD(pic_mask, gic_v3_mask_irq), - DEVMETHOD(pic_unmask, gic_v3_unmask_irq), -#ifdef SMP - DEVMETHOD(pic_init_secondary, gic_v3_init_secondary), - DEVMETHOD(pic_ipi_send, gic_v3_ipi_send), -#endif -#endif /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods, sizeof(struct gic_v3_softc)); /* * Driver-specific definitions. */ MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR); /* * Helper functions and definitions. */ /* Destination registers, either Distributor or Re-Distributor */ enum gic_v3_xdist { DIST = 0, REDIST, }; /* Helper routines starting with gic_v3_ */ static int gic_v3_dist_init(struct gic_v3_softc *); static int gic_v3_redist_alloc(struct gic_v3_softc *); static int gic_v3_redist_find(struct gic_v3_softc *); static int gic_v3_redist_init(struct gic_v3_softc *); static int gic_v3_cpu_init(struct gic_v3_softc *); static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist); /* A sequence of init functions for primary (boot) CPU */ typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *); /* Primary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_primary_init[] = { gic_v3_dist_init, gic_v3_redist_alloc, gic_v3_redist_init, gic_v3_cpu_init, NULL }; #ifdef SMP /* Secondary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_secondary_init[] = { gic_v3_redist_init, gic_v3_cpu_init, NULL }; #endif -#ifdef INTRNG uint32_t gic_r_read_4(device_t dev, bus_size_t offset) { struct gic_v3_softc *sc; sc = device_get_softc(dev); return (bus_read_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset)); } uint64_t gic_r_read_8(device_t dev, bus_size_t offset) { struct gic_v3_softc *sc; sc = device_get_softc(dev); return (bus_read_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset)); } void gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val) { struct gic_v3_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val); } void gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val) { struct gic_v3_softc *sc; sc = device_get_softc(dev); bus_write_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val); } -#endif /* * Device interface. */ int gic_v3_attach(device_t dev) { struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; uint32_t typer; int rid; int err; size_t i; -#ifdef INTRNG u_int irq; const char *name; -#endif sc = device_get_softc(dev); sc->gic_registered = FALSE; sc->dev = dev; err = 0; /* Initialize mutex */ mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN); /* * Allocate array of struct resource. * One entry for Distributor and all remaining for Re-Distributor. */ sc->gic_res = malloc( sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1), M_GIC_V3, M_WAITOK); /* Now allocate corresponding resources */ for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) { sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->gic_res[rid] == NULL) return (ENXIO); } /* * Distributor interface */ sc->gic_dist = sc->gic_res[0]; /* * Re-Dristributor interface */ /* Allocate space under region descriptions */ sc->gic_redists.regions = malloc( sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions, M_GIC_V3, M_WAITOK); /* Fill-up bus_space information for each region. 
*/ for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++) sc->gic_redists.regions[i] = sc->gic_res[rid]; /* Get the number of supported SPI interrupts */ typer = gic_d_read(sc, 4, GICD_TYPER); sc->gic_nirqs = GICD_TYPER_I_NUM(typer); if (sc->gic_nirqs > GIC_I_NUM_MAX) sc->gic_nirqs = GIC_I_NUM_MAX; -#ifdef INTRNG sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs, M_GIC_V3, M_WAITOK | M_ZERO); name = device_get_nameunit(dev); for (irq = 0; irq < sc->gic_nirqs; irq++) { struct intr_irqsrc *isrc; sc->gic_irqs[irq].gi_irq = irq; sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM; sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM; isrc = &sc->gic_irqs[irq].gi_isrc; if (irq <= GIC_LAST_SGI) { err = intr_isrc_register(isrc, sc->dev, INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI); } else if (irq <= GIC_LAST_PPI) { err = intr_isrc_register(isrc, sc->dev, INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI); } else { err = intr_isrc_register(isrc, sc->dev, 0, "%s,s%u", name, irq - GIC_FIRST_SPI); } if (err != 0) { /* XXX call intr_isrc_deregister() */ free(sc->gic_irqs, M_DEVBUF); return (err); } } -#endif /* Get the number of supported interrupt identifier bits */ sc->gic_idbits = GICD_TYPER_IDBITS(typer); if (bootverbose) { device_printf(dev, "SPIs: %u, IDs: %u\n", sc->gic_nirqs, (1 << sc->gic_idbits) - 1); } /* Train init sequence for boot CPU */ for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) return (err); } - /* - * Full success. - * Now register PIC to the interrupts handling layer. - */ -#ifndef INTRNG - arm_register_root_pic(dev, sc->gic_nirqs); - sc->gic_registered = TRUE; -#endif return (0); } int gic_v3_detach(device_t dev) { struct gic_v3_softc *sc; size_t i; int rid; sc = device_get_softc(dev); if (device_is_attached(dev)) { /* * XXX: We should probably deregister PIC */ if (sc->gic_registered) panic("Trying to detach registered PIC"); } for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]); for (i = 0; i < mp_ncpus; i++) free(sc->gic_redists.pcpu[i], M_GIC_V3); free(sc->gic_res, M_GIC_V3); free(sc->gic_redists.regions, M_GIC_V3); return (0); } static int gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct gic_v3_softc *sc; sc = device_get_softc(dev); switch (which) { case GICV3_IVAR_NIRQS: *result = sc->gic_nirqs; return (0); case GICV3_IVAR_REDIST_VADDR: *result = (uintptr_t)rman_get_virtual( sc->gic_redists.pcpu[PCPU_GET(cpuid)]); return (0); } return (ENOENT); } -#ifdef INTRNG int arm_gic_v3_intr(void *arg) { struct gic_v3_softc *sc = arg; struct gic_v3_irqsrc *gi; struct intr_pic *pic; uint64_t active_irq; struct trapframe *tf; bool first; first = true; pic = sc->gic_pic; while (1) { if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) { /* * Hardware: Cavium ThunderX * Chip revision: Pass 1.0 (early version) * Pass 1.1 (production) * ERRATUM: 22978, 23154 */ __asm __volatile( "nop;nop;nop;nop;nop;nop;nop;nop; \n" "mrs %0, ICC_IAR1_EL1 \n" "nop;nop;nop;nop; \n" "dsb sy \n" : "=&r" (active_irq)); } else { active_irq = gic_icc_read(IAR1); } if (active_irq >= GIC_FIRST_LPI) { intr_child_irq_handler(pic, active_irq); continue; } if (__predict_false(active_irq >= sc->gic_nirqs)) return (FILTER_HANDLED); tf = curthread->td_intr_frame; gi = &sc->gic_irqs[active_irq]; if (active_irq <= GIC_LAST_SGI) { /* Call EOI for all IPI before dispatch. 
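
arm_gic_v3_intr() above acknowledges an interrupt from ICC_IAR1_EL1 and then branches on which range the INTID falls into: anything at or above GIC_FIRST_LPI is handed to the ITS child, SGIs are EOI'd immediately and passed to the IPI layer, and PPIs/SPIs go through intr_isrc_dispatch() (continued just below). A minimal, standalone sketch of that classification; the ranges are the standard GICv3 split that the driver's GIC_FIRST_*/GIC_LAST_* constants encode, and gic_intid_class() is a hypothetical helper, not part of the driver:

#include <stdio.h>

/*
 * Standard GICv3 INTID ranges, as encoded by the driver's GIC_FIRST_* and
 * GIC_LAST_* constants: SGI 0-15, PPI 16-31, SPI 32-1019, LPI >= 8192.
 */
static const char *
gic_intid_class(unsigned int intid)
{

        if (intid <= 15)
                return ("SGI (software-generated, used for IPIs)");
        if (intid <= 31)
                return ("PPI (private per-CPU peripheral interrupt)");
        if (intid <= 1019)
                return ("SPI (shared peripheral interrupt)");
        if (intid >= 8192)
                return ("LPI (message-signalled, routed to the ITS child)");
        return ("special or reserved INTID");
}

int
main(void)
{
        unsigned int samples[] = { 3, 27, 100, 1021, 9000 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("INTID %u: %s\n", samples[i],
                    gic_intid_class(samples[i]));
        return (0);
}
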
*/ gic_icc_write(EOIR1, (uint64_t)active_irq); #ifdef SMP intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf); #else device_printf(sc->dev, "SGI %u on UP system detected\n", active_irq - GIC_FIRST_SGI); #endif } else if (active_irq >= GIC_FIRST_PPI && active_irq <= GIC_LAST_SPI) { if (gi->gi_pol == INTR_TRIGGER_EDGE) gic_icc_write(EOIR1, gi->gi_irq); if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) { if (gi->gi_pol != INTR_TRIGGER_EDGE) gic_icc_write(EOIR1, gi->gi_irq); gic_v3_disable_intr(sc->dev, &gi->gi_isrc); device_printf(sc->dev, "Stray irq %lu disabled\n", active_irq); } } } } #ifdef FDT static int gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { u_int irq; if (ncells < 3) return (EINVAL); /* * The 1st cell is the interrupt type: * 0 = SPI * 1 = PPI * The 2nd cell contains the interrupt number: * [0 - 987] for SPI * [0 - 15] for PPI * The 3rd cell is the flags, encoded as follows: * bits[3:0] trigger type and level flags * 1 = edge triggered * 2 = edge triggered (PPI only) * 4 = level-sensitive * 8 = level-sensitive (PPI only) */ switch (cells[0]) { case 0: irq = GIC_FIRST_SPI + cells[1]; /* SPI irq is checked later. */ break; case 1: irq = GIC_FIRST_PPI + cells[1]; if (irq > GIC_LAST_PPI) { device_printf(dev, "unsupported PPI interrupt " "number %u\n", cells[1]); return (EINVAL); } break; default: device_printf(dev, "unsupported interrupt type " "configuration %u\n", cells[0]); return (EINVAL); } switch (cells[2] & 0xf) { case 1: *trigp = INTR_TRIGGER_EDGE; *polp = INTR_POLARITY_HIGH; break; case 2: *trigp = INTR_TRIGGER_EDGE; *polp = INTR_POLARITY_LOW; break; case 4: *trigp = INTR_TRIGGER_LEVEL; *polp = INTR_POLARITY_HIGH; break; case 8: *trigp = INTR_TRIGGER_LEVEL; *polp = INTR_POLARITY_LOW; break; default: device_printf(dev, "unsupported trigger/polarity " "configuration 0x%02x\n", cells[2]); return (EINVAL); } /* Check the interrupt is valid */ if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH) return (EINVAL); *irqp = irq; return (0); } #endif static int do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { struct gic_v3_softc *sc; enum intr_polarity pol; enum intr_trigger trig; #ifdef FDT struct intr_map_data_fdt *daf; #endif u_int irq; sc = device_get_softc(dev); switch (data->type) { #ifdef FDT case INTR_MAP_DATA_FDT: daf = (struct intr_map_data_fdt *)data; if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol, &trig) != 0) return (EINVAL); break; #endif default: return (EINVAL); } if (irq >= sc->gic_nirqs) return (EINVAL); switch (pol) { case INTR_POLARITY_CONFORM: case INTR_POLARITY_LOW: case INTR_POLARITY_HIGH: break; default: return (EINVAL); } switch (trig) { case INTR_TRIGGER_CONFORM: case INTR_TRIGGER_EDGE: case INTR_TRIGGER_LEVEL: break; default: return (EINVAL); } *irqp = irq; if (polp != NULL) *polp = pol; if (trigp != NULL) *trigp = trig; return (0); } static int gic_v3_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct gic_v3_softc *sc; int error; u_int irq; error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL); if (error == 0) { sc = device_get_softc(dev); *isrcp = GIC_INTR_ISRC(sc, irq); } return (error); } static int gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_v3_softc *sc = device_get_softc(dev); struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; enum intr_trigger trig; enum intr_polarity 
pol; uint32_t reg; u_int irq; int error; if (data == NULL) return (ENOTSUP); error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig); if (error != 0) return (error); if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM || trig == INTR_TRIGGER_CONFORM) return (EINVAL); /* Compare config if this is not first setup. */ if (isrc->isrc_handlers != 0) { if (pol != gi->gi_pol || trig != gi->gi_trig) return (EINVAL); else return (0); } gi->gi_pol = pol; gi->gi_trig = trig; /* * XXX - In case that per CPU interrupt is going to be enabled in time * when SMP is already started, we need some IPI call which * enables it on others CPUs. Further, it's more complicated as * pic_enable_source() and pic_disable_source() should act on * per CPU basis only. Thus, it should be solved here somehow. */ if (isrc->isrc_flags & INTR_ISRCF_PPI) CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) { mtx_lock_spin(&sc->gic_mtx); /* Set the trigger and polarity */ if (irq <= GIC_LAST_PPI) reg = gic_r_read(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICFGR(irq)); else reg = gic_d_read(sc, 4, GICD_ICFGR(irq)); if (trig == INTR_TRIGGER_LEVEL) reg &= ~(2 << ((irq % 16) * 2)); else reg |= 2 << ((irq % 16) * 2); if (irq <= GIC_LAST_PPI) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg); gic_v3_wait_for_rwp(sc, REDIST); } else { gic_d_write(sc, 4, GICD_ICFGR(irq), reg); gic_v3_wait_for_rwp(sc, DIST); } mtx_unlock_spin(&sc->gic_mtx); gic_v3_bind_intr(dev, isrc); } return (0); } static int gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; if (isrc->isrc_handlers == 0) { gi->gi_pol = INTR_POLARITY_CONFORM; gi->gi_trig = INTR_TRIGGER_CONFORM; } return (0); } static void gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; u_int irq; sc = device_get_softc(dev); gi = (struct gic_v3_irqsrc *)isrc; irq = gi->gi_irq; if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else panic("%s: Unsupported IRQ %u", __func__, irq); } static void gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; u_int irq; sc = device_get_softc(dev); gi = (struct gic_v3_irqsrc *)isrc; irq = gi->gi_irq; if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else panic("%s: Unsupported IRQ %u", __func__, irq); } static void gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; gic_v3_disable_intr(dev, isrc); gic_icc_write(EOIR1, gi->gi_irq); } static void gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc) { gic_v3_enable_intr(dev, isrc); } static void gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; if (gi->gi_pol == 
INTR_TRIGGER_EDGE) return; gic_icc_write(EOIR1, gi->gi_irq); } static int gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; int cpu; gi = (struct gic_v3_irqsrc *)isrc; if (gi->gi_irq <= GIC_LAST_PPI) return (EINVAL); KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI, ("%s: Attempting to bind an invalid IRQ", __func__)); sc = device_get_softc(dev); if (CPU_EMPTY(&isrc->isrc_cpu)) { gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus); CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu); gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(gic_irq_cpu)); } else { /* * We can only bind to a single CPU so select * the first CPU found. */ cpu = CPU_FFS(&isrc->isrc_cpu) - 1; gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu)); } return (0); } #ifdef SMP static void gic_v3_init_secondary(device_t dev) { device_t child; struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; struct intr_irqsrc *isrc; u_int cpu, irq; int err, i; sc = device_get_softc(dev); cpu = PCPU_GET(cpuid); /* Train init sequence for boot CPU */ for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) { device_printf(dev, "Could not initialize GIC for CPU%u\n", cpu); return; } } /* Unmask attached SGI interrupts. */ for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) { isrc = GIC_INTR_ISRC(sc, irq); if (intr_isrc_init_on_cpu(isrc, cpu)) gic_v3_enable_intr(dev, isrc); } /* Unmask attached PPI interrupts. */ for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) { isrc = GIC_INTR_ISRC(sc, irq); if (intr_isrc_init_on_cpu(isrc, cpu)) gic_v3_enable_intr(dev, isrc); } for (i = 0; i < sc->gic_nchildren; i++) { child = sc->gic_children[i]; PIC_INIT_SECONDARY(child); } } static void gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus, u_int ipi) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; uint64_t aff, val, irq; int i; #define GIC_AFF_MASK (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK) #define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK) aff = GIC_AFFINITY(0); irq = gi->gi_irq; val = 0; /* Iterate through all CPUs in set */ for (i = 0; i < mp_ncpus; i++) { /* Move to the next affinity group */ if (aff != GIC_AFFINITY(i)) { /* Send the IPI */ if (val != 0) { gic_icc_write(SGI1R, val); val = 0; } aff = GIC_AFFINITY(i); } /* Send the IPI to this cpu */ if (CPU_ISSET(i, &cpus)) { #define ICC_SGI1R_AFFINITY(aff) \ (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) | \ ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) | \ ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT)) /* Set the affinity when the first at this level */ if (val == 0) val = ICC_SGI1R_AFFINITY(aff) | irq << ICC_SGI1R_EL1_SGIID_SHIFT; /* Set the bit to send the IPI to te CPU */ val |= 1 << CPU_AFF0(CPU_AFFINITY(i)); } } /* Send the IPI to the last cpu affinity group */ if (val != 0) gic_icc_write(SGI1R, val); #undef GIC_AFF_MASK #undef GIC_AFFINITY } static int gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp) { struct intr_irqsrc *isrc; struct gic_v3_softc *sc = device_get_softc(dev); if (sgi_first_unused > GIC_LAST_SGI) return (ENOSPC); isrc = GIC_INTR_ISRC(sc, sgi_first_unused); sgi_to_ipi[sgi_first_unused++] = ipi; CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); *isrcp = isrc; return (0); } #endif /* SMP */ -#else /* INTRNG */ -/* - * PIC interface. 
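
gic_v3_ipi_send() above groups the target CPUs by their upper affinity levels (Aff3.Aff2.Aff1) and raises one SGI per group, selecting individual CPUs within the group through the 16-bit target list indexed by Aff0. A standalone sketch of the value it builds for one group; the shift constants restate the ICC_SGI1R_EL1 layout from the GICv3 architecture (TargetList[15:0], Aff1[23:16], INTID[27:24], Aff2[39:32], Aff3[55:48]), and sgi1r_value() is a hypothetical helper rather than driver code:

#include <stdint.h>
#include <stdio.h>

/* ICC_SGI1R_EL1 field positions per the GICv3 architecture (illustrative). */
#define SGI1R_AFF1_SHIFT        16
#define SGI1R_INTID_SHIFT       24
#define SGI1R_AFF2_SHIFT        32
#define SGI1R_AFF3_SHIFT        48

/*
 * Compose the value written to ICC_SGI1R_EL1 for one affinity group: all
 * targets share Aff3.Aff2.Aff1 and are selected by setting bit Aff0 in the
 * 16-bit target list.
 */
static uint64_t
sgi1r_value(uint8_t aff3, uint8_t aff2, uint8_t aff1, uint16_t targetlist,
    uint8_t sgi_id)
{

        return (((uint64_t)aff3 << SGI1R_AFF3_SHIFT) |
            ((uint64_t)aff2 << SGI1R_AFF2_SHIFT) |
            ((uint64_t)aff1 << SGI1R_AFF1_SHIFT) |
            ((uint64_t)(sgi_id & 0xf) << SGI1R_INTID_SHIFT) |
            targetlist);
}

int
main(void)
{

        /* Hypothetical cluster 1.0.0, CPUs with Aff0 = 0 and 2, SGI #3. */
        printf("SGI1R = 0x%016llx\n",
            (unsigned long long)sgi1r_value(0, 0, 1, (1u << 0) | (1u << 2), 3));
        return (0);
}
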
- */ - -static int -gic_v3_bind(device_t dev, u_int irq, u_int cpuid) -{ - uint64_t aff; - struct gic_v3_softc *sc; - - sc = device_get_softc(dev); - - if (irq <= GIC_LAST_PPI) { - /* Can't bind PPI to another CPU but it's not an error */ - return (0); - } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { - aff = CPU_AFFINITY(cpuid); - gic_d_write(sc, 4, GICD_IROUTER(irq), aff); - return (0); - } else if (irq >= GIC_FIRST_LPI) - return (lpi_migrate(dev, irq, cpuid)); - - return (EINVAL); -} - -static void -gic_v3_dispatch(device_t dev, struct trapframe *frame) -{ - uint64_t active_irq; - - while (1) { - if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) { - /* - * Hardware: Cavium ThunderX - * Chip revision: Pass 1.0 (early version) - * Pass 1.1 (production) - * ERRATUM: 22978, 23154 - */ - __asm __volatile( - "nop;nop;nop;nop;nop;nop;nop;nop; \n" - "mrs %0, ICC_IAR1_EL1 \n" - "nop;nop;nop;nop; \n" - "dsb sy \n" - : "=&r" (active_irq)); - } else { - active_irq = gic_icc_read(IAR1); - } - - if (__predict_false(active_irq == ICC_IAR1_EL1_SPUR)) - break; - - if (__predict_true((active_irq >= GIC_FIRST_PPI && - active_irq <= GIC_LAST_SPI) || active_irq >= GIC_FIRST_LPI)) { - arm_dispatch_intr(active_irq, frame); - continue; - } - - if (active_irq <= GIC_LAST_SGI) { - gic_icc_write(EOIR1, (uint64_t)active_irq); - arm_dispatch_intr(active_irq, frame); - continue; - } - } -} - -static void -gic_v3_eoi(device_t dev, u_int irq) -{ - - gic_icc_write(EOIR1, (uint64_t)irq); -} - -static void -gic_v3_mask_irq(device_t dev, u_int irq) -{ - struct gic_v3_softc *sc; - - sc = device_get_softc(dev); - - if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ - gic_r_write(sc, 4, - GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq)); - gic_v3_wait_for_rwp(sc, REDIST); - } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ - gic_r_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq)); - gic_v3_wait_for_rwp(sc, DIST); - } else if (irq >= GIC_FIRST_LPI) { /* LPIs */ - lpi_mask_irq(dev, irq); - } else - panic("%s: Unsupported IRQ number %u", __func__, irq); -} - -static void -gic_v3_unmask_irq(device_t dev, u_int irq) -{ - struct gic_v3_softc *sc; - - sc = device_get_softc(dev); - - if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ - gic_r_write(sc, 4, - GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq)); - gic_v3_wait_for_rwp(sc, REDIST); - } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ - gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq)); - gic_v3_wait_for_rwp(sc, DIST); - } else if (irq >= GIC_FIRST_LPI) { /* LPIs */ - lpi_unmask_irq(dev, irq); - } else - panic("%s: Unsupported IRQ number %u", __func__, irq); -} - -#ifdef SMP -static void -gic_v3_init_secondary(device_t dev) -{ - struct gic_v3_softc *sc; - gic_v3_initseq_t *init_func; - int err; - - sc = device_get_softc(dev); - - /* Train init sequence for boot CPU */ - for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) { - err = (*init_func)(sc); - if (err != 0) { - device_printf(dev, - "Could not initialize GIC for CPU%u\n", - PCPU_GET(cpuid)); - return; - } - } - - /* - * Try to initialize ITS. - * If there is no driver attached this routine will fail but that - * does not mean failure here as only LPIs will not be functional - * on the current CPU. - */ - if (its_init_cpu(NULL) != 0) { - device_printf(dev, - "Could not initialize ITS for CPU%u. 
" - "No LPIs will arrive on this CPU\n", - PCPU_GET(cpuid)); - } - - /* - * ARM64TODO: Unmask timer PPIs. To be removed when appropriate - * mechanism is implemented. - * Activate the timer interrupts: virtual (27), secure (29), - * and non-secure (30). Use hardcoded values here as there - * should be no defines for them. - */ - gic_v3_unmask_irq(dev, 27); - gic_v3_unmask_irq(dev, 29); - gic_v3_unmask_irq(dev, 30); -} - -static void -gic_v3_ipi_send(device_t dev, cpuset_t cpuset, u_int ipi) -{ - u_int cpu; - uint64_t aff, tlist; - uint64_t val; - uint64_t aff_mask; - - /* Set affinity mask to match level 3, 2 and 1 */ - aff_mask = CPU_AFF1_MASK | CPU_AFF2_MASK | CPU_AFF3_MASK; - - /* Iterate through all CPUs in set */ - while (!CPU_EMPTY(&cpuset)) { - aff = tlist = 0; - for (cpu = 0; cpu < mp_ncpus; cpu++) { - /* Compose target list for single AFF3:AFF2:AFF1 set */ - if (CPU_ISSET(cpu, &cpuset)) { - if (!tlist) { - /* - * Save affinity of the first CPU to - * send IPI to for later comparison. - */ - aff = CPU_AFFINITY(cpu); - tlist |= (1UL << CPU_AFF0(aff)); - CPU_CLR(cpu, &cpuset); - } - /* Check for same Affinity level 3, 2 and 1 */ - if ((aff & aff_mask) == (CPU_AFFINITY(cpu) & aff_mask)) { - tlist |= (1UL << CPU_AFF0(CPU_AFFINITY(cpu))); - /* Clear CPU in cpuset from target list */ - CPU_CLR(cpu, &cpuset); - } - } - } - if (tlist) { - KASSERT((tlist & ~ICC_SGI1R_EL1_TL_MASK) == 0, - ("Target list too long for GICv3 IPI")); - /* Send SGI to CPUs in target list */ - val = tlist; - val |= (uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT; - val |= (uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT; - val |= (uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT; - val |= (uint64_t)(ipi & ICC_SGI1R_EL1_SGIID_MASK) << - ICC_SGI1R_EL1_SGIID_SHIFT; - gic_icc_write(SGI1R, val); - } - } -} -#endif -#endif /* !INTRNG */ /* * Helper routines */ static void gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist) { struct resource *res; u_int cpuid; size_t us_left = 1000000; cpuid = PCPU_GET(cpuid); switch (xdist) { case DIST: res = sc->gic_dist; break; case REDIST: res = sc->gic_redists.pcpu[cpuid]; break; default: KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__)); return; } while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) { DELAY(1); if (us_left-- == 0) panic("GICD Register write pending for too long"); } } /* CPU interface. */ static __inline void gic_v3_cpu_priority(uint64_t mask) { /* Set prority mask */ gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK); } static int gic_v3_cpu_enable_sre(struct gic_v3_softc *sc) { uint64_t sre; u_int cpuid; cpuid = PCPU_GET(cpuid); /* * Set the SRE bit to enable access to GIC CPU interface * via system registers. */ sre = READ_SPECIALREG(icc_sre_el1); sre |= ICC_SRE_EL1_SRE; WRITE_SPECIALREG(icc_sre_el1, sre); isb(); /* * Now ensure that the bit is set. */ sre = READ_SPECIALREG(icc_sre_el1); if ((sre & ICC_SRE_EL1_SRE) == 0) { /* We are done. 
This was disabled in EL2 */ device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface " "via system registers\n", cpuid); return (ENXIO); } else if (bootverbose) { device_printf(sc->dev, "CPU%u enabled CPU interface via system registers\n", cpuid); } return (0); } static int gic_v3_cpu_init(struct gic_v3_softc *sc) { int err; /* Enable access to CPU interface via system registers */ err = gic_v3_cpu_enable_sre(sc); if (err != 0) return (err); /* Priority mask to minimum - accept all interrupts */ gic_v3_cpu_priority(GIC_PRIORITY_MIN); /* Disable EOI mode */ gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE); /* Enable group 1 (insecure) interrups */ gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN); return (0); } /* Distributor */ static int gic_v3_dist_init(struct gic_v3_softc *sc) { uint64_t aff; u_int i; /* * 1. Disable the Distributor */ gic_d_write(sc, 4, GICD_CTLR, 0); gic_v3_wait_for_rwp(sc, DIST); /* * 2. Configure the Distributor */ /* Set all global interrupts to be level triggered, active low. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn) gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000); /* Set priority to all shared interrupts */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) { /* Set highest priority */ gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } /* * Disable all interrupts. Leave PPI and SGIs as they are enabled in * Re-Distributor registers. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn) gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF); gic_v3_wait_for_rwp(sc, DIST); /* * 3. Enable Distributor */ /* Enable Distributor with ARE, Group 1 */ gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A | GICD_CTLR_G1); /* * 4. Route all interrupts to boot CPU. 
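
Step 4 (continued just below) points every SPI at the boot CPU by writing that CPU's affinity into GICD_IROUTER<n>. For illustration, a standalone sketch of how such a routing value is laid out; the field positions follow the GICv3 architecture (Aff0[7:0], Aff1[15:8], Aff2[23:16], Aff3[39:32], with the Interrupt_Routing_Mode bit 31 left clear so delivery targets one specific PE), while the kernel itself simply reuses CPU_AFFINITY(0) and irouter_value() is a hypothetical helper:

#include <stdint.h>
#include <stdio.h>

/* Compose a GICD_IROUTER<n> value for a specific PE (illustrative helper). */
static uint64_t
irouter_value(uint8_t aff3, uint8_t aff2, uint8_t aff1, uint8_t aff0)
{

        return (((uint64_t)aff3 << 32) | ((uint64_t)aff2 << 16) |
            ((uint64_t)aff1 << 8) | aff0);
}

int
main(void)
{

        /* Hypothetical boot CPU at affinity 0.0.0.1. */
        printf("GICD_IROUTER value: 0x%010llx\n",
            (unsigned long long)irouter_value(0, 0, 0, 1));
        return (0);
}
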
*/ aff = CPU_AFFINITY(0); for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++) gic_d_write(sc, 4, GICD_IROUTER(i), aff); return (0); } /* Re-Distributor */ static int gic_v3_redist_alloc(struct gic_v3_softc *sc) { u_int cpuid; /* Allocate struct resource for all CPU's Re-Distributor registers */ for (cpuid = 0; cpuid < mp_ncpus; cpuid++) if (CPU_ISSET(cpuid, &all_cpus) != 0) sc->gic_redists.pcpu[cpuid] = malloc(sizeof(*sc->gic_redists.pcpu[0]), M_GIC_V3, M_WAITOK); else sc->gic_redists.pcpu[cpuid] = NULL; return (0); } static int gic_v3_redist_find(struct gic_v3_softc *sc) { struct resource r_res; bus_space_handle_t r_bsh; uint64_t aff; uint64_t typer; uint32_t pidr2; u_int cpuid; size_t i; cpuid = PCPU_GET(cpuid); aff = CPU_AFFINITY(cpuid); /* Affinity in format for comparison with typer */ aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) | (CPU_AFF1(aff) << 8) | CPU_AFF0(aff); if (bootverbose) { device_printf(sc->dev, "Start searching for Re-Distributor\n"); } /* Iterate through Re-Distributor regions */ for (i = 0; i < sc->gic_redists.nregions; i++) { /* Take a copy of the region's resource */ r_res = *sc->gic_redists.regions[i]; r_bsh = rman_get_bushandle(&r_res); pidr2 = bus_read_4(&r_res, GICR_PIDR2); switch (pidr2 & GICR_PIDR2_ARCH_MASK) { case GICR_PIDR2_ARCH_GICv3: /* fall through */ case GICR_PIDR2_ARCH_GICv4: break; default: device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENODEV); } do { typer = bus_read_8(&r_res, GICR_TYPER); if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) { KASSERT(sc->gic_redists.pcpu[cpuid] != NULL, ("Invalid pointer to per-CPU redistributor")); /* Copy res contents to its final destination */ *sc->gic_redists.pcpu[cpuid] = r_res; if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor has been found\n", cpuid); } return (0); } r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE); if ((typer & GICR_TYPER_VLPIS) != 0) { r_bsh += (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE); } rman_set_bushandle(&r_res, r_bsh); } while ((typer & GICR_TYPER_LAST) == 0); } device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENXIO); } static int gic_v3_redist_wake(struct gic_v3_softc *sc) { uint32_t waker; size_t us_left = 1000000; waker = gic_r_read(sc, 4, GICR_WAKER); /* Wake up Re-Distributor for this CPU */ waker &= ~GICR_WAKER_PS; gic_r_write(sc, 4, GICR_WAKER, waker); /* * When clearing ProcessorSleep bit it is required to wait for * ChildrenAsleep to become zero following the processor power-on. 
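
Earlier in this hunk, gic_v3_redist_find() walks each Re-Distributor frame and matches it to the current CPU by comparing GICR_TYPER[63:32], which per the GICv3 architecture holds the PE's affinity packed one byte per level, against the CPU's own affinity packed the same way. A standalone sketch of that comparison; redist_matches() and the sample TYPER value are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does this Re-Distributor frame belong to the PE at the given affinity? */
static bool
redist_matches(uint64_t gicr_typer, uint8_t aff3, uint8_t aff2,
    uint8_t aff1, uint8_t aff0)
{
        uint32_t want;

        want = ((uint32_t)aff3 << 24) | ((uint32_t)aff2 << 16) |
            ((uint32_t)aff1 << 8) | aff0;
        return ((uint32_t)(gicr_typer >> 32) == want);
}

int
main(void)
{
        /* Hypothetical GICR_TYPER for the PE at affinity 0.0.1.3. */
        uint64_t typer = (uint64_t)0x00000103 << 32;

        printf("match: %d\n", redist_matches(typer, 0, 0, 1, 3));
        return (0);
}
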
*/ while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) { DELAY(1); if (us_left-- == 0) { panic("Could not wake Re-Distributor for CPU%u", PCPU_GET(cpuid)); } } if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor woke up\n", PCPU_GET(cpuid)); } return (0); } static int gic_v3_redist_init(struct gic_v3_softc *sc) { int err; size_t i; err = gic_v3_redist_find(sc); if (err != 0) return (err); err = gic_v3_redist_wake(sc); if (err != 0) return (err); /* Disable SPIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0, GICR_I_ENABLER_PPI_MASK); /* Enable SGIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0, GICR_I_ENABLER_SGI_MASK); /* Set priority for SGIs and PPIs */ for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } gic_v3_wait_for_rwp(sc, REDIST); return (0); } Index: stable/11/sys/arm64/arm64/gic_v3_fdt.c =================================================================== --- stable/11/sys/arm64/arm64/gic_v3_fdt.c (revision 305135) +++ stable/11/sys/arm64/arm64/gic_v3_fdt.c (revision 305136) @@ -1,338 +1,298 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include "gic_v3_reg.h" #include "gic_v3_var.h" /* * FDT glue. 
*/ static int gic_v3_fdt_probe(device_t); static int gic_v3_fdt_attach(device_t); static struct resource *gic_v3_ofw_bus_alloc_res(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static const struct ofw_bus_devinfo *gic_v3_ofw_get_devinfo(device_t, device_t); static device_method_t gic_v3_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gic_v3_fdt_probe), DEVMETHOD(device_attach, gic_v3_fdt_attach), /* Bus interface */ DEVMETHOD(bus_alloc_resource, gic_v3_ofw_bus_alloc_res), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, gic_v3_ofw_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), /* End */ DEVMETHOD_END }; DEFINE_CLASS_1(gic, gic_v3_fdt_driver, gic_v3_fdt_methods, sizeof(struct gic_v3_softc), gic_v3_driver); static devclass_t gic_v3_fdt_devclass; EARLY_DRIVER_MODULE(gic_v3, simplebus, gic_v3_fdt_driver, gic_v3_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(gic_v3, ofwbus, gic_v3_fdt_driver, gic_v3_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); /* * Helper functions declarations. */ static int gic_v3_ofw_bus_attach(device_t); /* * Device interface. */ static int gic_v3_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "arm,gic-v3")) return (ENXIO); device_set_desc(dev, GIC_V3_DEVSTR); return (BUS_PROBE_DEFAULT); } static int gic_v3_fdt_attach(device_t dev) { struct gic_v3_softc *sc; pcell_t redist_regions; -#ifdef INTRNG intptr_t xref; -#endif int err; sc = device_get_softc(dev); sc->dev = dev; /* * Recover number of the Re-Distributor regions. */ if (OF_getencprop(ofw_bus_get_node(dev), "#redistributor-regions", &redist_regions, sizeof(redist_regions)) <= 0) sc->gic_redists.nregions = 1; else sc->gic_redists.nregions = redist_regions; err = gic_v3_attach(dev); if (err != 0) goto error; -#ifdef INTRNG xref = OF_xref_from_node(ofw_bus_get_node(dev)); sc->gic_pic = intr_pic_register(dev, xref); if (sc->gic_pic == NULL) { device_printf(dev, "could not register PIC\n"); err = ENXIO; goto error; } if (intr_pic_claim_root(dev, xref, arm_gic_v3_intr, sc, GIC_LAST_SGI - GIC_FIRST_SGI + 1) != 0) { err = ENXIO; goto error; } -#endif /* * Try to register ITS to this GIC. * GIC will act as a bus in that case. * Failure here will not affect main GIC functionality. */ if (gic_v3_ofw_bus_attach(dev) != 0) { if (bootverbose) { device_printf(dev, "Failed to attach ITS to this GIC\n"); } } -#ifdef INTRNG if (device_get_children(dev, &sc->gic_children, &sc->gic_nchildren) != 0) sc->gic_nchildren = 0; -#endif return (err); error: if (bootverbose) { device_printf(dev, "Failed to attach. 
Error %d\n", err); } /* Failure so free resources */ gic_v3_detach(dev); return (err); } /* OFW bus interface */ struct gic_v3_ofw_devinfo { struct ofw_bus_devinfo di_dinfo; struct resource_list di_rl; }; static const struct ofw_bus_devinfo * gic_v3_ofw_get_devinfo(device_t bus __unused, device_t child) { struct gic_v3_ofw_devinfo *di; di = device_get_ivars(child); return (&di->di_dinfo); } static struct resource * gic_v3_ofw_bus_alloc_res(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct gic_v3_ofw_devinfo *di; struct resource_list_entry *rle; int ranges_len; if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type != SYS_RES_MEMORY) return (NULL); /* Find defaults for this rid */ rle = resource_list_find(&di->di_rl, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } /* * XXX: No ranges remap! * Absolute address is expected. */ if (ofw_bus_has_prop(bus, "ranges")) { ranges_len = OF_getproplen(ofw_bus_get_node(bus), "ranges"); if (ranges_len != 0) { if (bootverbose) { device_printf(child, "Ranges remap not supported\n"); } return (NULL); } } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } /* Helper functions */ /* * Bus capability support for GICv3. * Collects and configures device informations and finally * adds ITS device as a child of GICv3 in Newbus hierarchy. */ static int gic_v3_ofw_bus_attach(device_t dev) { struct gic_v3_ofw_devinfo *di; device_t child; phandle_t parent, node; pcell_t addr_cells, size_cells; parent = ofw_bus_get_node(dev); if (parent > 0) { addr_cells = 2; OF_getencprop(parent, "#address-cells", &addr_cells, sizeof(addr_cells)); size_cells = 2; OF_getencprop(parent, "#size-cells", &size_cells, sizeof(size_cells)); /* Iterate through all GIC subordinates */ for (node = OF_child(parent); node > 0; node = OF_peer(node)) { /* Allocate and populate devinfo. */ di = malloc(sizeof(*di), M_GIC_V3, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node)) { if (bootverbose) { device_printf(dev, "Could not set up devinfo for ITS\n"); } free(di, M_GIC_V3); continue; } /* Initialize and populate resource list. 
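
gic_v3_ofw_bus_attach() reads #address-cells and #size-cells from the GIC node (defaulting both to 2) and then, just below, lets ofw_bus_reg_to_rl() translate each child's "reg" property into memory resource entries. For illustration only, a standalone sketch of how a flat array of 32-bit cells maps onto (address, size) pairs under those cell counts; cells_to_u64() and the sample values are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Combine 'ncells' 32-bit cells, most-significant cell first. */
static uint64_t
cells_to_u64(const uint32_t *cells, unsigned int ncells)
{
        uint64_t v;
        unsigned int i;

        v = 0;
        for (i = 0; i < ncells; i++)
                v = (v << 32) | cells[i];
        return (v);
}

int
main(void)
{
        /* Hypothetical ITS "reg" with #address-cells = 2, #size-cells = 2. */
        uint32_t reg[] = { 0x0, 0x20100000, 0x0, 0x20000 };

        printf("addr 0x%llx size 0x%llx\n",
            (unsigned long long)cells_to_u64(&reg[0], 2),
            (unsigned long long)cells_to_u64(&reg[2], 2));
        return (0);
}
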
*/ resource_list_init(&di->di_rl); ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells, &di->di_rl); /* Should not have any interrupts, so don't add any */ /* Add newbus device for this FDT node */ child = device_add_child(dev, NULL, -1); if (!child) { if (bootverbose) { device_printf(dev, "Could not add child: %s\n", di->di_dinfo.obd_name); } resource_list_free(&di->di_rl); ofw_bus_gen_destroy_devinfo(&di->di_dinfo); free(di, M_GIC_V3); continue; } device_set_ivars(child, di); } } return (bus_generic_attach(dev)); } - -#ifndef INTRNG -static int gic_v3_its_fdt_probe(device_t dev); - -static device_method_t gic_v3_its_fdt_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, gic_v3_its_fdt_probe), - - /* End */ - DEVMETHOD_END -}; - -DEFINE_CLASS_1(its, gic_v3_its_fdt_driver, gic_v3_its_fdt_methods, - sizeof(struct gic_v3_its_softc), gic_v3_its_driver); - -static devclass_t gic_v3_its_fdt_devclass; - -EARLY_DRIVER_MODULE(its, gic, gic_v3_its_fdt_driver, - gic_v3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); - -static int -gic_v3_its_fdt_probe(device_t dev) -{ - - if (!ofw_bus_status_okay(dev)) - return (ENXIO); - - if (!ofw_bus_is_compatible(dev, GIC_V3_ITS_COMPSTR)) - return (ENXIO); - - device_set_desc(dev, GIC_V3_ITS_DEVSTR); - return (BUS_PROBE_DEFAULT); -} -#endif Index: stable/11/sys/arm64/arm64/gic_v3_var.h =================================================================== --- stable/11/sys/arm64/arm64/gic_v3_var.h (revision 305135) +++ stable/11/sys/arm64/arm64/gic_v3_var.h (revision 305136) @@ -1,378 +1,247 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _GIC_V3_VAR_H_ #define _GIC_V3_VAR_H_ #define GIC_V3_DEVSTR "ARM Generic Interrupt Controller v3.0" DECLARE_CLASS(gic_v3_driver); -#ifndef INTRNG -#define LPI_FLAGS_CONF_FLUSH (1UL << 0) -#define LPI_CONFTAB_SIZE PAGE_SIZE_64K -/* 1 bit per LPI + 1 KB more for the obligatory PPI, SGI, SPI stuff */ -#define LPI_PENDTAB_SIZE ((LPI_CONFTAB_SIZE / 8) + 0x400) -#endif - -#ifdef INTRNG struct gic_v3_irqsrc { struct intr_irqsrc gi_isrc; uint32_t gi_irq; enum intr_polarity gi_pol; enum intr_trigger gi_trig; }; -#endif struct redist_lpis { vm_offset_t conf_base; vm_offset_t pend_base[MAXCPU]; uint64_t flags; }; struct gic_redists { /* * Re-Distributor region description. * We will have few of those depending * on the #redistributor-regions property in FDT. */ struct resource ** regions; /* Number of Re-Distributor regions */ u_int nregions; /* Per-CPU Re-Distributor handler */ struct resource * pcpu[MAXCPU]; /* LPIs data */ struct redist_lpis lpis; }; struct gic_v3_softc { device_t dev; struct resource ** gic_res; struct mtx gic_mtx; /* Distributor */ struct resource * gic_dist; /* Re-Distributors */ struct gic_redists gic_redists; u_int gic_nirqs; u_int gic_idbits; boolean_t gic_registered; -#ifdef INTRNG int gic_nchildren; device_t *gic_children; struct intr_pic *gic_pic; struct gic_v3_irqsrc *gic_irqs; -#endif }; -#ifdef INTRNG #define GIC_INTR_ISRC(sc, irq) (&sc->gic_irqs[irq].gi_isrc) -#endif MALLOC_DECLARE(M_GIC_V3); /* ivars */ enum { GICV3_IVAR_NIRQS, GICV3_IVAR_REDIST_VADDR, }; __BUS_ACCESSOR(gicv3, nirqs, GICV3, NIRQS, u_int); __BUS_ACCESSOR(gicv3, redist_vaddr, GICV3, REDIST_VADDR, void *); /* Device methods */ int gic_v3_attach(device_t dev); int gic_v3_detach(device_t dev); int arm_gic_v3_intr(void *); -#ifdef INTRNG uint32_t gic_r_read_4(device_t, bus_size_t); uint64_t gic_r_read_8(device_t, bus_size_t); void gic_r_write_4(device_t, bus_size_t, uint32_t var); void gic_r_write_8(device_t, bus_size_t, uint64_t var); -#endif /* * ITS */ /* LPI chunk owned by ITS device */ struct lpi_chunk { u_int lpi_base; u_int lpi_free; /* First free LPI in set */ -#ifndef INTRNG - u_int *lpi_col_ids; -#endif u_int lpi_num; /* Total number of LPIs in chunk */ u_int lpi_busy; /* Number of busy LPIs in chink */ }; /* ITS device */ struct its_dev { TAILQ_ENTRY(its_dev) entry; /* PCI device */ device_t pci_dev; /* Device ID (i.e. PCI device ID) */ uint32_t devid; /* List of assigned LPIs */ struct lpi_chunk lpis; /* Virtual address of ITT */ vm_offset_t itt; size_t itt_size; }; -#ifndef INTRNG -TAILQ_HEAD(its_dev_list, its_dev); -/* ITS private table description */ -struct its_ptab { - vm_offset_t ptab_vaddr; /* Virtual Address of table */ - size_t ptab_pgsz; /* Page size */ - size_t ptab_npages; /* Number of pages */ -}; - -/* ITS collection description. */ -struct its_col { - uint64_t col_target; /* Target Re-Distributor */ - uint64_t col_id; /* Collection ID */ -}; - -/* ITS command. 
Each command is 32 bytes long */ -struct its_cmd { - uint64_t cmd_dword[4]; /* ITS command double word */ -}; - -#define GIC_V3_ITS_DEVSTR "ARM GIC Interrupt Translation Service" -#define GIC_V3_ITS_COMPSTR "arm,gic-v3-its" - -DECLARE_CLASS(gic_v3_its_driver); - -/* ITS commands encoding */ -#define ITS_CMD_MOVI (0x01) -#define ITS_CMD_SYNC (0x05) -#define ITS_CMD_MAPD (0x08) -#define ITS_CMD_MAPC (0x09) -#define ITS_CMD_MAPVI (0x0a) -#define ITS_CMD_MAPI (0x0b) -#define ITS_CMD_INV (0x0c) -#define ITS_CMD_INVALL (0x0d) -/* Command */ -#define CMD_COMMAND_MASK (0xFFUL) -/* PCI device ID */ -#define CMD_DEVID_SHIFT (32) -#define CMD_DEVID_MASK (0xFFFFFFFFUL << CMD_DEVID_SHIFT) -/* Size of IRQ ID bitfield */ -#define CMD_SIZE_MASK (0xFFUL) -/* Virtual LPI ID */ -#define CMD_ID_MASK (0xFFFFFFFFUL) -/* Physical LPI ID */ -#define CMD_PID_SHIFT (32) -#define CMD_PID_MASK (0xFFFFFFFFUL << CMD_PID_SHIFT) -/* Collection */ -#define CMD_COL_MASK (0xFFFFUL) -/* Target (CPU or Re-Distributor) */ -#define CMD_TARGET_SHIFT (16) -#define CMD_TARGET_MASK (0xFFFFFFFFUL << CMD_TARGET_SHIFT) -/* Interrupt Translation Table address */ -#define CMD_ITT_MASK (0xFFFFFFFFFF00UL) -/* Valid command bit */ -#define CMD_VALID_SHIFT (63) -#define CMD_VALID_MASK (1UL << CMD_VALID_SHIFT) -#endif /* INTRNG */ - /* * ITS command descriptor. * Idea for command description passing taken from Linux. */ struct its_cmd_desc { uint8_t cmd_type; union { struct { struct its_dev *its_dev; struct its_col *col; uint32_t id; } cmd_desc_movi; struct { struct its_col *col; } cmd_desc_sync; struct { struct its_col *col; uint8_t valid; } cmd_desc_mapc; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; uint32_t id; } cmd_desc_mapvi; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; } cmd_desc_mapi; struct { struct its_dev *its_dev; uint8_t valid; } cmd_desc_mapd; struct { struct its_dev *its_dev; struct its_col *col; uint32_t pid; } cmd_desc_inv; struct { struct its_col *col; } cmd_desc_invall; }; }; #define ITS_TARGET_NONE 0xFBADBEEF -#ifndef INTRNG -#define ITS_CMDQ_SIZE PAGE_SIZE_64K -#define ITS_CMDQ_NENTRIES (ITS_CMDQ_SIZE / sizeof(struct its_cmd)) - -#define ITS_FLAGS_CMDQ_FLUSH (1UL << 0) - -struct gic_v3_its_softc { - device_t dev; - struct resource * its_res; - - struct its_cmd * its_cmdq_base; /* ITS command queue base */ - struct its_cmd * its_cmdq_write; /* ITS command queue write ptr */ - struct its_ptab its_ptabs[GITS_BASER_NUM];/* ITS private tables */ - struct its_col * its_cols[MAXCPU];/* Per-CPU collections */ - - uint64_t its_flags; - -#ifndef INTRNG - struct its_dev_list its_dev_list; -#endif - - bitstr_t * its_lpi_bitmap; - uint32_t its_lpi_maxid; - - struct mtx its_dev_lock; - struct mtx its_cmd_lock; - - uint32_t its_socket; /* Socket number ITS is attached to */ -}; - -/* Stuff that is specific to the vendor's implementation */ -typedef uint32_t (*its_devbits_func_t)(device_t); - -struct its_quirks { - uint64_t cpuid; - uint64_t cpuid_mask; - its_devbits_func_t devbits_func; -}; - -extern devclass_t gic_v3_its_devclass; - -int gic_v3_its_detach(device_t); - -int gic_v3_its_alloc_msix(device_t, device_t, int *); -int gic_v3_its_release_msix(device_t, device_t, int); -int gic_v3_its_alloc_msi(device_t, device_t, int, int *); -int gic_v3_its_release_msi(device_t, device_t, int, int *); -int gic_v3_its_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); - -int its_init_cpu(struct gic_v3_its_softc *); - -int lpi_migrate(device_t, uint32_t, u_int); -void 
lpi_unmask_irq(device_t, uint32_t); -void lpi_mask_irq(device_t, uint32_t); -#endif /* * GIC Distributor accessors. * Notice that only GIC sofc can be passed. */ #define gic_d_read(sc, len, reg) \ ({ \ bus_read_##len(sc->gic_dist, reg); \ }) #define gic_d_write(sc, len, reg, val) \ ({ \ bus_write_##len(sc->gic_dist, reg, val);\ }) /* GIC Re-Distributor accessors (per-CPU) */ #define gic_r_read(sc, len, reg) \ ({ \ u_int cpu = PCPU_GET(cpuid); \ \ bus_read_##len( \ sc->gic_redists.pcpu[cpu], \ reg); \ }) #define gic_r_write(sc, len, reg, val) \ ({ \ u_int cpu = PCPU_GET(cpuid); \ \ bus_write_##len( \ sc->gic_redists.pcpu[cpu], \ reg, val); \ }) #define PCI_DEVID_GENERIC(pci_dev) \ ({ \ ((pci_get_domain(pci_dev) << PCI_RID_DOMAIN_SHIFT) | \ (pci_get_bus(pci_dev) << PCI_RID_BUS_SHIFT) | \ (pci_get_slot(pci_dev) << PCI_RID_SLOT_SHIFT) | \ (pci_get_function(pci_dev) << PCI_RID_FUNC_SHIFT)); \ }) /* * Request number of maximum MSI-X vectors for this device. * Device can ask for less vectors than maximum supported but not more. */ #define PCI_MSIX_NUM(pci_dev) \ ({ \ struct pci_devinfo *dinfo; \ pcicfgregs *cfg; \ \ dinfo = device_get_ivars(pci_dev); \ cfg = &dinfo->cfg; \ \ cfg->msix.msix_msgnum; \ }) #endif /* _GIC_V3_VAR_H_ */ Index: stable/11/sys/arm64/arm64/mp_machdep.c =================================================================== --- stable/11/sys/arm64/arm64/mp_machdep.c (revision 305135) +++ stable/11/sys/arm64/arm64/mp_machdep.c (revision 305136) @@ -1,771 +1,710 @@ /*- * Copyright (c) 2015-2016 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include "opt_kstack_pages.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef FDT #include #include #endif #include -#ifdef INTRNG #include "pic_if.h" typedef void intr_ipi_send_t(void *, cpuset_t, u_int); typedef void intr_ipi_handler_t(void *); #define INTR_IPI_NAMELEN (MAXCOMLEN + 1) struct intr_ipi { intr_ipi_handler_t * ii_handler; void * ii_handler_arg; intr_ipi_send_t * ii_send; void * ii_send_arg; char ii_name[INTR_IPI_NAMELEN]; u_long * ii_count; }; static struct intr_ipi ipi_sources[INTR_IPI_COUNT]; static struct intr_ipi *intr_ipi_lookup(u_int); static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *); -#endif /* INTRNG */ boolean_t ofw_cpu_reg(phandle_t node, u_int, cell_t *); extern struct pcpu __pcpu[]; static enum { CPUS_UNKNOWN, #ifdef FDT CPUS_FDT, #endif } cpu_enum_method; static device_identify_t arm64_cpu_identify; static device_probe_t arm64_cpu_probe; static device_attach_t arm64_cpu_attach; static void ipi_ast(void *); static void ipi_hardclock(void *); static void ipi_preempt(void *); static void ipi_rendezvous(void *); static void ipi_stop(void *); static int ipi_handler(void *arg); struct mtx ap_boot_mtx; struct pcb stoppcbs[MAXCPU]; #ifdef INVARIANTS static uint32_t cpu_reg[MAXCPU][2]; #endif static device_t cpu_list[MAXCPU]; /* * Not all systems boot from the first CPU in the device tree. To work around * this we need to find which CPU we have booted from so when we later * enable the secondary CPUs we skip this one. */ static int cpu0 = -1; void mpentry(unsigned long cpuid); void init_secondary(uint64_t); uint8_t secondary_stacks[MAXCPU - 1][PAGE_SIZE * KSTACK_PAGES] __aligned(16); /* Set to 1 once we're ready to let the APs out of the pen. */ volatile int aps_ready = 0; /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; static device_method_t arm64_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_identify, arm64_cpu_identify), DEVMETHOD(device_probe, arm64_cpu_probe), DEVMETHOD(device_attach, arm64_cpu_attach), DEVMETHOD_END }; static devclass_t arm64_cpu_devclass; static driver_t arm64_cpu_driver = { "arm64_cpu", arm64_cpu_methods, 0 }; DRIVER_MODULE(arm64_cpu, cpu, arm64_cpu_driver, arm64_cpu_devclass, 0, 0); static void arm64_cpu_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "arm64_cpu", -1) != NULL) return; if (BUS_ADD_CHILD(parent, 0, "arm64_cpu", -1) == NULL) device_printf(parent, "add child failed\n"); } static int arm64_cpu_probe(device_t dev) { u_int cpuid; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); device_quiet(dev); return (0); } static int arm64_cpu_attach(device_t dev) { const uint32_t *reg; size_t reg_size; u_int cpuid; int i; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid)); reg = cpu_get_cpuid(dev, ®_size); if (reg == NULL) return (EINVAL); if (bootverbose) { device_printf(dev, "register <"); for (i = 0; i < reg_size; i++) printf("%s%x", (i == 0) ? 
"" : " ", reg[i]); printf(">\n"); } /* Set the device to start it later */ cpu_list[cpuid] = dev; return (0); } static void release_aps(void *dummy __unused) { int cpu, i; -#ifdef INTRNG intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL); intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL); intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL); intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL); intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL); intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL); -#else - /* Setup the IPI handler */ - for (i = 0; i < INTR_IPI_COUNT; i++) - arm_setup_ipihandler(ipi_handler, i); -#endif atomic_store_rel_int(&aps_ready, 1); /* Wake up the other CPUs */ __asm __volatile("sev"); printf("Release APs\n"); for (i = 0; i < 2000; i++) { if (smp_started) { for (cpu = 0; cpu <= mp_maxid; cpu++) { if (CPU_ABSENT(cpu)) continue; print_cpu_features(cpu); } return; } DELAY(1000); } printf("APs not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); void init_secondary(uint64_t cpu) { struct pcpu *pcpup; -#ifndef INTRNG - int i; -#endif pcpup = &__pcpu[cpu]; /* * Set the pcpu pointer with a backup in tpidr_el1 to be * loaded when entering the kernel from userland. */ __asm __volatile( "mov x18, %0 \n" "msr tpidr_el1, %0" :: "r"(pcpup)); /* Spin until the BSP releases the APs */ while (!aps_ready) __asm __volatile("wfe"); /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pcpup->pc_curthread = pcpup->pc_idlethread; pcpup->pc_curpcb = pcpup->pc_idlethread->td_pcb; /* * Identify current CPU. This is necessary to setup * affinity registers and to provide support for * runtime chip identification. */ identify_cpu(); -#ifdef INTRNG intr_pic_init_secondary(); -#else - /* Configure the interrupt controller */ - arm_init_secondary(); - for (i = 0; i < INTR_IPI_COUNT; i++) - arm_unmask_ipi(i); -#endif - /* Start per-CPU event timers. */ cpu_initclocks_ap(); #ifdef VFP vfp_init(); #endif dbg_monitor_init(); /* Enable interrupts */ intr_enable(); mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPI's, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); /* Enter the scheduler */ sched_throw(NULL); panic("scheduler returned us to init_secondary"); /* NOTREACHED */ } -#ifdef INTRNG /* * Send IPI thru interrupt controller. */ static void pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi) { KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi); } /* * Setup IPI handler on interrupt controller. * * Not SMP coherent. 
*/ static void intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, void *arg) { struct intr_irqsrc *isrc; struct intr_ipi *ii; int error; KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi)); error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc); if (error != 0) return; isrc->isrc_handlers++; ii = intr_ipi_lookup(ipi); KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi)); ii->ii_handler = hand; ii->ii_handler_arg = arg; ii->ii_send = pic_ipi_send; ii->ii_send_arg = isrc; strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN); ii->ii_count = intr_ipi_setup_counters(name); } static void intr_ipi_send(cpuset_t cpus, u_int ipi) { struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); ii->ii_send(ii->ii_send_arg, cpus, ipi); } -#endif static void ipi_ast(void *dummy __unused) { CTR0(KTR_SMP, "IPI_AST"); } static void ipi_hardclock(void *dummy __unused) { CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); } static void ipi_preempt(void *dummy __unused) { CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); } static void ipi_rendezvous(void *dummy __unused) { CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); } static void ipi_stop(void *dummy __unused) { u_int cpu; CTR0(KTR_SMP, "IPI_STOP"); cpu = PCPU_GET(cpuid); savectx(&stoppcbs[cpu]); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); CTR0(KTR_SMP, "IPI_STOP (restart)"); } -#ifndef INTRNG -static int -ipi_handler(void *arg) -{ - u_int cpu, ipi; - - arg = (void *)((uintptr_t)arg & ~(1 << 16)); - KASSERT((uintptr_t)arg < INTR_IPI_COUNT, - ("Invalid IPI %ju", (uintptr_t)arg)); - - cpu = PCPU_GET(cpuid); - ipi = (uintptr_t)arg; - - switch(ipi) { - case IPI_AST: - ipi_ast(NULL); - break; - case IPI_PREEMPT: - ipi_preempt(NULL); - break; - case IPI_RENDEZVOUS: - ipi_rendezvous(NULL); - break; - case IPI_STOP: - case IPI_STOP_HARD: - ipi_stop(NULL); - break; - case IPI_HARDCLOCK: - ipi_hardclock(NULL); - break; - default: - panic("Unknown IPI %#0x on cpu %d", ipi, curcpu); - } - - return (FILTER_HANDLED); -} -#endif - struct cpu_group * cpu_topo(void) { return (smp_topo_none()); } /* Determine if we running MP machine */ int cpu_mp_probe(void) { /* ARM64TODO: Read the u bit of mpidr_el1 to determine this */ return (1); } #ifdef FDT static boolean_t cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { uint64_t target_cpu; struct pcpu *pcpup; vm_paddr_t pa; u_int cpuid; int err; /* Check we are able to start this cpu */ if (id > mp_maxid) return (0); KASSERT(id < MAXCPU, ("Too mant CPUs")); KASSERT(addr_size == 1 || addr_size == 2, ("Invalid register size")); #ifdef INVARIANTS cpu_reg[id][0] = reg[0]; if (addr_size == 2) cpu_reg[id][1] = reg[1]; #endif /* We are already running on cpu 0 */ if (id == cpu0) return (1); cpuid = id; if (cpuid < cpu0) cpuid++; pcpup = &__pcpu[cpuid]; pcpu_init(pcpup, cpuid, sizeof(struct pcpu)); dpcpu[cpuid - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO); dpcpu_init(dpcpu[cpuid - 1], cpuid); target_cpu = reg[0]; if (addr_size == 2) { target_cpu <<= 32; target_cpu |= reg[1]; } printf("Starting CPU %u (%lx)\n", cpuid, target_cpu); pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry); err = psci_cpu_on(target_cpu, 
pa, cpuid); if (err != PSCI_RETVAL_SUCCESS) { /* Panic here if INVARIANTS are enabled */ KASSERT(0, ("Failed to start CPU %u (%lx)\n", id, target_cpu)); pcpu_destroy(pcpup); kmem_free(kernel_arena, (vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE); dpcpu[cpuid - 1] = NULL; /* Notify the user that the CPU failed to start */ printf("Failed to start CPU %u (%lx)\n", id, target_cpu); } else CPU_SET(cpuid, &all_cpus); return (1); } #endif /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); CPU_SET(0, &all_cpus); switch(cpu_enum_method) { #ifdef FDT case CPUS_FDT: KASSERT(cpu0 >= 0, ("Current CPU was not found")); ofw_cpu_early_foreach(cpu_init_fdt, true); break; #endif case CPUS_UNKNOWN: break; } } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { } static boolean_t cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { uint64_t mpidr_fdt, mpidr_reg; if (cpu0 < 0) { mpidr_fdt = reg[0]; if (addr_size == 2) { mpidr_fdt <<= 32; mpidr_fdt |= reg[1]; } mpidr_reg = READ_SPECIALREG(mpidr_el1); if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt) cpu0 = id; } return (TRUE); } void cpu_mp_setmaxid(void) { #ifdef FDT int cores; cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false); if (cores > 0) { cores = MIN(cores, MAXCPU); if (bootverbose) printf("Found %d CPUs in the device tree\n", cores); mp_ncpus = cores; mp_maxid = cores - 1; cpu_enum_method = CPUS_FDT; return; } #endif if (bootverbose) printf("No CPU data, limiting to 1 core\n"); mp_ncpus = 1; mp_maxid = 0; } -#ifdef INTRNG /* * Lookup IPI source. */ static struct intr_ipi * intr_ipi_lookup(u_int ipi) { if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); return (&ipi_sources[ipi]); } /* * interrupt controller dispatch function for IPIs. It should * be called straight from the interrupt controller, when associated * interrupt source is learned. Or from anybody who has an interrupt * source mapped. */ void intr_ipi_dispatch(u_int ipi, struct trapframe *tf) { void *arg; struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid)); /* * Supply ipi filter with trapframe argument * if none is registered. */ arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf; ii->ii_handler(arg); } #ifdef notyet /* * Map IPI into interrupt controller. * * Not SMP coherent. */ static int ipi_map(struct intr_irqsrc *isrc, u_int ipi) { boolean_t is_percpu; int error; if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); isrc->isrc_type = INTR_ISRCT_NAMESPACE; isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI; isrc->isrc_nspc_num = ipi_next_num; error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu); if (error == 0) { isrc->isrc_dev = intr_irq_root_dev; ipi_next_num++; } return (error); } /* * Setup IPI handler to interrupt source. * * Note that there could be more ways how to send and receive IPIs * on a platform like fast interrupts for example. In that case, * one can call this function with ASIF_NOALLOC flag set and then * call intr_ipi_dispatch() when appropriate. * * Not SMP coherent. 
*/ int intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter, void *arg, u_int flags) { struct intr_irqsrc *isrc; int error; if (filter == NULL) return(EINVAL); isrc = intr_ipi_lookup(ipi); if (isrc->isrc_ipifilter != NULL) return (EEXIST); if ((flags & AISHF_NOALLOC) == 0) { error = ipi_map(isrc, ipi); if (error != 0) return (error); } isrc->isrc_ipifilter = filter; isrc->isrc_arg = arg; isrc->isrc_handlers = 1; isrc->isrc_count = intr_ipi_setup_counters(name); isrc->isrc_index = 0; /* it should not be used in IPI case */ if (isrc->isrc_dev != NULL) { PIC_ENABLE_INTR(isrc->isrc_dev, isrc); PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc); } return (0); } #endif /* Sending IPI */ void ipi_all_but_self(u_int ipi) { cpuset_t cpus; cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &cpus); CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(cpus, ipi); } void ipi_cpu(int cpu, u_int ipi) { cpuset_t cpus; CPU_ZERO(&cpus); CPU_SET(cpu, &cpus); CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi); intr_ipi_send(cpus, ipi); } void ipi_selected(cpuset_t cpus, u_int ipi) { CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(cpus, ipi); } -#endif /* INTRNG */ Index: stable/11/sys/arm64/arm64/nexus.c =================================================================== --- stable/11/sys/arm64/arm64/nexus.c (revision 305135) +++ stable/11/sys/arm64/arm64/nexus.c (revision 305136) @@ -1,503 +1,472 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for Arm Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests, DMA requests (which rightfully should be a part of the * ISA code but it's easier to do it here for now), I/O port addresses, * and I/O memory address space. 
*/ #include "opt_acpi.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include "ofw_bus_if.h" #endif #ifdef DEV_ACPI #include #include #endif extern struct bus_space memmap_bus; static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static struct rman irq_rman; static int nexus_attach(device_t); #ifdef FDT static device_probe_t nexus_fdt_probe; static device_attach_t nexus_fdt_attach; #endif #ifdef DEV_ACPI static device_probe_t nexus_acpi_probe; static device_attach_t nexus_acpi_attach; #endif static int nexus_print_child(device_t, device_t); static device_t nexus_add_child(device_t, u_int, const char *, int); static struct resource *nexus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol); static struct resource_list *nexus_get_reslist(device_t, device_t); static int nexus_set_resource(device_t, device_t, int, int, rman_res_t, rman_res_t); static int nexus_deactivate_resource(device_t, device_t, int, int, struct resource *); static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); static bus_space_tag_t nexus_get_bus_tag(device_t, device_t); #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr); #endif static device_method_t nexus_methods[] = { /* Bus interface */ DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_set_resource, nexus_set_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif { 0, 0 } }; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_attach mem_rman"); irq_rman.rm_start = 0; irq_rman.rm_end = ~0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0)) panic("nexus_attach irq_rman"); bus_generic_probe(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t 
bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return (child); } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. * (Exceptions include footbridge.) */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int needactivate = flags & RF_ACTIVE; /* * If this is an allocation of the "default" range for a given * RID, and we know what the resources for this device are * (ie. they aren't maintained by a child bus), then work out * the start/end values. */ if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return(NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return(NULL); start = rle->start; end = rle->end; count = rle->count; } switch (type) { case SYS_RES_IRQ: rm = &irq_rman; break; case SYS_RES_MEMORY: case SYS_RES_IOPORT: rm = &mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); rman_set_bushandle(rv, rman_get_start(rv)); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { -#ifdef INTRNG /* TODO: This is wrong, it's needed for ACPI */ device_printf(dev, "bus_config_intr is obsolete and not supported!\n"); return (EOPNOTSUPP); -#else - return (intr_irq_config(irq, trig, pol)); -#endif } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int error; if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* We depend here on rman_activate_resource() being idempotent. 
*/ error = rman_activate_resource(res); if (error) return (error); -#ifdef INTRNG error = intr_setup_irq(child, res, filt, intr, arg, flags, cookiep); -#else - error = arm_setup_intr(device_get_nameunit(child), filt, intr, - arg, rman_get_start(res), flags, cookiep); -#endif return (error); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { -#ifdef INTRNG return (intr_teardown_irq(child, r, ih)); -#else - return (intr_irq_remove_handler(child, rman_get_start(r), ih)); -#endif } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { -#ifdef INTRNG return (intr_bind_irq(child, irq, cpu)); -#else - return (intr_irq_bind(rman_get_start(irq), cpu)); -#endif } #endif static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { return(&memmap_bus); } static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int err; bus_addr_t paddr; bus_size_t psize; bus_space_handle_t vaddr; if ((err = rman_activate_resource(r)) != 0) return (err); /* * If this is a memory resource, map it into the kernel. */ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { paddr = (bus_addr_t)rman_get_start(r); psize = (bus_size_t)rman_get_size(r); err = bus_space_map(&memmap_bus, paddr, psize, 0, &vaddr); if (err != 0) { rman_deactivate_resource(r); return (err); } rman_set_bustag(r, &memmap_bus); rman_set_virtual(r, (void *)vaddr); rman_set_bushandle(r, vaddr); } return (0); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int nexus_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; /* XXX this should return a success/failure indicator */ resource_list_add(rl, type, rid, start, start + count - 1, count); return(0); } static int nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_size_t psize; bus_space_handle_t vaddr; psize = (bus_size_t)rman_get_size(r); vaddr = rman_get_bushandle(r); if (vaddr != 0) { bus_space_unmap(&memmap_bus, vaddr, psize); rman_set_virtual(r, NULL); rman_set_bushandle(r, 0); } return (rman_deactivate_resource(r)); } #ifdef FDT static device_method_t nexus_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_fdt_probe), DEVMETHOD(device_attach, nexus_fdt_attach), /* OFW interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), }; #define nexus_baseclasses nexus_fdt_baseclasses DEFINE_CLASS_1(nexus, nexus_fdt_driver, nexus_fdt_methods, 1, nexus_driver); #undef nexus_baseclasses static devclass_t nexus_fdt_devclass; EARLY_DRIVER_MODULE(nexus_fdt, root, nexus_fdt_driver, nexus_fdt_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_fdt_probe(device_t dev) { if (OF_peer(0) == 0) return (ENXIO); device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int nexus_fdt_attach(device_t dev) { nexus_add_child(dev, 10, "ofwbus", 0); return (nexus_attach(dev)); } static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { -#ifdef INTRNG - return (INTR_IRQ_INVALID); -#else - int irq; - if (icells == 3) { - irq = intr[1]; - if (intr[0] == 0) - irq += 32; /* SPI */ - else - irq += 16; /* PPI */ - } else - irq = intr[0]; - - return (irq); -#endif + return 
(INTR_IRQ_INVALID); } #endif #ifdef DEV_ACPI static device_method_t nexus_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_acpi_probe), DEVMETHOD(device_attach, nexus_acpi_attach), }; #define nexus_baseclasses nexus_acpi_baseclasses DEFINE_CLASS_1(nexus, nexus_acpi_driver, nexus_acpi_methods, 1, nexus_driver); #undef nexus_baseclasses static devclass_t nexus_acpi_devclass; EARLY_DRIVER_MODULE(nexus_acpi, root, nexus_acpi_driver, nexus_acpi_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_acpi_probe(device_t dev) { if (acpi_identify() != 0) return (ENXIO); device_quiet(dev); return (BUS_PROBE_LOW_PRIORITY); } static int nexus_acpi_attach(device_t dev) { nexus_add_child(dev, 10, "acpi", 0); return (nexus_attach(dev)); } #endif Index: stable/11/sys/arm64/cavium/thunder_pcie_pem_fdt.c =================================================================== --- stable/11/sys/arm64/cavium/thunder_pcie_pem_fdt.c (revision 305135) +++ stable/11/sys/arm64/cavium/thunder_pcie_pem_fdt.c (revision 305136) @@ -1,222 +1,183 @@ /* * Copyright (C) 2016 Cavium Inc. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_pcie_common.h" #include "thunder_pcie_pem.h" #include "pcib_if.h" static int thunder_pem_fdt_probe(device_t); static int thunder_pem_fdt_alloc_msix(device_t, device_t, int *); static int thunder_pem_fdt_release_msix(device_t, device_t, int); static int thunder_pem_fdt_alloc_msi(device_t, device_t, int, int, int *); static int thunder_pem_fdt_release_msi(device_t, device_t, int, int *); static int thunder_pem_fdt_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); static int thunder_pem_fdt_get_id(device_t, device_t, enum pci_id_type, uintptr_t *); static device_method_t thunder_pem_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, thunder_pem_fdt_probe), /* pcib interface */ DEVMETHOD(pcib_alloc_msix, thunder_pem_fdt_alloc_msix), DEVMETHOD(pcib_release_msix, thunder_pem_fdt_release_msix), DEVMETHOD(pcib_alloc_msi, thunder_pem_fdt_alloc_msi), DEVMETHOD(pcib_release_msi, thunder_pem_fdt_release_msi), DEVMETHOD(pcib_map_msi, thunder_pem_fdt_map_msi), DEVMETHOD(pcib_get_id, thunder_pem_fdt_get_id), /* End */ DEVMETHOD_END }; DEFINE_CLASS_1(pcib, thunder_pem_fdt_driver, thunder_pem_fdt_methods, sizeof(struct thunder_pem_softc), thunder_pem_driver); static devclass_t thunder_pem_fdt_devclass; DRIVER_MODULE(thunder_pem, simplebus, thunder_pem_fdt_driver, thunder_pem_fdt_devclass, 0, 0); DRIVER_MODULE(thunder_pem, ofwbus, thunder_pem_fdt_driver, thunder_pem_fdt_devclass, 0, 0); static int thunder_pem_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "cavium,pci-host-thunder-pem")) { device_set_desc(dev, THUNDER_PEM_DESC); return (BUS_PROBE_DEFAULT); } return (ENXIO); } -#ifdef INTRNG static int thunder_pem_fdt_alloc_msi(device_t pci, device_t child, int count, int maxcount, int *irqs) { phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); return (intr_alloc_msi(pci, child, msi_parent, count, maxcount, irqs)); } static int thunder_pem_fdt_release_msi(device_t pci, device_t child, int count, int *irqs) { phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); return (intr_release_msi(pci, child, msi_parent, count, irqs)); } static int thunder_pem_fdt_alloc_msix(device_t pci, device_t child, int *irq) { phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); return (intr_alloc_msix(pci, child, msi_parent, irq)); } static int thunder_pem_fdt_release_msix(device_t pci, device_t child, int irq) { phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); return (intr_release_msix(pci, child, msi_parent, irq)); } static int thunder_pem_fdt_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, uint32_t *data) { phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); return (intr_map_msi(pci, child, msi_parent, irq, addr, data)); } -#else -static int -thunder_pem_fdt_alloc_msi(device_t pci, device_t child, int count, int maxcount, - int *irqs) -{ - - return (arm_alloc_msi(pci, child, count, maxcount, irqs)); -} - -static int -thunder_pem_fdt_release_msi(device_t pci, device_t child, int count, int *irqs) -{ - - return (arm_release_msi(pci, child, count, irqs)); 
-} - -static int -thunder_pem_fdt_alloc_msix(device_t pci, device_t child, int *irq) -{ - - return (arm_alloc_msix(pci, child, irq)); -} - -static int -thunder_pem_fdt_release_msix(device_t pci, device_t child, int irq) -{ - - return (arm_release_msix(pci, child, irq)); -} - -static int -thunder_pem_fdt_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, - uint32_t *data) -{ - - return (arm_map_msi(pci, child, irq, addr, data)); -} -#endif static int thunder_pem_fdt_get_id(device_t dev, device_t child, enum pci_id_type type, uintptr_t *id) { phandle_t node; uint32_t rid; uint16_t pci_rid; if (type != PCI_ID_MSI) return (pcib_get_id(dev, child, type, id)); node = ofw_bus_get_node(dev); pci_rid = pci_get_rid(child); ofw_bus_msimap(node, pci_rid, NULL, &rid); *id = rid; return (0); } Index: stable/11/sys/arm64/include/intr.h =================================================================== --- stable/11/sys/arm64/include/intr.h (revision 305135) +++ stable/11/sys/arm64/include/intr.h (revision 305136) @@ -1,82 +1,51 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_INTR_H_ #define _MACHINE_INTR_H_ -#ifdef INTRNG - #ifdef FDT #include #endif #include #ifndef NIRQ #define NIRQ 2048 /* XXX - It should be an option. 
*/ #endif static inline void arm_irq_memory_barrier(uintptr_t irq) { } #ifdef SMP void intr_ipi_dispatch(u_int, struct trapframe *); -#endif - -#else -int intr_irq_config(u_int, enum intr_trigger, enum intr_polarity); -void intr_irq_handler(struct trapframe *); -int intr_irq_remove_handler(device_t, u_int, void *); - -void arm_dispatch_intr(u_int, struct trapframe *); -int arm_enable_intr(void); -void arm_mask_irq(u_int); -void arm_register_root_pic(device_t, u_int); -void arm_register_msi_pic(device_t); -int arm_alloc_msi(device_t, device_t, int, int, int *); -int arm_release_msi(device_t, device_t, int, int *); -int arm_alloc_msix(device_t, device_t, int *); -int arm_release_msix(device_t, device_t, int); -int arm_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); -int arm_map_msix(device_t, device_t, int, uint64_t *, uint32_t *); -int arm_setup_intr(const char *, driver_filter_t *, driver_intr_t, - void *, u_int, enum intr_type, void **); -void arm_unmask_irq(u_int); - -#ifdef SMP -int intr_irq_bind(u_int, int); - -void arm_init_secondary(void); -void arm_setup_ipihandler(driver_filter_t *, u_int); -void arm_unmask_ipi(u_int); -#endif #endif #endif /* _MACHINE_INTR_H */ Index: stable/11/sys/dev/pci/pci_host_generic.c =================================================================== --- stable/11/sys/dev/pci/pci_host_generic.c (revision 305135) +++ stable/11/sys/dev/pci/pci_host_generic.c (revision 305136) @@ -1,981 +1,971 @@ /*- * Copyright (c) 2015 Ruslan Bukin * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* Generic ECAM PCIe driver */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #if defined(INTRNG) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" /* Assembling ECAM Configuration Address */ #define PCIE_BUS_SHIFT 20 #define PCIE_SLOT_SHIFT 15 #define PCIE_FUNC_SHIFT 12 #define PCIE_BUS_MASK 0xFF #define PCIE_SLOT_MASK 0x1F #define PCIE_FUNC_MASK 0x07 #define PCIE_REG_MASK 0xFFF #define PCIE_ADDR_OFFSET(bus, slot, func, reg) \ ((((bus) & PCIE_BUS_MASK) << PCIE_BUS_SHIFT) | \ (((slot) & PCIE_SLOT_MASK) << PCIE_SLOT_SHIFT) | \ (((func) & PCIE_FUNC_MASK) << PCIE_FUNC_SHIFT) | \ ((reg) & PCIE_REG_MASK)) #define PCI_IO_WINDOW_OFFSET 0x1000 #define SPACE_CODE_SHIFT 24 #define SPACE_CODE_MASK 0x3 #define SPACE_CODE_IO_SPACE 0x1 #define PROPS_CELL_SIZE 1 #define PCI_ADDR_CELL_SIZE 2 /* OFW bus interface */ struct generic_pcie_ofw_devinfo { struct ofw_bus_devinfo di_dinfo; struct resource_list di_rl; }; /* Forward prototypes */ static int generic_pcie_probe(device_t dev); static int parse_pci_mem_ranges(struct generic_pcie_softc *sc); static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes); static int generic_pcie_maxslots(device_t dev); static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value); static struct resource *generic_pcie_alloc_resource_ofw(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static struct resource *generic_pcie_alloc_resource_pcie(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int generic_pcie_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res); static int generic_pcie_release_resource_ofw(device_t, device_t, int, int, struct resource *); static int generic_pcie_release_resource_pcie(device_t, device_t, int, int, struct resource *); static int generic_pcie_ofw_bus_attach(device_t); static const struct ofw_bus_devinfo *generic_pcie_ofw_get_devinfo(device_t, device_t); static __inline void get_addr_size_cells(phandle_t node, pcell_t *addr_cells, pcell_t *size_cells) { *addr_cells = 2; /* Find address cells if present */ OF_getencprop(node, "#address-cells", addr_cells, sizeof(*addr_cells)); *size_cells = 2; /* Find size cells if present */ OF_getencprop(node, "#size-cells", size_cells, sizeof(*size_cells)); } static int generic_pcie_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "pci-host-ecam-generic")) { device_set_desc(dev, "Generic PCI host controller"); return (BUS_PROBE_GENERIC); } if (ofw_bus_is_compatible(dev, "arm,gem5_pcie")) { device_set_desc(dev, "GEM5 PCIe host controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } int pci_host_generic_attach(device_t dev) { struct generic_pcie_softc *sc; uint64_t phys_base; uint64_t pci_base; uint64_t size; phandle_t node; int error; int tuple; int rid; sc = device_get_softc(dev); sc->dev = dev; /* Retrieve 'ranges' property from FDT */ if (bootverbose) device_printf(dev, "parsing FDT for ECAM%d:\n", sc->ecam); if 
(parse_pci_mem_ranges(sc)) return (ENXIO); /* Attach OFW bus */ if (generic_pcie_ofw_bus_attach(dev) != 0) return (ENXIO); node = ofw_bus_get_node(dev); if (sc->coherent == 0) { sc->coherent = OF_hasprop(node, "dma-coherent"); } if (bootverbose) device_printf(dev, "Bus is%s cache-coherent\n", sc->coherent ? "" : " not"); /* Create the parent DMA tag to pass down the coherent flag */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->dmat); if (error != 0) return (error); rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not map memory.\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->res); sc->bsh = rman_get_bushandle(sc->res); sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "PCIe Memory"; sc->io_rman.rm_type = RMAN_ARRAY; sc->io_rman.rm_descr = "PCIe IO window"; /* Initialize rman and allocate memory regions */ error = rman_init(&sc->mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } error = rman_init(&sc->io_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { phys_base = sc->ranges[tuple].phys_base; pci_base = sc->ranges[tuple].pci_base; size = sc->ranges[tuple].size; if (phys_base == 0 || size == 0) continue; /* empty range element */ if (sc->ranges[tuple].flags & FLAG_MEM) { error = rman_manage_region(&sc->mem_rman, phys_base, phys_base + size - 1); } else if (sc->ranges[tuple].flags & FLAG_IO) { error = rman_manage_region(&sc->io_rman, pci_base + PCI_IO_WINDOW_OFFSET, pci_base + PCI_IO_WINDOW_OFFSET + size - 1); } else continue; if (error) { device_printf(dev, "rman_manage_region() failed." 
"error = %d\n", error); rman_fini(&sc->mem_rman); return (error); } } ofw_bus_setup_iinfo(node, &sc->pci_iinfo, sizeof(cell_t)); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int parse_pci_mem_ranges(struct generic_pcie_softc *sc) { pcell_t pci_addr_cells, parent_addr_cells; pcell_t attributes, size_cells; cell_t *base_ranges; int nbase_ranges; phandle_t node; int i, j, k; int tuple; node = ofw_bus_get_node(sc->dev); OF_getencprop(node, "#address-cells", &pci_addr_cells, sizeof(pci_addr_cells)); OF_getencprop(node, "#size-cells", &size_cells, sizeof(size_cells)); OF_getencprop(OF_parent(node), "#address-cells", &parent_addr_cells, sizeof(parent_addr_cells)); if (parent_addr_cells != 2 || pci_addr_cells != 3 || size_cells != 2) { device_printf(sc->dev, "Unexpected number of address or size cells in FDT\n"); return (ENXIO); } nbase_ranges = OF_getproplen(node, "ranges"); sc->nranges = nbase_ranges / sizeof(cell_t) / (parent_addr_cells + pci_addr_cells + size_cells); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { attributes = (base_ranges[j++] >> SPACE_CODE_SHIFT) & \ SPACE_CODE_MASK; if (attributes == SPACE_CODE_IO_SPACE) { sc->ranges[i].flags |= FLAG_IO; } else { sc->ranges[i].flags |= FLAG_MEM; } sc->ranges[i].pci_base = 0; for (k = 0; k < (pci_addr_cells - 1); k++) { sc->ranges[i].pci_base <<= 32; sc->ranges[i].pci_base |= base_ranges[j++]; } sc->ranges[i].phys_base = 0; for (k = 0; k < parent_addr_cells; k++) { sc->ranges[i].phys_base <<= 32; sc->ranges[i].phys_base |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < size_cells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } for (; i < MAX_RANGES_TUPLES; i++) { /* zero-fill remaining tuples to mark empty elements in array */ sc->ranges[i].pci_base = 0; sc->ranges[i].phys_base = 0; sc->ranges[i].size = 0; } if (bootverbose) { for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { device_printf(sc->dev, "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx\n", sc->ranges[tuple].pci_base, sc->ranges[tuple].phys_base, sc->ranges[tuple].size); } } free(base_ranges, M_DEVBUF); return (0); } static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct generic_pcie_softc *sc; bus_space_handle_t h; bus_space_tag_t t; uint64_t offset; uint32_t data; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return (~0U); sc = device_get_softc(dev); offset = PCIE_ADDR_OFFSET(bus, slot, func, reg); t = sc->bst; h = sc->bsh; switch (bytes) { case 1: data = bus_space_read_1(t, h, offset); break; case 2: data = le16toh(bus_space_read_2(t, h, offset)); break; case 4: data = le32toh(bus_space_read_4(t, h, offset)); break; default: return (~0U); } return (data); } static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct generic_pcie_softc *sc; bus_space_handle_t h; bus_space_tag_t t; uint64_t offset; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return; sc = device_get_softc(dev); offset = PCIE_ADDR_OFFSET(bus, slot, func, reg); t = sc->bst; h = sc->bsh; switch (bytes) { case 1: bus_space_write_1(t, h, offset, val); break; case 2: bus_space_write_2(t, h, offset, htole16(val)); break; case 4: bus_space_write_4(t, h, offset, htole32(val)); break; default: return; } } 
static int generic_pcie_maxslots(device_t dev) { return (31); /* max slots per bus acc. to standard */ } static int generic_pcie_route_interrupt(device_t bus, device_t dev, int pin) { struct generic_pcie_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[2]; phandle_t iparent; int intrcells; sc = device_get_softc(bus); pintr = pin; bzero(®, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (intrcells) { pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr); return (pintr); } device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct generic_pcie_softc *sc; int secondary_bus; sc = device_get_softc(dev); if (index == PCIB_IVAR_BUS) { /* this pcib adds only pci bus 0 as child */ secondary_bus = 0; *result = secondary_bus; return (0); } if (index == PCIB_IVAR_DOMAIN) { *result = sc->ecam; return (0); } if (bootverbose) device_printf(dev, "ERROR: Unknown index %d.\n", index); return (ENOENT); } static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } static struct rman * generic_pcie_rman(struct generic_pcie_softc *sc, int type) { switch (type) { case SYS_RES_IOPORT: return (&sc->io_rman); case SYS_RES_MEMORY: return (&sc->mem_rman); default: break; } return (NULL); } static int generic_pcie_release_resource_pcie(device_t dev, device_t child, int type, int rid, struct resource *res) { struct generic_pcie_softc *sc; struct rman *rm; sc = device_get_softc(dev); rm = generic_pcie_rman(sc, type); if (rm != NULL) { KASSERT(rman_is_region_manager(res, rm), ("rman mismatch")); rman_release_resource(res); } return (bus_generic_release_resource(dev, child, type, rid, res)); } static int generic_pcie_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_softc *sc; if (type == PCI_RES_BUS) { sc = device_get_softc(dev); return (pci_domain_release_bus(sc->ecam, child, rid, res)); } #endif /* For PCIe devices that do not have FDT nodes, use PCIB method */ if ((int)ofw_bus_get_node(child) <= 0) { return (generic_pcie_release_resource_pcie(dev, child, type, rid, res)); } /* For other devices use OFW method */ return (generic_pcie_release_resource_ofw(dev, child, type, rid, res)); } struct resource * pci_host_generic_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_softc *sc; if (type == PCI_RES_BUS) { sc = device_get_softc(dev); return (pci_domain_alloc_bus(sc->ecam, child, rid, start, end, count, flags)); } #endif /* For PCIe devices that do not have FDT nodes, use PCIB method */ if ((int)ofw_bus_get_node(child) <= 0) return (generic_pcie_alloc_resource_pcie(dev, child, type, rid, start, end, count, flags)); /* For other devices use OFW method */ return (generic_pcie_alloc_resource_ofw(dev, child, type, rid, start, end, count, flags)); } static struct resource * generic_pcie_alloc_resource_pcie(device_t dev, device_t child, int type, 
int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct generic_pcie_softc *sc; struct resource *res; struct rman *rm; sc = device_get_softc(dev); rm = generic_pcie_rman(sc, type); if (rm == NULL) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags)); if (bootverbose) { device_printf(dev, "rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n", start, end, count); } res = rman_reserve_resource(rm, start, end, count, flags, child); if (res == NULL) goto fail; rman_set_rid(res, *rid); if (flags & RF_ACTIVE) if (bus_activate_resource(child, type, *rid, res)) { rman_release_resource(res); goto fail; } return (res); fail: device_printf(dev, "%s FAIL: type=%d, rid=%d, " "start=%016jx, end=%016jx, count=%016jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); return (NULL); } static int generic_pcie_adjust_resource(device_t dev, device_t child, int type, struct resource *res, rman_res_t start, rman_res_t end) { struct generic_pcie_softc *sc; struct rman *rm; sc = device_get_softc(dev); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) if (type == PCI_RES_BUS) return (pci_domain_adjust_bus(sc->ecam, child, res, start, end)); #endif rm = generic_pcie_rman(sc, type); if (rm != NULL) return (rman_adjust_resource(res, start, end)); return (bus_generic_adjust_resource(dev, child, type, res, start, end)); } static int generic_pcie_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct generic_pcie_softc *sc; uint64_t phys_base; uint64_t pci_base; uint64_t size; int found; int res; int i; sc = device_get_softc(dev); if ((res = rman_activate_resource(r)) != 0) return (res); switch(type) { case SYS_RES_IOPORT: found = 0; for (i = 0; i < MAX_RANGES_TUPLES; i++) { pci_base = sc->ranges[i].pci_base; phys_base = sc->ranges[i].phys_base; size = sc->ranges[i].size; if ((rid > pci_base) && (rid < (pci_base + size))) { found = 1; break; } } if (found) { rman_set_start(r, rman_get_start(r) + phys_base); rman_set_end(r, rman_get_end(r) + phys_base); BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type, rid, r); } else { device_printf(dev, "Failed to activate IOPORT resource\n"); res = 0; } break; case SYS_RES_MEMORY: BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type, rid, r); break; default: break; } return (res); } static int generic_pcie_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct generic_pcie_softc *sc; vm_offset_t vaddr; int res; sc = device_get_softc(dev); if ((res = rman_deactivate_resource(r)) != 0) return (res); switch(type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: vaddr = (vm_offset_t)rman_get_virtual(r); pmap_unmapdev(vaddr, rman_get_size(r)); break; default: break; } return (res); } static bus_dma_tag_t generic_pcie_get_dma_tag(device_t dev, device_t child) { struct generic_pcie_softc *sc; sc = device_get_softc(dev); return (sc->dmat); } static int generic_pcie_alloc_msi(device_t pci, device_t child, int count, int maxcount, int *irqs) { #if defined(INTRNG) phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); return (intr_alloc_msi(pci, child, msi_parent, count, maxcount, irqs)); -#elif defined(__aarch64__) - return (arm_alloc_msi(pci, child, count, maxcount, irqs)); #else return (ENXIO); #endif } static int generic_pcie_release_msi(device_t pci, device_t child, int count, int *irqs) { #if defined(INTRNG) phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), 
pci_get_rid(child), &msi_parent, NULL); return (intr_release_msi(pci, child, msi_parent, count, irqs)); -#elif defined(__aarch64__) - return (arm_release_msi(pci, child, count, irqs)); #else return (ENXIO); #endif } static int generic_pcie_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, uint32_t *data) { #if defined(INTRNG) phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); return (intr_map_msi(pci, child, msi_parent, irq, addr, data)); -#elif defined(__aarch64__) - return (arm_map_msi(pci, child, irq, addr, data)); #else return (ENXIO); #endif } static int generic_pcie_alloc_msix(device_t pci, device_t child, int *irq) { #if defined(INTRNG) phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); return (intr_alloc_msix(pci, child, msi_parent, irq)); -#elif defined(__aarch64__) - return (arm_alloc_msix(pci, child, irq)); #else return (ENXIO); #endif } static int generic_pcie_release_msix(device_t pci, device_t child, int irq) { #if defined(INTRNG) phandle_t msi_parent; ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent, NULL); return (intr_release_msix(pci, child, msi_parent, irq)); -#elif defined(__aarch64__) - return (arm_release_msix(pci, child, irq)); #else return (ENXIO); #endif } int generic_pcie_get_id(device_t pci, device_t child, enum pci_id_type type, uintptr_t *id) { phandle_t node; uint32_t rid; uint16_t pci_rid; if (type != PCI_ID_MSI) return (pcib_get_id(pci, child, type, id)); node = ofw_bus_get_node(pci); pci_rid = pci_get_rid(child); ofw_bus_msimap(node, pci_rid, NULL, &rid); *id = rid; return (0); } static device_method_t generic_pcie_methods[] = { DEVMETHOD(device_probe, generic_pcie_probe), DEVMETHOD(device_attach, pci_host_generic_attach), DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), DEVMETHOD(bus_alloc_resource, pci_host_generic_alloc_resource), DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), DEVMETHOD(bus_release_resource, generic_pcie_release_resource), DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource), DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_dma_tag, generic_pcie_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), DEVMETHOD(pcib_route_interrupt, generic_pcie_route_interrupt), DEVMETHOD(pcib_read_config, generic_pcie_read_config), DEVMETHOD(pcib_write_config, generic_pcie_write_config), DEVMETHOD(pcib_alloc_msi, generic_pcie_alloc_msi), DEVMETHOD(pcib_release_msi, generic_pcie_release_msi), DEVMETHOD(pcib_alloc_msix, generic_pcie_alloc_msix), DEVMETHOD(pcib_release_msix, generic_pcie_release_msix), DEVMETHOD(pcib_map_msi, generic_pcie_map_msi), DEVMETHOD(pcib_get_id, generic_pcie_get_id), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, generic_pcie_ofw_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static const struct ofw_bus_devinfo * generic_pcie_ofw_get_devinfo(device_t bus __unused, device_t child) { struct generic_pcie_ofw_devinfo *di; di = device_get_ivars(child); return (&di->di_dinfo); } static struct resource * 
generic_pcie_alloc_resource_ofw(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct generic_pcie_softc *sc; struct generic_pcie_ofw_devinfo *di; struct resource_list_entry *rle; int i; sc = device_get_softc(bus); if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; /* Find defaults for this rid */ rle = resource_list_find(&di->di_rl, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } if (type == SYS_RES_MEMORY) { /* Remap through ranges property */ for (i = 0; i < MAX_RANGES_TUPLES; i++) { if (start >= sc->ranges[i].phys_base && end < sc->ranges[i].pci_base + sc->ranges[i].size) { start -= sc->ranges[i].phys_base; start += sc->ranges[i].pci_base; end -= sc->ranges[i].phys_base; end += sc->ranges[i].pci_base; break; } } if (i == MAX_RANGES_TUPLES) { device_printf(bus, "Could not map resource " "%#jx-%#jx\n", start, end); return (NULL); } } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int generic_pcie_release_resource_ofw(device_t bus, device_t child, int type, int rid, struct resource *res) { return (bus_generic_release_resource(bus, child, type, rid, res)); } /* Helper functions */ static int generic_pcie_ofw_bus_attach(device_t dev) { struct generic_pcie_ofw_devinfo *di; device_t child; phandle_t parent, node; pcell_t addr_cells, size_cells; parent = ofw_bus_get_node(dev); if (parent > 0) { get_addr_size_cells(parent, &addr_cells, &size_cells); /* Iterate through all bus subordinates */ for (node = OF_child(parent); node > 0; node = OF_peer(node)) { /* Allocate and populate devinfo. */ di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) { free(di, M_DEVBUF); continue; } /* Initialize and populate resource list. */ resource_list_init(&di->di_rl); ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells, &di->di_rl); #ifndef INTRNG ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL); #endif /* Add newbus device for this FDT node */ child = device_add_child(dev, NULL, -1); if (child == NULL) { resource_list_free(&di->di_rl); ofw_bus_gen_destroy_devinfo(&di->di_dinfo); free(di, M_DEVBUF); continue; } device_set_ivars(child, di); } } return (0); } DEFINE_CLASS_0(pcib, generic_pcie_driver, generic_pcie_methods, sizeof(struct generic_pcie_softc)); devclass_t generic_pcie_devclass; DRIVER_MODULE(pcib, simplebus, generic_pcie_driver, generic_pcie_devclass, 0, 0); DRIVER_MODULE(pcib, ofwbus, generic_pcie_driver, generic_pcie_devclass, 0, 0); Index: stable/11 =================================================================== --- stable/11 (revision 305135) +++ stable/11 (revision 305136) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r302847-302848,302852-302853
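
A userland sketch of the IPI bookkeeping pattern that this change introduces in mp_machdep.c (intr_pic_ipi_setup()/intr_ipi_dispatch()): a fixed table of sources, each holding a handler, its argument, a name and a per-source counter. The names ipi_table, ipi_setup and ipi_dispatch are hypothetical; only the structure mirrors the patch, and the kernel's per-CPU counters and send path are omitted:

/*
 * Illustration of the INTRNG-style IPI source table: setup registers a
 * handler for an IPI number, dispatch bumps the counter and calls it.
 */
#include <assert.h>
#include <stdio.h>

#define IPI_COUNT	6
#define IPI_NAMELEN	16

typedef void ipi_handler_t(void *);

struct ipi_source {
	ipi_handler_t	*is_handler;
	void		*is_arg;
	char		 is_name[IPI_NAMELEN];
	unsigned long	 is_count;
};

static struct ipi_source ipi_table[IPI_COUNT];

static struct ipi_source *
ipi_lookup(unsigned ipi)
{

	assert(ipi < IPI_COUNT);
	return (&ipi_table[ipi]);
}

static void
ipi_setup(unsigned ipi, const char *name, ipi_handler_t *hand, void *arg)
{
	struct ipi_source *is = ipi_lookup(ipi);

	is->is_handler = hand;
	is->is_arg = arg;
	snprintf(is->is_name, sizeof(is->is_name), "%s", name);
}

static void
ipi_dispatch(unsigned ipi)
{
	struct ipi_source *is = ipi_lookup(ipi);

	assert(is->is_handler != NULL);
	is->is_count++;
	is->is_handler(is->is_arg);
}

static void
hardclock_handler(void *arg)
{

	(void)arg;
	printf("hardclock IPI handled\n");
}

int
main(void)
{

	ipi_setup(5, "hardclock", hardclock_handler, NULL);
	ipi_dispatch(5);
	return (0);
}

In the kernel, the equivalent of ipi_dispatch() is driven by the interrupt controller (the GICv3 driver calls intr_ipi_dispatch() when an SGI fires), and the send side goes through PIC_IPI_SEND() on the root interrupt controller, as the patched release_aps() and pic_ipi_send() show.
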