Index: head/sys/arm64/arm64/gic_v3.c =================================================================== --- head/sys/arm64/arm64/gic_v3.c (revision 335051) +++ head/sys/arm64/arm64/gic_v3.c (revision 335052) @@ -1,1263 +1,1263 @@ /*- * Copyright (c) 2015-2016 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * the sponsorship of the FreeBSD Foundation. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_acpi.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #ifdef DEV_ACPI #include #include #endif #include "pic_if.h" #include #include "gic_v3_reg.h" #include "gic_v3_var.h" static bus_get_domain_t gic_v3_get_domain; static bus_read_ivar_t gic_v3_read_ivar; static pic_disable_intr_t gic_v3_disable_intr; static pic_enable_intr_t gic_v3_enable_intr; static pic_map_intr_t gic_v3_map_intr; static pic_setup_intr_t gic_v3_setup_intr; static pic_teardown_intr_t gic_v3_teardown_intr; static pic_post_filter_t gic_v3_post_filter; static pic_post_ithread_t gic_v3_post_ithread; static pic_pre_ithread_t gic_v3_pre_ithread; static pic_bind_intr_t gic_v3_bind_intr; #ifdef SMP static pic_init_secondary_t gic_v3_init_secondary; static pic_ipi_send_t gic_v3_ipi_send; static pic_ipi_setup_t gic_v3_ipi_setup; #endif static u_int gic_irq_cpu; #ifdef SMP static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1]; static u_int sgi_first_unused = GIC_FIRST_SGI; #endif static device_method_t gic_v3_methods[] = { /* Device interface */ DEVMETHOD(device_detach, gic_v3_detach), /* Bus interface */ DEVMETHOD(bus_get_domain, gic_v3_get_domain), DEVMETHOD(bus_read_ivar, gic_v3_read_ivar), /* Interrupt controller interface */ DEVMETHOD(pic_disable_intr, gic_v3_disable_intr), DEVMETHOD(pic_enable_intr, gic_v3_enable_intr), DEVMETHOD(pic_map_intr, gic_v3_map_intr), DEVMETHOD(pic_setup_intr, gic_v3_setup_intr), DEVMETHOD(pic_teardown_intr, gic_v3_teardown_intr), DEVMETHOD(pic_post_filter, gic_v3_post_filter), DEVMETHOD(pic_post_ithread, gic_v3_post_ithread), 
DEVMETHOD(pic_pre_ithread, gic_v3_pre_ithread), #ifdef SMP DEVMETHOD(pic_bind_intr, gic_v3_bind_intr), DEVMETHOD(pic_init_secondary, gic_v3_init_secondary), DEVMETHOD(pic_ipi_send, gic_v3_ipi_send), DEVMETHOD(pic_ipi_setup, gic_v3_ipi_setup), #endif /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods, sizeof(struct gic_v3_softc)); /* * Driver-specific definitions. */ MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR); /* * Helper functions and definitions. */ /* Destination registers, either Distributor or Re-Distributor */ enum gic_v3_xdist { DIST = 0, REDIST, }; struct gic_v3_irqsrc { struct intr_irqsrc gi_isrc; uint32_t gi_irq; enum intr_polarity gi_pol; enum intr_trigger gi_trig; }; /* Helper routines starting with gic_v3_ */ static int gic_v3_dist_init(struct gic_v3_softc *); static int gic_v3_redist_alloc(struct gic_v3_softc *); static int gic_v3_redist_find(struct gic_v3_softc *); static int gic_v3_redist_init(struct gic_v3_softc *); static int gic_v3_cpu_init(struct gic_v3_softc *); static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist); /* A sequence of init functions for primary (boot) CPU */ typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *); /* Primary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_primary_init[] = { gic_v3_dist_init, gic_v3_redist_alloc, gic_v3_redist_init, gic_v3_cpu_init, NULL }; #ifdef SMP /* Secondary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_secondary_init[] = { gic_v3_redist_init, gic_v3_cpu_init, NULL }; #endif uint32_t gic_r_read_4(device_t dev, bus_size_t offset) { struct gic_v3_softc *sc; sc = device_get_softc(dev); return (bus_read_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset)); } uint64_t gic_r_read_8(device_t dev, bus_size_t offset) { struct gic_v3_softc *sc; sc = device_get_softc(dev); return (bus_read_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset)); } void gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val) { struct gic_v3_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val); } void gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val) { struct gic_v3_softc *sc; sc = device_get_softc(dev); bus_write_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val); } /* * Device interface. */ int gic_v3_attach(device_t dev) { struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; uint32_t typer; int rid; int err; size_t i; u_int irq; const char *name; sc = device_get_softc(dev); sc->gic_registered = FALSE; sc->dev = dev; err = 0; /* Initialize mutex */ mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN); /* * Allocate array of struct resource. * One entry for Distributor and all remaining for Re-Distributor. */ sc->gic_res = malloc( sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1), M_GIC_V3, M_WAITOK); /* Now allocate corresponding resources */ for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) { sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->gic_res[rid] == NULL) return (ENXIO); } /* * Distributor interface */ sc->gic_dist = sc->gic_res[0]; /* * Re-Dristributor interface */ /* Allocate space under region descriptions */ sc->gic_redists.regions = malloc( sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions, M_GIC_V3, M_WAITOK); /* Fill-up bus_space information for each region. 
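 * Illustrative note: rid 0 is the Distributor frame, so the
 * Re-Distributor regions occupy rids 1..nregions; with a hypothetical
 * two-region layout, regions[0] and regions[1] below end up aliasing
 * gic_res[1] and gic_res[2].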
*/ for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++) sc->gic_redists.regions[i] = sc->gic_res[rid]; /* Get the number of supported SPI interrupts */ typer = gic_d_read(sc, 4, GICD_TYPER); sc->gic_nirqs = GICD_TYPER_I_NUM(typer); if (sc->gic_nirqs > GIC_I_NUM_MAX) sc->gic_nirqs = GIC_I_NUM_MAX; sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs, M_GIC_V3, M_WAITOK | M_ZERO); name = device_get_nameunit(dev); for (irq = 0; irq < sc->gic_nirqs; irq++) { struct intr_irqsrc *isrc; sc->gic_irqs[irq].gi_irq = irq; sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM; sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM; isrc = &sc->gic_irqs[irq].gi_isrc; if (irq <= GIC_LAST_SGI) { err = intr_isrc_register(isrc, sc->dev, INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI); } else if (irq <= GIC_LAST_PPI) { err = intr_isrc_register(isrc, sc->dev, INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI); } else { err = intr_isrc_register(isrc, sc->dev, 0, "%s,s%u", name, irq - GIC_FIRST_SPI); } if (err != 0) { /* XXX call intr_isrc_deregister() */ free(sc->gic_irqs, M_DEVBUF); return (err); } } /* * Read the Peripheral ID2 register. This is an implementation * defined register, but seems to be implemented in all GICv3 * parts and Linux expects it to be there. */ sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2); /* Get the number of supported interrupt identifier bits */ sc->gic_idbits = GICD_TYPER_IDBITS(typer); if (bootverbose) { device_printf(dev, "SPIs: %u, IDs: %u\n", sc->gic_nirqs, (1 << sc->gic_idbits) - 1); } /* Train init sequence for boot CPU */ for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) return (err); } return (0); } int gic_v3_detach(device_t dev) { struct gic_v3_softc *sc; size_t i; int rid; sc = device_get_softc(dev); if (device_is_attached(dev)) { /* * XXX: We should probably deregister PIC */ if (sc->gic_registered) panic("Trying to detach registered PIC"); } for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]); for (i = 0; i <= mp_maxid; i++) free(sc->gic_redists.pcpu[i], M_GIC_V3); free(sc->gic_res, M_GIC_V3); free(sc->gic_redists.regions, M_GIC_V3); return (0); } static int gic_v3_get_domain(device_t dev, device_t child, int *domain) { struct gic_v3_devinfo *di; di = device_get_ivars(child); if (di->gic_domain < 0) return (ENOENT); *domain = di->gic_domain; return (0); } static int gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct gic_v3_softc *sc; sc = device_get_softc(dev); switch (which) { case GICV3_IVAR_NIRQS: *result = (NIRQ - sc->gic_nirqs) / sc->gic_nchildren; return (0); case GICV3_IVAR_REDIST_VADDR: *result = (uintptr_t)rman_get_virtual( sc->gic_redists.pcpu[PCPU_GET(cpuid)]); return (0); case GIC_IVAR_HW_REV: KASSERT( GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 || GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4, ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)", GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2)); *result = GICR_PIDR2_ARCH(sc->gic_pidr2); return (0); case GIC_IVAR_BUS: KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN, ("gic_v3_read_ivar: Unknown bus type")); KASSERT(sc->gic_bus <= GIC_BUS_MAX, ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus)); *result = sc->gic_bus; return (0); } return (ENOENT); } int arm_gic_v3_intr(void *arg) { struct gic_v3_softc *sc = arg; struct gic_v3_irqsrc *gi; struct intr_pic *pic; uint64_t active_irq; struct trapframe *tf; pic = 
sc->gic_pic; while (1) { - if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) { + if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) { /* * Hardware: Cavium ThunderX * Chip revision: Pass 1.0 (early version) * Pass 1.1 (production) * ERRATUM: 22978, 23154 */ __asm __volatile( "nop;nop;nop;nop;nop;nop;nop;nop; \n" "mrs %0, ICC_IAR1_EL1 \n" "nop;nop;nop;nop; \n" "dsb sy \n" : "=&r" (active_irq)); } else { active_irq = gic_icc_read(IAR1); } if (active_irq >= GIC_FIRST_LPI) { intr_child_irq_handler(pic, active_irq); continue; } if (__predict_false(active_irq >= sc->gic_nirqs)) return (FILTER_HANDLED); tf = curthread->td_intr_frame; gi = &sc->gic_irqs[active_irq]; if (active_irq <= GIC_LAST_SGI) { /* Call EOI for all IPI before dispatch. */ gic_icc_write(EOIR1, (uint64_t)active_irq); #ifdef SMP intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf); #else device_printf(sc->dev, "SGI %ju on UP system detected\n", (uintmax_t)(active_irq - GIC_FIRST_SGI)); #endif } else if (active_irq >= GIC_FIRST_PPI && active_irq <= GIC_LAST_SPI) { if (gi->gi_trig == INTR_TRIGGER_EDGE) gic_icc_write(EOIR1, gi->gi_irq); if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) { if (gi->gi_trig != INTR_TRIGGER_EDGE) gic_icc_write(EOIR1, gi->gi_irq); gic_v3_disable_intr(sc->dev, &gi->gi_isrc); device_printf(sc->dev, "Stray irq %lu disabled\n", active_irq); } } } } #ifdef FDT static int gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { u_int irq; if (ncells < 3) return (EINVAL); /* * The 1st cell is the interrupt type: * 0 = SPI * 1 = PPI * The 2nd cell contains the interrupt number: * [0 - 987] for SPI * [0 - 15] for PPI * The 3rd cell is the flags, encoded as follows: * bits[3:0] trigger type and level flags * 1 = edge triggered * 2 = edge triggered (PPI only) * 4 = level-sensitive * 8 = level-sensitive (PPI only) */ switch (cells[0]) { case 0: irq = GIC_FIRST_SPI + cells[1]; /* SPI irq is checked later. 
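 * As a hedged example of the three-cell encoding documented above, a
 * hypothetical device node carrying
 *     interrupts = <0 32 4>;
 * describes SPI 32 (mapped here to GIC_FIRST_SPI + 32 = 64),
 * level-sensitive, active-high (FDT_INTR_LEVEL_HIGH).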
*/ break; case 1: irq = GIC_FIRST_PPI + cells[1]; if (irq > GIC_LAST_PPI) { device_printf(dev, "unsupported PPI interrupt " "number %u\n", cells[1]); return (EINVAL); } break; default: device_printf(dev, "unsupported interrupt type " "configuration %u\n", cells[0]); return (EINVAL); } switch (cells[2] & FDT_INTR_MASK) { case FDT_INTR_EDGE_RISING: *trigp = INTR_TRIGGER_EDGE; *polp = INTR_POLARITY_HIGH; break; case FDT_INTR_EDGE_FALLING: *trigp = INTR_TRIGGER_EDGE; *polp = INTR_POLARITY_LOW; break; case FDT_INTR_LEVEL_HIGH: *trigp = INTR_TRIGGER_LEVEL; *polp = INTR_POLARITY_HIGH; break; case FDT_INTR_LEVEL_LOW: *trigp = INTR_TRIGGER_LEVEL; *polp = INTR_POLARITY_LOW; break; default: device_printf(dev, "unsupported trigger/polarity " "configuration 0x%02x\n", cells[2]); return (EINVAL); } /* Check the interrupt is valid */ if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH) return (EINVAL); *irqp = irq; return (0); } #endif static int gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { struct gic_v3_irqsrc *gi; /* SPI-mapped MSI */ gi = (struct gic_v3_irqsrc *)msi_data->isrc; if (gi == NULL) return (ENXIO); *irqp = gi->gi_irq; /* MSI/MSI-X interrupts are always edge triggered with high polarity */ *polp = INTR_POLARITY_HIGH; *trigp = INTR_TRIGGER_EDGE; return (0); } static int do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp, enum intr_polarity *polp, enum intr_trigger *trigp) { struct gic_v3_softc *sc; enum intr_polarity pol; enum intr_trigger trig; struct intr_map_data_msi *dam; #ifdef FDT struct intr_map_data_fdt *daf; #endif #ifdef DEV_ACPI struct intr_map_data_acpi *daa; #endif u_int irq; sc = device_get_softc(dev); switch (data->type) { #ifdef FDT case INTR_MAP_DATA_FDT: daf = (struct intr_map_data_fdt *)data; if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol, &trig) != 0) return (EINVAL); break; #endif #ifdef DEV_ACPI case INTR_MAP_DATA_ACPI: daa = (struct intr_map_data_acpi *)data; irq = daa->irq; pol = daa->pol; trig = daa->trig; break; #endif case INTR_MAP_DATA_MSI: /* SPI-mapped MSI */ dam = (struct intr_map_data_msi *)data; if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0) return (EINVAL); break; default: return (EINVAL); } if (irq >= sc->gic_nirqs) return (EINVAL); switch (pol) { case INTR_POLARITY_CONFORM: case INTR_POLARITY_LOW: case INTR_POLARITY_HIGH: break; default: return (EINVAL); } switch (trig) { case INTR_TRIGGER_CONFORM: case INTR_TRIGGER_EDGE: case INTR_TRIGGER_LEVEL: break; default: return (EINVAL); } *irqp = irq; if (polp != NULL) *polp = pol; if (trigp != NULL) *trigp = trig; return (0); } static int gic_v3_map_intr(device_t dev, struct intr_map_data *data, struct intr_irqsrc **isrcp) { struct gic_v3_softc *sc; int error; u_int irq; error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL); if (error == 0) { sc = device_get_softc(dev); *isrcp = GIC_INTR_ISRC(sc, irq); } return (error); } static int gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_v3_softc *sc = device_get_softc(dev); struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; enum intr_trigger trig; enum intr_polarity pol; uint32_t reg; u_int irq; int error; if (data == NULL) return (ENOTSUP); error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig); if (error != 0) return (error); if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM || trig == INTR_TRIGGER_CONFORM) return (EINVAL); /* Compare config if this is not first 
setup. */ if (isrc->isrc_handlers != 0) { if (pol != gi->gi_pol || trig != gi->gi_trig) return (EINVAL); else return (0); } gi->gi_pol = pol; gi->gi_trig = trig; /* * XXX - In case that per CPU interrupt is going to be enabled in time * when SMP is already started, we need some IPI call which * enables it on others CPUs. Further, it's more complicated as * pic_enable_source() and pic_disable_source() should act on * per CPU basis only. Thus, it should be solved here somehow. */ if (isrc->isrc_flags & INTR_ISRCF_PPI) CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) { mtx_lock_spin(&sc->gic_mtx); /* Set the trigger and polarity */ if (irq <= GIC_LAST_PPI) reg = gic_r_read(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICFGR(irq)); else reg = gic_d_read(sc, 4, GICD_ICFGR(irq)); if (trig == INTR_TRIGGER_LEVEL) reg &= ~(2 << ((irq % 16) * 2)); else reg |= 2 << ((irq % 16) * 2); if (irq <= GIC_LAST_PPI) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg); gic_v3_wait_for_rwp(sc, REDIST); } else { gic_d_write(sc, 4, GICD_ICFGR(irq), reg); gic_v3_wait_for_rwp(sc, DIST); } mtx_unlock_spin(&sc->gic_mtx); gic_v3_bind_intr(dev, isrc); } return (0); } static int gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc, struct resource *res, struct intr_map_data *data) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; if (isrc->isrc_handlers == 0) { gi->gi_pol = INTR_POLARITY_CONFORM; gi->gi_trig = INTR_TRIGGER_CONFORM; } return (0); } static void gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; u_int irq; sc = device_get_softc(dev); gi = (struct gic_v3_irqsrc *)isrc; irq = gi->gi_irq; if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else panic("%s: Unsupported IRQ %u", __func__, irq); } static void gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; u_int irq; sc = device_get_softc(dev); gi = (struct gic_v3_irqsrc *)isrc; irq = gi->gi_irq; if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else panic("%s: Unsupported IRQ %u", __func__, irq); } static void gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; gic_v3_disable_intr(dev, isrc); gic_icc_write(EOIR1, gi->gi_irq); } static void gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc) { gic_v3_enable_intr(dev, isrc); } static void gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; if (gi->gi_trig == INTR_TRIGGER_EDGE) return; gic_icc_write(EOIR1, gi->gi_irq); } static int gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc) { struct gic_v3_softc *sc; struct gic_v3_irqsrc *gi; int cpu; gi = (struct gic_v3_irqsrc *)isrc; if (gi->gi_irq <= GIC_LAST_PPI) return (EINVAL); KASSERT(gi->gi_irq >= GIC_FIRST_SPI && 
gi->gi_irq <= GIC_LAST_SPI, ("%s: Attempting to bind an invalid IRQ", __func__)); sc = device_get_softc(dev); if (CPU_EMPTY(&isrc->isrc_cpu)) { gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus); CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu); gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(gic_irq_cpu)); } else { /* * We can only bind to a single CPU so select * the first CPU found. */ cpu = CPU_FFS(&isrc->isrc_cpu) - 1; gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu)); } return (0); } #ifdef SMP static void gic_v3_init_secondary(device_t dev) { device_t child; struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; struct intr_irqsrc *isrc; u_int cpu, irq; int err, i; sc = device_get_softc(dev); cpu = PCPU_GET(cpuid); /* Train init sequence for boot CPU */ for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) { device_printf(dev, "Could not initialize GIC for CPU%u\n", cpu); return; } } /* Unmask attached SGI interrupts. */ for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) { isrc = GIC_INTR_ISRC(sc, irq); if (intr_isrc_init_on_cpu(isrc, cpu)) gic_v3_enable_intr(dev, isrc); } /* Unmask attached PPI interrupts. */ for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) { isrc = GIC_INTR_ISRC(sc, irq); if (intr_isrc_init_on_cpu(isrc, cpu)) gic_v3_enable_intr(dev, isrc); } for (i = 0; i < sc->gic_nchildren; i++) { child = sc->gic_children[i]; PIC_INIT_SECONDARY(child); } } static void gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus, u_int ipi) { struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc; uint64_t aff, val, irq; int i; #define GIC_AFF_MASK (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK) #define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK) aff = GIC_AFFINITY(0); irq = gi->gi_irq; val = 0; /* Iterate through all CPUs in set */ for (i = 0; i <= mp_maxid; i++) { /* Move to the next affinity group */ if (aff != GIC_AFFINITY(i)) { /* Send the IPI */ if (val != 0) { gic_icc_write(SGI1R, val); val = 0; } aff = GIC_AFFINITY(i); } /* Send the IPI to this cpu */ if (CPU_ISSET(i, &cpus)) { #define ICC_SGI1R_AFFINITY(aff) \ (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) | \ ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) | \ ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT)) /* Set the affinity when the first at this level */ if (val == 0) val = ICC_SGI1R_AFFINITY(aff) | irq << ICC_SGI1R_EL1_SGIID_SHIFT; /* Set the bit to send the IPI to te CPU */ val |= 1 << CPU_AFF0(CPU_AFFINITY(i)); } } /* Send the IPI to the last cpu affinity group */ if (val != 0) gic_icc_write(SGI1R, val); #undef GIC_AFF_MASK #undef GIC_AFFINITY } static int gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp) { struct intr_irqsrc *isrc; struct gic_v3_softc *sc = device_get_softc(dev); if (sgi_first_unused > GIC_LAST_SGI) return (ENOSPC); isrc = GIC_INTR_ISRC(sc, sgi_first_unused); sgi_to_ipi[sgi_first_unused++] = ipi; CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu); *isrcp = isrc; return (0); } #endif /* SMP */ /* * Helper routines */ static void gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist) { struct resource *res; u_int cpuid; size_t us_left = 1000000; cpuid = PCPU_GET(cpuid); switch (xdist) { case DIST: res = sc->gic_dist; break; case REDIST: res = sc->gic_redists.pcpu[cpuid]; break; default: KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__)); return; } while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) { DELAY(1); if (us_left-- == 0) 
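/*
 * A worked sketch of the ICC_SGI1R_EL1 image built by the
 * gic_v3_ipi_send() loop above, using hypothetical values: for SGI 3
 * aimed at the CPU with affinity 0.0.1.2,
 *
 *     val = (3UL << ICC_SGI1R_EL1_SGIID_SHIFT)    INTID,  bits [27:24]
 *         | (1UL << ICC_SGI1R_EL1_AFF1_SHIFT)     Aff1,   bits [23:16]
 *         | (1UL << 2);                           TargetList bit, Aff0 = 2
 *
 * i.e. 0x03010004.  CPUs sharing Aff3.Aff2.Aff1 are batched into a
 * single write through the 16-bit TargetList field.
 */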
panic("GICD Register write pending for too long"); } } /* CPU interface. */ static __inline void gic_v3_cpu_priority(uint64_t mask) { /* Set prority mask */ gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK); } static int gic_v3_cpu_enable_sre(struct gic_v3_softc *sc) { uint64_t sre; u_int cpuid; cpuid = PCPU_GET(cpuid); /* * Set the SRE bit to enable access to GIC CPU interface * via system registers. */ sre = READ_SPECIALREG(icc_sre_el1); sre |= ICC_SRE_EL1_SRE; WRITE_SPECIALREG(icc_sre_el1, sre); isb(); /* * Now ensure that the bit is set. */ sre = READ_SPECIALREG(icc_sre_el1); if ((sre & ICC_SRE_EL1_SRE) == 0) { /* We are done. This was disabled in EL2 */ device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface " "via system registers\n", cpuid); return (ENXIO); } else if (bootverbose) { device_printf(sc->dev, "CPU%u enabled CPU interface via system registers\n", cpuid); } return (0); } static int gic_v3_cpu_init(struct gic_v3_softc *sc) { int err; /* Enable access to CPU interface via system registers */ err = gic_v3_cpu_enable_sre(sc); if (err != 0) return (err); /* Priority mask to minimum - accept all interrupts */ gic_v3_cpu_priority(GIC_PRIORITY_MIN); /* Disable EOI mode */ gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE); /* Enable group 1 (insecure) interrups */ gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN); return (0); } /* Distributor */ static int gic_v3_dist_init(struct gic_v3_softc *sc) { uint64_t aff; u_int i; /* * 1. Disable the Distributor */ gic_d_write(sc, 4, GICD_CTLR, 0); gic_v3_wait_for_rwp(sc, DIST); /* * 2. Configure the Distributor */ /* Set all SPIs to be Group 1 Non-secure */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn) gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF); /* Set all global interrupts to be level triggered, active low. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn) gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000); /* Set priority to all shared interrupts */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) { /* Set highest priority */ gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } /* * Disable all interrupts. Leave PPI and SGIs as they are enabled in * Re-Distributor registers. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn) gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF); gic_v3_wait_for_rwp(sc, DIST); /* * 3. Enable Distributor */ /* Enable Distributor with ARE, Group 1 */ gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A | GICD_CTLR_G1); /* * 4. Route all interrupts to boot CPU. 
*/ aff = CPU_AFFINITY(0); for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++) gic_d_write(sc, 4, GICD_IROUTER(i), aff); return (0); } /* Re-Distributor */ static int gic_v3_redist_alloc(struct gic_v3_softc *sc) { u_int cpuid; /* Allocate struct resource for all CPU's Re-Distributor registers */ for (cpuid = 0; cpuid <= mp_maxid; cpuid++) if (CPU_ISSET(cpuid, &all_cpus) != 0) sc->gic_redists.pcpu[cpuid] = malloc(sizeof(*sc->gic_redists.pcpu[0]), M_GIC_V3, M_WAITOK); else sc->gic_redists.pcpu[cpuid] = NULL; return (0); } static int gic_v3_redist_find(struct gic_v3_softc *sc) { struct resource r_res; bus_space_handle_t r_bsh; uint64_t aff; uint64_t typer; uint32_t pidr2; u_int cpuid; size_t i; cpuid = PCPU_GET(cpuid); aff = CPU_AFFINITY(cpuid); /* Affinity in format for comparison with typer */ aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) | (CPU_AFF1(aff) << 8) | CPU_AFF0(aff); if (bootverbose) { device_printf(sc->dev, "Start searching for Re-Distributor\n"); } /* Iterate through Re-Distributor regions */ for (i = 0; i < sc->gic_redists.nregions; i++) { /* Take a copy of the region's resource */ r_res = *sc->gic_redists.regions[i]; r_bsh = rman_get_bushandle(&r_res); pidr2 = bus_read_4(&r_res, GICR_PIDR2); switch (GICR_PIDR2_ARCH(pidr2)) { case GICR_PIDR2_ARCH_GICv3: /* fall through */ case GICR_PIDR2_ARCH_GICv4: break; default: device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENODEV); } do { typer = bus_read_8(&r_res, GICR_TYPER); if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) { KASSERT(sc->gic_redists.pcpu[cpuid] != NULL, ("Invalid pointer to per-CPU redistributor")); /* Copy res contents to its final destination */ *sc->gic_redists.pcpu[cpuid] = r_res; if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor has been found\n", cpuid); } return (0); } r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE); if ((typer & GICR_TYPER_VLPIS) != 0) { r_bsh += (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE); } rman_set_bushandle(&r_res, r_bsh); } while ((typer & GICR_TYPER_LAST) == 0); } device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENXIO); } static int gic_v3_redist_wake(struct gic_v3_softc *sc) { uint32_t waker; size_t us_left = 1000000; waker = gic_r_read(sc, 4, GICR_WAKER); /* Wake up Re-Distributor for this CPU */ waker &= ~GICR_WAKER_PS; gic_r_write(sc, 4, GICR_WAKER, waker); /* * When clearing ProcessorSleep bit it is required to wait for * ChildrenAsleep to become zero following the processor power-on. 
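 * A hypothetical register trace: while asleep, GICR_WAKER reads 0x6
 * (ProcessorSleep | ChildrenAsleep); after the write above it reads 0x4
 * until the Re-Distributor finishes waking, then 0x0, which ends the
 * poll loop below.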
*/ while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) { DELAY(1); if (us_left-- == 0) { panic("Could not wake Re-Distributor for CPU%u", PCPU_GET(cpuid)); } } if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor woke up\n", PCPU_GET(cpuid)); } return (0); } static int gic_v3_redist_init(struct gic_v3_softc *sc) { int err; size_t i; err = gic_v3_redist_find(sc); if (err != 0) return (err); err = gic_v3_redist_wake(sc); if (err != 0) return (err); /* Configure SGIs and PPIs to be Group1 Non-secure */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0, 0xFFFFFFFF); /* Disable SPIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0, GICR_I_ENABLER_PPI_MASK); /* Enable SGIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0, GICR_I_ENABLER_SGI_MASK); /* Set priority for SGIs and PPIs */ for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } gic_v3_wait_for_rwp(sc, REDIST); return (0); } Index: head/sys/arm64/arm64/identcpu.c =================================================================== --- head/sys/arm64/arm64/identcpu.c (revision 335051) +++ head/sys/arm64/arm64/identcpu.c (revision 335052) @@ -1,1114 +1,1114 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Semihalf * under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include static int ident_lock; char machine[] = "arm64"; SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "Machine class"); /* * Per-CPU affinity as provided in MPIDR_EL1 * Indexed by CPU number in logical order selected by the system. * Relevant fields can be extracted using CPU_AFFn macros, * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system. 
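 * (For example, a hypothetical MPIDR_EL1 value of 0x0000000000000102
 * decodes as Aff1 = 1 and Aff0 = 2, i.e. CPU 2 in cluster 1.)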
* * Fields used by us: * Aff1 - Cluster number * Aff0 - CPU number in Aff1 cluster */ uint64_t __cpu_affinity[MAXCPU]; static u_int cpu_aff_levels; struct cpu_desc { u_int cpu_impl; u_int cpu_part_num; u_int cpu_variant; u_int cpu_revision; const char *cpu_impl_name; const char *cpu_part_name; uint64_t mpidr; uint64_t id_aa64afr0; uint64_t id_aa64afr1; uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint64_t id_aa64mmfr2; uint64_t id_aa64pfr0; uint64_t id_aa64pfr1; }; struct cpu_desc cpu_desc[MAXCPU]; static u_int cpu_print_regs; #define PRINT_ID_AA64_AFR0 0x00000001 #define PRINT_ID_AA64_AFR1 0x00000002 #define PRINT_ID_AA64_DFR0 0x00000010 #define PRINT_ID_AA64_DFR1 0x00000020 #define PRINT_ID_AA64_ISAR0 0x00000100 #define PRINT_ID_AA64_ISAR1 0x00000200 #define PRINT_ID_AA64_MMFR0 0x00001000 #define PRINT_ID_AA64_MMFR1 0x00002000 #define PRINT_ID_AA64_MMFR2 0x00004000 #define PRINT_ID_AA64_PFR0 0x00010000 #define PRINT_ID_AA64_PFR1 0x00020000 struct cpu_parts { u_int part_id; const char *part_name; }; #define CPU_PART_NONE { 0, "Unknown Processor" } struct cpu_implementers { u_int impl_id; const char *impl_name; /* * Part number is implementation defined * so each vendor will have its own set of values and names. */ const struct cpu_parts *cpu_parts; }; #define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none } /* * Per-implementer table of (PartNum, CPU Name) pairs. */ /* ARM Ltd. */ static const struct cpu_parts cpu_parts_arm[] = { { CPU_PART_FOUNDATION, "Foundation-Model" }, { CPU_PART_CORTEX_A35, "Cortex-A35" }, { CPU_PART_CORTEX_A53, "Cortex-A53" }, { CPU_PART_CORTEX_A55, "Cortex-A55" }, { CPU_PART_CORTEX_A57, "Cortex-A57" }, { CPU_PART_CORTEX_A72, "Cortex-A72" }, { CPU_PART_CORTEX_A73, "Cortex-A73" }, { CPU_PART_CORTEX_A75, "Cortex-A75" }, CPU_PART_NONE, }; /* Cavium */ static const struct cpu_parts cpu_parts_cavium[] = { - { CPU_PART_THUNDER, "Thunder" }, + { CPU_PART_THUNDERX, "ThunderX" }, CPU_PART_NONE, }; /* Unknown */ static const struct cpu_parts cpu_parts_none[] = { CPU_PART_NONE, }; /* * Implementers table. 
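 * Lookup walks this table until impl_id matches or the zero-id sentinel
 * is reached; e.g. MIDR implementer 0x43 selects the Cavium entry and
 * its cpu_parts_cavium table, where part number 0x0a1 now resolves to
 * "ThunderX" (values shown for illustration).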
*/ const struct cpu_implementers cpu_implementers[] = { { CPU_IMPL_ARM, "ARM", cpu_parts_arm }, { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none }, { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium }, { CPU_IMPL_DEC, "DEC", cpu_parts_none }, { CPU_IMPL_INFINEON, "IFX", cpu_parts_none }, { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none }, { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none }, { CPU_IMPL_APM, "APM", cpu_parts_none }, { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none }, { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none }, { CPU_IMPL_INTEL, "Intel", cpu_parts_none }, CPU_IMPLEMENTER_NONE, }; static void identify_cpu_sysinit(void *dummy __unused) { int cpu; CPU_FOREACH(cpu) { print_cpu_features(cpu); } } SYSINIT(idenrity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL); void print_cpu_features(u_int cpu) { int printed; printf("CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision); printf(" affinity:"); switch(cpu_aff_levels) { default: case 4: printf(" %2d", CPU_AFF3(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 3: printf(" %2d", CPU_AFF2(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 2: printf(" %2d", CPU_AFF1(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 1: case 0: /* On UP this will be zero */ printf(" %2d", CPU_AFF0(cpu_desc[cpu].mpidr)); break; } printf("\n"); /* * There is a hardware errata where, if one CPU is performing a TLB * invalidation while another is performing a store-exclusive the * store-exclusive may return the wrong status. A workaround seems * to be to use an IPI to invalidate on each CPU, however given the * limited number of affected units (pass 1.1 is the evaluation * hardware revision), and the lack of information from Cavium * this has not been implemented. * * At the time of writing this the only information is from: * https://lkml.org/lkml/2016/8/4/722 */ /* - * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 on its own also + * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also * triggers on pass 2.0+. */ if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 && - CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) + CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known " "hardware bugs that may cause the incorrect operation of " "atomic operations.\n"); if (cpu != 0 && cpu_print_regs == 0) return; #define SEP_STR ((printed++) == 0) ? 
"" : "," /* AArch64 Instruction Set Attribute Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) { printed = 0; printf(" Instruction Set Attributes 0 = <"); switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_RDM_NONE: break; case ID_AA64ISAR0_RDM_IMPL: printf("%sRDM", SEP_STR); break; default: printf("%sUnknown RDM", SEP_STR); } switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_ATOMIC_NONE: break; case ID_AA64ISAR0_ATOMIC_IMPL: printf("%sAtomic", SEP_STR); break; default: printf("%sUnknown Atomic", SEP_STR); } switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_AES_NONE: break; case ID_AA64ISAR0_AES_BASE: printf("%sAES", SEP_STR); break; case ID_AA64ISAR0_AES_PMULL: printf("%sAES+PMULL", SEP_STR); break; default: printf("%sUnknown AES", SEP_STR); break; } switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SHA1_NONE: break; case ID_AA64ISAR0_SHA1_BASE: printf("%sSHA1", SEP_STR); break; default: printf("%sUnknown SHA1", SEP_STR); break; } switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SHA2_NONE: break; case ID_AA64ISAR0_SHA2_BASE: printf("%sSHA2", SEP_STR); break; case ID_AA64ISAR0_SHA2_512: printf("%sSHA2+SHA512", SEP_STR); break; default: printf("%sUnknown SHA2", SEP_STR); break; } switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_CRC32_NONE: break; case ID_AA64ISAR0_CRC32_BASE: printf("%sCRC32", SEP_STR); break; default: printf("%sUnknown CRC32", SEP_STR); break; } switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SHA3_NONE: break; case ID_AA64ISAR0_SHA3_IMPL: printf("%sSHA3", SEP_STR); break; default: printf("%sUnknown SHA3", SEP_STR); break; } switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SM3_NONE: break; case ID_AA64ISAR0_SM3_IMPL: printf("%sSM3", SEP_STR); break; default: printf("%sUnknown SM3", SEP_STR); break; } switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SM4_NONE: break; case ID_AA64ISAR0_SM4_IMPL: printf("%sSM4", SEP_STR); break; default: printf("%sUnknown SM4", SEP_STR); break; } switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_DP_NONE: break; case ID_AA64ISAR0_DP_IMPL: printf("%sDotProd", SEP_STR); break; default: printf("%sUnknown DP", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0) printf("%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK); printf(">\n"); } /* AArch64 Instruction Set Attribute Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) { printed = 0; printf(" Instruction Set Attributes 1 = <"); switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_GPI_NONE: break; case ID_AA64ISAR1_GPI_IMPL: printf("%sImpl GenericAuth", SEP_STR); break; default: printf("%sUnknown GenericAuth", SEP_STR); break; } switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_GPA_NONE: break; case ID_AA64ISAR1_GPA_IMPL: printf("%sPrince GenericAuth", SEP_STR); break; default: printf("%sUnknown GenericAuth", SEP_STR); break; } switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_LRCPC_NONE: break; case ID_AA64ISAR1_LRCPC_IMPL: printf("%sRCpc", SEP_STR); break; default: printf("%sUnknown RCpc", SEP_STR); break; } switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_FCMA_NONE: break; case ID_AA64ISAR1_FCMA_IMPL: printf("%sFCMA", SEP_STR); break; default: 
printf("%sUnknown FCMA", SEP_STR); break; } switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_JSCVT_NONE: break; case ID_AA64ISAR1_JSCVT_IMPL: printf("%sJS Conv", SEP_STR); break; default: printf("%sUnknown JS Conv", SEP_STR); break; } switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_API_NONE: break; case ID_AA64ISAR1_API_IMPL: printf("%sImpl AddrAuth", SEP_STR); break; default: printf("%sUnknown Impl AddrAuth", SEP_STR); break; } switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_APA_NONE: break; case ID_AA64ISAR1_APA_IMPL: printf("%sPrince AddrAuth", SEP_STR); break; default: printf("%sUnknown Prince AddrAuth", SEP_STR); break; } switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) { case ID_AA64ISAR1_DPB_NONE: break; case ID_AA64ISAR1_DPB_IMPL: printf("%sDC CVAP", SEP_STR); break; default: printf("%sUnknown DC CVAP", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0) printf("%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK); printf(">\n"); } /* AArch64 Processor Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) { printed = 0; printf(" Processor Features 0 = <"); switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_SVE_NONE: break; case ID_AA64PFR0_SVE_IMPL: printf("%sSVE", SEP_STR); break; default: printf("%sUnknown SVE", SEP_STR); break; } switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_RAS_NONE: break; case ID_AA64PFR0_RAS_V1: printf("%sRASv1", SEP_STR); break; default: printf("%sUnknown RAS", SEP_STR); break; } switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_GIC_CPUIF_NONE: break; case ID_AA64PFR0_GIC_CPUIF_EN: printf("%sGIC", SEP_STR); break; default: printf("%sUnknown GIC interface", SEP_STR); break; } switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_ADV_SIMD_NONE: break; case ID_AA64PFR0_ADV_SIMD_IMPL: printf("%sAdvSIMD", SEP_STR); break; case ID_AA64PFR0_ADV_SIMD_HP: printf("%sAdvSIMD+HP", SEP_STR); break; default: printf("%sUnknown AdvSIMD", SEP_STR); break; } switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_FP_NONE: break; case ID_AA64PFR0_FP_IMPL: printf("%sFloat", SEP_STR); break; case ID_AA64PFR0_FP_HP: printf("%sFloat+HP", SEP_STR); break; default: printf("%sUnknown Float", SEP_STR); break; } switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_EL3_NONE: printf("%sNo EL3", SEP_STR); break; case ID_AA64PFR0_EL3_64: printf("%sEL3", SEP_STR); break; case ID_AA64PFR0_EL3_64_32: printf("%sEL3 32", SEP_STR); break; default: printf("%sUnknown EL3", SEP_STR); break; } switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_EL2_NONE: printf("%sNo EL2", SEP_STR); break; case ID_AA64PFR0_EL2_64: printf("%sEL2", SEP_STR); break; case ID_AA64PFR0_EL2_64_32: printf("%sEL2 32", SEP_STR); break; default: printf("%sUnknown EL2", SEP_STR); break; } switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_EL1_64: printf("%sEL1", SEP_STR); break; case ID_AA64PFR0_EL1_64_32: printf("%sEL1 32", SEP_STR); break; default: printf("%sUnknown EL1", SEP_STR); break; } switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_EL0_64: printf("%sEL0", SEP_STR); break; case ID_AA64PFR0_EL0_64_32: printf("%sEL0 32", SEP_STR); break; default: printf("%sUnknown EL0", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0) printf("%s%#lx", SEP_STR, 
cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK); printf(">\n"); } /* AArch64 Processor Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) { printf(" Processor Features 1 = <%#lx>\n", cpu_desc[cpu].id_aa64pfr1); } /* AArch64 Memory Model Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) { printed = 0; printf(" Memory Model Features 0 = <"); switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_TGRAN4_NONE: break; case ID_AA64MMFR0_TGRAN4_IMPL: printf("%s4k Granule", SEP_STR); break; default: printf("%sUnknown 4k Granule", SEP_STR); break; } switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_TGRAN16_NONE: break; case ID_AA64MMFR0_TGRAN16_IMPL: printf("%s16k Granule", SEP_STR); break; default: printf("%sUnknown 16k Granule", SEP_STR); break; } switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_TGRAN64_NONE: break; case ID_AA64MMFR0_TGRAN64_IMPL: printf("%s64k Granule", SEP_STR); break; default: printf("%sUnknown 64k Granule", SEP_STR); break; } switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_BIGEND_FIXED: break; case ID_AA64MMFR0_BIGEND_MIXED: printf("%sMixedEndian", SEP_STR); break; default: printf("%sUnknown Endian switching", SEP_STR); break; } switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_BIGEND_EL0_FIXED: break; case ID_AA64MMFR0_BIGEND_EL0_MIXED: printf("%sEL0 MixEndian", SEP_STR); break; default: printf("%sUnknown EL0 Endian switching", SEP_STR); break; } switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_S_NS_MEM_NONE: break; case ID_AA64MMFR0_S_NS_MEM_DISTINCT: printf("%sS/NS Mem", SEP_STR); break; default: printf("%sUnknown S/NS Mem", SEP_STR); break; } switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_ASID_BITS_8: printf("%s8bit ASID", SEP_STR); break; case ID_AA64MMFR0_ASID_BITS_16: printf("%s16bit ASID", SEP_STR); break; default: printf("%sUnknown ASID", SEP_STR); break; } switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) { case ID_AA64MMFR0_PA_RANGE_4G: printf("%s4GB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_64G: printf("%s64GB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_1T: printf("%s1TB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_4T: printf("%s4TB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_16T: printf("%s16TB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_256T: printf("%s256TB PA", SEP_STR); break; case ID_AA64MMFR0_PA_RANGE_4P: printf("%s4PB PA", SEP_STR); break; default: printf("%sUnknown PA Range", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0) printf("%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK); printf(">\n"); } /* AArch64 Memory Model Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) { printed = 0; printf(" Memory Model Features 1 = <"); switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_XNX_NONE: break; case ID_AA64MMFR1_XNX_IMPL: printf("%sEL2 XN", SEP_STR); break; default: printf("%sUnknown XNX", SEP_STR); break; } switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_SPEC_SEI_NONE: break; case ID_AA64MMFR1_SPEC_SEI_IMPL: printf("%sSpecSEI", SEP_STR); break; default: printf("%sUnknown SpecSEI", SEP_STR); break; } switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_PAN_NONE: break; case ID_AA64MMFR1_PAN_IMPL: printf("%sPAN", 
SEP_STR); break; case ID_AA64MMFR1_PAN_ATS1E1: printf("%sPAN+AT", SEP_STR); break; default: printf("%sUnknown PAN", SEP_STR); break; } switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_LO_NONE: break; case ID_AA64MMFR1_LO_IMPL: printf("%sLO", SEP_STR); break; default: printf("%sUnknown LO", SEP_STR); break; } switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_HPDS_NONE: break; case ID_AA64MMFR1_HPDS_HPD: printf("%sHPDS", SEP_STR); break; case ID_AA64MMFR1_HPDS_TTPBHA: printf("%sTTPBHA", SEP_STR); break; default: printf("%sUnknown HPDS", SEP_STR); break; } switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_VH_NONE: break; case ID_AA64MMFR1_VH_IMPL: printf("%sVHE", SEP_STR); break; default: printf("%sUnknown VHE", SEP_STR); break; } switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_VMIDBITS_8: break; case ID_AA64MMFR1_VMIDBITS_16: printf("%s16 VMID bits", SEP_STR); break; default: printf("%sUnknown VMID bits", SEP_STR); break; } switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) { case ID_AA64MMFR1_HAFDBS_NONE: break; case ID_AA64MMFR1_HAFDBS_AF: printf("%sAF", SEP_STR); break; case ID_AA64MMFR1_HAFDBS_AF_DBS: printf("%sAF+DBS", SEP_STR); break; default: printf("%sUnknown Hardware update AF/DBS", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0) printf("%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK); printf(">\n"); } /* AArch64 Memory Model Feature Register 2 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) { printed = 0; printf(" Memory Model Features 2 = <"); switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_NV_NONE: break; case ID_AA64MMFR2_NV_IMPL: printf("%sNestedVirt", SEP_STR); break; default: printf("%sUnknown NestedVirt", SEP_STR); break; } switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_CCIDX_32: printf("%s32b CCIDX", SEP_STR); break; case ID_AA64MMFR2_CCIDX_64: printf("%s64b CCIDX", SEP_STR); break; default: printf("%sUnknown CCIDX", SEP_STR); break; } switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_VA_RANGE_48: printf("%s48b VA", SEP_STR); break; case ID_AA64MMFR2_VA_RANGE_52: printf("%s52b VA", SEP_STR); break; default: printf("%sUnknown VA Range", SEP_STR); break; } switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_IESB_NONE: break; case ID_AA64MMFR2_IESB_IMPL: printf("%sIESB", SEP_STR); break; default: printf("%sUnknown IESB", SEP_STR); break; } switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_LSM_NONE: break; case ID_AA64MMFR2_LSM_IMPL: printf("%sLSM", SEP_STR); break; default: printf("%sUnknown LSM", SEP_STR); break; } switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_UAO_NONE: break; case ID_AA64MMFR2_UAO_IMPL: printf("%sUAO", SEP_STR); break; default: printf("%sUnknown UAO", SEP_STR); break; } switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) { case ID_AA64MMFR2_CNP_NONE: break; case ID_AA64MMFR2_CNP_IMPL: printf("%sCnP", SEP_STR); break; default: printf("%sUnknown CnP", SEP_STR); break; } if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0) printf("%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK); printf(">\n"); } /* AArch64 Debug Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) { printed = 0; printf(" Debug Features 0 = <"); switch(ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) { case 
ID_AA64DFR0_PMS_VER_NONE: break; case ID_AA64DFR0_PMS_VER_V1: printf("%sSPE v1", SEP_STR); break; default: printf("%sUnknown SPE", SEP_STR); break; } printf("%s%lu CTX Breakpoints", SEP_STR, ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0)); printf("%s%lu Watchpoints", SEP_STR, ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0)); printf("%s%lu Breakpoints", SEP_STR, ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0)); switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) { case ID_AA64DFR0_PMU_VER_NONE: break; case ID_AA64DFR0_PMU_VER_3: printf("%sPMUv3", SEP_STR); break; case ID_AA64DFR0_PMU_VER_3_1: printf("%sPMUv3+16 bit evtCount", SEP_STR); break; case ID_AA64DFR0_PMU_VER_IMPL: printf("%sImplementation defined PMU", SEP_STR); break; default: printf("%sUnknown PMU", SEP_STR); break; } switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) { case ID_AA64DFR0_TRACE_VER_NONE: break; case ID_AA64DFR0_TRACE_VER_IMPL: printf("%sTrace", SEP_STR); break; default: printf("%sUnknown Trace", SEP_STR); break; } switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) { case ID_AA64DFR0_DEBUG_VER_8: printf("%sDebug v8", SEP_STR); break; case ID_AA64DFR0_DEBUG_VER_8_VHE: printf("%sDebug v8+VHE", SEP_STR); break; case ID_AA64DFR0_DEBUG_VER_8_2: printf("%sDebug v8.2", SEP_STR); break; default: printf("%sUnknown Debug", SEP_STR); break; } if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK) printf("%s%#lx", SEP_STR, cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK); printf(">\n"); } /* AArch64 Memory Model Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) { printf(" Debug Features 1 = <%#lx>\n", cpu_desc[cpu].id_aa64dfr1); } /* AArch64 Auxiliary Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) { printf(" Auxiliary Features 0 = <%#lx>\n", cpu_desc[cpu].id_aa64afr0); } /* AArch64 Auxiliary Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) { printf(" Auxiliary Features 1 = <%#lx>\n", cpu_desc[cpu].id_aa64afr1); } #undef SEP_STR } void identify_cpu(void) { u_int midr; u_int impl_id; u_int part_id; u_int cpu; size_t i; const struct cpu_parts *cpu_partsp = NULL; cpu = PCPU_GET(cpuid); midr = get_midr(); /* * Store midr to pcpu to allow fast reading * from EL0, EL1 and assembly code. 
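 * As an illustrative decode: a Cavium ThunderX pass 1.1 core reads
 * MIDR_EL1 as 0x430f0a11, i.e. Implementer[31:24] = 0x43
 * (CPU_IMPL_CAVIUM), Variant[23:20] = 0, PartNum[15:4] = 0x0a1
 * (CPU_PART_THUNDERX) and Revision[3:0] = 1, the fields consulted by
 * CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1.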
*/ PCPU_SET(midr, midr); impl_id = CPU_IMPL(midr); for (i = 0; i < nitems(cpu_implementers); i++) { if (impl_id == cpu_implementers[i].impl_id || cpu_implementers[i].impl_id == 0) { cpu_desc[cpu].cpu_impl = impl_id; cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name; cpu_partsp = cpu_implementers[i].cpu_parts; break; } } part_id = CPU_PART(midr); for (i = 0; &cpu_partsp[i] != NULL; i++) { if (part_id == cpu_partsp[i].part_id || cpu_partsp[i].part_id == 0) { cpu_desc[cpu].cpu_part_num = part_id; cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name; break; } } cpu_desc[cpu].cpu_revision = CPU_REV(midr); cpu_desc[cpu].cpu_variant = CPU_VAR(midr); /* Save affinity for current CPU */ cpu_desc[cpu].mpidr = get_mpidr(); CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK; cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1); cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1); cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1); cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1); cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1); cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1); cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1); cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1); cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1); if (cpu != 0) { /* * This code must run on one cpu at a time, but we are * not scheduling on the current core so implement a * simple spinlock. */ while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0) __asm __volatile("wfe" ::: "memory"); switch (cpu_aff_levels) { case 0: if (CPU_AFF0(cpu_desc[cpu].mpidr) != CPU_AFF0(cpu_desc[0].mpidr)) cpu_aff_levels = 1; /* FALLTHROUGH */ case 1: if (CPU_AFF1(cpu_desc[cpu].mpidr) != CPU_AFF1(cpu_desc[0].mpidr)) cpu_aff_levels = 2; /* FALLTHROUGH */ case 2: if (CPU_AFF2(cpu_desc[cpu].mpidr) != CPU_AFF2(cpu_desc[0].mpidr)) cpu_aff_levels = 3; /* FALLTHROUGH */ case 3: if (CPU_AFF3(cpu_desc[cpu].mpidr) != CPU_AFF3(cpu_desc[0].mpidr)) cpu_aff_levels = 4; break; } if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0) cpu_print_regs |= PRINT_ID_AA64_AFR0; if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1) cpu_print_regs |= PRINT_ID_AA64_AFR1; if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0) cpu_print_regs |= PRINT_ID_AA64_DFR0; if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1) cpu_print_regs |= PRINT_ID_AA64_DFR1; if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0) cpu_print_regs |= PRINT_ID_AA64_ISAR0; if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1) cpu_print_regs |= PRINT_ID_AA64_ISAR1; if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0) cpu_print_regs |= PRINT_ID_AA64_MMFR0; if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1) cpu_print_regs |= PRINT_ID_AA64_MMFR1; if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2) cpu_print_regs |= PRINT_ID_AA64_MMFR2; if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0) cpu_print_regs |= PRINT_ID_AA64_PFR0; if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1) cpu_print_regs |= PRINT_ID_AA64_PFR1; /* Wake up the other CPUs */ atomic_store_rel_int(&ident_lock, 0); __asm __volatile("sev" ::: "memory"); } } Index: head/sys/arm64/cavium/thunder_pcie_common.c =================================================================== --- head/sys/arm64/cavium/thunder_pcie_common.c (revision 335051) +++ head/sys/arm64/cavium/thunder_pcie_common.c (revision 335052) @@ -1,210 +1,210 @@ /*- * Copyright (c) 2015 The FreeBSD 
Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Common PCIe functions for Cavium Thunder SOC */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #include #endif #include #include #include #include #include #include #ifdef FDT #include #endif #include "thunder_pcie_common.h" MALLOC_DEFINE(M_THUNDER_PCIE, "Thunder PCIe driver", "Thunder PCIe driver memory"); #define THUNDER_CFG_BASE_TO_ECAM(x) ((((x) >> 36UL) & 0x3) | (((x) >> 42UL) & 0x4)) uint32_t range_addr_is_pci(struct pcie_range *ranges, uint64_t addr, uint64_t size) { struct pcie_range *r; int tuple; for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { r = &ranges[tuple]; if (addr >= r->pci_base && addr < (r->pci_base + r->size) && size < r->size) { /* Address is within PCI range */ return (1); } } /* Address is outside PCI range */ return (0); } uint32_t range_addr_is_phys(struct pcie_range *ranges, uint64_t addr, uint64_t size) { struct pcie_range *r; int tuple; for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { r = &ranges[tuple]; if (addr >= r->phys_base && addr < (r->phys_base + r->size) && size < r->size) { /* Address is within Physical range */ return (1); } } /* Address is outside Physical range */ return (0); } uint64_t range_addr_phys_to_pci(struct pcie_range *ranges, uint64_t phys_addr) { struct pcie_range *r; uint64_t offset; int tuple; /* Find physical address corresponding to given bus address */ for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { r = &ranges[tuple]; if (phys_addr >= r->phys_base && phys_addr < (r->phys_base + r->size)) { /* Given phys addr is in this range. * Translate phys addr to bus addr. */ offset = phys_addr - r->phys_base; return (r->pci_base + offset); } } return (0); } uint64_t range_addr_pci_to_phys(struct pcie_range *ranges, uint64_t pci_addr) { struct pcie_range *r; uint64_t offset; int tuple; /* Find physical address corresponding to given bus address */ for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { r = &ranges[tuple]; if (pci_addr >= r->pci_base && pci_addr < (r->pci_base + r->size)) { /* Given pci addr is in this range. 
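 * (Hypothetical numbers: with an entry of pci_base 0x0, phys_base
 * 0x848000000000 and size 0x10000000, pci addr 0x1000 yields offset
 * 0x1000 and phys addr 0x848000001000.)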
int
thunder_pcie_identify_ecam(device_t dev, int *ecam)
{
	rman_res_t start;

	/* Check if we're running on Cavium ThunderX */
	if (!CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK,
-	    CPU_IMPL_CAVIUM, CPU_PART_THUNDER, 0, 0))
+	    CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0))
		return (EINVAL);

	start = bus_get_resource_start(dev, SYS_RES_MEMORY, 0);
	*ecam = THUNDER_CFG_BASE_TO_ECAM(start);

	device_printf(dev, "ThunderX quirk, setting ECAM to %d\n", *ecam);

	return (0);
}

#ifdef THUNDERX_PASS_1_1_ERRATA
struct resource *
thunder_pcie_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	pci_addr_t map, testval;

	/*
	 * If Enhanced Allocation is not used, we can't allocate any random
	 * range. All internal devices have a hardcoded place where they can
	 * be located within PCI address space. Fortunately, we can read
	 * this value from BAR.
	 */
	if (((type == SYS_RES_IOPORT) || (type == SYS_RES_MEMORY)) &&
	    RMAN_IS_DEFAULT_RANGE(start, end)) {
		/* Read BAR manually to get resource address and size */
		pci_read_bar(child, *rid, &map, &testval, NULL);

		/* Mask the information bits */
		if (PCI_BAR_MEM(map))
			map &= PCIM_BAR_MEM_BASE;
		else
			map &= PCIM_BAR_IO_BASE;

		if (PCI_BAR_MEM(testval))
			testval &= PCIM_BAR_MEM_BASE;
		else
			testval &= PCIM_BAR_IO_BASE;

		start = map;
		end = start + count - 1;
	}

	return (pci_host_generic_core_alloc_resource(dev, child, type, rid,
	    start, end, count, flags));
}
#endif
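[Editor's sketch] THUNDER_CFG_BASE_TO_ECAM() recovers a small ECAM index from the configuration base address: bits 37:36 become the low two index bits and bit 44 becomes the third (the `>> 42` followed by `& 0x4` selects bit 44). A tiny worked example with a hypothetical base address:

#include <stdint.h>
#include <stdio.h>

/* Same bit packing as THUNDER_CFG_BASE_TO_ECAM(). */
#define	DEMO_CFG_BASE_TO_ECAM(x)	\
	((((x) >> 36UL) & 0x3) | (((x) >> 42UL) & 0x4))

int
main(void)
{
	/* Hypothetical base with bit 44 and bit 36 set:
	 * ((base >> 36) & 0x3) == 1, ((base >> 42) & 0x4) == 4,
	 * so the ECAM index is 5. */
	uint64_t base = (1ULL << 44) | (1ULL << 36);

	printf("ecam index: %d\n", (int)DEMO_CFG_BASE_TO_ECAM(base));
	return (0);
}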
*/ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_pcie_common.h" #include "pcib_if.h" #ifdef THUNDERX_PASS_1_1_ERRATA static struct resource * thunder_pcie_fdt_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); #endif static int thunder_pcie_fdt_attach(device_t); static int thunder_pcie_fdt_probe(device_t); static int thunder_pcie_fdt_get_id(device_t, device_t, enum pci_id_type, uintptr_t *); static device_method_t thunder_pcie_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, thunder_pcie_fdt_probe), DEVMETHOD(device_attach, thunder_pcie_fdt_attach), #ifdef THUNDERX_PASS_1_1_ERRATA DEVMETHOD(bus_alloc_resource, thunder_pcie_fdt_alloc_resource), #endif /* pcib interface */ DEVMETHOD(pcib_get_id, thunder_pcie_fdt_get_id), /* End */ DEVMETHOD_END }; DEFINE_CLASS_1(pcib, thunder_pcie_fdt_driver, thunder_pcie_fdt_methods, sizeof(struct generic_pcie_fdt_softc), generic_pcie_fdt_driver); static devclass_t thunder_pcie_fdt_devclass; DRIVER_MODULE(thunder_pcib, simplebus, thunder_pcie_fdt_driver, thunder_pcie_fdt_devclass, 0, 0); DRIVER_MODULE(thunder_pcib, ofwbus, thunder_pcie_fdt_driver, thunder_pcie_fdt_devclass, 0, 0); static int thunder_pcie_fdt_probe(device_t dev) { /* Check if we're running on Cavium ThunderX */ if (!CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, - CPU_IMPL_CAVIUM, CPU_PART_THUNDER, 0, 0)) + CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0)) return (ENXIO); if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "pci-host-ecam-generic") || ofw_bus_is_compatible(dev, "cavium,thunder-pcie") || ofw_bus_is_compatible(dev, "cavium,pci-host-thunder-ecam")) { device_set_desc(dev, "Cavium Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int thunder_pcie_fdt_attach(device_t dev) { struct generic_pcie_fdt_softc *sc; sc = device_get_softc(dev); thunder_pcie_identify_ecam(dev, &sc->base.ecam); sc->base.coherent = 1; return (pci_host_generic_attach(dev)); } static int thunder_pcie_fdt_get_id(device_t pci, device_t child, enum pci_id_type type, uintptr_t *id) { phandle_t node; int bsf; if (type != PCI_ID_MSI) return (pcib_get_id(pci, child, type, id)); node = ofw_bus_get_node(pci); if (OF_hasprop(node, "msi-map")) return (generic_pcie_get_id(pci, child, type, id)); bsf = pci_get_rid(child); *id = (pci_get_domain(child) << PCI_RID_DOMAIN_SHIFT) | bsf; return (0); } #ifdef THUNDERX_PASS_1_1_ERRATA static struct resource * thunder_pcie_fdt_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { if ((int)ofw_bus_get_node(child) > 0) return (pci_host_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); return (thunder_pcie_alloc_resource(dev, child, type, rid, start, end, count, flags)); } #endif Index: head/sys/arm64/include/cpu.h =================================================================== --- head/sys/arm64/include/cpu.h (revision 335051) +++ head/sys/arm64/include/cpu.h (revision 335052) @@ -1,197 +1,197 @@ /*- * Copyright (c) 1990 The Regents of the University of California. * Copyright (c) 2014-2016 The FreeBSD Foundation * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. 
Index: head/sys/arm64/include/cpu.h
===================================================================
--- head/sys/arm64/include/cpu.h	(revision 335051)
+++ head/sys/arm64/include/cpu.h	(revision 335052)
@@ -1,197 +1,197 @@
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2014-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)cpu.h	5.4 (Berkeley) 5/9/91
 * from: FreeBSD: src/sys/i386/include/cpu.h,v 1.62 2001/06/29
 * $FreeBSD$
 */

#ifndef _MACHINE_CPU_H_
#define	_MACHINE_CPU_H_

#include
#include
#include

#define	TRAPF_PC(tfp)		((tfp)->tf_lr)
#define	TRAPF_USERMODE(tfp)	(((tfp)->tf_spsr & PSR_M_MASK) == PSR_M_EL0t)

#define	cpu_getstack(td)	((td)->td_frame->tf_sp)
#define	cpu_setstack(td, sp)	((td)->td_frame->tf_sp = (sp))
#define	cpu_spinwait()		__asm __volatile("yield" ::: "memory")

/* Extract CPU affinity levels 0-3 */
#define	CPU_AFF0(mpidr)	(u_int)(((mpidr) >> 0) & 0xff)
#define	CPU_AFF1(mpidr)	(u_int)(((mpidr) >> 8) & 0xff)
#define	CPU_AFF2(mpidr)	(u_int)(((mpidr) >> 16) & 0xff)
#define	CPU_AFF3(mpidr)	(u_int)(((mpidr) >> 32) & 0xff)
#define	CPU_AFF0_MASK	0xffUL
#define	CPU_AFF1_MASK	0xff00UL
#define	CPU_AFF2_MASK	0xff0000UL
#define	CPU_AFF3_MASK	0xff00000000UL
/* Mask affinity fields in MPIDR_EL1 */
#define	CPU_AFF_MASK	(CPU_AFF0_MASK | CPU_AFF1_MASK | \
			 CPU_AFF2_MASK | CPU_AFF3_MASK)

#ifdef _KERNEL

#define	CPU_IMPL_ARM		0x41
#define	CPU_IMPL_BROADCOM	0x42
#define	CPU_IMPL_CAVIUM		0x43
#define	CPU_IMPL_DEC		0x44
#define	CPU_IMPL_INFINEON	0x49
#define	CPU_IMPL_FREESCALE	0x4D
#define	CPU_IMPL_NVIDIA		0x4E
#define	CPU_IMPL_APM		0x50
#define	CPU_IMPL_QUALCOMM	0x51
#define	CPU_IMPL_MARVELL	0x56
#define	CPU_IMPL_INTEL		0x69

/* ARM Part numbers */
#define	CPU_PART_FOUNDATION	0xD00
#define	CPU_PART_CORTEX_A35	0xD04
#define	CPU_PART_CORTEX_A53	0xD03
#define	CPU_PART_CORTEX_A55	0xD05
#define	CPU_PART_CORTEX_A57	0xD07
#define	CPU_PART_CORTEX_A72	0xD08
#define	CPU_PART_CORTEX_A73	0xD09
#define	CPU_PART_CORTEX_A75	0xD0A

/* Cavium Part numbers */
-#define	CPU_PART_THUNDER	0x0A1
+#define	CPU_PART_THUNDERX	0x0A1
#define	CPU_PART_THUNDERX_81XX	0x0A2
#define	CPU_PART_THUNDERX_83XX	0x0A3
#define	CPU_PART_THUNDERX2	0x0AF

-#define	CPU_REV_THUNDER_1_0	0x00
-#define	CPU_REV_THUNDER_1_1	0x01
+#define	CPU_REV_THUNDERX_1_0	0x00
+#define	CPU_REV_THUNDERX_1_1	0x01
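[Editor's sketch] The CPU_AFF0..CPU_AFF3 accessors above slice the four affinity bytes out of MPIDR_EL1; note that Aff3 lives at bits 39:32, not adjacent to Aff2, which is why CPU_AFF3 shifts by 32 while the masks jump from 0xff0000UL to 0xff00000000UL. A small user-space sketch decoding a hypothetical MPIDR value with the same shifts and masks:

#include <stdint.h>
#include <stdio.h>

/* Same field extraction as the CPU_AFF0..CPU_AFF3 macros. */
#define	DEMO_AFF0(mpidr)	(unsigned)(((mpidr) >> 0) & 0xff)
#define	DEMO_AFF1(mpidr)	(unsigned)(((mpidr) >> 8) & 0xff)
#define	DEMO_AFF2(mpidr)	(unsigned)(((mpidr) >> 16) & 0xff)
#define	DEMO_AFF3(mpidr)	(unsigned)(((mpidr) >> 32) & 0xff)

int
main(void)
{
	/* Hypothetical MPIDR: Aff3=1, Aff2=0, Aff1=2, Aff0=3. */
	uint64_t mpidr = (1ULL << 32) | (2ULL << 8) | 3;

	printf("aff3=%u aff2=%u aff1=%u aff0=%u\n",
	    DEMO_AFF3(mpidr), DEMO_AFF2(mpidr),
	    DEMO_AFF1(mpidr), DEMO_AFF0(mpidr));
	return (0);
}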
& 0xff) #define CPU_PART(midr) (((midr) >> 4) & 0xfff) #define CPU_VAR(midr) (((midr) >> 20) & 0xf) #define CPU_REV(midr) (((midr) >> 0) & 0xf) #define CPU_IMPL_TO_MIDR(val) (((val) & 0xff) << 24) #define CPU_PART_TO_MIDR(val) (((val) & 0xfff) << 4) #define CPU_VAR_TO_MIDR(val) (((val) & 0xf) << 20) #define CPU_REV_TO_MIDR(val) (((val) & 0xf) << 0) #define CPU_IMPL_MASK (0xff << 24) #define CPU_PART_MASK (0xfff << 4) #define CPU_VAR_MASK (0xf << 20) #define CPU_REV_MASK (0xf << 0) #define CPU_ID_RAW(impl, part, var, rev) \ (CPU_IMPL_TO_MIDR((impl)) | \ CPU_PART_TO_MIDR((part)) | CPU_VAR_TO_MIDR((var)) | \ CPU_REV_TO_MIDR((rev))) #define CPU_MATCH(mask, impl, part, var, rev) \ (((mask) & PCPU_GET(midr)) == \ ((mask) & CPU_ID_RAW((impl), (part), (var), (rev)))) #define CPU_MATCH_RAW(mask, devid) \ (((mask) & PCPU_GET(midr)) == ((mask) & (devid))) /* * Chip-specific errata. This defines are intended to be * booleans used within if statements. When an appropriate * kernel option is disabled, these defines must be defined * as 0 to allow the compiler to remove a dead code thus * produce better optimized kernel image. */ /* * Vendor: Cavium * Chip: ThunderX * Revision(s): Pass 1.0, Pass 1.1 */ #ifdef THUNDERX_PASS_1_1_ERRATA -#define CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 \ +#define CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 \ (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \ - CPU_IMPL_CAVIUM, CPU_PART_THUNDER, 0, CPU_REV_THUNDER_1_0) || \ + CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_0) || \ CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \ - CPU_IMPL_CAVIUM, CPU_PART_THUNDER, 0, CPU_REV_THUNDER_1_1)) + CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_1)) #else -#define CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 0 +#define CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 0 #endif extern char btext[]; extern char etext[]; extern uint64_t __cpu_affinity[]; void cpu_halt(void) __dead2; void cpu_reset(void) __dead2; void fork_trampoline(void); void identify_cpu(void); void install_cpu_errata(void); void print_cpu_features(u_int); void swi_vm(void *v); #define CPU_AFFINITY(cpu) __cpu_affinity[(cpu)] #define CPU_CURRENT_SOCKET \ (CPU_AFF2(CPU_AFFINITY(PCPU_GET(cpuid)))) static __inline uint64_t get_cyclecount(void) { uint64_t ret; ret = READ_SPECIALREG(cntvct_el0); return (ret); } #define ADDRESS_TRANSLATE_FUNC(stage) \ static inline uint64_t \ arm64_address_translate_ ##stage (uint64_t addr) \ { \ uint64_t ret; \ \ __asm __volatile( \ "at " __STRING(stage) ", %1 \n" \ "mrs %0, par_el1" : "=r"(ret) : "r"(addr)); \ \ return (ret); \ } ADDRESS_TRANSLATE_FUNC(s1e0r) ADDRESS_TRANSLATE_FUNC(s1e0w) ADDRESS_TRANSLATE_FUNC(s1e1r) ADDRESS_TRANSLATE_FUNC(s1e1w) #endif #endif /* !_MACHINE_CPU_H_ */